References:
https://github.com/lm-sys/FastChat
https://blog.csdn.net/qq128252/article/details/132759107
## Installation
pip3 install "fschat[model_worker,webui]"
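To confirm the install worked (fastchat exposes a __version__ attribute):
python3 -c "import fastchat; print(fastchat.__version__)"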
## Model download
Downloading from Hugging Face can be slow; you can download from ModelScope instead: https://modelscope.cn/my/overview

from modelscope.models import Model
model = Model.from_pretrained('ZhipuAI/chatglm2-6b', revision='v1.0.9')
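If you only need the weights on disk (to pass to --model-path) rather than a loaded model object, ModelScope's snapshot_download is an alternative; a minimal sketch (the cache_dir value here is just an assumed example):

from modelscope.hub.snapshot_download import snapshot_download

# Downloads the whole model repo and returns the local directory path.
model_dir = snapshot_download('ZhipuAI/chatglm2-6b', revision='v1.0.9', cache_dir='./')
print(model_dir)  # use this path as --model-path below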
1. Testing chatglm2-6b
python3 -m fastchat.serve.cli --model-path ./chatglm2-6b --num-gpus 2
Web UI usage
1) Start the controller
python3 -m fastchat.serve.controller
2) Start the model worker
python3 -m fastchat.serve.model_worker --model-path ./chatglm2-6b --num-gpus 2 --host=0.0.0.0 --port=21002
3) Start the web server
python3 -m fastchat.serve.gradio_web_server
Open the URL printed by the server in a browser to use the chat page (the Gradio server typically listens on port 7860 by default).
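If the page shows no model, you can check that the worker has registered with the controller; a sketch assuming the controller's default port 21001 (endpoint name from fastchat.serve.controller):

import requests

# Ask the controller which models are currently served by registered workers;
# the response should list "chatglm2-6b" once the worker is up.
resp = requests.post("http://localhost:21001/list_models")
print(resp.json())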
API service
1) Start the controller
python3 -m fastchat.serve.controller
2) Start the model worker
python3 -m fastchat.serve.model_worker --model-path ./chatglm2-6b --num-gpus 2 --host=0.0.0.0 --port=21002
3) Start the API server (in newer FastChat releases this module is named fastchat.serve.openai_api_server)
python3 -m fastchat.serve.api --host 0.0.0.0
4) Client access
import requests
import json

headers = {"Content-Type": "application/json"}
pload = {
    "model": "chatglm2-6b",
    "messages": [
        {"role": "system", "content": "AI专家"},
        {"role": "user", "content": "你AI小助手,小名昵称为小乐,你主要擅长是智慧城***过30个字"},
        {"role": "assistant", "content": "好的,小乐很乐意为你服务"},
        {"role": "user", "content": "你能做啥?"},
    ],
}
response = requests.post("http://192****:8000/v1/chat/completions", headers=headers, json=pload, stream=True)
print(response.text)
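Since this endpoint is OpenAI-compatible, the openai SDK can also be used as a client; a minimal sketch assuming the 0.x SDK (openai<1.0) and a server on port 8000:

import openai

openai.api_key = "EMPTY"  # FastChat does not check the key by default
openai.api_base = "http://localhost:8000/v1"  # adjust host/port to your server

completion = openai.ChatCompletion.create(
    model="chatglm2-6b",
    messages=[{"role": "user", "content": "你能做啥?"}],
)
print(completion.choices[0].message.content)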
Writing your own API and web pages
1) Start the controller
python3 -m fastchat.serve.controller
2) Start the model worker
python3 -m fastchat.serve.model_worker --model-path ./chatglm2-6b --num-gpus 2 --host=0.0.0.0 --port=21002
3) Start your own API client or web page
3-1. API access
import requests
import json

headers = {"Content-Type": "application/json"}
pload = {
    "model": "chatglm2-6b",
    "prompt": [
        ["Human", "你名字叫****更智慧,让世界更安全"],
        ["Assistant", "好的,小杰很乐意为你服务"],
        ["Human", "澳门一日游推荐"],
        ["Assistant", None],
    ],
    "stop": "###",
    "max_new_tokens": 512,
}
response = requests.post("http://192.1***:21002/worker_generate_stream", headers=headers, json=pload, stream=True, timeout=3)
# print(response.text)
# The worker streams NUL-separated JSON records; each record's "text" field
# holds the full output generated so far.
for chunk in response.iter_lines(chunk_size=1024, decode_unicode=False, delimiter=b"\0"):
    if chunk:
        # print(chunk.decode("utf-8"))
        data = json.loads(chunk.decode("utf-8"))
        print(data["text"])
        # print(data["text"].split(" "))
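As the slicing in stream_web.py below suggests, the streamed text in this setup echoes the latest user prompt ahead of the generated answer, so a client that only wants the answer has to strip the prompt prefix before displaying it.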
3-2. Web access
streamlit run stream_web.py
Contents of stream_web.py:
import json
# import torch
import streamlit as st
import requests
# from transformers import AutoModel, AutoTokenizer

st.set_page_config(page_title="小*智能")
st.title("小*智能")

def get_response(text):
    # Forward the conversation to the FastChat model worker as a streaming request.
    headers = {"Content-Type": "application/json"}
    pload = {
        "model": "chatglm2-6b",
        "prompt": text,
        "stop": "###",
        "max_new_tokens": 5000,
    }
    print("pload", pload)
    response = requests.post("http://192****4:21002/worker_generate_stream",
                             headers=headers, json=pload, stream=True)
    # print(response.text)
    return response

def clear_chat_history():
    del st.session_state.messages
    st.session_state.history1 = [st.session_state.history1[0]]  # keep the initial record

def init_chat_history():
    with st.chat_message("assistant", avatar='🤖'):
        st.markdown("您好,我是小*智能助手,很高兴为您服务🥰")
    if "messages" in st.session_state:
        for message in st.session_state.messages:
            avatar = '🧑💻' if message["role"] == "user" else '🤖'
            with st.chat_message(message["role"], avatar=avatar):
                st.markdown(message["content"])
    else:
        st.session_state.messages = []
    return st.session_state.messages

# Initialize variables
if 'history1' not in st.session_state:
    st.session_state.history1 = [["Human", "你名字****界更安全"],
                                 ["Assistant", "好的,小杰很乐意为你服务"]]

# Initialize session_state
if "enter_pressed" not in st.session_state:
    st.session_state.enter_pressed = False

def main():
    # model, tokenizer = init_model()
    messages = init_chat_history()
    print("history1:", st.session_state.history1)
    if prompt := st.chat_input("Shift + Enter 换行, Enter 发送"):
        with st.chat_message("user", avatar='🧑💻'):
            st.markdown(prompt)
        messages.append({"role": "user", "content": prompt})
        print(f"[user] {prompt}", flush=True)
        with st.chat_message("assistant", avatar='🤖'):
            placeholder = st.empty()
            st.session_state.history1.append(["Human", prompt])
            st.session_state.history1.append(["Assistant", None])
            print("history1:", st.session_state.history1)
            results = get_response(st.session_state.history1)
            for chunk in results.iter_lines(chunk_size=1024, decode_unicode=False, delimiter=b"\0"):
                if chunk:
                    # print(chunk.decode("utf-8"))
                    response = json.loads(chunk.decode("utf-8"))["text"]
                    # The worker echoes the prompt, so strip it before rendering.
                    placeholder.markdown(response[(len(prompt) + 1):])
                    print(prompt, response[(len(prompt) + 1):])
            st.session_state.history1[-1][1] = response[(len(prompt) + 1):]
        messages.append({"role": "assistant", "content": response[(len(prompt) + 1):]})
        print(json.dumps(messages, ensure_ascii=False), flush=True)
    st.button("清空对话", on_click=clear_chat_history)

if __name__ == "__main__":
    main()
2. Testing Baichuan2-13B-Chat
## Run command
python3 -m fastchat.serve.cli --model-path ./Baichuan2-13B-Chat --num-gpus 4
Errors encountered:
1) ValueError: Tokenizer class BaichuanTokenizer does not exist or is not currently imported.
2) Offload error: ValueError: The current device_map had weights offloaded to the disk. Please provide an offload_folder for them. An offload_folder also needs to be added.
Per the error messages, edit /site-packages/fastchat/serve/inference.py: add trust_remote_code=True to the model/tokenizer loading calls (fixes error 1), and add an offload_folder (fixes error 2).
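A minimal sketch of the kind of change involved, assuming inference.py loads the model through the standard transformers API (argument placement and surrounding code vary by FastChat version):

from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./Baichuan2-13B-Chat"
# trust_remote_code=True lets transformers import the custom BaichuanTokenizer
# shipped inside the model repo (fixes error 1); Baichuan ships only a slow
# tokenizer, hence use_fast=False.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
# offload_folder gives accelerate a place to spill weights that do not fit
# on the GPUs (fixes error 2).
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    device_map="auto",
    offload_folder="offload",
)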