diff --git a/README.md b/README.md
index eba611d..dd11324 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,7 @@
 - ✅ 讯飞星火大模型/Spark
 - ✅ 腾讯混元大模型/Hunyuan
 - ✅ DeepSeek
+- ✅ 智谱/ChatGLM
 
 ### Install
 ```shell
diff --git a/chatchat/cli/config.py b/chatchat/cli/config.py
index 440d9b1..154c132 100644
--- a/chatchat/cli/config.py
+++ b/chatchat/cli/config.py
@@ -7,6 +7,7 @@
     'tencent': ['secret_id', 'secret_key'],
     'xunfei': ['app_id', 'api_key', 'api_secret'],
     'deepseek': ['api_key'],
+    'zhipu': ['api_key'],
 }
 
 def supported_platforms():
@@ -20,24 +21,19 @@ def parse_config(args):
     if args.list:
         supported_platforms()
     elif args.cfgs:
-        cfg = args.cfgs.split('.')
+        cfg = args.cfgs.split('=')
+        plat_key = cfg[0].split('.')
         usage = 'Usage: chatchat config platform.key=value'
-        if len(cfg) != 2:
+        if len(cfg) != 2 or len(plat_key) != 2:
             print(usage)
             return
-        plat = cfg[0]
+        (plat, key), value = plat_key, cfg[1]
         if plat not in __platform_config__:
             print(f'Platform <{plat}> is currently NOT supported!')
             supported_platforms()
             return
-        kv = cfg[1].split('=')
-        if len(kv) != 2:
-            print(usage)
-            return
-        key, value = kv
-
         if key not in __platform_config__[plat]:
             print(f'Platform <{plat}> do NOT has secret key <{key}>!\nYou can set the following keys:')
             for key in __platform_config__[plat]:
diff --git a/chatchat/zhipu.py b/chatchat/zhipu.py
new file mode 100644
index 0000000..09ae29b
--- /dev/null
+++ b/chatchat/zhipu.py
@@ -0,0 +1,75 @@
+from chatchat.base import Base
+import httpx
+
+class Completion(Base):
+    def __init__(self, model='glm-4-flash', proxy=None, timeout=None):
+        super().__init__()
+
+        plat = 'zhipu'
+        self.verify_secret_data(plat, ('api_key',))
+        self.jdata = self.secret_data[plat]
+
+        self.model_type = set([
+            'glm-4-0520',
+            'glm-4',
+            'glm-4-air',
+            'glm-4-airx',
+            'glm-4-flash',
+            'glm-4v',
+            'glm-3-turbo',
+        ])
+
+        if model not in self.model_type:
+            raise RuntimeError(f'unsupported model <{model}>, supported models: {list(self.model_type)}')
+        self.model = model
+
+        self.url = 'https://open.bigmodel.cn/api/paas/v4/chat/completions'
+        self.client = httpx.Client(proxy=proxy, timeout=timeout)
+        self.headers = {
+            'Content-Type': 'application/json',
+            'Accept': 'application/json',
+            'Authorization': f'Bearer {self.jdata["api_key"]}',
+        }
+
+    def create(self, message, max_tokens=1024, temperature=0.95, top_p=0.7, stream=False):
+        jmsg = {
+            'model': self.model,
+            'messages': [{
+                'role': 'user',
+                'content': message,
+            }],
+            'max_tokens': max_tokens,
+            'temperature': temperature,
+            'top_p': top_p,
+            'stream': stream,
+        }
+        r = self.client.post(self.url, headers=self.headers, json=jmsg)
+        return r.json()
+
+class Chat(Completion):
+    def __init__(self, model='glm-4-flash', history=None, proxy=None, timeout=None):
+        super().__init__(model=model, proxy=proxy, timeout=timeout)
+        self.history = history if history is not None else []
+
+    def chat(self, message, max_tokens=1024, temperature=0.95, top_p=0.7, stream=False):
+        self.history.append({
+            'role': 'user',
+            'content': message,
+        })
+
+        jmsg = {
+            'model': self.model,
+            'messages': self.history,
+            'max_tokens': max_tokens,
+            'temperature': temperature,
+            'top_p': top_p,
+            'stream': stream,
+        }
+        r = self.client.post(self.url, headers=self.headers, json=jmsg)
+        r = r.json()
+
+        if 'choices' in r:
+            assistant_output = r['choices'][0]['message']
+            self.history.append(assistant_output)
+
+        return r
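For reference, a minimal sketch of the raw HTTP call that the new `Completion.create()` wraps, using only the endpoint, headers, and payload fields visible in the diff above. `YOUR_API_KEY` is a placeholder for the key that chatchat normally loads from its stored config:

```python
import httpx

# Endpoint and payload shape mirror chatchat/zhipu.py above.
url = 'https://open.bigmodel.cn/api/paas/v4/chat/completions'
headers = {
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    'Authorization': 'Bearer YOUR_API_KEY',  # placeholder, not a real key
}
payload = {
    'model': 'glm-4-flash',
    'messages': [{'role': 'user', 'content': 'Hi'}],
    'max_tokens': 64,
}
r = httpx.post(url, headers=headers, json=payload)
print(r.json())
```

`Chat` builds on this by keeping every user message and assistant reply in `self.history` and resending the full list each turn, which is how the API gets conversational context.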
diff --git a/examples/zhipu_chat.py b/examples/zhipu_chat.py
new file mode 100644
index 0000000..f31f176
--- /dev/null
+++ b/examples/zhipu_chat.py
@@ -0,0 +1,8 @@
+from chatchat.zhipu import Chat
+
+chat = Chat(model='glm-3-turbo')
+while True:
+    user = input('user: ')
+    r = chat.chat(user)
+    message = r['choices'][0]['message']
+    print(f"{message['role']}: {message['content']}")
diff --git a/examples/zhipu_completion.py b/examples/zhipu_completion.py
new file mode 100644
index 0000000..66d694f
--- /dev/null
+++ b/examples/zhipu_completion.py
@@ -0,0 +1,19 @@
+from chatchat.zhipu import Completion
+
+completion = Completion(model='glm-3-turbo')
+r = completion.create('Hi', max_tokens=64)
+# {
+#     'choices': [
+#         {
+#             'finish_reason': 'stop', 'index': 0, 'message':
+#             {
+#                 'content': "Hello 👋! I'm ChatGLM(智谱清言).",
+#                 'role': 'assistant'
+#             }
+#         }
+#     ],
+#     'created': 1111, 'id': '2222', 'model': 'glm-3-turbo', 'request_id': '8730203825971097305', 'usage': {
+#         'completion_tokens': 39, 'prompt_tokens': 6, 'total_tokens': 45
+#     }
+# }
+print(r)
diff --git a/setup.py b/setup.py
index effef3c..3800f45 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
 setup(
     name = 'chatchat',
     packages = find_packages(exclude=['examples']),
-    version = '0.1.6',
+    version = '0.1.7',
     license = 'GPL-2.0',
     description = 'Large Language Model API',
     author = 'JiauZhang',
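Before running either example, the zhipu key has to be configured once. Assuming the CLI entry point is installed as `chatchat` (as the usage string in `chatchat/cli/config.py` suggests), setup looks like:

```shell
chatchat config zhipu.api_key=YOUR_API_KEY  # placeholder key
python examples/zhipu_chat.py
```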