From eaa404711ef3699445ea3beb5824778bc021f444 Mon Sep 17 00:00:00 2001 From: Jael Gu Date: Thu, 14 Sep 2023 10:52:26 +0800 Subject: [PATCH] Add files Signed-off-by: Jael Gu --- README.md | 93 +++++++++++++++++++++++- __init__.py | 5 ++ __pycache__/__init__.cpython-38.pyc | Bin 0 -> 330 bytes __pycache__/zhipuai_chat.cpython-38.pyc | Bin 0 -> 2098 bytes requirements.txt | 1 + zhipuai_chat.py | 85 ++++++++++++++++++++++ 6 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 __init__.py create mode 100644 __pycache__/__init__.cpython-38.pyc create mode 100644 __pycache__/zhipuai_chat.cpython-38.pyc create mode 100644 requirements.txt create mode 100644 zhipuai_chat.py diff --git a/README.md b/README.md index 76d5d4a..6a7e31d 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,93 @@ -# chatglm +# Zhipu AI + +*author: Jael* + +
+ +## Description 描述 + +This operator is implemented with [ChatGLM services from Zhipu AI](https://open.bigmodel.cn). +It directly returns the original response in dictionary without parsing. +Please note you will need [API Key](https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys) to access the service. + +LLM/ZhipuAI 利用了来自[智谱AI开放平台](https://open.bigmodel.cn)的大语言模型服务。该算子以字典的形式直接返回原始的模型回复。请注意,您需要[API Key](https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys)才能访问该服务。 + +
+ +## Code Example 代码示例 + +*Write a pipeline with explicit inputs/outputs name specifications:* + +```python +from towhee import pipe, ops + +p = ( + pipe.input('messages') + .map('messages', 'response', ops.LLM.ZhipuAI( + api_key=ZHIPUAI_API_KEY, + model_name='chatglm_130b', # or 'chatglm_6b' + temperature=0.5, + max_tokens=50, + )) + .output('response') +) + +messages=[ + {'system': '你是一个资深的软件工程师,善于回答关于科技项目的问题。'}, + {'question': 'Zilliz Cloud 是什么?', 'answer': 'Zilliz Cloud 是一种全托管的向量检索服务。'}, + {'question': '它和 Milvus 的关系是什么?'} + ] +response = p(messages).get()[0] +answer = response['choices'][0]['content'] +token_usage = response['usage'] +``` + +
+ +## Factory Constructor 接口说明 + +Create the operator via the following factory method: + +***LLM.ZhipuAI(api_key: str, model_name: str, \*\*kwargs)*** + +**Parameters:** + + +***api_key***: *str=None* + +The Zhipu AI API key in string, defaults to None. If None, it will use the environment variable `ZHIPUAI_API_KEY`. + +***model_name***: *str='chatglm_130b'* + +The model used in Zhipu AI service, defaults to 'chatglm_130b'. Visit Zhipu AI documentation for supported models. + +***\*\*kwargs*** + +Other ChatGLM parameters such as temperature, etc. + +
+ +## Interface 使用说明 + +The operator takes a piece of text in string as input. +It returns the answer in json. + +***\_\_call\_\_(txt)*** + +**Parameters:** + +***messages***: *list* + +​ A list of messages to set up chat. +Must be a list of dictionaries with key value from "system", "question", "answer". For example, [{"question": "a past question?", "answer": "a past answer."}, {"question": "current question?"}]. +It also accepts the original ChatGLM message format like [{"role": "user", "content": "a question?"}, {"role": "assistant", "content": "an answer."}] + +**Returns**: + +*response: dict* + + The original llm response in dictionary, including next answer and token usage. + +
+ diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..3f86b97 --- /dev/null +++ b/__init__.py @@ -0,0 +1,5 @@ +from .zhipuai_chat import ZhipuaiChat + + +def chatglm(*args, **kwargs): + return ZhipuaiChat(*args, **kwargs) diff --git a/__pycache__/__init__.cpython-38.pyc b/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5133b2eaa1f02901f079a7719ab3d3cf429d0729 GIT binary patch literal 330 zcmWIL<>g`kg12WTrQ`wW#~=4m62IcnwaUFkyw%p6oLbgIm|%p55&bFD@zz^7-|^%7=gMO{fa=+ znoPG?5{uG{Z?R>UgXonEMQlLfUmp6Q#i>Qb`njoj=~81L5CHdtUsj2$;1*t`e zCHX)(A0JdS;@dy!~rDz sG?}A#svs_pPX;=@2;@|-6xaz!1UpdT7Kcr4eoARhsvXGJV1sy=0j%>%5&!@I literal 0 HcmV?d00001 diff --git a/__pycache__/zhipuai_chat.cpython-38.pyc b/__pycache__/zhipuai_chat.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82b2330e6567e18d6b18ed0d44c58083afcadf0b GIT binary patch literal 2098 zcmZWq&2Jk;6rY*>u;bWi8X0Ixfo*OU$Oa)HgsQ4hTOeu(6;*{;sFsaqVsGO8=*&1# zto4QDz=;DFEkUSCEi$5;9Kl&Z=;Q%iR}@d)z<8d%_pB zb;4^eD6jLSQ#Y(Ha2k9WoF(4mty2;<#4=yu*G|a^4V&D7RoBgOI82IhoZKJAdUEq? 
z85giA$oB%+mThhYmLF_B-ZITTR)bL*eemJ?yE`3b8fnhOD9Ykgm|9#U(SbPbI}2V| zC+z+aNCg;5Rx~v^qnw^o%_66yC5hb9!RT=UbJ$LJE&P1z@zc$%2yaG@9)8pD4a=4B z2SSVN(9|X~b7c+sj2~4t82{iXmIKvsjjP0HPx?5lBxDn-Ux{47^w)OpsS;B4Qjrbz zlXx)hb#;C;6rz{UV@g>&+jIEDYV^*wmva$r2q!H=tYD z7m2q@eL86`AlkhULcHs#P%0h>)n`_C{#lSb(0!2l9z#jaNC|+ZRMUO7@9crI@0I}V z1;fhwh5baQj92ReUp}xr&en-to%tm@N4AxWJIDzarTxyi&!cFzl*U}(w0uRG|K|SKlGFAFh3A- z8T<-()0(M`^0CH^jw@UC=(^QfmqHZ)R~SFe)RB-?jAp~NN+62*@n{r9x3N>NV<5Ca z>(r&}rR%tV`(B$iSo?o?&n{iT*yhS5#y?`Cfv~10Os#2D)d~H94# z0ol8yV{zqfwPt7Fo>NMm-z=Ro_nbiXlkeEnoq8pz{ea0`?w20wkWKxp4PG6*L%wv$ zLsX!{uXKD0Eer|Y*qLl^RcHq3Sm|I_1aUAzf3OwElfFj&#WE2pI7;*|K!vGb4;_ zcivtmx1Lmq9waKravdBC9jI|pM>t<%ZvEB0PMuy_mJTO)QlE!*pCn~x&Xh^)wXWY^6o zdOnUv^OIT>@w^YV1GD5RZc6JYS_;xw+W1z{mKHeEM0V+831|-7vMy5X|1)kR)<5C6jHDb8RP5YBFP5T(^Z2q?OBiSUiA3xZj4lR cAY5Hsc(Sr!)_d;EoV;sy$|^<{GMkD20<97Vk^lez literal 0 HcmV?d00001 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..85c199c --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +zhipuai \ No newline at end of file diff --git a/zhipuai_chat.py b/zhipuai_chat.py new file mode 100644 index 0000000..c3c0faa --- /dev/null +++ b/zhipuai_chat.py @@ -0,0 +1,85 @@ +# Copyright 2021 Zilliz. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os
from typing import List

import zhipuai
from towhee.operator.base import PyOperator


class ZhipuaiChat(PyOperator):
    '''Wrapper of the Zhipu AI (ChatGLM) chat API.

    Args:
        model_name (str): Zhipu AI model to invoke, defaults to 'chatglm_std'.
        api_key (str): Zhipu AI API key. If None, falls back to the
            ZHIPUAI_API_KEY environment variable.
        **kwargs: Extra parameters forwarded to the model API
            (e.g. temperature, top_p). Pass stream=True to receive
            server-sent events instead of a single response dict.
    '''
    def __init__(self,
                 model_name: str = 'chatglm_std',
                 api_key: str = None,
                 **kwargs
                 ):
        zhipuai.api_key = api_key or os.getenv("ZHIPUAI_API_KEY")
        self._model = model_name
        self.kwargs = kwargs
        # Pop `stream` once here: the original popped it inside __call__,
        # which silently disabled streaming from the second call onwards.
        self.stream = self.kwargs.pop('stream', False)

    def __call__(self, messages: List[dict]):
        '''Chat with the model.

        Args:
            messages (list): Dicts with keys from {"system", "question",
                "answer"}, or raw ChatGLM-format dicts with "role"/"content".

        Returns:
            dict: the raw model response — or, when the operator was created
            with stream=True, a generator of event dicts.
        '''
        prompt = self.parse_inputs(messages)
        if self.stream:
            response = zhipuai.model_api.sse_invoke(
                model=self._model,
                prompt=prompt,
                **self.kwargs
            )
            # Streaming is delegated to a helper generator. Mixing `yield`
            # and `return value` in one function (as the original did) turns
            # the non-stream path into a generator too, so callers never got
            # the documented response dict.
            return self._iter_events(response)
        return zhipuai.model_api.invoke(
            model=self._model,
            prompt=prompt,
            **self.kwargs
        )

    @staticmethod
    def _iter_events(response):
        # Flatten SSE events into plain dicts for downstream consumers.
        for x in response.events():
            yield {'event': x.event, 'id': x.id, 'data': x.data, 'meta': x.meta}

    def parse_inputs(self, messages: List[dict]):
        '''Normalize user messages into the ChatGLM role/content format.

        Raises:
            ValueError: when a message dict carries an unsupported key.
        '''
        assert isinstance(messages, list), \
            'Inputs must be a list of dictionaries with keys from ["system", "question", "answer"].'
        # ChatGLM has no dedicated system role, so "system" content is sent
        # as a user turn — this matches the README, which accepts a
        # {"system": ...} message. TODO confirm against Zhipu AI docs.
        role_by_key = {'system': 'user', 'question': 'user', 'answer': 'assistant'}
        new_messages = []
        for m in messages:
            # Pass through messages already in the native ChatGLM format.
            # The original test `('role' and 'content' in m)` only checked
            # 'content' due to operator precedence and then raised KeyError
            # on dicts without a 'role' key.
            if 'role' in m and 'content' in m and m['role'] in ('assistant', 'user'):
                new_messages.append(m)
                continue
            for k, v in m.items():
                if k not in role_by_key:
                    # The original "handled" this with a bare no-op string and
                    # then appended a stale/unbound message — fail loudly instead.
                    raise ValueError(
                        'Invalid message key: only accept key value from ["system", "question", "answer"].')
                new_messages.append({'role': role_by_key[k], 'content': v})
        return new_messages

    def stream_output(self, response):
        # Placeholder: dedicated stream post-processing is not implemented.
        raise RuntimeError('Stream is not yet supported.')

    @staticmethod
    def supported_model_names():
        '''Return the supported model names in sorted order.'''
        model_list = [
            'chatglm_130b',
            'chatglm_6b'
        ]
        model_list.sort()
        return model_list