6 changed files with 183 additions and 1 deletion
@@ -1,2 +1,93 @@
# chatglm

# Zhipu AI

*author: Jael*

<br />
## Description

This operator is implemented with the [ChatGLM services from Zhipu AI](https://open.bigmodel.cn).

It directly returns the original response as a dictionary, without parsing.

Please note that you will need an [API Key](https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys) to access the service.
<br />

## Code Example

*Write a pipeline with explicit input/output name specifications:*
```python
from towhee import pipe, ops

p = (
    pipe.input('messages')
        .map('messages', 'response', ops.LLM.ZhipuAI(
            api_key=ZHIPUAI_API_KEY,
            model_name='chatglm_130b',  # or 'chatglm_6b'
            temperature=0.5,
            max_tokens=50,
        ))
        .output('response')
)

# The system prompt says: "You are a senior software engineer, good at
# answering questions about tech projects." The chat then asks what
# Zilliz Cloud is and how it relates to Milvus.
messages = [
    {'system': '你是一个资深的软件工程师,善于回答关于科技项目的问题。'},
    {'question': 'Zilliz Cloud 是什么?', 'answer': 'Zilliz Cloud 是一种全托管的向量检索服务。'},
    {'question': '它和 Milvus 的关系是什么?'}
]

response = p(messages).get()[0]
answer = response['choices'][0]['content']
token_usage = response['usage']
```
<br />

## Factory Constructor

Create the operator via the following factory method (a usage sketch follows the parameter list):

***LLM.ZhipuAI(api_key: str, model_name: str, \*\*kwargs)***

**Parameters:**
***api_key***: *str=None*

The Zhipu AI API key as a string, defaults to None. If None, the environment variable `ZHIPUAI_API_KEY` is used instead.

***model_name***: *str='chatglm_130b'*

The model used by the Zhipu AI service, defaults to 'chatglm_130b'. Visit the Zhipu AI documentation for the full list of supported models.

***\*\*kwargs***

Other ChatGLM parameters such as `temperature`.
<br />

## Interface

The operator takes a list of messages as input.
It returns the original response as a dictionary.

***\_\_call\_\_(messages)***
**Parameters:**

***messages***: *list*

A list of messages to set up the chat.
Must be a list of dictionaries with keys from "system", "question", and "answer". For example, [{"question": "a past question?", "answer": "a past answer."}, {"question": "current question?"}].
It also accepts the original ChatGLM message format such as [{"role": "user", "content": "a question?"}, {"role": "assistant", "content": "an answer."}].
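A side-by-side sketch of the two accepted formats (content is illustrative):

```python
# Q&A-style format: keys from "system", "question", "answer".
qa_messages = [
    {'system': 'You are a helpful assistant.'},
    {'question': 'What is Zilliz Cloud?', 'answer': 'A fully managed vector search service.'},
    {'question': 'How does it relate to Milvus?'}
]

# Native ChatGLM format: "role"/"content" pairs.
native_messages = [
    {'role': 'user', 'content': 'What is Zilliz Cloud?'},
    {'role': 'assistant', 'content': 'A fully managed vector search service.'},
    {'role': 'user', 'content': 'How does it relate to Milvus?'}
]
```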
**Returns**:

*response: dict*

The original LLM response as a dictionary, including the next answer and token usage.
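For orientation, a hypothetical response shape showing only the fields read in the code example above (`response['choices'][0]['content']` and `response['usage']`); other fields follow the Zhipu AI API and are not guaranteed here:

```python
# Hypothetical shape of a non-streaming response; values illustrative.
response = {
    'choices': [
        {'role': 'assistant', 'content': 'Milvus is the engine behind Zilliz Cloud.'}
    ],
    'usage': {'total_tokens': 43},
}
```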
<br />
@@ -0,0 +1,5 @@
from .zhipuai_chat import ZhipuaiChat


def chatglm(*args, **kwargs):
    return ZhipuaiChat(*args, **kwargs)
@@ -0,0 +1 @@
zhipuai
@@ -0,0 +1,85 @@
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import List

import zhipuai
from towhee.operator.base import PyOperator


class ZhipuaiChat(PyOperator):
    '''Wrapper of the Zhipu AI ChatGLM API.'''
    def __init__(self,
                 model_name: str = 'chatglm_130b',
                 api_key: str = None,
                 **kwargs
                 ):
        zhipuai.api_key = api_key or os.getenv('ZHIPUAI_API_KEY')
        self._model = model_name
        # Pop the stream flag once here so that self.kwargs only carries
        # parameters that are forwarded to the model API.
        self.stream = kwargs.pop('stream', False)
        self.kwargs = kwargs

    def __call__(self, messages: List[dict]):
        messages = self.parse_inputs(messages)
        if self.stream:
            response = zhipuai.model_api.sse_invoke(
                model=self._model,
                prompt=messages,
                **self.kwargs
            )
            # Return a generator over server-sent events.
            return self.stream_output(response)
        response = zhipuai.model_api.invoke(
            model=self._model,
            prompt=messages,
            **self.kwargs
        )
        return response

    def parse_inputs(self, messages: List[dict]):
        assert isinstance(messages, list), \
            'Inputs must be a list of dictionaries with keys from ["system", "question", "answer"].'
        new_messages = []
        for m in messages:
            if 'role' in m and 'content' in m and m['role'] in ('user', 'assistant'):
                # Already in the native ChatGLM message format.
                new_messages.append(m)
            else:
                for k, v in m.items():
                    if k == 'question':
                        new_messages.append({'role': 'user', 'content': v})
                    elif k == 'answer':
                        new_messages.append({'role': 'assistant', 'content': v})
                    elif k == 'system':
                        # The ChatGLM prompt format has no dedicated system
                        # role, so system text is sent as a leading user turn.
                        new_messages.append({'role': 'user', 'content': v})
                    else:
                        raise KeyError(
                            'Invalid message key: only accept keys from ["system", "question", "answer"].')
        return new_messages

    def stream_output(self, response):
        # Convert server-sent events into plain dictionaries.
        for x in response.events():
            yield {'event': x.event, 'id': x.id, 'data': x.data, 'meta': x.meta}

    @staticmethod
    def supported_model_names():
        model_list = [
            'chatglm_130b',
            'chatglm_6b'
        ]
        model_list.sort()
        return model_list
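A minimal direct-call sketch of the class above, assuming `ZHIPUAI_API_KEY` is set and that the module is importable as `zhipuai_chat` (path assumed):

```python
from zhipuai_chat import ZhipuaiChat

# Non-streaming call: returns the raw dictionary from zhipuai.model_api.invoke.
chat = ZhipuaiChat(model_name='chatglm_130b', temperature=0.5)
response = chat([{'question': 'What is a vector database?'}])
print(response)
```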