diff --git a/README.md b/README.md
index 76d5d4a..6a7e31d 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,93 @@
-# chatglm
+# Zhipu AI
+
+*author: Jael*
+
+
+
+## Description 描述
+
+This operator is implemented with [ChatGLM services from Zhipu AI](https://open.bigmodel.cn).
+It directly returns the original response as a dictionary, without parsing.
+Please note you will need [API Key](https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys) to access the service.
+
+LLM/ZhipuAI 利用了来自[智谱AI开放平台](https://open.bigmodel.cn)的大语言模型服务。该算子以字典的形式直接返回原始的模型回复。请注意,您需要[API Key](https://open.bigmodel.cn/login?redirect=%2Fusercenter%2Fapikeys)才能访问该服务。
+
+
+
+## Code Example 代码示例
+
+*Write a pipeline with explicit inputs/outputs name specifications:*
+
+```python
+from towhee import pipe, ops
+
+p = (
+ pipe.input('messages')
+ .map('messages', 'response', ops.LLM.ZhipuAI(
+ api_key=ZHIPUAI_API_KEY,
+ model_name='chatglm_130b', # or 'chatglm_6b'
+ temperature=0.5,
+ max_tokens=50,
+ ))
+ .output('response')
+)
+
+messages=[
+ {'system': '你是一个资深的软件工程师,善于回答关于科技项目的问题。'},
+ {'question': 'Zilliz Cloud 是什么?', 'answer': 'Zilliz Cloud 是一种全托管的向量检索服务。'},
+ {'question': '它和 Milvus 的关系是什么?'}
+ ]
+response = p(messages).get()[0]
+answer = response['choices'][0]['content']
+token_usage = response['usage']
+```
+
+
+
+## Factory Constructor 接口说明
+
+Create the operator via the following factory method:
+
+***LLM.ZhipuAI(api_key: str, model_name: str, \*\*kwargs)***
+
+**Parameters:**
+
+
+***api_key***: *str=None*
+
+The Zhipu AI API key in string, defaults to None. If None, it will use the environment variable `ZHIPUAI_API_KEY`.
+
+***model_name***: *str='chatglm_std'*
+
+The model used in the Zhipu AI service, defaults to 'chatglm_std' (matching the operator's default). Visit the Zhipu AI documentation for supported models.
+
+***\*\*kwargs***
+
+Other ChatGLM parameters such as temperature, etc.
+
+
+
+## Interface 使用说明
+
+The operator takes a list of messages as input.
+It returns the raw response as a dictionary.
+
+***\_\_call\_\_(messages)***
+
+**Parameters:**
+
+***messages***: *list*
+
+A list of messages to set up the chat.
+Must be a list of dictionaries with keys from "system", "question", "answer". For example, [{"question": "a past question?", "answer": "a past answer."}, {"question": "current question?"}].
+It also accepts the original ChatGLM message format, e.g. [{"role": "user", "content": "a question?"}, {"role": "assistant", "content": "an answer."}].
+
+**Returns**:
+
+*response: dict*
+
+The original LLM response as a dictionary, including the next answer and token usage.
+
+
+
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..3f86b97
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1,7 @@
+from .zhipuai_chat import ZhipuaiChat
+
+
+def chatglm(*args, **kwargs):
+    """Factory entry point: construct a ZhipuaiChat operator, forwarding all
+    constructor arguments unchanged."""
+    return ZhipuaiChat(*args, **kwargs)
diff --git a/__pycache__/__init__.cpython-38.pyc b/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000..5133b2e
Binary files /dev/null and b/__pycache__/__init__.cpython-38.pyc differ
diff --git a/__pycache__/zhipuai_chat.cpython-38.pyc b/__pycache__/zhipuai_chat.cpython-38.pyc
new file mode 100644
index 0000000..82b2330
Binary files /dev/null and b/__pycache__/zhipuai_chat.cpython-38.pyc differ
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..85c199c
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+zhipuai
\ No newline at end of file
diff --git a/zhipuai_chat.py b/zhipuai_chat.py
new file mode 100644
index 0000000..c3c0faa
--- /dev/null
+++ b/zhipuai_chat.py
@@ -0,0 +1,111 @@
+# Copyright 2021 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import List
+
+import zhipuai
+from towhee.operator.base import PyOperator
+
+
+class ZhipuaiChat(PyOperator):
+    '''Wrapper of the Zhipu AI (ChatGLM) chat API.
+
+    Sends chat messages to the Zhipu AI model service and returns the raw
+    response dictionary (or a generator of event dicts when streaming).
+    '''
+    def __init__(self,
+                 model_name: str = 'chatglm_std',
+                 api_key: str = None,
+                 **kwargs
+                 ):
+        # Fall back to the ZHIPUAI_API_KEY environment variable when no key is given.
+        zhipuai.api_key = api_key or os.getenv("ZHIPUAI_API_KEY")
+        self._model = model_name
+        self.kwargs = kwargs
+
+    def __call__(self, messages: List[dict]):
+        '''Chat with the model.
+
+        Returns the raw response dict, or a generator of event dicts when
+        `stream=True` was passed to the constructor.
+        '''
+        messages = self.parse_inputs(messages)
+
+        self.stream = self.kwargs.pop('stream', False)
+
+        if self.stream:
+            response = zhipuai.model_api.sse_invoke(
+                model=self._model,
+                prompt=messages,
+                **self.kwargs
+            )
+            # Bug fix: a bare `yield` in this method made __call__ itself a
+            # generator, so the non-stream `return` path never returned the
+            # response. Streaming is delegated to a helper generator instead.
+            return self.stream_output(response)
+
+        return zhipuai.model_api.invoke(
+            model=self._model,
+            prompt=messages,
+            **self.kwargs
+        )
+
+    def parse_inputs(self, messages: List[dict]):
+        '''Normalize messages to the native [{'role': ..., 'content': ...}] format.
+
+        Accepts native-format dicts, or dicts keyed by "system", "question"
+        (mapped to role 'user') and "answer" (mapped to role 'assistant').
+        Raises ValueError for any other key.
+        '''
+        assert isinstance(messages, list), \
+            'Inputs must be a list of dictionaries with keys from ["system", "question", "answer"].'
+        new_messages = []
+        for m in messages:
+            # Bug fix: `('role' and 'content' in m)` only tested 'content',
+            # raising KeyError for dicts without a 'role' key.
+            if 'role' in m and 'content' in m and m['role'] in ['system', 'assistant', 'user']:
+                new_messages.append(m)
+            else:
+                for k, v in m.items():
+                    if k == 'question':
+                        new_m = {'role': 'user', 'content': v}
+                    elif k == 'answer':
+                        new_m = {'role': 'assistant', 'content': v}
+                    elif k == 'system':
+                        # NOTE(review): assumes the service accepts a 'system'
+                        # role as other chat APIs do -- confirm with Zhipu docs.
+                        new_m = {'role': 'system', 'content': v}
+                    else:
+                        # Bug fix: this branch used to be a bare string literal
+                        # with no effect, then appended a stale/unbound message.
+                        raise ValueError('Invalid message key: only accept key value '
+                                         'from ["system", "question", "answer"].')
+                    new_messages.append(new_m)
+        return new_messages
+
+    def stream_output(self, response):
+        '''Yield SSE events from a streaming response as plain dicts.'''
+        for x in response.events():
+            yield {'event': x.event, 'id': x.id, 'data': x.data, 'meta': x.meta}
+
+    @staticmethod
+    def supported_model_names():
+        '''Return the sorted list of model names known to this operator.'''
+        model_list = [
+            'chatglm_130b',
+            'chatglm_6b'
+        ]
+        model_list.sort()
+        return model_list
+