Update llama-2-7b-chat
Signed-off-by: Jael Gu <mengjia.gu@zilliz.com>
main
2 changed files with 5 additions and 5 deletions
- README.md
- llama2.py
README.md

@@ -25,7 +25,7 @@ Use the default model to continue the conversation from given messages.
 ```python
 from towhee import ops
 
-chat = ops.LLM.Llama_2('path/to/model_file.bin', max_tokens=2048)
+chat = ops.LLM.Llama_2('llama-2-13b-chat', max_tokens=2048)
 
 message = [{"question": "Building a website can be done in 10 simple steps:"}]
 answer = chat(message)
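For readers who want to try the updated example inside a pipeline rather than by calling the operator directly, a minimal sketch follows. It assumes towhee's `pipe`/`ops` interface and keeps the message format from the README snippet; everything except the model name and `max_tokens` is illustrative and not part of this commit.

```python
# Minimal sketch (not part of this commit): the updated README call wired
# into a towhee pipeline, reusing the message format shown above.
from towhee import pipe, ops

p = (
    pipe.input('messages')
        .map('messages', 'answer',
             ops.LLM.Llama_2('llama-2-13b-chat', max_tokens=2048))
        .output('answer')
)

messages = [{"question": "Building a website can be done in 10 simple steps:"}]
print(p(messages).get())  # generated continuation
```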
@@ -100,8 +100,8 @@ A dictionary of supported models with model name as key and huggingface hub id &
 {
     'llama-2-7b-chat': {
-        'hf_id': 'TheBloke/Llama-2-7B-GGML',
-        'filename': 'llama-2-7b.ggmlv3.q4_0.bin'
+        'hf_id': 'TheBloke/Llama-2-7B-Chat-GGML',
+        'filename': 'llama-2-7b-chat.ggmlv3.q4_0.bin'
     },
     'llama-2-13b-chat': {
         'hf_id': 'TheBloke/Llama-2-13B-chat-GGML',
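The dictionary documented above maps a model name to a Hugging Face repo id (`hf_id`) and a GGML filename. As a rough illustration of how such an entry can be turned into a local weights file, the sketch below uses `huggingface_hub.hf_hub_download`; the `resolve_model` helper is hypothetical and not taken from the operator's code.

```python
# Hypothetical helper: turn a supported model name into a local GGML file path
# using the hf_id/filename mapping shown in the diff above. Illustrative only;
# this is not the operator's actual implementation.
from huggingface_hub import hf_hub_download

SUPPORTED_MODELS = {
    'llama-2-7b-chat': {
        'hf_id': 'TheBloke/Llama-2-7B-Chat-GGML',
        'filename': 'llama-2-7b-chat.ggmlv3.q4_0.bin',
    },
}

def resolve_model(name: str) -> str:
    """Download (or reuse from the local cache) the GGML file for a model name."""
    entry = SUPPORTED_MODELS[name]
    return hf_hub_download(repo_id=entry['hf_id'], filename=entry['filename'])

model_path = resolve_model('llama-2-7b-chat')  # local path to the .bin file
```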
llama2.py

@@ -77,8 +77,8 @@ class LlamaCpp(PyOperator):
     def supported_model_names():
         models = {
             'llama-2-7b-chat': {
-                'hf_id': 'TheBloke/Llama-2-7B-GGML',
-                'filename': 'llama-2-7b.ggmlv3.q4_0.bin'
+                'hf_id': 'TheBloke/Llama-2-7B-Chat-GGML',
+                'filename': 'llama-2-7b-chat.ggmlv3.q4_0.bin'
             },
             'llama-2-13b-chat': {
                 'hf_id': 'TheBloke/Llama-2-13B-chat-GGML',
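Since the bug fixed here is that the `llama-2-7b-chat` entry pointed at the base (non-chat) repo and filename, a small sanity check over `supported_model_names()` could keep this from regressing. The sketch below is illustrative; it assumes the class is importable from `llama2.py` as shown.

```python
# Illustrative regression check (assumes `from llama2 import LlamaCpp` works in
# the test environment): every "-chat" model name should point at chat weights.
from llama2 import LlamaCpp

def test_chat_models_point_at_chat_weights():
    for name, info in LlamaCpp.supported_model_names().items():
        if name.endswith('-chat'):
            assert 'chat' in info['hf_id'].lower(), f"{name}: {info['hf_id']}"
            assert 'chat' in info['filename'].lower(), f"{name}: {info['filename']}"
```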