Update readme
Signed-off-by: Jael Gu <mengjia.gu@zilliz.com>
3 changed files with 5 additions and 6 deletions:
README.md
__init__.py
llama2.py
README.md
@@ -59,9 +59,9 @@ p = (
     pipe.input('question', 'docs', 'history')
         .map(('question', 'docs', 'history'),
              'prompt',
-             ops.prompt.template(temp, ['question', 'context'], sys_message))
+             ops.prompt.template(temp, ['question', 'context'], system_msg))
         .map('prompt', 'answer',
-             ops.LLM.Llama_2(max_tokens=200))
+             ops.LLM.Llama_2(temperature=0))
         .output('answer')
 )
@@ -72,7 +72,7 @@ print(q1, ans1)
 history.append((q1, ans1))
 docs.append('Towhee is a cutting-edge framework designed to streamline the processing of unstructured data through the use of Large Language Model (LLM) based pipeline orchestration.')
-ans2 = p(q2, docs, history)
+ans2 = p(q2, docs, history).get()[0]
 print(q2, ans2)
 ```
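For reference, a minimal sketch of how the README snippet reads once these two hunks are applied. It assumes the earlier README cells (not shown in this diff) already define temp, system_msg, docs, history, q1, and q2, and that Towhee is installed so that `pipe` and `ops` can be imported.

```python
# Sketch only: the README pipeline after this commit. temp, system_msg,
# docs, history, q1 and q2 are defined earlier in the README and are not
# part of this diff.
from towhee import pipe, ops

p = (
    pipe.input('question', 'docs', 'history')
        .map(('question', 'docs', 'history'), 'prompt',
             ops.prompt.template(temp, ['question', 'context'], system_msg))
        .map('prompt', 'answer',
             ops.LLM.Llama_2(temperature=0))
        .output('answer')
)

ans2 = p(q2, docs, history).get()[0]  # .get()[0] pulls the answer out of the result, per the second hunk
print(q2, ans2)
```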
__init__.py
@@ -1,5 +1,5 @@
 from .llama2 import LlamaCpp


-def llama_2(*args, **kwargs):
+def Llama_2(*args, **kwargs):
     return LlamaCpp(*args, **kwargs)
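The rename keeps the operator factory in line with the `ops.LLM.Llama_2(...)` call used in the README hunk above. A minimal sketch of what the factory does when called directly; the model path below is a placeholder, not a file shipped with this repo.

```python
# Sketch only: Llama_2 forwards its arguments straight to LlamaCpp, so the
# two calls below are equivalent. The path is a placeholder.
op = Llama_2(model_name_or_file='/path/to/llama-2-7b-chat.bin')
op = LlamaCpp(model_name_or_file='/path/to/llama-2-7b-chat.bin')
```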
llama2.py
@@ -39,7 +39,6 @@ class LlamaCpp(PyOperator):
         self.model_path = model_name_or_file
         assert os.path.isfile(self.model_path), f'Invalid model path: {self.model_path}'
-        print(111, self.model_path)
         self.model = Llama(model_path=self.model_path)

     def __call__(self, messages: List[dict]):
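For context, `Llama` here is presumably the loader from the llama-cpp-python dependency; a standalone sketch of the same load step, with the model path again a placeholder:

```python
# Sketch only, assuming llama-cpp-python is installed; mirrors the
# constructor call kept by the hunk above.
from llama_cpp import Llama

model = Llama(model_path='/path/to/llama-2-7b-chat.bin')
```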
@@ -52,7 +51,7 @@ class LlamaCpp(PyOperator):
         assert isinstance(messages, list), \
             'Inputs must be a list of dictionaries with keys from ["system", "question", "answer"].'
         prompt = ''
-        question = messages.pop[-1]
+        question = messages.pop(-1)
         assert len(question) == 1 and 'question' in question.keys()
         question = question['question']
         for m in messages:
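The assertions in this hunk pin down the expected input shape: `messages` is a list of dicts whose keys come from "system", "question", "answer", and its final element must be a single-key dict holding only the new question. A hedged example call; the layout of the earlier history entries is an assumption, since the loop that consumes them is not shown here.

```python
# Sketch only: a messages list that satisfies the visible assertions. The
# combined question/answer dict for a past turn is an assumption; only the
# constraints checked above (allowed keys, single-key final 'question') are
# taken from the diff.
messages = [
    {'system': 'You are a helpful assistant.'},
    {'question': 'What is Towhee?', 'answer': 'An LLM-based pipeline orchestration framework.'},
    {'question': 'Who maintains it?'},
]
answer = op(messages)  # op: a LlamaCpp instance, e.g. from the factory sketch above
```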