|
@ -1,7 +1,9 @@ |
|
|
from typing import List, Tuple, Dict, Optional |
|
|
from typing import List, Tuple, Dict, Optional |
|
|
|
|
|
import logging |
|
|
|
|
|
|
|
|
from towhee.operator import PyOperator |
|
|
from towhee.operator import PyOperator |
|
|
|
|
|
|
|
|
|
|
|
# Module-level logger. Use the module's own named logger instead of the root
# logger: records still propagate to root handlers, but this avoids attaching
# this module's output (and any later configuration) to the global root logger.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
gpt_prompt = """Use the following pieces of context to answer the question at the end. |
|
|
gpt_prompt = """Use the following pieces of context to answer the question at the end. |
|
|
If you don't know the answer, just say that you don't know, don't try to make up an answer. |
|
|
If you don't know the answer, just say that you don't know, don't try to make up an answer. |
|
@ -26,9 +28,14 @@ class QAPrompt(PyOperator): |
|
|
if temp: |
|
|
if temp: |
|
|
self._template = temp |
|
|
self._template = temp |
|
|
else: |
|
|
else: |
|
|
if llm_name.lower() == 'dolly': |
|
|
|
|
|
|
|
|
if not llm_name: |
|
|
|
|
|
self._template = gpt_prompt |
|
|
|
|
|
elif llm_name.lower() == 'dolly': |
|
|
self._template = dolly_prompt |
|
|
self._template = dolly_prompt |
|
|
|
|
|
elif llm_name.lower() == 'openai': |
|
|
|
|
|
self._template = gpt_prompt |
|
|
else: |
|
|
else: |
|
|
|
|
|
logger.warning('Unkown llm_name, use default prompt') |
|
|
self._template = gpt_prompt |
|
|
self._template = gpt_prompt |
|
|
|
|
|
|
|
|
def __call__(self, question: str, docs: List[str], history=Optional[List[Tuple]]) -> List[Dict[str, str]]: |
|
|
def __call__(self, question: str, docs: List[str], history=Optional[List[Tuple]]) -> List[Dict[str, str]]: |
|
|