Commit 215498a6 authored by John Wang's avatar John Wang

Merge branch 'feat/universal-chat' into deploy/dev

parents b3783ed5 b221f72c
@@ -19,7 +19,7 @@ def check_file_for_chinese_comments(file_path):
 def main():
     has_chinese = False
-    excluded_files = ["model_template.py", 'stopwords.py', 'commands.py', 'indexing_runner.py']
+    excluded_files = ["model_template.py", 'stopwords.py', 'commands.py', 'indexing_runner.py', 'web_reader_tool.py']
     for root, _, files in os.walk("."):
         for file in files:
......
 import logging
+import re
 from typing import Optional, List, Union, Tuple
 from langchain.base_language import BaseLanguageModel
@@ -270,6 +271,9 @@ And answer according to the language of the user's question.
         messages.append(human_message)

+        for message in messages:
+            message.content = re.sub(r'<\|.*?\|>', '', message.content)
+
         return messages, ['\nHuman:', '</histories>']
     @classmethod
......
@@ -31,6 +31,7 @@ class OrchestratorRuleParser:
     def __init__(self, tenant_id: str, app_model_config: AppModelConfig):
         self.tenant_id = tenant_id
         self.app_model_config = app_model_config
+        self.agent_summary_model_name = "gpt-3.5-turbo-16k"
     def to_agent_executor(self, conversation_message_task: ConversationMessageTask, memory: Optional[BaseChatMemory],
                           rest_tokens: int, chain_callback: MainChainGatherCallbackHandler) \
@@ -71,7 +72,7 @@ class OrchestratorRuleParser:
         summary_llm = LLMBuilder.to_llm(
             tenant_id=self.tenant_id,
-            model_name=agent_model_name,
+            model_name=self.agent_summary_model_name,
             temperature=0,
             max_tokens=500,
             callbacks=[DifyStdOutCallbackHandler()]
@@ -80,7 +81,7 @@ class OrchestratorRuleParser:
         tools = self.to_tools(
             tool_configs=tool_configs,
             conversation_message_task=conversation_message_task,
-            model_name=agent_model_name,
+            model_name=self.agent_summary_model_name,
             rest_tokens=rest_tokens,
             callbacks=[agent_callback, DifyStdOutCallbackHandler()]
         )
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment