Commit 3f5177b6 authored by John Wang

feat: replace args in universal chat

parent 055d2c45
@@ -12,6 +12,7 @@ from controllers.console import api
 from controllers.console.app.error import ConversationCompletedError, AppUnavailableError, ProviderNotInitializeError, \
     ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
 from controllers.console.universal_chat.wraps import UniversalChatResource
+from core.constant import llm_constant
 from core.conversation_message_task import PubHandler
 from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
     LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError
@@ -30,8 +31,17 @@ class UniversalChatApi(UniversalChatResource):
         parser.add_argument('tools', type=list, required=True, location='json')
         args = parser.parse_args()

-        # todo update app model config
-        args['model_config'] = {}
+        app_model_config = app_model.app_model_config
+
+        # update app model config
+        args['model_config'] = app_model_config.to_dict()
+        args['model_config']['model']['name'] = args['model']
+
+        if not llm_constant.models[args['model']]:
+            raise ValueError("Model not exists.")
+
+        args['model_config']['model']['provider'] = llm_constant.models[args['model']]
+        args['model_config']['agent_mode']['tools'] = args['tools']

         args['inputs'] = {}
@@ -44,7 +54,8 @@ class UniversalChatApi(UniversalChatResource):
             user=current_user,
             args=args,
             from_source='console',
-            streaming=True
+            streaming=True,
+            is_model_config_override=True,
         )

         return compact_response(response)
...
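For context on the handler change above: the empty placeholder config is replaced by the app's stored AppModelConfig, serialized through the new to_dict() and then overridden with the model and tools sent in the request. Below is a minimal sketch of that flow, not part of the commit; the helper name is hypothetical, and it assumes llm_constant.models is a dict mapping model names to provider names (e.g. {"gpt-3.5-turbo": "openai"}).

# Illustrative helper sketching the per-request model config override.
def build_model_config_override(app_model_config, model_name, tools, model_provider_map):
    model_config = app_model_config.to_dict()

    provider = model_provider_map.get(model_name)
    if not provider:
        # The handler indexes llm_constant.models directly and raises
        # ValueError("Model not exists."); .get() additionally avoids a
        # KeyError for model names missing from the table.
        raise ValueError("Model not exists.")

    model_config['model']['name'] = model_name
    model_config['model']['provider'] = provider
    model_config['agent_mode']['tools'] = tools
    return model_config

The handler then passes the resulting dict as args['model_config'] together with is_model_config_override=True, so the completion call uses the request-supplied model and tools instead of the values saved on the app.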
@@ -130,6 +130,22 @@ class AppModelConfig(db.Model):
     def agent_mode_dict(self) -> dict:
         return json.loads(self.agent_mode) if self.agent_mode else {"enabled": False, "strategy": None, "tools": []}

+    def to_dict(self) -> dict:
+        return {
+            "provider": "",
+            "model_id": "",
+            "configs": {},
+            "opening_statement": self.opening_statement,
+            "suggested_questions": self.suggested_questions_list,
+            "suggested_questions_after_answer": self.suggested_questions_after_answer_dict,
+            "speech_to_text": self.speech_to_text_dict,
+            "more_like_this": self.more_like_this_dict,
+            "sensitive_word_avoidance": self.sensitive_word_avoidance_dict,
+            "model": self.model_dict,
+            "user_input_form": self.user_input_form_list,
+            "pre_prompt": self.pre_prompt,
+            "agent_mode": self.agent_mode_dict
+        }

 class RecommendedApp(db.Model):
     __tablename__ = 'recommended_apps'
...
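The new AppModelConfig.to_dict() builds on the existing *_dict / *_list helper properties (such as agent_mode_dict above), each of which JSON-decodes a text column and falls back to a default. A short usage sketch, assuming app_model_config is a loaded AppModelConfig row (the variable and the model name are illustrative); it shows which parts of the returned dict the universal chat handler actually rewrites.

from core.constant import llm_constant  # provider lookup table used by the handler

config = app_model_config.to_dict()

# Top-level "provider", "model_id" and "configs" come back as empty
# placeholders; only the nested "model" and "agent_mode" entries are rewritten.
config['model']['name'] = 'gpt-3.5-turbo'                            # from args['model'] (illustrative value)
config['model']['provider'] = llm_constant.models['gpt-3.5-turbo']   # provider keyed by model name
config['agent_mode']['tools'] = []                                    # tool list from the request body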