Unverified Commit 2f9cb8c4 authored by Yeuoly

Merge branch 'main' into feat/agent-image

parents 0debd75b 625b0afa
......@@ -107,20 +107,33 @@ class AppListApi(Resource):
# validate config
model_config_dict = args['model_config']
# get model provider
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=current_user.current_tenant_id,
model_type=ModelType.LLM
# Get provider configurations
provider_manager = ProviderManager()
provider_configurations = provider_manager.get_configurations(current_user.current_tenant_id)
# get available models from provider_configurations
available_models = provider_configurations.get_models(
model_type=ModelType.LLM,
only_active=True
)
if not model_instance:
raise ProviderNotInitializeError(
f"No Default System Reasoning Model available. Please configure "
f"in the Settings -> Model Provider.")
else:
model_config_dict["model"]["provider"] = model_instance.provider
model_config_dict["model"]["name"] = model_instance.model
# check if model is available
available_models_names = [f'{model.provider.provider}.{model.model}' for model in available_models]
provider_model = f"{model_config_dict['model']['provider']}.{model_config_dict['model']['name']}"
if provider_model not in available_models_names:
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=current_user.current_tenant_id,
model_type=ModelType.LLM
)
if not model_instance:
raise ProviderNotInitializeError(
f"No Default System Reasoning Model available. Please configure "
f"in the Settings -> Model Provider.")
else:
model_config_dict["model"]["provider"] = model_instance.provider
model_config_dict["model"]["name"] = model_instance.model
model_configuration = AppModelConfigService.validate_configuration(
tenant_id=current_user.current_tenant_id,
......
......@@ -11,6 +11,7 @@ from core.application_queue_manager import ApplicationQueueManager, PublishFrom
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMUsage
from core.model_runtime.entities.model_entities import ModelFeature
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.moderation.base import ModerationException
from core.tools.entities.tool_entities import ToolRuntimeVariablePool
......@@ -169,7 +170,7 @@ class AssistantApplicationRunner(AppRunner):
# load tool variables
tool_conversation_variables = self._load_tool_variables(conversation_id=conversation.id,
user_id=application_generate_entity.user_id,
tanent_id=application_generate_entity.tenant_id)
tenant_id=application_generate_entity.tenant_id)
# convert db variables to tool variables
tool_variables = self._convert_db_variables_to_tool_variables(tool_conversation_variables)
......@@ -194,6 +195,13 @@ class AssistantApplicationRunner(AppRunner):
memory=memory,
)
# change function call strategy based on LLM model
llm_model = cast(LargeLanguageModel, model_instance.model_type_instance)
model_schema = llm_model.get_model_schema(model_instance.model, model_instance.credentials)
if set([ModelFeature.MULTI_TOOL_CALL, ModelFeature.TOOL_CALL]).intersection(model_schema.features):
agent_entity.strategy = AgentEntity.Strategy.FUNCTION_CALLING
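# models that advertise TOOL_CALL or MULTI_TOOL_CALL are switched to the
# function-calling strategy; all others keep chain-of-thought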
# start agent runner
if agent_entity.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT:
assistant_cot_runner = AssistantCotApplicationRunner(
......@@ -209,9 +217,9 @@ class AssistantApplicationRunner(AppRunner):
prompt_messages=prompt_message,
variables_pool=tool_variables,
db_variables=tool_conversation_variables,
model_instance=model_instance
)
invoke_result = assistant_cot_runner.run(
model_instance=model_instance,
conversation=conversation,
message=message,
query=query,
......@@ -229,10 +237,10 @@ class AssistantApplicationRunner(AppRunner):
memory=memory,
prompt_messages=prompt_message,
variables_pool=tool_variables,
db_variables=tool_conversation_variables
db_variables=tool_conversation_variables,
model_instance=model_instance
)
invoke_result = assistant_fc_runner.run(
model_instance=model_instance,
conversation=conversation,
message=message,
query=query,
......@@ -246,13 +254,13 @@ class AssistantApplicationRunner(AppRunner):
agent=True
)
def _load_tool_variables(self, conversation_id: str, user_id: str, tanent_id: str) -> ToolConversationVariables:
def _load_tool_variables(self, conversation_id: str, user_id: str, tenant_id: str) -> ToolConversationVariables:
"""
load tool variables from database
"""
tool_variables: ToolConversationVariables = db.session.query(ToolConversationVariables).filter(
ToolConversationVariables.conversation_id == conversation_id,
ToolConversationVariables.tenant_id == tanent_id
ToolConversationVariables.tenant_id == tenant_id
).first()
if tool_variables:
......@@ -263,7 +271,7 @@ class AssistantApplicationRunner(AppRunner):
tool_variables = ToolConversationVariables(
conversation_id=conversation_id,
user_id=user_id,
tenant_id=tanent_id,
tenant_id=tenant_id,
variables_str='[]',
)
db.session.add(tool_variables)
......
import logging
import json
from typing import Optional, List, Tuple, Union
from typing import Optional, List, Tuple, Union, cast
from datetime import datetime
from mimetypes import guess_extension
......@@ -12,7 +12,7 @@ from models.model import MessageAgentThought, Message, MessageFile
from models.tools import ToolConversationVariables
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolInvokeMessageBinary, \
ToolRuntimeVariablePool, ToolParamter
ToolRuntimeVariablePool, ToolParameter
from core.tools.tool.tool import Tool
from core.tools.tool_manager import ToolManager
from core.tools.tool_file_manager import ToolFileManager
......@@ -27,7 +27,10 @@ from core.entities.application_entities import ModelConfigEntity, \
AgentEntity, AppOrchestrationConfigEntity, ApplicationGenerateEntity, InvokeFrom
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool
from core.model_runtime.entities.llm_entities import LLMUsage
from core.model_runtime.entities.model_entities import ModelFeature
from core.model_runtime.utils.encoders import jsonable_encoder
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_manager import ModelInstance
from core.file.message_file_parser import FileTransferMethod
logger = logging.getLogger(__name__)
......@@ -45,6 +48,7 @@ class BaseAssistantApplicationRunner(AppRunner):
prompt_messages: Optional[List[PromptMessage]] = None,
variables_pool: Optional[ToolRuntimeVariablePool] = None,
db_variables: Optional[ToolConversationVariables] = None,
model_instance: ModelInstance = None
) -> None:
"""
Agent runner
......@@ -71,6 +75,7 @@ class BaseAssistantApplicationRunner(AppRunner):
self.history_prompt_messages = prompt_messages
self.variables_pool = variables_pool
self.db_variables_pool = db_variables
self.model_instance = model_instance
# init callback
self.agent_callback = DifyAgentCallbackHandler()
......@@ -95,9 +100,17 @@ class BaseAssistantApplicationRunner(AppRunner):
MessageAgentThought.message_id == self.message.id,
).count()
def _repacket_app_orchestration_config(self, app_orchestration_config: AppOrchestrationConfigEntity) -> AppOrchestrationConfigEntity:
# check if model supports stream tool call
llm_model = cast(LargeLanguageModel, model_instance.model_type_instance)
model_schema = llm_model.get_model_schema(model_instance.model, model_instance.credentials)
if model_schema and ModelFeature.STREAM_TOOL_CALL in (model_schema.features or []):
self.stream_tool_call = True
else:
self.stream_tool_call = False
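# stream_tool_call records whether the model can emit tool calls
# incrementally within streamed chunks (ModelFeature.STREAM_TOOL_CALL)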
def _repack_app_orchestration_config(self, app_orchestration_config: AppOrchestrationConfigEntity) -> AppOrchestrationConfigEntity:
"""
Repacket app orchestration config
Repack app orchestration config
"""
if app_orchestration_config.prompt_template.simple_prompt_template is None:
app_orchestration_config.prompt_template.simple_prompt_template = ''
......@@ -113,7 +126,7 @@ class BaseAssistantApplicationRunner(AppRunner):
if response.type == ToolInvokeMessage.MessageType.TEXT:
result += response.message
elif response.type == ToolInvokeMessage.MessageType.LINK:
result += f"result link: {response.message}. please dirct user to check it."
result += f"result link: {response.message}. please tell user to check it."
elif response.type == ToolInvokeMessage.MessageType.IMAGE_LINK or \
response.type == ToolInvokeMessage.MessageType.IMAGE:
result += f"image has been created and sent to user already, you should tell user to check it now."
......@@ -172,20 +185,20 @@ class BaseAssistantApplicationRunner(AppRunner):
for parameter in parameters:
parameter_type = 'string'
enum = []
if parameter.type == ToolParamter.ToolParameterType.STRING:
if parameter.type == ToolParameter.ToolParameterType.STRING:
parameter_type = 'string'
elif parameter.type == ToolParamter.ToolParameterType.BOOLEAN:
elif parameter.type == ToolParameter.ToolParameterType.BOOLEAN:
parameter_type = 'boolean'
elif parameter.type == ToolParamter.ToolParameterType.NUMBER:
elif parameter.type == ToolParameter.ToolParameterType.NUMBER:
parameter_type = 'number'
elif parameter.type == ToolParamter.ToolParameterType.SELECT:
elif parameter.type == ToolParameter.ToolParameterType.SELECT:
for option in parameter.options:
enum.append(option.value)
parameter_type = 'string'
else:
raise ValueError(f"parameter type {parameter.type} is not supported")
if parameter.form == ToolParamter.ToolParameterForm.FORM:
if parameter.form == ToolParameter.ToolParameterForm.FORM:
# get tool parameter from form
tool_parameter_config = tool.tool_parameters.get(parameter.name)
if not tool_parameter_config:
......@@ -194,7 +207,7 @@ class BaseAssistantApplicationRunner(AppRunner):
if not tool_parameter_config and parameter.required:
raise ValueError(f"tool parameter {parameter.name} not found in tool config")
if parameter.type == ToolParamter.ToolParameterType.SELECT:
if parameter.type == ToolParameter.ToolParameterType.SELECT:
# check if tool_parameter_config in options
options = list(map(lambda x: x.value, parameter.options))
if tool_parameter_config not in options:
......@@ -202,7 +215,7 @@ class BaseAssistantApplicationRunner(AppRunner):
# convert tool parameter config to correct type
try:
if parameter.type == ToolParamter.ToolParameterType.NUMBER:
if parameter.type == ToolParameter.ToolParameterType.NUMBER:
# check if tool parameter is integer
if isinstance(tool_parameter_config, int):
tool_parameter_config = tool_parameter_config
......@@ -213,11 +226,11 @@ class BaseAssistantApplicationRunner(AppRunner):
tool_parameter_config = float(tool_parameter_config)
else:
tool_parameter_config = int(tool_parameter_config)
elif parameter.type == ToolParamter.ToolParameterType.BOOLEAN:
elif parameter.type == ToolParameter.ToolParameterType.BOOLEAN:
tool_parameter_config = bool(tool_parameter_config)
elif parameter.type not in [ToolParamter.ToolParameterType.SELECT, ToolParamter.ToolParameterType.STRING]:
elif parameter.type not in [ToolParameter.ToolParameterType.SELECT, ToolParameter.ToolParameterType.STRING]:
tool_parameter_config = str(tool_parameter_config)
elif parameter.type == ToolParamter.ToolParameterType:
elif parameter.type == ToolParameter.ToolParameterType:
tool_parameter_config = str(tool_parameter_config)
except Exception as e:
raise ValueError(f"tool parameter {parameter.name} value {tool_parameter_config} is not correct type")
......@@ -225,7 +238,7 @@ class BaseAssistantApplicationRunner(AppRunner):
# save tool parameter to tool entity memory
runtime_parameters[parameter.name] = tool_parameter_config
elif parameter.form == ToolParamter.ToolParameterForm.LLM:
elif parameter.form == ToolParameter.ToolParameterForm.LLM:
message_tool.parameters['properties'][parameter.name] = {
"type": parameter_type,
"description": parameter.llm_description or '',
......@@ -279,20 +292,20 @@ class BaseAssistantApplicationRunner(AppRunner):
for parameter in tool_runtime_parameters:
parameter_type = 'string'
enum = []
if parameter.type == ToolParamter.ToolParameterType.STRING:
if parameter.type == ToolParameter.ToolParameterType.STRING:
parameter_type = 'string'
elif parameter.type == ToolParamter.ToolParameterType.BOOLEAN:
elif parameter.type == ToolParameter.ToolParameterType.BOOLEAN:
parameter_type = 'boolean'
elif parameter.type == ToolParamter.ToolParameterType.NUMBER:
elif parameter.type == ToolParameter.ToolParameterType.NUMBER:
parameter_type = 'number'
elif parameter.type == ToolParamter.ToolParameterType.SELECT:
elif parameter.type == ToolParameter.ToolParameterType.SELECT:
for option in parameter.options:
enum.append(option.value)
parameter_type = 'string'
else:
raise ValueError(f"parameter type {parameter.type} is not supported")
if parameter.form == ToolParamter.ToolParameterForm.LLM:
if parameter.form == ToolParameter.ToolParameterForm.LLM:
prompt_tool.parameters['properties'][parameter.name] = {
"type": parameter_type,
"description": parameter.llm_description or '',
......
......@@ -12,7 +12,7 @@ from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage, LLMRes
from core.model_manager import ModelInstance
from core.tools.errors import ToolInvokeError, ToolNotFoundError, \
ToolNotSupportedError, ToolProviderNotFoundError, ToolParamterValidationError, \
ToolNotSupportedError, ToolProviderNotFoundError, ToolParameterValidationError, \
ToolProviderCredentialValidationError
from core.features.assistant_base_runner import BaseAssistantApplicationRunner
......@@ -20,8 +20,7 @@ from core.features.assistant_base_runner import BaseAssistantApplicationRunner
from models.model import Conversation, Message
class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
def run(self, model_instance: ModelInstance,
conversation: Conversation,
def run(self, conversation: Conversation,
message: Message,
query: str,
) -> Union[Generator, LLMResult]:
......@@ -29,7 +28,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
Run Cot agent application
"""
app_orchestration_config = self.app_orchestration_config
self._repacket_app_orchestration_config(app_orchestration_config)
self._repack_app_orchestration_config(app_orchestration_config)
agent_scratchpad: List[AgentScratchpadUnit] = []
......@@ -72,7 +71,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
}
final_answer = ''
def increse_usage(final_llm_usage_dict: Dict[str, LLMUsage], usage: LLMUsage):
def increase_usage(final_llm_usage_dict: Dict[str, LLMUsage], usage: LLMUsage):
if not final_llm_usage_dict['usage']:
final_llm_usage_dict['usage'] = usage
else:
......@@ -82,6 +81,8 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
llm_usage.prompt_price += usage.prompt_price
llm_usage.completion_price += usage.completion_price
model_instance = self.model_instance
while function_call_state and iteration_step <= max_iteration_steps:
# continue to run until there are no more tool calls
function_call_state = False
......@@ -104,7 +105,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
# update prompt messages
prompt_messages = self._originze_cot_prompt_messages(
prompt_messages = self._organize_cot_prompt_messages(
mode=app_orchestration_config.model_config.mode,
prompt_messages=prompt_messages,
tools=prompt_messages_tools,
......@@ -137,7 +138,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
# get llm usage
if llm_result.usage:
increse_usage(llm_usage, llm_result.usage)
increase_usage(llm_usage, llm_result.usage)
# publish agent thought if it's first iteration
if iteration_step == 1:
......@@ -207,7 +208,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
try:
tool_response = tool_instance.invoke(
user_id=self.user_id,
tool_paramters=tool_call_args if isinstance(tool_call_args, dict) else json.loads(tool_call_args)
tool_parameters=tool_call_args if isinstance(tool_call_args, dict) else json.loads(tool_call_args)
)
# transform tool response to llm friendly response
tool_response = self.transform_tool_invoke_messages(tool_response)
......@@ -225,15 +226,15 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
message_file_ids = [message_file.id for message_file, _ in message_files]
except ToolProviderCredentialValidationError as e:
error_response = f"Plese check your tool provider credentials"
error_response = f"Please check your tool provider credentials"
except (
ToolNotFoundError, ToolNotSupportedError, ToolProviderNotFoundError
) as e:
error_response = f"there is not a tool named {tool_call_name}"
except (
ToolParamterValidationError
ToolParameterValidationError
) as e:
error_response = f"tool paramters validation error: {e}, please check your tool paramters"
error_response = f"tool parameters validation error: {e}, please check your tool parameters"
except ToolInvokeError as e:
error_response = f"tool invoke error: {e}"
except Exception as e:
......@@ -390,7 +391,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
# remove Action: xxx from agent thought
agent_thought = re.sub(r'Action:.*', '', agent_thought, flags=re.IGNORECASE)
if action_name and action_input:
if action_name and action_input is not None:
return AgentScratchpadUnit(
agent_response=content,
thought=agent_thought,
......@@ -468,7 +469,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
if not next_iteration.find("{{observation}}") >= 0:
raise ValueError("{{observation}} is required in next_iteration")
def _convert_strachpad_list_to_str(self, agent_scratchpad: List[AgentScratchpadUnit]) -> str:
def _convert_scratchpad_list_to_str(self, agent_scratchpad: List[AgentScratchpadUnit]) -> str:
"""
convert agent scratchpad list to str
"""
......@@ -480,7 +481,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
return result
def _originze_cot_prompt_messages(self, mode: Literal["completion", "chat"],
def _organize_cot_prompt_messages(self, mode: Literal["completion", "chat"],
prompt_messages: List[PromptMessage],
tools: List[PromptMessageTool],
agent_scratchpad: List[AgentScratchpadUnit],
......@@ -489,7 +490,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
input: str,
) -> List[PromptMessage]:
"""
originze chain of thought prompt messages, a standard prompt message is like:
organize chain of thought prompt messages, a standard prompt message is like:
Respond to the human as helpfully and accurately as possible.
{{instruction}}
......@@ -527,7 +528,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
.replace("{{tools}}", tools_str) \
.replace("{{tool_names}}", tool_names)
# originze prompt messages
# organize prompt messages
if mode == "chat":
# override system message
overrided = False
......@@ -558,7 +559,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
return prompt_messages
elif mode == "completion":
# parse agent scratchpad
agent_scratchpad_str = self._convert_strachpad_list_to_str(agent_scratchpad)
agent_scratchpad_str = self._convert_scratchpad_list_to_str(agent_scratchpad)
# parse prompt messages
return [UserPromptMessage(
content=first_prompt.replace("{{instruction}}", instruction)
......
......@@ -78,6 +78,7 @@ class ModelFeature(Enum):
MULTI_TOOL_CALL = "multi-tool-call"
AGENT_THOUGHT = "agent-thought"
VISION = "vision"
STREAM_TOOL_CALL = "stream-tool-call"
class DefaultParameterName(Enum):
......
......@@ -36,6 +36,7 @@ LLM_BASE_MODELS = [
features=[
ModelFeature.AGENT_THOUGHT,
ModelFeature.MULTI_TOOL_CALL,
ModelFeature.STREAM_TOOL_CALL,
],
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={
......@@ -80,6 +81,7 @@ LLM_BASE_MODELS = [
features=[
ModelFeature.AGENT_THOUGHT,
ModelFeature.MULTI_TOOL_CALL,
ModelFeature.STREAM_TOOL_CALL,
],
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={
......@@ -124,6 +126,7 @@ LLM_BASE_MODELS = [
features=[
ModelFeature.AGENT_THOUGHT,
ModelFeature.MULTI_TOOL_CALL,
ModelFeature.STREAM_TOOL_CALL,
],
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={
......@@ -198,6 +201,7 @@ LLM_BASE_MODELS = [
features=[
ModelFeature.AGENT_THOUGHT,
ModelFeature.MULTI_TOOL_CALL,
ModelFeature.STREAM_TOOL_CALL,
],
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={
......@@ -272,6 +276,7 @@ LLM_BASE_MODELS = [
features=[
ModelFeature.AGENT_THOUGHT,
ModelFeature.MULTI_TOOL_CALL,
ModelFeature.STREAM_TOOL_CALL,
],
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={
......
......@@ -324,6 +324,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
tools: Optional[list[PromptMessageTool]] = None) -> Generator:
index = 0
full_assistant_content = ''
delta_assistant_message_function_call_storage: ChoiceDeltaFunctionCall = None
real_model = model
system_fingerprint = None
completion = ''
......@@ -333,12 +334,32 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
delta = chunk.choices[0]
if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == ''):
if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == '') and \
delta.delta.function_call is None:
continue
# assistant_message_tool_calls = delta.delta.tool_calls
assistant_message_function_call = delta.delta.function_call
# extract tool calls from response
if delta_assistant_message_function_call_storage is not None:
# handle process of stream function call
if assistant_message_function_call:
# the streamed function call has not ended yet
delta_assistant_message_function_call_storage.arguments += assistant_message_function_call.arguments
continue
else:
# message has ended
assistant_message_function_call = delta_assistant_message_function_call_storage
delta_assistant_message_function_call_storage = None
else:
if assistant_message_function_call:
# start of stream function call
delta_assistant_message_function_call_storage = assistant_message_function_call
if delta_assistant_message_function_call_storage.arguments is None:
delta_assistant_message_function_call_storage.arguments = ''
continue
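# the branches above buffer a streamed function call: the first delta
# opens the buffer, later deltas append argument fragments, and the
# first delta without a function_call flushes the completed call for
# extraction below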
# extract tool calls from response
# tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls)
function_call = self._extract_response_function_call(assistant_message_function_call)
......@@ -489,7 +510,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
else:
raise ValueError(f"Got unknown type {message}")
if message.name is not None:
if message.name:
message_dict["name"] = message.name
return message_dict
......@@ -586,7 +607,6 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
num_tokens = 0
for tool in tools:
num_tokens += len(encoding.encode('type'))
num_tokens += len(encoding.encode(tool.get("type")))
num_tokens += len(encoding.encode('function'))
# calculate num tokens for function object
......
......@@ -5,7 +5,7 @@ from typing import Generator, List, Optional, cast
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessage, PromptMessageFunction,
PromptMessageTool, SystemPromptMessage, UserPromptMessage)
PromptMessageTool, SystemPromptMessage, UserPromptMessage, ToolPromptMessage)
from core.model_runtime.errors.invoke import (InvokeAuthorizationError, InvokeBadRequestError, InvokeConnectionError,
InvokeError, InvokeRateLimitError, InvokeServerUnavailableError)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
......@@ -194,6 +194,10 @@ class ChatGLMLargeLanguageModel(LargeLanguageModel):
elif isinstance(message, SystemPromptMessage):
message = cast(SystemPromptMessage, message)
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ToolPromptMessage):
# map tool responses to ChatGLM's function role
message = cast(ToolPromptMessage, message)
message_dict = {"role": "function", "content": message.content}
else:
raise ValueError(f"Unknown message type {type(message)}")
......
......@@ -4,6 +4,8 @@ label:
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 16384
......
......@@ -4,6 +4,8 @@ label:
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 32768
......
......@@ -16,7 +16,7 @@ class MinimaxChatCompletion(object):
"""
def generate(self, model: str, api_key: str, group_id: str,
prompt_messages: List[MinimaxMessage], model_parameters: dict,
tools: Dict[str, Any], stop: List[str] | None, stream: bool, user: str) \
tools: List[Dict[str, Any]], stop: List[str] | None, stream: bool, user: str) \
-> Union[MinimaxMessage, Generator[MinimaxMessage, None, None]]:
"""
generate chat completion
......@@ -162,7 +162,6 @@ class MinimaxChatCompletion(object):
continue
for choice in choices:
print(choice)
message = choice['delta']
yield MinimaxMessage(
content=message,
......
......@@ -17,7 +17,7 @@ class MinimaxChatCompletionPro(object):
"""
def generate(self, model: str, api_key: str, group_id: str,
prompt_messages: List[MinimaxMessage], model_parameters: dict,
tools: Dict[str, Any], stop: List[str] | None, stream: bool, user: str) \
tools: List[Dict[str, Any]], stop: List[str] | None, stream: bool, user: str) \
-> Union[MinimaxMessage, Generator[MinimaxMessage, None, None]]:
"""
generate chat completion
......@@ -82,6 +82,10 @@ class MinimaxChatCompletionPro(object):
**extra_kwargs
}
if tools:
body['functions'] = tools
body['function_call'] = { 'type': 'auto' }
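# tools are forwarded to Minimax Pro as 'functions', with the
# function_call policy left to the model ('auto')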
try:
response = post(
url=url, data=dumps(body), headers=headers, stream=stream, timeout=(10, 300))
......@@ -135,6 +139,7 @@ class MinimaxChatCompletionPro(object):
"""
handle stream chat generate response
"""
function_call_storage = None
for line in response.iter_lines():
if not line:
continue
......@@ -148,7 +153,7 @@ class MinimaxChatCompletionPro(object):
msg = data['base_resp']['status_msg']
self._handle_error(code, msg)
if data['reply']:
if data['reply'] or 'usage' in data and data['usage']:
total_tokens = data['usage']['total_tokens']
message = MinimaxMessage(
role=MinimaxMessage.Role.ASSISTANT.value,
......@@ -160,6 +165,12 @@ class MinimaxChatCompletionPro(object):
'total_tokens': total_tokens
}
message.stop_reason = data['choices'][0]['finish_reason']
if function_call_storage:
function_call_message = MinimaxMessage(content='', role=MinimaxMessage.Role.ASSISTANT.value)
function_call_message.function_call = function_call_storage
yield function_call_message
yield message
return
......@@ -168,11 +179,28 @@ class MinimaxChatCompletionPro(object):
continue
for choice in choices:
message = choice['messages'][0]['text']
if not message:
continue
message = choice['messages'][0]
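# buffer streamed function_call fragments: the first chunk seeds
# function_call_storage, later chunks append to its 'arguments'; a chunk
# without function_call re-attaches the buffered call to the message,
# which then falls through to be yielded below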
if 'function_call' in message:
if not function_call_storage:
function_call_storage = message['function_call']
if 'arguments' not in function_call_storage or not function_call_storage['arguments']:
function_call_storage['arguments'] = ''
continue
else:
function_call_storage['arguments'] += message['function_call']['arguments']
continue
else:
if function_call_storage:
message['function_call'] = function_call_storage
function_call_storage = None
yield MinimaxMessage(
content=message,
role=MinimaxMessage.Role.ASSISTANT.value
)
\ No newline at end of file
minimax_message = MinimaxMessage(content='', role=MinimaxMessage.Role.ASSISTANT.value)
if 'function_call' in message:
minimax_message.function_call = message['function_call']
if 'text' in message:
minimax_message.content = message['text']
yield minimax_message
\ No newline at end of file
......@@ -2,7 +2,7 @@ from typing import Generator, List
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
SystemPromptMessage, UserPromptMessage, ToolPromptMessage)
from core.model_runtime.errors.invoke import (InvokeAuthorizationError, InvokeBadRequestError, InvokeConnectionError,
InvokeError, InvokeRateLimitError, InvokeServerUnavailableError)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
......@@ -84,6 +84,13 @@ class MinimaxLargeLanguageModel(LargeLanguageModel):
"""
client: MinimaxChatCompletionPro = self.model_apis[model]()
if tools:
tools = [{
"name": tool.name,
"description": tool.description,
"parameters": tool.parameters
} for tool in tools]
response = client.generate(
model=model,
api_key=credentials['minimax_api_key'],
......@@ -109,7 +116,19 @@ class MinimaxLargeLanguageModel(LargeLanguageModel):
elif isinstance(prompt_message, UserPromptMessage):
return MinimaxMessage(role=MinimaxMessage.Role.USER.value, content=prompt_message.content)
elif isinstance(prompt_message, AssistantPromptMessage):
if prompt_message.tool_calls:
message = MinimaxMessage(
role=MinimaxMessage.Role.ASSISTANT.value,
content=''
)
message.function_call={
'name': prompt_message.tool_calls[0].function.name,
'arguments': prompt_message.tool_calls[0].function.arguments
}
return message
return MinimaxMessage(role=MinimaxMessage.Role.ASSISTANT.value, content=prompt_message.content)
elif isinstance(prompt_message, ToolPromptMessage):
return MinimaxMessage(role=MinimaxMessage.Role.FUNCTION.value, content=prompt_message.content)
else:
raise NotImplementedError(f'Prompt message type {type(prompt_message)} is not supported')
......@@ -151,6 +170,28 @@ class MinimaxLargeLanguageModel(LargeLanguageModel):
finish_reason=message.stop_reason if message.stop_reason else None,
),
)
elif message.function_call:
if 'name' not in message.function_call or 'arguments' not in message.function_call:
continue
yield LLMResultChunk(
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(
content='',
tool_calls=[AssistantPromptMessage.ToolCall(
id='',
type='function',
function=AssistantPromptMessage.ToolCall.ToolCallFunction(
name=message.function_call['name'],
arguments=message.function_call['arguments']
)
)]
),
),
)
else:
yield LLMResultChunk(
model=model,
......
......@@ -7,13 +7,23 @@ class MinimaxMessage:
USER = 'USER'
ASSISTANT = 'BOT'
SYSTEM = 'SYSTEM'
FUNCTION = 'FUNCTION'
role: str = Role.USER.value
content: str
usage: Dict[str, int] = None
stop_reason: str = ''
function_call: Dict[str, Any] = None
def to_dict(self) -> Dict[str, Any]:
if self.function_call and self.role == MinimaxMessage.Role.ASSISTANT.value:
return {
'sender_type': 'BOT',
'sender_name': '专家',
'text': '',
'function_call': self.function_call
}
return {
'sender_type': self.role,
'sender_name': '我' if self.role == 'USER' else '专家',
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 4096
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 16385
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 16385
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 16385
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 4096
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 32768
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
......
......@@ -6,6 +6,7 @@ model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 8192
......
......@@ -671,7 +671,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
else:
raise ValueError(f"Got unknown type {message}")
if message.name is not None:
if message.name:
message_dict["name"] = message.name
return message_dict
......
......@@ -41,7 +41,7 @@ class OpenLLMGenerate(object):
if not server_url:
raise InvalidAuthenticationError('Invalid server URL')
defautl_llm_config = {
default_llm_config = {
"max_new_tokens": 128,
"min_length": 0,
"early_stopping": False,
......@@ -75,19 +75,19 @@ class OpenLLMGenerate(object):
}
if 'max_tokens' in model_parameters and type(model_parameters['max_tokens']) == int:
defautl_llm_config['max_new_tokens'] = model_parameters['max_tokens']
default_llm_config['max_new_tokens'] = model_parameters['max_tokens']
if 'temperature' in model_parameters and type(model_parameters['temperature']) == float:
defautl_llm_config['temperature'] = model_parameters['temperature']
default_llm_config['temperature'] = model_parameters['temperature']
if 'top_p' in model_parameters and type(model_parameters['top_p']) == float:
defautl_llm_config['top_p'] = model_parameters['top_p']
default_llm_config['top_p'] = model_parameters['top_p']
if 'top_k' in model_parameters and type(model_parameters['top_k']) == int:
defautl_llm_config['top_k'] = model_parameters['top_k']
default_llm_config['top_k'] = model_parameters['top_k']
if 'use_cache' in model_parameters and type(model_parameters['use_cache']) == bool:
defautl_llm_config['use_cache'] = model_parameters['use_cache']
default_llm_config['use_cache'] = model_parameters['use_cache']
headers = {
'Content-Type': 'application/json',
......@@ -104,7 +104,7 @@ class OpenLLMGenerate(object):
data = {
'stop': stop if stop else [],
'prompt': '\n'.join([message.content for message in prompt_messages]),
'llm_config': defautl_llm_config,
'llm_config': default_llm_config,
}
try:
......
......@@ -3,14 +3,14 @@ from typing import Generator, Iterator, List, Optional, Union, cast
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
SystemPromptMessage, UserPromptMessage, ToolPromptMessage)
from core.model_runtime.entities.model_entities import (AIModelEntity, FetchFrom, ModelPropertyKey, ModelType,
ParameterRule, ParameterType)
ParameterRule, ParameterType, ModelFeature)
from core.model_runtime.errors.invoke import (InvokeAuthorizationError, InvokeBadRequestError, InvokeConnectionError,
InvokeError, InvokeRateLimitError, InvokeServerUnavailableError)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.model_providers.xinference.llm.xinference_helper import (XinferenceHelper,
from core.model_runtime.model_providers.xinference.xinference_helper import (XinferenceHelper,
XinferenceModelExtraParameter)
from core.model_runtime.utils import helper
from openai import (APIConnectionError, APITimeoutError, AuthenticationError, ConflictError, InternalServerError,
......@@ -33,6 +33,12 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
see `core.model_runtime.model_providers.__base.large_language_model.LargeLanguageModel._invoke`
"""
if 'temperature' in model_parameters:
if model_parameters['temperature'] < 0.01:
model_parameters['temperature'] = 0.01
elif model_parameters['temperature'] > 1.0:
model_parameters['temperature'] = 0.99
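# the clamp above raises temperatures below 0.01 to 0.01 and caps values
# above 1.0 at 0.99, assuming the backend rejects or misbehaves on extremes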
return self._generate(
model=model, credentials=credentials, prompt_messages=prompt_messages, model_parameters=model_parameters,
tools=tools, stop=stop, stream=stream, user=user,
......@@ -65,6 +71,9 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
credentials['completion_type'] = 'completion'
else:
raise ValueError(f'xinference model ability {extra_param.model_ability} is not supported')
if extra_param.support_function_call:
credentials['support_function_call'] = True
except RuntimeError as e:
raise CredentialsValidateFailedError(f'Xinference credentials validate failed: {e}')
......@@ -220,6 +229,9 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
elif isinstance(message, SystemPromptMessage):
message = cast(SystemPromptMessage, message)
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ToolPromptMessage):
message = cast(ToolPromptMessage, message)
message_dict = {"tool_call_id": message.tool_call_id, "role": "tool", "content": message.content}
else:
raise ValueError(f"Unknown message type {type(message)}")
......@@ -237,7 +249,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
label=I18nObject(
zh_Hans='温度',
en_US='Temperature'
)
),
),
ParameterRule(
name='top_p',
......@@ -282,6 +294,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
completion_type = LLMMode.COMPLETION.value
else:
raise ValueError(f'xinference model ability {extra_args.model_ability} is not supported')
support_function_call = credentials.get('support_function_call', False)
entity = AIModelEntity(
model=model,
......@@ -290,6 +304,9 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
),
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_type=ModelType.LLM,
features=[
ModelFeature.TOOL_CALL
] if support_function_call else [],
model_properties={
ModelPropertyKey.MODE: completion_type,
},
......@@ -310,6 +327,12 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
extra_model_kwargs can be got by `XinferenceHelper.get_xinference_extra_parameter`
"""
if 'server_url' not in credentials:
raise CredentialsValidateFailedError('server_url is required in credentials')
if credentials['server_url'].endswith('/'):
credentials['server_url'] = credentials['server_url'][:-1]
client = OpenAI(
base_url=f'{credentials["server_url"]}/v1',
api_key='abc',
......
......@@ -2,7 +2,7 @@ import time
from typing import Optional
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType, PriceType
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType, PriceType, ModelPropertyKey
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (InvokeAuthorizationError, InvokeBadRequestError, InvokeConnectionError,
InvokeError, InvokeRateLimitError, InvokeServerUnavailableError)
......@@ -10,6 +10,7 @@ from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from xinference_client.client.restful.restful_client import Client, RESTfulEmbeddingModelHandle, RESTfulModelHandle
from core.model_runtime.model_providers.xinference.xinference_helper import XinferenceHelper
class XinferenceTextEmbeddingModel(TextEmbeddingModel):
"""
......@@ -35,7 +36,10 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel):
"""
server_url = credentials['server_url']
model_uid = credentials['model_uid']
if server_url.endswith('/'):
server_url = server_url[:-1]
client = Client(base_url=server_url)
try:
......@@ -102,8 +106,15 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel):
:return:
"""
try:
server_url = credentials['server_url']
model_uid = credentials['model_uid']
extra_args = XinferenceHelper.get_xinference_extra_parameter(server_url=server_url, model_uid=model_uid)
if extra_args.max_tokens:
credentials['max_tokens'] = extra_args.max_tokens
self._invoke(model=model, credentials=credentials, texts=['ping'])
except InvokeAuthorizationError:
except (InvokeAuthorizationError, RuntimeError):
raise CredentialsValidateFailedError('Invalid api key')
@property
......@@ -160,6 +171,7 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel):
"""
used to define customizable model schema
"""
entity = AIModelEntity(
model=model,
label=I18nObject(
......@@ -167,7 +179,10 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel):
),
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_type=ModelType.TEXT_EMBEDDING,
model_properties={},
model_properties={
ModelPropertyKey.MAX_CHUNKS: 1,
ModelPropertyKey.CONTEXT_SIZE: 'max_tokens' in credentials and credentials['max_tokens'] or 512,
},
parameter_rules=[]
)
......
from threading import Lock
from time import time
from typing import List
from os import path
from requests import get
from requests.adapters import HTTPAdapter
......@@ -12,11 +13,16 @@ class XinferenceModelExtraParameter(object):
model_format: str
model_handle_type: str
model_ability: List[str]
max_tokens: int = 512
support_function_call: bool = False
def __init__(self, model_format: str, model_handle_type: str, model_ability: List[str]) -> None:
def __init__(self, model_format: str, model_handle_type: str, model_ability: List[str],
support_function_call: bool, max_tokens: int) -> None:
self.model_format = model_format
self.model_handle_type = model_handle_type
self.model_ability = model_ability
self.support_function_call = support_function_call
self.max_tokens = max_tokens
cache = {}
cache_lock = Lock()
......@@ -49,7 +55,7 @@ class XinferenceHelper:
get xinference model extra parameter like model_format and model_handle_type
"""
url = f'{server_url}/v1/models/{model_uid}'
url = path.join(server_url, 'v1/models', model_uid)
# this method is surrounded by a lock, and default requests may hang forever, so we just set an Adapter with max_retries=3
session = Session()
......@@ -66,10 +72,12 @@ class XinferenceHelper:
response_json = response.json()
model_format = response_json['model_format']
model_ability = response_json['model_ability']
model_format = response_json.get('model_format', 'ggmlv3')
model_ability = response_json.get('model_ability', [])
if model_format == 'ggmlv3' and 'chatglm' in response_json['model_name']:
if response_json.get('model_type') == 'embedding':
model_handle_type = 'embedding'
elif model_format == 'ggmlv3' and 'chatglm' in response_json['model_name']:
model_handle_type = 'chatglm'
elif 'generate' in model_ability:
model_handle_type = 'generate'
......@@ -78,8 +86,13 @@ class XinferenceHelper:
else:
raise NotImplementedError(f'xinference model handle type {model_handle_type} is not supported')
support_function_call = 'tools' in model_ability
max_tokens = response_json.get('max_tokens', 512)
return XinferenceModelExtraParameter(
model_format=model_format,
model_handle_type=model_handle_type,
model_ability=model_ability
model_ability=model_ability,
support_function_call=support_function_call,
max_tokens=max_tokens
)
\ No newline at end of file
......@@ -2,6 +2,10 @@ model: glm-3-turbo
label:
en_US: glm-3-turbo
model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
parameter_rules:
......
......@@ -2,6 +2,10 @@ model: glm-4
label:
en_US: glm-4
model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
parameter_rules:
......
......@@ -194,6 +194,27 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
'content': prompt_message.content,
'tool_call_id': prompt_message.tool_call_id
})
elif isinstance(prompt_message, AssistantPromptMessage):
if prompt_message.tool_calls:
params['messages'].append({
'role': 'assistant',
'content': prompt_message.content,
'tool_calls': [
{
'id': tool_call.id,
'type': tool_call.type,
'function': {
'name': tool_call.function.name,
'arguments': tool_call.function.arguments
}
} for tool_call in prompt_message.tool_calls
]
})
else:
params['messages'].append({
'role': 'assistant',
'content': prompt_message.content
})
else:
params['messages'].append({
'role': prompt_message.role.value,
......
......@@ -218,15 +218,30 @@ class ProviderManager:
)
if available_models:
available_model = available_models[0]
default_model = TenantDefaultModel(
tenant_id=tenant_id,
model_type=model_type.to_origin_model_type(),
provider_name=available_model.provider.provider,
model_name=available_model.model
)
db.session.add(default_model)
db.session.commit()
found = False
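# prefer gpt-3.5-turbo-1106 as the tenant default model when it is
# available; otherwise fall back to the first available model below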
for available_model in available_models:
if available_model.model == "gpt-3.5-turbo-1106":
default_model = TenantDefaultModel(
tenant_id=tenant_id,
model_type=model_type.to_origin_model_type(),
provider_name=available_model.provider.provider,
model_name=available_model.model
)
db.session.add(default_model)
db.session.commit()
found = True
break
if not found:
available_model = available_models[0]
default_model = TenantDefaultModel(
tenant_id=tenant_id,
model_type=model_type.to_origin_model_type(),
provider_name=available_model.provider.provider,
model_name=available_model.model
)
db.session.add(default_model)
db.session.commit()
if not default_model:
return None
......
......@@ -125,7 +125,7 @@ from openai import OpenAI
class DallE3Tool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......@@ -135,7 +135,7 @@ class DallE3Tool(BuiltinTool):
)
# prompt
prompt = tool_paramters.get('prompt', '')
prompt = tool_parameters.get('prompt', '')
if not prompt:
return self.create_text_message('Please input prompt')
......@@ -163,7 +163,7 @@ Next, we use Vectorizer.AI to convert the PNG icon generated by DallE3 into a ve
```python
from core.tools.tool.builtin_tool import BuiltinTool
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParamter
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
from core.tools.errors import ToolProviderCredentialValidationError
from typing import Any, Dict, List, Union
......@@ -171,20 +171,20 @@ from httpx import post
from base64 import b64decode
class VectorizerTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
Tool invocation, the image variable name needs to be passed in from here, so that we can get the image from the variable pool
"""
def get_runtime_parameters(self) -> List[ToolParamter]:
def get_runtime_parameters(self) -> List[ToolParameter]:
"""
Override the tool parameter list, we can dynamically generate the parameter list based on the actual situation in the current variable pool, so that the LLM can generate the form based on the parameter list
"""
def is_tool_avaliable(self) -> bool:
def is_tool_available(self) -> bool:
"""
Whether the current tool is available, if there is no image in the current variable pool, then we don't need to display this tool, just return False here
"""
......@@ -194,7 +194,7 @@ Next, let's implement these three functions
```python
from core.tools.tool.builtin_tool import BuiltinTool
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParamter
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
from core.tools.errors import ToolProviderCredentialValidationError
from typing import Any, Dict, List, Union
......@@ -202,7 +202,7 @@ from httpx import post
from base64 import b64decode
class VectorizerTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......@@ -214,7 +214,7 @@ class VectorizerTool(BuiltinTool):
raise ToolProviderCredentialValidationError('Please input api key name and value')
# Get image_id, the definition of image_id can be found in get_runtime_parameters
image_id = tool_paramters.get('image_id', '')
image_id = tool_parameters.get('image_id', '')
if not image_id:
return self.create_text_message('Please input image id')
......@@ -241,24 +241,24 @@ class VectorizerTool(BuiltinTool):
meta={'mime_type': 'image/svg+xml'})
]
def get_runtime_parameters(self) -> List[ToolParamter]:
def get_runtime_parameters(self) -> List[ToolParameter]:
"""
override the runtime parameters
"""
# Here, we override the tool parameter list, define the image_id, and set its option list to all images in the current variable pool. The configuration here is consistent with the configuration in yaml.
return [
ToolParamter.get_simple_instance(
ToolParameter.get_simple_instance(
name='image_id',
llm_description=f'the image id that you want to vectorize, \
and the image id should be specified in \
{[i.name for i in self.list_default_image_variables()]}',
type=ToolParamter.ToolParameterType.SELECT,
type=ToolParameter.ToolParameterType.SELECT,
required=True,
options=[i.name for i in self.list_default_image_variables()]
)
]
def is_tool_avaliable(self) -> bool:
def is_tool_available(self) -> bool:
# Only when there are images in the variable pool, the LLM needs to use this tool
return len(self.list_default_image_variables()) > 0
```
......
......@@ -146,13 +146,13 @@ from typing import Any, Dict, List, Union
class GoogleSearchTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
query = tool_paramters['query']
result_type = tool_paramters['result_type']
query = tool_parameters['query']
result_type = tool_parameters['result_type']
api_key = self.runtime.credentials['serpapi_api_key']
# TODO: search with serpapi
result = SerpAPI(api_key).run(query, result_type=result_type)
......@@ -163,7 +163,7 @@ class GoogleSearchTool(BuiltinTool):
```
### Parameters
The overall logic of the tool is in the `_invoke` method, this method accepts two parameters: `user_id` and `tool_paramters`, which represent the user ID and tool parameters respectively
The overall logic of the tool is in the `_invoke` method, this method accepts two parameters: `user_id` and `tool_parameters`, which represent the user ID and tool parameters respectively
### Return Data
When the tool returns, you can choose to return one message or multiple messages; here we return one message. `create_text_message` and `create_link_message` create a text message and a link message respectively.
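For instance, a minimal sketch of both return shapes inside `_invoke`, reusing the factory methods shown above (`result` and `link` are placeholder variables):
```python
# return a single text message
return self.create_text_message(result)

# or return several messages at once
return [
    self.create_text_message(result),
    self.create_link_message(link),
]
```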
......@@ -195,7 +195,7 @@ class GoogleProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"query": "test",
"result_type": "link"
},
......
......@@ -125,7 +125,7 @@ from openai import OpenAI
class DallE3Tool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......@@ -135,7 +135,7 @@ class DallE3Tool(BuiltinTool):
)
# prompt
prompt = tool_paramters.get('prompt', '')
prompt = tool_parameters.get('prompt', '')
if not prompt:
return self.create_text_message('Please input prompt')
......@@ -163,7 +163,7 @@ class DallE3Tool(BuiltinTool):
```python
from core.tools.tool.builtin_tool import BuiltinTool
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParamter
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
from core.tools.errors import ToolProviderCredentialValidationError
from typing import Any, Dict, List, Union
......@@ -171,20 +171,20 @@ from httpx import post
from base64 import b64decode
class VectorizerTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
Tool invocation, the image variable name needs to be passed in from here, so that we can get the image from the variable pool
"""
def get_runtime_parameters(self) -> List[ToolParamter]:
def get_runtime_parameters(self) -> List[ToolParameter]:
"""
Override the tool parameter list, we can dynamically generate the parameter list based on the actual situation in the current variable pool, so that the LLM can generate the form based on the parameter list
"""
def is_tool_avaliable(self) -> bool:
def is_tool_available(self) -> bool:
"""
Whether the current tool is available, if there is no image in the current variable pool, then we don't need to display this tool, just return False here
"""
......@@ -194,7 +194,7 @@ class VectorizerTool(BuiltinTool):
```python
from core.tools.tool.builtin_tool import BuiltinTool
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParamter
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
from core.tools.errors import ToolProviderCredentialValidationError
from typing import Any, Dict, List, Union
......@@ -202,7 +202,7 @@ from httpx import post
from base64 import b64decode
class VectorizerTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......@@ -214,7 +214,7 @@ class VectorizerTool(BuiltinTool):
raise ToolProviderCredentialValidationError('Please input api key name and value')
# Get image_id, the definition of image_id can be found in get_runtime_parameters
image_id = tool_paramters.get('image_id', '')
image_id = tool_parameters.get('image_id', '')
if not image_id:
return self.create_text_message('Please input image id')
......@@ -241,24 +241,24 @@ class VectorizerTool(BuiltinTool):
meta={'mime_type': 'image/svg+xml'})
]
def get_runtime_parameters(self) -> List[ToolParamter]:
def get_runtime_parameters(self) -> List[ToolParameter]:
"""
override the runtime parameters
"""
# Here, we override the tool parameter list, define the image_id, and set its option list to all images in the current variable pool. The configuration here is consistent with the configuration in yaml.
return [
ToolParamter.get_simple_instance(
ToolParameter.get_simple_instance(
name='image_id',
llm_description=f'the image id that you want to vectorize, \
and the image id should be specified in \
{[i.name for i in self.list_default_image_variables()]}',
type=ToolParamter.ToolParameterType.SELECT,
type=ToolParameter.ToolParameterType.SELECT,
required=True,
options=[i.name for i in self.list_default_image_variables()]
)
]
def is_tool_avaliable(self) -> bool:
def is_tool_available(self) -> bool:
# Only when there are images in the variable pool does the LLM need to use this tool
return len(self.list_default_image_variables()) > 0
```
......
......@@ -146,13 +146,13 @@ from typing import Any, Dict, List, Union
class GoogleSearchTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
query = tool_paramters['query']
result_type = tool_paramters['result_type']
query = tool_parameters['query']
result_type = tool_parameters['result_type']
api_key = self.runtime.credentials['serpapi_api_key']
# TODO: search with serpapi
result = SerpAPI(api_key).run(query, result_type=result_type)
......@@ -163,7 +163,7 @@ class GoogleSearchTool(BuiltinTool):
```
### Parameters
The overall logic of the tool is in the `_invoke` method, this method accepts two parameters: `user_id` and `tool_paramters`, which represent the user ID and tool parameters respectively
The overall logic of the tool is in the `_invoke` method, this method accepts two parameters: `user_id` and `tool_parameters`, which represent the user ID and tool parameters respectively
### Return Data
When the tool returns, you can choose to return one message or multiple messages; here we return one message. `create_text_message` and `create_link_message` create a text message and a link message respectively.
......@@ -195,7 +195,7 @@ class GoogleProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"query": "test",
"result_type": "link"
},
......
from pydantic import BaseModel
from typing import Dict, Optional, Any, List
from core.tools.entities.tool_entities import ToolProviderType, ToolParamter
from core.tools.entities.tool_entities import ToolProviderType, ToolParameter
class ApiBasedToolBundle(BaseModel):
"""
......@@ -16,7 +16,7 @@ class ApiBasedToolBundle(BaseModel):
# operation_id
operation_id: str = None
# parameters
parameters: Optional[List[ToolParamter]] = None
parameters: Optional[List[ToolParameter]] = None
# author
author: str
# icon
......
......@@ -89,11 +89,11 @@ class ToolInvokeMessageBinary(BaseModel):
url: str = Field(..., description="The url of the binary")
save_as: str = ''
class ToolParamterOption(BaseModel):
class ToolParameterOption(BaseModel):
value: str = Field(..., description="The value of the option")
label: I18nObject = Field(..., description="The label of the option")
class ToolParamter(BaseModel):
class ToolParameter(BaseModel):
class ToolParameterType(Enum):
STRING = "string"
NUMBER = "number"
......@@ -115,12 +115,12 @@ class ToolParamter(BaseModel):
default: Optional[str] = None
min: Optional[Union[float, int]] = None
max: Optional[Union[float, int]] = None
options: Optional[List[ToolParamterOption]] = None
options: Optional[List[ToolParameterOption]] = None
@classmethod
def get_simple_instance(cls,
name: str, llm_description: str, type: ToolParameterType,
required: bool, options: Optional[List[str]] = None) -> 'ToolParamter':
required: bool, options: Optional[List[str]] = None) -> 'ToolParameter':
"""
get a simple tool parameter
......@@ -130,9 +130,9 @@ class ToolParamter(BaseModel):
:param required: if the parameter is required
:param options: the options of the parameter
"""
# convert options to ToolParamterOption
# convert options to ToolParameterOption
if options:
options = [ToolParamterOption(value=option, label=I18nObject(en_US=option, zh_Hans=option)) for option in options]
options = [ToolParameterOption(value=option, label=I18nObject(en_US=option, zh_Hans=option)) for option in options]
return cls(
name=name,
label=I18nObject(en_US='', zh_Hans=''),
......@@ -184,7 +184,7 @@ class ToolProviderCredentials(BaseModel):
raise ValueError(f'invalid mode value {value}')
@staticmethod
def defaut(value: str) -> str:
def default(value: str) -> str:
return ""
name: str = Field(..., description="The name of the credentials")
......
......@@ -4,7 +4,7 @@ from typing import List, Dict, Optional
from core.tools.entities.common_entities import I18nObject
from core.tools.entities.tool_entities import ToolProviderCredentials
from core.tools.tool.tool import ToolParamter
from core.tools.tool.tool import ToolParameter
class UserToolProvider(BaseModel):
class ProviderType(Enum):
......@@ -46,4 +46,4 @@ class UserTool(BaseModel):
name: str # identifier
label: I18nObject # label
description: I18nObject
parameters: Optional[List[ToolParamter]]
\ No newline at end of file
parameters: Optional[List[ToolParameter]]
\ No newline at end of file
......@@ -4,7 +4,7 @@ class ToolProviderNotFoundError(ValueError):
class ToolNotFoundError(ValueError):
pass
class ToolParamterValidationError(ValueError):
class ToolParameterValidationError(ValueError):
pass
class ToolProviderCredentialValidationError(ValueError):
......
......@@ -123,12 +123,12 @@ class ApiBasedToolProviderController(ToolProviderController):
return self.tools
def get_tools(self, user_id: str, tanent_id: str) -> List[ApiTool]:
def get_tools(self, user_id: str, tenant_id: str) -> List[ApiTool]:
"""
fetch tools from database
:param user_id: the user id
:param tanent_id: the tanent id
:param tenant_id: the tenant id
:return: the tools
"""
if self.tools is not None:
......@@ -136,9 +136,9 @@ class ApiBasedToolProviderController(ToolProviderController):
tools: List[Tool] = []
# get tanent api providers
# get tenant api providers
db_providers: List[ApiToolProvider] = db.session.query(ApiToolProvider).filter(
ApiToolProvider.tenant_id == tanent_id,
ApiToolProvider.tenant_id == tenant_id,
ApiToolProvider.name == self.identity.name
).all()
......
from typing import Any, Dict, List
from core.tools.entities.tool_entities import ToolProviderType, ToolParamter, ToolParamterOption
from core.tools.entities.tool_entities import ToolProviderType, ToolParameter, ToolParameterOption
from core.tools.tool.tool import Tool
from core.tools.entities.common_entities import I18nObject
from core.tools.provider.tool_provider import ToolProviderController
......@@ -71,7 +71,7 @@ class AppBasedToolProviderEntity(ToolProviderController):
variable_name = input_form[form_type]['variable_name']
options = input_form[form_type].get('options', [])
if form_type == 'paragraph' or form_type == 'text-input':
tool['parameters'].append(ToolParamter(
tool['parameters'].append(ToolParameter(
name=variable_name,
label=I18nObject(
en_US=label,
......@@ -82,13 +82,13 @@ class AppBasedToolProviderEntity(ToolProviderController):
zh_Hans=label
),
llm_description=label,
form=ToolParamter.ToolParameterForm.FORM,
type=ToolParamter.ToolParameterType.STRING,
form=ToolParameter.ToolParameterForm.FORM,
type=ToolParameter.ToolParameterType.STRING,
required=required,
default=default
))
elif form_type == 'select':
tool['parameters'].append(ToolParamter(
tool['parameters'].append(ToolParameter(
name=variable_name,
label=I18nObject(
en_US=label,
......@@ -99,11 +99,11 @@ class AppBasedToolProviderEntity(ToolProviderController):
zh_Hans=label
),
llm_description=label,
form=ToolParamter.ToolParameterForm.FORM,
type=ToolParamter.ToolParameterType.SELECT,
form=ToolParameter.ToolParameterForm.FORM,
type=ToolParameter.ToolParameterType.SELECT,
required=required,
default=default,
options=[ToolParamterOption(
options=[ToolParameterOption(
value=option,
label=I18nObject(
en_US=option,
......
......@@ -13,7 +13,7 @@ class AzureDALLEProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"prompt": "cute girl, blue eyes, white hair, anime style",
"size": "square",
"n": 1
......
......@@ -2,13 +2,13 @@ identity:
author: Leslie
name: azuredalle
label:
en_US: AzureDALL-E
zh_Hans: AzureDALL-E 绘画
pt_BR: AzureDALL-E
en_US: Azure DALL-E
zh_Hans: Azure DALL-E 绘画
pt_BR: Azure DALL-E
description:
en_US: AZURE DALL-E art
zh_Hans: AZURE DALL-E 绘画
pt_BR: AZURE DALL-E art
en_US: Azure DALL-E art
zh_Hans: Azure DALL-E 绘画
pt_BR: Azure DALL-E art
icon: icon.png
credentials_for_provider:
azure_openai_api_key:
......@@ -21,26 +21,26 @@ credentials_for_provider:
help:
en_US: Please input your Azure OpenAI API key
zh_Hans: 请输入你的 Azure OpenAI API key
pt_BR: Please input your Azure OpenAI API key
pt_BR: Introduza a sua chave de API OpenAI do Azure
placeholder:
en_US: Please input your Azure OpenAI API key
zh_Hans: 请输入你的 Azure OpenAI API key
pt_BR: Please input your Azure OpenAI API key
pt_BR: Introduza a sua chave de API OpenAI do Azure
azure_openai_api_model_name:
type: text-input
required: true
label:
en_US: Deployment Name
zh_Hans: 部署名称
pt_BR: Deployment Name
pt_BR: Nome da Implantação
help:
en_US: Please input the name of your Azure Openai DALL-E API deployment
zh_Hans: 请输入你的 Azure Openai DALL-E API 部署名称
pt_BR: Please input the name of your Azure Openai DALL-E API deployment
pt_BR: Insira o nome da implantação da API DALL-E do Azure Openai
placeholder:
en_US: Please input the name of your Azure Openai DALL-E API deployment
zh_Hans: 请输入你的 Azure Openai DALL-E API 部署名称
pt_BR: Please input the name of your Azure Openai DALL-E API deployment
pt_BR: Insira o nome da implantação da API DALL-E do Azure Openai
azure_openai_base_url:
type: text-input
required: true
......@@ -49,13 +49,13 @@ credentials_for_provider:
zh_Hans: API 域名
pt_BR: API Endpoint URL
help:
en_US: Please input your Azure OpenAI Endpoint URL,eg:https://xxx.openai.azure.com/
en_US: Please input your Azure OpenAI Endpoint URL, e.g. https://xxx.openai.azure.com/
zh_Hans: 请输入你的 Azure OpenAI API域名,例如:https://xxx.openai.azure.com/
pt_BR: Please input your Azure OpenAI Endpoint URL,eg:https://xxx.openai.azure.com/
pt_BR: Introduza a URL do Azure OpenAI Endpoint, e.g. https://xxx.openai.azure.com/
placeholder:
en_US: Please input your Azure OpenAI Endpoint URL,eg:https://xxx.openai.azure.com/
en_US: Please input your Azure OpenAI Endpoint URL, e.g. https://xxx.openai.azure.com/
zh_Hans: 请输入你的 Azure OpenAI API域名,例如:https://xxx.openai.azure.com/
pt_BR: Please input your Azure OpenAI Endpoint URL,eg:https://xxx.openai.azure.com/
pt_BR: Introduza a URL do Azure OpenAI Endpoint, e.g. https://xxx.openai.azure.com/
azure_openai_api_version:
type: text-input
required: true
......@@ -64,10 +64,10 @@ credentials_for_provider:
zh_Hans: API 版本
pt_BR: API Version
help:
en_US: Please input your Azure OpenAI API Version,eg:2023-12-01-preview
en_US: Please input your Azure OpenAI API Version, e.g. 2023-12-01-preview
zh_Hans: 请输入你的 Azure OpenAI API 版本,例如:2023-12-01-preview
pt_BR: Please input your Azure OpenAI API Version,eg:2023-12-01-preview
pt_BR: Introduza a versão da API OpenAI do Azure, e.g. 2023-12-01-preview
placeholder:
en_US: Please input your Azure OpenAI API Version,eg:2023-12-01-preview
en_US: Please input your Azure OpenAI API Version, e.g. 2023-12-01-preview
zh_Hans: 请输入你的 Azure OpenAI API 版本,例如:2023-12-01-preview
pt_BR: Please input your Azure OpenAI API Version,eg:2023-12-01-preview
pt_BR: Introduza a versão da API OpenAI do Azure, e.g. 2023-12-01-preview
......@@ -10,7 +10,7 @@ from openai import AzureOpenAI
class DallE3Tool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......@@ -28,19 +28,19 @@ class DallE3Tool(BuiltinTool):
}
# prompt
prompt = tool_paramters.get('prompt', '')
prompt = tool_parameters.get('prompt', '')
if not prompt:
return self.create_text_message('Please input prompt')
# get size
size = SIZE_MAPPING[tool_paramters.get('size', 'square')]
size = SIZE_MAPPING[tool_parameters.get('size', 'square')]
# get n
n = tool_paramters.get('n', 1)
n = tool_parameters.get('n', 1)
# get quality
quality = tool_paramters.get('quality', 'standard')
quality = tool_parameters.get('quality', 'standard')
if quality not in ['standard', 'hd']:
return self.create_text_message('Invalid quality')
# get style
style = tool_paramters.get('style', 'vivid')
style = tool_parameters.get('style', 'vivid')
if style not in ['natural', 'vivid']:
return self.create_text_message('Invalid style')
......
identity:
name: dalle3
name: azure_dalle3
author: Leslie
label:
en_US: DALL-E 3
zh_Hans: DALL-E 3 绘画
pt_BR: DALL-E 3
en_US: Azure DALL-E 3
zh_Hans: Azure DALL-E 3 绘画
pt_BR: Azure DALL-E 3
description:
en_US: DALL-E 3 is a powerful drawing tool that can draw the image you want based on your prompt; compared to DallE 2, DallE 3 has stronger drawing ability, but it will consume more resources
zh_Hans: DALL-E 3 是一个强大的绘画工具,它可以根据您的提示词绘制出您想要的图像,相比于DallE 2, DallE 3拥有更强的绘画能力,但会消耗更多的资源
pt_BR: DALL-E 3 is a powerful drawing tool that can draw the image you want based on your prompt, compared to DallE 2, DallE 3 has stronger drawing ability, but it will consume more resources
pt_BR: DALL-E 3 é uma poderosa ferramenta de desenho que pode desenhar a imagem que você deseja com base em seu prompt, em comparação com DallE 2, DallE 3 tem uma capacidade de desenho mais forte, mas consumirá mais recursos
description:
human:
en_US: DALL-E is a text to image tool
zh_Hans: DALL-E 是一个文本到图像的工具
pt_BR: DALL-E is a text to image tool
pt_BR: DALL-E é uma ferramenta de texto para imagem
llm: DALL-E is a tool used to generate images from text
parameters:
- name: prompt
......@@ -26,7 +26,7 @@ parameters:
human_description:
en_US: Image prompt, you can check the official documentation of DallE 3
zh_Hans: 图像提示词,您可以查看DallE 3 的官方文档
pt_BR: Image prompt, you can check the official documentation of DallE 3
pt_BR: Imagem prompt, você pode verificar a documentação oficial do DallE 3
llm_description: Image prompt of DallE 3, you should describe the image you want to generate as a list of words, as detailed as possible
form: llm
- name: size
......@@ -35,18 +35,18 @@ parameters:
human_description:
en_US: selecting the image size
zh_Hans: 选择图像大小
pt_BR: selecting the image size
pt_BR: seleccionar o tamanho da imagem
label:
en_US: Image size
zh_Hans: 图像大小
pt_BR: Image size
pt_BR: Tamanho da imagem
form: form
options:
- value: square
label:
en_US: Square(1024x1024)
zh_Hans: 方(1024x1024)
pt_BR: Squre(1024x1024)
pt_BR: Quadrado(1024x1024)
- value: vertical
label:
en_US: Vertical(1024x1792)
......@@ -64,11 +64,11 @@ parameters:
human_description:
en_US: selecting the number of images
zh_Hans: 选择图像数量
pt_BR: selecting the number of images
pt_BR: seleccionar o número de imagens
label:
en_US: Number of images
zh_Hans: 图像数量
pt_BR: Number of images
pt_BR: Número de imagens
form: form
min: 1
max: 1
......@@ -79,18 +79,18 @@ parameters:
human_description:
en_US: selecting the image quality
zh_Hans: 选择图像质量
pt_BR: selecting the image quality
pt_BR: seleccionar a qualidade da imagem
label:
en_US: Image quality
zh_Hans: 图像质量
pt_BR: Image quality
pt_BR: Qualidade da imagem
form: form
options:
- value: standard
label:
en_US: Standard
zh_Hans: 标准
pt_BR: Standard
pt_BR: Normal
- value: hd
label:
en_US: HD
......@@ -103,18 +103,18 @@ parameters:
human_description:
en_US: selecting the image style
zh_Hans: 选择图像风格
pt_BR: selecting the image style
pt_BR: seleccionar o estilo da imagem
label:
en_US: Image style
zh_Hans: 图像风格
pt_BR: Image style
pt_BR: Estilo da imagem
form: form
options:
- value: vivid
label:
en_US: Vivid
zh_Hans: 生动
pt_BR: Vivid
pt_BR: Vívido
- value: natural
label:
en_US: Natural
......
......@@ -16,7 +16,7 @@ class ChartProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"data": "1,3,5,7,9,2,4,6,8,10",
},
)
......
......@@ -6,9 +6,9 @@ import io
from typing import Any, Dict, List, Union
class BarChartTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
data = tool_paramters.get('data', '')
data = tool_parameters.get('data', '')
if not data:
return self.create_text_message('Please input data')
data = data.split(';')
......@@ -19,7 +19,7 @@ class BarChartTool(BuiltinTool):
else:
data = [float(i) for i in data]
axis = tool_paramters.get('x_axis', None) or None
axis = tool_parameters.get('x_axis', None) or None
if axis:
axis = axis.split(';')
if len(axis) != len(data):
......
......@@ -8,14 +8,14 @@ from typing import Any, Dict, List, Union
class LinearChartTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
data = tool_paramters.get('data', '')
data = tool_parameters.get('data', '')
if not data:
return self.create_text_message('Please input data')
data = data.split(';')
axis = tool_paramters.get('x_axis', None) or None
axis = tool_parameters.get('x_axis', None) or None
if axis:
axis = axis.split(';')
if len(axis) != len(data):
......
......@@ -8,13 +8,13 @@ from typing import Any, Dict, List, Union
class PieChartTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
data = tool_paramters.get('data', '')
data = tool_parameters.get('data', '')
if not data:
return self.create_text_message('Please input data')
data = data.split(';')
categories = tool_paramters.get('categories', None) or None
categories = tool_parameters.get('categories', None) or None
# if all data is int, convert to int
if all([i.isdigit() for i in data]):
......
......@@ -13,7 +13,7 @@ class DALLEProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"prompt": "cute girl, blue eyes, white hair, anime style",
"size": "small",
"n": 1
......
......@@ -10,7 +10,7 @@ from openai import OpenAI
class DallE2Tool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......@@ -37,15 +37,15 @@ class DallE2Tool(BuiltinTool):
}
# prompt
prompt = tool_paramters.get('prompt', '')
prompt = tool_parameters.get('prompt', '')
if not prompt:
return self.create_text_message('Please input prompt')
# get size
size = SIZE_MAPPING[tool_paramters.get('size', 'large')]
size = SIZE_MAPPING[tool_parameters.get('size', 'large')]
# get n
n = tool_paramters.get('n', 1)
n = tool_parameters.get('n', 1)
# call openapi dalle2
response = client.images.generate(
......
......@@ -10,7 +10,7 @@ from openai import OpenAI
class DallE3Tool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......@@ -37,19 +37,19 @@ class DallE3Tool(BuiltinTool):
}
# prompt
prompt = tool_paramters.get('prompt', '')
prompt = tool_parameters.get('prompt', '')
if not prompt:
return self.create_text_message('Please input prompt')
# get size
size = SIZE_MAPPING[tool_paramters.get('size', 'square')]
size = SIZE_MAPPING[tool_parameters.get('size', 'square')]
# get n
n = tool_paramters.get('n', 1)
n = tool_parameters.get('n', 1)
# get quality
quality = tool_paramters.get('quality', 'standard')
quality = tool_parameters.get('quality', 'standard')
if quality not in ['standard', 'hd']:
return self.create_text_message('Invalid quality')
# get style
style = tool_paramters.get('style', 'vivid')
style = tool_parameters.get('style', 'vivid')
if style not in ['natural', 'vivid']:
return self.create_text_message('Invalid style')
......
identity:
author: CharlirWei
author: CharlieWei
name: gaode
label:
en_US: GaoDe
en_US: Autonavi
zh_Hans: 高德
pt_BR: GaoDe
pt_BR: Autonavi
description:
en_US: Autonavi Open Platform service toolkit.
zh_Hans: 高德开放平台服务工具包。
......@@ -19,11 +19,11 @@ credentials_for_provider:
zh_Hans: API Key
pt_BR: Chave de API
placeholder:
en_US: Please enter your GaoDe API Key
en_US: Please enter your Autonavi API Key
zh_Hans: 请输入你的高德开放平台 API Key
pt_BR: Insira sua chave de API GaoDe
pt_BR: Insira sua chave de API Autonavi
help:
en_US: Get your API Key from GaoDe
en_US: Get your API Key from Autonavi
zh_Hans: 从高德获取您的 API Key
pt_BR: Obtenha sua chave de API do GaoDe
pt_BR: Obtenha sua chave de API do Autonavi
url: https://console.amap.com/dev/key/app
......@@ -6,11 +6,11 @@ from typing import Any, Dict, List, Union
class GaodeRepositoriesTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
city = tool_paramters.get('city', '')
city = tool_parameters.get('city', '')
if not city:
return self.create_text_message('Please tell me your city')
......
identity:
author: CharlirWei
author: CharlieWei
name: github
label:
en_US: Github
......
......@@ -9,12 +9,12 @@ from typing import Any, Dict, List, Union
class GithubRepositoriesTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
top_n = tool_paramters.get('top_n', 5)
query = tool_paramters.get('query', '')
top_n = tool_parameters.get('top_n', 5)
query = tool_parameters.get('query', '')
if not query:
return self.create_text_message('Please input query')
......
identity:
name: repositories
name: github_repositories
author: CharlieWei
label:
en_US: Search Repositories
......@@ -24,7 +24,7 @@ parameters:
en_US: The development language and keywords of the projects you want to find. For example, find 10 PDF document parsing projects developed in Python.
zh_Hans: 你想要找的项目开发语言、关键字,如:找10个Python开发的PDF文档解析项目。
pt_BR: Você deseja encontrar a linguagem de desenvolvimento do projeto, palavras-chave, Por exemplo. Encontre 10 projetos de análise de documentos PDF desenvolvidos em Python.
llm_description: The query of you want to search, format query condition like "keywords+language:js", language can be other dev languages, por exemplo. Procuro um projeto de análise de documentos PDF desenvolvido em Python.
llm_description: The query you want to search for; format the query condition like "keywords+language:js", where language can be other dev languages.
form: llm
- name: top_n
type: number
......
......@@ -14,7 +14,7 @@ class GoogleProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"query": "test",
"result_type": "link"
},
......
......@@ -148,13 +148,13 @@ class SerpAPI:
class GoogleSearchTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
query = tool_paramters['query']
result_type = tool_paramters['result_type']
query = tool_parameters['query']
result_type = tool_parameters['result_type']
api_key = self.runtime.credentials['serpapi_api_key']
result = SerpAPI(api_key).run(query, result_type=result_type)
if result_type == 'text':
......
......@@ -14,7 +14,7 @@ class StableDiffusionProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"prompt": "cat",
"lora": "",
"steps": 1,
......
from core.tools.tool.builtin_tool import BuiltinTool
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParamter, ToolParamterOption
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter, ToolParameterOption
from core.tools.entities.common_entities import I18nObject
from core.tools.errors import ToolProviderCredentialValidationError
......@@ -60,7 +60,7 @@ DRAW_TEXT_OPTIONS = {
}
class StableDiffusionTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......@@ -86,25 +86,25 @@ class StableDiffusionTool(BuiltinTool):
# prompt
prompt = tool_paramters.get('prompt', '')
prompt = tool_parameters.get('prompt', '')
if not prompt:
return self.create_text_message('Please input prompt')
# get negative prompt
negative_prompt = tool_paramters.get('negative_prompt', '')
negative_prompt = tool_parameters.get('negative_prompt', '')
# get size
width = tool_paramters.get('width', 1024)
height = tool_paramters.get('height', 1024)
width = tool_parameters.get('width', 1024)
height = tool_parameters.get('height', 1024)
# get steps
steps = tool_paramters.get('steps', 1)
steps = tool_parameters.get('steps', 1)
# get lora
lora = tool_paramters.get('lora', '')
lora = tool_parameters.get('lora', '')
# get image id
image_id = tool_paramters.get('image_id', '')
image_id = tool_parameters.get('image_id', '')
if image_id.strip():
image_variable = self.get_default_image_variable()
if image_variable:
......@@ -188,6 +188,8 @@ class StableDiffusionTool(BuiltinTool):
if lora:
draw_options['prompt'] = f'{lora},{prompt}'
else:
draw_options['prompt'] = prompt
draw_options['width'] = width
draw_options['height'] = height
......@@ -210,32 +212,32 @@ class StableDiffusionTool(BuiltinTool):
return self.create_text_message('Failed to generate image')
def get_runtime_parameters(self) -> List[ToolParamter]:
def get_runtime_parameters(self) -> List[ToolParameter]:
parameters = [
ToolParamter(name='prompt',
ToolParameter(name='prompt',
label=I18nObject(en_US='Prompt', zh_Hans='Prompt'),
human_description=I18nObject(
en_US='Image prompt, you can check the official documentation of Stable Diffusion',
zh_Hans='图像提示词,您可以查看 Stable Diffusion 的官方文档',
),
type=ToolParamter.ToolParameterType.STRING,
form=ToolParamter.ToolParameterForm.LLM,
type=ToolParameter.ToolParameterType.STRING,
form=ToolParameter.ToolParameterForm.LLM,
llm_description='Image prompt of Stable Diffusion, you should describe the image you want to generate as a list of words, as detailed as possible; the prompt must be written in English.',
required=True),
]
if len(self.list_default_image_variables()) != 0:
parameters.append(
ToolParamter(name='image_id',
ToolParameter(name='image_id',
label=I18nObject(en_US='image_id', zh_Hans='image_id'),
human_description=I18nObject(
en_US='Image id of the image you want to base the generation on; if you want to generate an image based on the default image, you can leave this field empty.',
zh_Hans='您想要生成的图像的图像 ID,如果您想要基于默认图像生成图像,则可以将此字段留空。',
),
type=ToolParamter.ToolParameterType.STRING,
form=ToolParamter.ToolParameterForm.LLM,
type=ToolParameter.ToolParameterType.STRING,
form=ToolParameter.ToolParameterForm.LLM,
llm_description='Image id of the original image; you can leave this field empty if you want to generate a new image.',
required=True,
options=[ToolParamterOption(
options=[ToolParameterOption(
value=i.name,
label=I18nObject(en_US=i.name, zh_Hans=i.name)
) for i in self.list_default_image_variables()])
......
......@@ -10,7 +10,7 @@ class WikiPediaProvider(BuiltinToolProviderController):
try:
CurrentTimeTool().invoke(
user_id='',
tool_paramters={},
tool_parameters={},
)
except Exception as e:
raise ToolProviderCredentialValidationError(str(e))
\ No newline at end of file
......@@ -8,7 +8,7 @@ from datetime import datetime, timezone
class CurrentTimeTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
......
from core.tools.tool.builtin_tool import BuiltinTool
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParamter
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
from core.tools.provider.builtin.vectorizer.tools.test_data import VECTORIZER_ICON_PNG
from core.tools.errors import ToolProviderCredentialValidationError
......@@ -8,21 +8,21 @@ from httpx import post
from base64 import b64decode
class VectorizerTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
api_key_name = self.runtime.credentials.get('api_key_name', None)
api_key_value = self.runtime.credentials.get('api_key_value', None)
mode = tool_paramters.get('mode', 'test')
mode = tool_parameters.get('mode', 'test')
if mode == 'production':
mode = 'preview'
if not api_key_name or not api_key_value:
raise ToolProviderCredentialValidationError('Please input api key name and value')
image_id = tool_paramters.get('image_id', '')
image_id = tool_parameters.get('image_id', '')
if not image_id:
return self.create_text_message('Please input image id')
......@@ -54,21 +54,21 @@ class VectorizerTool(BuiltinTool):
meta={'mime_type': 'image/svg+xml'})
]
def get_runtime_parameters(self) -> List[ToolParamter]:
def get_runtime_parameters(self) -> List[ToolParameter]:
"""
override the runtime parameters
"""
return [
ToolParamter.get_simple_instance(
ToolParameter.get_simple_instance(
name='image_id',
llm_description=f'the image id that you want to vectorize, \
and the image id should be specified in \
{[i.name for i in self.list_default_image_variables()]}',
type=ToolParamter.ToolParameterType.SELECT,
type=ToolParameter.ToolParameterType.SELECT,
required=True,
options=[i.name for i in self.list_default_image_variables()]
)
]
def is_tool_avaliable(self) -> bool:
def is_tool_available(self) -> bool:
return len(self.list_default_image_variables()) > 0
\ No newline at end of file
......@@ -14,7 +14,7 @@ class VectorizerProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"mode": "test",
"image_id": "__test_123"
},
......
......@@ -7,14 +7,14 @@ from typing import Any, Dict, List, Union
class WebscraperTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
try:
url = tool_paramters.get('url', '')
user_agent = tool_paramters.get('user_agent', '')
url = tool_parameters.get('url', '')
user_agent = tool_parameters.get('user_agent', '')
if not url:
return self.create_text_message('Please input url')
......
......@@ -14,7 +14,7 @@ class WebscraperProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
'url': 'https://www.google.com',
'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
},
......
......@@ -14,12 +14,12 @@ class WikipediaInput(BaseModel):
class WikiPediaSearchTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
query = tool_paramters.get('query', '')
query = tool_parameters.get('query', '')
if not query:
return self.create_text_message('Please input query')
......
......@@ -12,7 +12,7 @@ class WikiPediaProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"query": "misaka mikoto",
},
)
......
......@@ -11,12 +11,12 @@ class WolframAlphaTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_paramters: Dict[str, Any],
tool_parameters: Dict[str, Any],
) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
query = tool_paramters.get('query', '')
query = tool_parameters.get('query', '')
if not query:
return self.create_text_message('Please input query')
appid = self.runtime.credentials.get('appid', '')
......
......@@ -16,7 +16,7 @@ class GoogleProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"query": "1+2+....+111",
},
)
......
......@@ -9,23 +9,23 @@ from yfinance import download
import pandas as pd
class YahooFinanceAnalyticsTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
symbol = tool_paramters.get('symbol', '')
symbol = tool_parameters.get('symbol', '')
if not symbol:
return self.create_text_message('Please input symbol')
time_range = [None, None]
start_date = tool_paramters.get('start_date', '')
start_date = tool_parameters.get('start_date', '')
if start_date:
time_range[0] = start_date
else:
time_range[0] = '1800-01-01'
end_date = tool_paramters.get('end_date', '')
end_date = tool_parameters.get('end_date', '')
if end_date:
time_range[1] = end_date
else:
......
......@@ -7,13 +7,13 @@ from requests.exceptions import HTTPError, ReadTimeout
import yfinance
class YahooFinanceSearchTickerTool(BuiltinTool):
def _invoke(self,user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
'''
invoke tools
'''
query = tool_paramters.get('symbol', '')
query = tool_parameters.get('symbol', '')
if not query:
return self.create_text_message('Please input symbol')
......
......@@ -7,12 +7,12 @@ from requests.exceptions import HTTPError, ReadTimeout
from yfinance import Ticker
class YahooFinanceSearchTickerTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
query = tool_paramters.get('symbol', '')
query = tool_parameters.get('symbol', '')
if not query:
return self.create_text_message('Please input symbol')
......
......@@ -12,7 +12,7 @@ class YahooFinanceProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"ticker": "MSFT",
},
)
......
......@@ -7,23 +7,23 @@ from datetime import datetime
from googleapiclient.discovery import build
class YoutubeVideosAnalyticsTool(BuiltinTool):
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) \
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) \
-> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
"""
invoke tools
"""
channel = tool_paramters.get('channel', '')
channel = tool_parameters.get('channel', '')
if not channel:
return self.create_text_message('Please input channel')
time_range = [None, None]
start_date = tool_paramters.get('start_date', '')
start_date = tool_parameters.get('start_date', '')
if start_date:
time_range[0] = start_date
else:
time_range[0] = '1800-01-01'
end_date = tool_paramters.get('end_date', '')
end_date = tool_parameters.get('end_date', '')
if end_date:
time_range[1] = end_date
else:
......
......@@ -12,7 +12,7 @@ class YahooFinanceProvider(BuiltinToolProviderController):
}
).invoke(
user_id='',
tool_paramters={
tool_parameters={
"channel": "TOKYO GIRLS COLLECTION",
"start_date": "2020-01-01",
"end_date": "2024-12-31",
......
......@@ -5,13 +5,13 @@ from os import path, listdir
from yaml import load, FullLoader
from core.tools.entities.tool_entities import ToolProviderType, \
ToolParamter, ToolProviderCredentials
ToolParameter, ToolProviderCredentials
from core.tools.tool.tool import Tool
from core.tools.tool.builtin_tool import BuiltinTool
from core.tools.provider.tool_provider import ToolProviderController
from core.tools.entities.user_entities import UserToolProviderCredentials
from core.tools.errors import ToolNotFoundError, ToolProviderNotFoundError, \
ToolParamterValidationError, ToolProviderCredentialValidationError
ToolParameterValidationError, ToolProviderCredentialValidationError
import importlib
......@@ -109,7 +109,7 @@ class BuiltinToolProviderController(ToolProviderController):
"""
return next(filter(lambda x: x.identity.name == tool_name, self.get_tools()), None)
def get_parameters(self, tool_name: str) -> List[ToolParamter]:
def get_parameters(self, tool_name: str) -> List[ToolParameter]:
"""
returns the parameters of the tool
......@@ -148,62 +148,62 @@ class BuiltinToolProviderController(ToolProviderController):
"""
tool_parameters_schema = self.get_parameters(tool_name)
tool_parameters_need_to_validate: Dict[str, ToolParamter] = {}
tool_parameters_need_to_validate: Dict[str, ToolParameter] = {}
for parameter in tool_parameters_schema:
tool_parameters_need_to_validate[parameter.name] = parameter
for parameter in tool_parameters:
if parameter not in tool_parameters_need_to_validate:
raise ToolParamterValidationError(f'parameter {parameter} not found in tool {tool_name}')
raise ToolParameterValidationError(f'parameter {parameter} not found in tool {tool_name}')
# check type
parameter_schema = tool_parameters_need_to_validate[parameter]
if parameter_schema.type == ToolParamter.ToolParameterType.STRING:
if parameter_schema.type == ToolParameter.ToolParameterType.STRING:
if not isinstance(tool_parameters[parameter], str):
raise ToolParamterValidationError(f'parameter {parameter} should be string')
raise ToolParameterValidationError(f'parameter {parameter} should be string')
elif parameter_schema.type == ToolParamter.ToolParameterType.NUMBER:
elif parameter_schema.type == ToolParameter.ToolParameterType.NUMBER:
if not isinstance(tool_parameters[parameter], (int, float)):
raise ToolParamterValidationError(f'parameter {parameter} should be number')
raise ToolParameterValidationError(f'parameter {parameter} should be number')
if parameter_schema.min is not None and tool_parameters[parameter] < parameter_schema.min:
raise ToolParamterValidationError(f'parameter {parameter} should be greater than {parameter_schema.min}')
raise ToolParameterValidationError(f'parameter {parameter} should be greater than {parameter_schema.min}')
if parameter_schema.max is not None and tool_parameters[parameter] > parameter_schema.max:
raise ToolParamterValidationError(f'parameter {parameter} should be less than {parameter_schema.max}')
raise ToolParameterValidationError(f'parameter {parameter} should be less than {parameter_schema.max}')
elif parameter_schema.type == ToolParamter.ToolParameterType.BOOLEAN:
elif parameter_schema.type == ToolParameter.ToolParameterType.BOOLEAN:
if not isinstance(tool_parameters[parameter], bool):
raise ToolParamterValidationError(f'parameter {parameter} should be boolean')
raise ToolParameterValidationError(f'parameter {parameter} should be boolean')
elif parameter_schema.type == ToolParamter.ToolParameterType.SELECT:
elif parameter_schema.type == ToolParameter.ToolParameterType.SELECT:
if not isinstance(tool_parameters[parameter], str):
raise ToolParamterValidationError(f'parameter {parameter} should be string')
raise ToolParameterValidationError(f'parameter {parameter} should be string')
options = parameter_schema.options
if not isinstance(options, list):
raise ToolParamterValidationError(f'parameter {parameter} options should be list')
raise ToolParameterValidationError(f'parameter {parameter} options should be list')
if tool_parameters[parameter] not in [x.value for x in options]:
raise ToolParamterValidationError(f'parameter {parameter} should be one of {options}')
raise ToolParameterValidationError(f'parameter {parameter} should be one of {options}')
tool_parameters_need_to_validate.pop(parameter)
for parameter in tool_parameters_need_to_validate:
parameter_schema = tool_parameters_need_to_validate[parameter]
if parameter_schema.required:
raise ToolParamterValidationError(f'parameter {parameter} is required')
raise ToolParameterValidationError(f'parameter {parameter} is required')
# the parameter is not set currently, set the default value if needed
if parameter_schema.default is not None:
default_value = parameter_schema.default
# parse default value into the correct type
if parameter_schema.type == ToolParamter.ToolParameterType.STRING or \
parameter_schema.type == ToolParamter.ToolParameterType.SELECT:
if parameter_schema.type == ToolParameter.ToolParameterType.STRING or \
parameter_schema.type == ToolParameter.ToolParameterType.SELECT:
default_value = str(default_value)
elif parameter_schema.type == ToolParamter.ToolParameterType.NUMBER:
elif parameter_schema.type == ToolParameter.ToolParameterType.NUMBER:
default_value = float(default_value)
elif parameter_schema.type == ToolParamter.ToolParameterType.BOOLEAN:
elif parameter_schema.type == ToolParameter.ToolParameterType.BOOLEAN:
default_value = bool(default_value)
tool_parameters[parameter] = default_value
......
......@@ -4,11 +4,11 @@ from typing import List, Dict, Any, Optional
from pydantic import BaseModel
from core.tools.entities.tool_entities import ToolProviderType, \
ToolProviderIdentity, ToolParamter, ToolProviderCredentials
ToolProviderIdentity, ToolParameter, ToolProviderCredentials
from core.tools.tool.tool import Tool
from core.tools.entities.user_entities import UserToolProviderCredentials
from core.tools.errors import ToolNotFoundError, \
ToolParamterValidationError, ToolProviderCredentialValidationError
ToolParameterValidationError, ToolProviderCredentialValidationError
class ToolProviderController(BaseModel, ABC):
identity: Optional[ToolProviderIdentity] = None
......@@ -50,7 +50,7 @@ class ToolProviderController(BaseModel, ABC):
"""
pass
def get_parameters(self, tool_name: str) -> List[ToolParamter]:
def get_parameters(self, tool_name: str) -> List[ToolParameter]:
"""
returns the parameters of the tool
......@@ -80,62 +80,62 @@ class ToolProviderController(BaseModel, ABC):
"""
tool_parameters_schema = self.get_parameters(tool_name)
tool_parameters_need_to_validate: Dict[str, ToolParamter] = {}
tool_parameters_need_to_validate: Dict[str, ToolParameter] = {}
for parameter in tool_parameters_schema:
tool_parameters_need_to_validate[parameter.name] = parameter
for parameter in tool_parameters:
if parameter not in tool_parameters_need_to_validate:
raise ToolParamterValidationError(f'parameter {parameter} not found in tool {tool_name}')
raise ToolParameterValidationError(f'parameter {parameter} not found in tool {tool_name}')
# check type
parameter_schema = tool_parameters_need_to_validate[parameter]
if parameter_schema.type == ToolParamter.ToolParameterType.STRING:
if parameter_schema.type == ToolParameter.ToolParameterType.STRING:
if not isinstance(tool_parameters[parameter], str):
raise ToolParamterValidationError(f'parameter {parameter} should be string')
raise ToolParameterValidationError(f'parameter {parameter} should be string')
elif parameter_schema.type == ToolParamter.ToolParameterType.NUMBER:
elif parameter_schema.type == ToolParameter.ToolParameterType.NUMBER:
if not isinstance(tool_parameters[parameter], (int, float)):
raise ToolParamterValidationError(f'parameter {parameter} should be number')
raise ToolParameterValidationError(f'parameter {parameter} should be number')
if parameter_schema.min is not None and tool_parameters[parameter] < parameter_schema.min:
raise ToolParamterValidationError(f'parameter {parameter} should be greater than {parameter_schema.min}')
raise ToolParameterValidationError(f'parameter {parameter} should be greater than {parameter_schema.min}')
if parameter_schema.max is not None and tool_parameters[parameter] > parameter_schema.max:
raise ToolParamterValidationError(f'parameter {parameter} should be less than {parameter_schema.max}')
raise ToolParameterValidationError(f'parameter {parameter} should be less than {parameter_schema.max}')
elif parameter_schema.type == ToolParamter.ToolParameterType.BOOLEAN:
elif parameter_schema.type == ToolParameter.ToolParameterType.BOOLEAN:
if not isinstance(tool_parameters[parameter], bool):
raise ToolParamterValidationError(f'parameter {parameter} should be boolean')
raise ToolParameterValidationError(f'parameter {parameter} should be boolean')
elif parameter_schema.type == ToolParamter.ToolParameterType.SELECT:
elif parameter_schema.type == ToolParameter.ToolParameterType.SELECT:
if not isinstance(tool_parameters[parameter], str):
raise ToolParamterValidationError(f'parameter {parameter} should be string')
raise ToolParameterValidationError(f'parameter {parameter} should be string')
options = parameter_schema.options
if not isinstance(options, list):
raise ToolParamterValidationError(f'parameter {parameter} options should be list')
raise ToolParameterValidationError(f'parameter {parameter} options should be list')
if tool_parameters[parameter] not in [x.value for x in options]:
raise ToolParamterValidationError(f'parameter {parameter} should be one of {options}')
raise ToolParameterValidationError(f'parameter {parameter} should be one of {options}')
tool_parameters_need_to_validate.pop(parameter)
for parameter in tool_parameters_need_to_validate:
parameter_schema = tool_parameters_need_to_validate[parameter]
if parameter_schema.required:
raise ToolParamterValidationError(f'parameter {parameter} is required')
raise ToolParameterValidationError(f'parameter {parameter} is required')
# the parameter is not set currently, set the default value if needed
if parameter_schema.default is not None:
default_value = parameter_schema.default
# parse default value into the correct type
if parameter_schema.type == ToolParamter.ToolParameterType.STRING or \
parameter_schema.type == ToolParamter.ToolParameterType.SELECT:
if parameter_schema.type == ToolParameter.ToolParameterType.STRING or \
parameter_schema.type == ToolParameter.ToolParameterType.SELECT:
default_value = str(default_value)
elif parameter_schema.type == ToolParamter.ToolParameterType.NUMBER:
elif parameter_schema.type == ToolParameter.ToolParameterType.NUMBER:
default_value = float(default_value)
elif parameter_schema.type == ToolParamter.ToolParameterType.BOOLEAN:
elif parameter_schema.type == ToolParameter.ToolParameterType.BOOLEAN:
default_value = bool(default_value)
tool_parameters[parameter] = default_value
......
......@@ -8,6 +8,7 @@ from core.tools.errors import ToolProviderCredentialValidationError
import httpx
import requests
import json
class ApiTool(Tool):
api_bundle: ApiBasedToolBundle
......@@ -79,11 +80,29 @@ class ApiTool(Tool):
if isinstance(response, httpx.Response):
if response.status_code >= 400:
raise ToolProviderCredentialValidationError(f"Request failed with status code {response.status_code}")
return response.text
if not response.content:
return 'Empty response from the tool, please check your parameters and try again.'
try:
response = response.json()
try:
return json.dumps(response, ensure_ascii=False)
except Exception as e:
return json.dumps(response)
except Exception as e:
return response.text
elif isinstance(response, requests.Response):
if not response.ok:
raise ToolProviderCredentialValidationError(f"Request failed with status code {response.status_code}")
return response.text
if not response.content:
return 'Empty response from the tool, please check your parameters and try again.'
try:
response = response.json()
try:
return json.dumps(response, ensure_ascii=False)
except Exception as e:
return json.dumps(response)
except Exception as e:
return response.text
else:
raise ValueError(f'Invalid response type {type(response)}')
......@@ -204,15 +223,15 @@ class ApiTool(Tool):
return response
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) -> ToolInvokeMessage | List[ToolInvokeMessage]:
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) -> ToolInvokeMessage | List[ToolInvokeMessage]:
"""
invoke http request
"""
# assemble request
headers = self.assembling_request(tool_paramters)
headers = self.assembling_request(tool_parameters)
# do http request
response = self.do_http_request(self.api_bundle.server_url, self.api_bundle.method, headers, tool_paramters)
response = self.do_http_request(self.api_bundle.server_url, self.api_bundle.method, headers, tool_parameters)
# validate response
response = self.validate_and_parse_response(response)
......
from typing import Any, Dict, List, Union
from core.features.dataset_retrieval import DatasetRetrievalFeature
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParamter, ToolIdentity, ToolDescription
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter, ToolIdentity, ToolDescription
from core.tools.tool.tool import Tool
from core.tools.entities.common_entities import I18nObject
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
......@@ -63,23 +63,23 @@ class DatasetRetrieverTool(Tool):
return tools
def get_runtime_parameters(self) -> List[ToolParamter]:
def get_runtime_parameters(self) -> List[ToolParameter]:
return [
ToolParamter(name='query',
ToolParameter(name='query',
label=I18nObject(en_US='', zh_Hans=''),
human_description=I18nObject(en_US='', zh_Hans=''),
type=ToolParamter.ToolParameterType.STRING,
form=ToolParamter.ToolParameterForm.LLM,
type=ToolParameter.ToolParameterType.STRING,
form=ToolParameter.ToolParameterForm.LLM,
llm_description='Query for the dataset to be used to retrieve the dataset.',
required=True,
default=''),
]
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) -> ToolInvokeMessage | List[ToolInvokeMessage]:
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) -> ToolInvokeMessage | List[ToolInvokeMessage]:
"""
invoke dataset retriever tool
"""
query = tool_paramters.get('query', None)
query = tool_parameters.get('query', None)
if not query:
return self.create_text_message(text='please input query')
......
......@@ -5,13 +5,13 @@ from abc import abstractmethod, ABC
from enum import Enum
from core.tools.entities.tool_entities import ToolIdentity, ToolInvokeMessage,\
ToolParamter, ToolDescription, ToolRuntimeVariablePool, ToolRuntimeVariable, ToolRuntimeImageVariable
ToolParameter, ToolDescription, ToolRuntimeVariablePool, ToolRuntimeVariable, ToolRuntimeImageVariable
from core.tools.tool_file_manager import ToolFileManager
from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler
class Tool(BaseModel, ABC):
identity: ToolIdentity = None
parameters: Optional[List[ToolParamter]] = None
parameters: Optional[List[ToolParameter]] = None
description: ToolDescription = None
is_team_authorization: bool = False
agent_callback: Optional[DifyAgentCallbackHandler] = None
......@@ -166,22 +166,22 @@ class Tool(BaseModel, ABC):
return result
def invoke(self, user_id: str, tool_paramters: Dict[str, Any]) -> List[ToolInvokeMessage]:
# update tool_paramters
def invoke(self, user_id: str, tool_parameters: Dict[str, Any]) -> List[ToolInvokeMessage]:
# update tool_parameters
if self.runtime.runtime_parameters:
tool_paramters.update(self.runtime.runtime_parameters)
tool_parameters.update(self.runtime.runtime_parameters)
# hit callback
if self.use_callback:
self.agent_callback.on_tool_start(
tool_name=self.identity.name,
tool_inputs=tool_paramters
tool_inputs=tool_parameters
)
try:
result = self._invoke(
user_id=user_id,
tool_paramters=tool_paramters,
tool_parameters=tool_parameters,
)
except Exception as e:
if self.use_callback:
......@@ -195,7 +195,7 @@ class Tool(BaseModel, ABC):
if self.use_callback:
self.agent_callback.on_tool_end(
tool_name=self.identity.name,
tool_inputs=tool_paramters,
tool_inputs=tool_parameters,
tool_outputs=self._convert_tool_response_to_str(result)
)
......@@ -210,7 +210,7 @@ class Tool(BaseModel, ABC):
if response.type == ToolInvokeMessage.MessageType.TEXT:
result += response.message
elif response.type == ToolInvokeMessage.MessageType.LINK:
result += f"result link: {response.message}. please dirct user to check it."
result += f"result link: {response.message}. please tell user to check it."
elif response.type == ToolInvokeMessage.MessageType.IMAGE_LINK or \
response.type == ToolInvokeMessage.MessageType.IMAGE:
result += f"image has been created and sent to user already, you should tell user to check it now."
......@@ -225,7 +225,7 @@ class Tool(BaseModel, ABC):
return result
@abstractmethod
def _invoke(self, user_id: str, tool_paramters: Dict[str, Any]) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
def _invoke(self, user_id: str, tool_parameters: Dict[str, Any]) -> Union[ToolInvokeMessage, List[ToolInvokeMessage]]:
pass
def validate_credentials(self, credentials: Dict[str, Any], parameters: Dict[str, Any]) -> None:
......@@ -237,7 +237,7 @@ class Tool(BaseModel, ABC):
"""
pass
def get_runtime_parameters(self) -> List[ToolParamter]:
def get_runtime_parameters(self) -> List[ToolParameter]:
"""
get the runtime parameters
......@@ -247,11 +247,11 @@ class Tool(BaseModel, ABC):
"""
return self.parameters
def is_tool_avaliable(self) -> bool:
def is_tool_available(self) -> bool:
"""
check if the tool is avaliable
check if the tool is available
:return: if the tool is avaliable
:return: if the tool is available
"""
return True
......
......@@ -13,7 +13,7 @@ from core.tools.errors import ToolProviderNotFoundError
from core.tools.provider.api_tool_provider import ApiBasedToolProviderController
from core.tools.provider.app_tool_provider import AppBasedToolProviderEntity
from core.tools.entities.user_entities import UserToolProvider
from core.tools.utils.configration import ToolConfiguration
from core.tools.utils.configuration import ToolConfiguration
from core.tools.utils.encoder import serialize_base_model_dict
from core.tools.provider.builtin._positions import BuiltinToolProviderSort
from core.provider_manager import ProviderManager
......@@ -117,7 +117,7 @@ class ToolManager:
return tool
@staticmethod
def get_tool(provider_type: str, provider_id: str, tool_name: str, tanent_id: str = None) \
def get_tool(provider_type: str, provider_id: str, tool_name: str, tenant_id: str = None) \
-> Union[BuiltinTool, ApiTool]:
"""
get the tool
......@@ -131,9 +131,9 @@ class ToolManager:
if provider_type == 'builtin':
return ToolManager.get_builtin_tool(provider_id, tool_name)
elif provider_type == 'api':
if tanent_id is None:
raise ValueError('tanent id is required for api provider')
api_provider, _ = ToolManager.get_api_provider_controller(tanent_id, provider_id)
if tenant_id is None:
raise ValueError('tenant id is required for api provider')
api_provider, _ = ToolManager.get_api_provider_controller(tenant_id, provider_id)
return api_provider.get_tool(tool_name)
elif provider_type == 'app':
raise NotImplementedError('app provider not implemented')
......@@ -188,7 +188,7 @@ class ToolManager:
elif provider_type == 'api':
if tenant_id is None:
raise ValueError('tanent id is required for api provider')
raise ValueError('tenant id is required for api provider')
api_provider, credentials = ToolManager.get_api_provider_controller(tenant_id, provider_name)
......@@ -202,7 +202,7 @@ class ToolManager:
})
elif provider_type == 'model':
if tenant_id is None:
raise ValueError('tanent id is required for model provider')
raise ValueError('tenant id is required for model provider')
# get model provider
model_provider = ToolManager.get_model_provider(tenant_id, provider_name)
......@@ -374,7 +374,7 @@ class ToolManager:
schema = provider.get_credentials_schema()
for name, value in schema.items():
result_providers[provider.identity.name].team_credentials[name] = \
ToolProviderCredentials.CredentialsType.defaut(value.type)
ToolProviderCredentials.CredentialsType.default(value.type)
# check if the provider need credentials
if not provider.need_credentials:
......@@ -476,7 +476,7 @@ class ToolManager:
return BuiltinToolProviderSort.sort(list(result_providers.values()))
@staticmethod
def get_api_provider_controller(tanent_id: str, provider_id: str) -> Tuple[ApiBasedToolProviderController, Dict[str, Any]]:
def get_api_provider_controller(tenant_id: str, provider_id: str) -> Tuple[ApiBasedToolProviderController, Dict[str, Any]]:
"""
get the api provider
......@@ -486,7 +486,7 @@ class ToolManager:
"""
provider: ApiToolProvider = db.session.query(ApiToolProvider).filter(
ApiToolProvider.id == provider_id,
ApiToolProvider.tenant_id == tanent_id,
ApiToolProvider.tenant_id == tenant_id,
).first()
if provider is None:
......@@ -513,7 +513,7 @@ class ToolManager:
).first()
if provider is None:
raise ValueError(f'yout have not added provider {provider}')
raise ValueError(f'you have not added provider {provider}')
try:
credentials = json.loads(provider.credentials_str) or {}
......
......@@ -18,7 +18,7 @@ class ToolConfiguration(BaseModel):
def encrypt_tool_credentials(self, credentials: Dict[str, str]) -> Dict[str, str]:
"""
encrypt tool credentials with tanent id
encrypt tool credentials with tenant id
return a deep copy of credentials with encrypted values
"""
......@@ -59,7 +59,7 @@ class ToolConfiguration(BaseModel):
def decrypt_tool_credentials(self, credentials: Dict[str, str]) -> Dict[str, str]:
"""
decrypt tool credentials with tanent id
decrypt tool credentials with tenant id
return a deep copy of credentials with decrypted values
"""
......
from core.tools.entities.tool_bundle import ApiBasedToolBundle
from core.tools.entities.tool_entities import ToolParamter, ToolParamterOption, ApiProviderSchemaType
from core.tools.entities.tool_entities import ToolParameter, ToolParameterOption, ApiProviderSchemaType
from core.tools.entities.common_entities import I18nObject
from core.tools.errors import ToolProviderNotFoundError, ToolNotSupportedError, \
ToolApiSchemaError
......@@ -47,7 +47,7 @@ class ApiBasedToolSchemaParser:
parameters = []
if 'parameters' in interface['operation']:
for parameter in interface['operation']['parameters']:
parameters.append(ToolParamter(
parameters.append(ToolParameter(
name=parameter['name'],
label=I18nObject(
en_US=parameter['name'],
......@@ -57,9 +57,9 @@ class ApiBasedToolSchemaParser:
en_US=parameter.get('description', ''),
zh_Hans=parameter.get('description', '')
),
type=ToolParamter.ToolParameterType.STRING,
type=ToolParameter.ToolParameterType.STRING,
required=parameter.get('required', False),
form=ToolParamter.ToolParameterForm.LLM,
form=ToolParameter.ToolParameterForm.LLM,
llm_description=parameter.get('description'),
default=parameter['default'] if 'default' in parameter else None,
))
......@@ -87,7 +87,7 @@ class ApiBasedToolSchemaParser:
required = body_schema['required'] if 'required' in body_schema else []
properties = body_schema['properties'] if 'properties' in body_schema else {}
for name, property in properties.items():
parameters.append(ToolParamter(
parameters.append(ToolParameter(
name=name,
label=I18nObject(
en_US=name,
......@@ -97,9 +97,9 @@ class ApiBasedToolSchemaParser:
en_US=property['description'] if 'description' in property else '',
zh_Hans=property['description'] if 'description' in property else ''
),
type=ToolParamter.ToolParameterType.STRING,
type=ToolParameter.ToolParameterType.STRING,
required=name in required,
form=ToolParamter.ToolParameterForm.LLM,
form=ToolParameter.ToolParameterForm.LLM,
llm_description=property['description'] if 'description' in property else '',
default=property['default'] if 'default' in property else None,
))
......@@ -114,6 +114,10 @@ class ApiBasedToolSchemaParser:
if count > 1:
warning['duplicated_parameter'] = f'Parameter {name} is duplicated.'
# check if there is an operation id; use $path_$method as the operation id if not
if 'operationId' not in interface['operation']:
interface['operation']['operationId'] = f'{interface["path"]}_{interface["method"]}'
bundles.append(ApiBasedToolBundle(
server_url=server_url + interface['path'],
method=interface['method'],
......
......@@ -100,7 +100,7 @@ class ApiToolProvider(db.Model):
schema_type_str = db.Column(db.String(40), nullable=False)
# who created this tool
user_id = db.Column(UUID, nullable=False)
# tanent id
# tenant id
tenant_id = db.Column(UUID, nullable=False)
# description of the provider
description = db.Column(db.Text, nullable=False)
......@@ -135,7 +135,7 @@ class ApiToolProvider(db.Model):
return db.session.query(Account).filter(Account.id == self.user_id).first()
@property
def tanent(self) -> Tenant:
def tenant(self) -> Tenant:
return db.session.query(Tenant).filter(Tenant.id == self.tenant_id).first()
class ToolModelInvoke(db.Model):
......@@ -150,7 +150,7 @@ class ToolModelInvoke(db.Model):
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
# who invoke this tool
user_id = db.Column(UUID, nullable=False)
# tanent id
# tenant id
tenant_id = db.Column(UUID, nullable=False)
# provider
provider = db.Column(db.String(40), nullable=False)
......@@ -190,7 +190,7 @@ class ToolConversationVariables(db.Model):
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
# conversation user id
user_id = db.Column(UUID, nullable=False)
# tanent id
# tenant id
tenant_id = db.Column(UUID, nullable=False)
# conversation id
conversation_id = db.Column(UUID, nullable=False)
......@@ -218,7 +218,7 @@ class ToolFile(db.Model):
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
# conversation user id
user_id = db.Column(UUID, nullable=False)
# tanent id
# tenant id
tenant_id = db.Column(UUID, nullable=False)
# conversation id
conversation_id = db.Column(UUID, nullable=False)
......
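Every tool-related table above carries a tenant_id column, so reads and writes are always scoped to the calling tenant. A minimal sketch of that scoping with a trimmed stand-in model (the stub names are hypothetical, not the real schema):

from sqlalchemy import Column, String, create_engine, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class ApiToolProviderStub(Base):
    # trimmed stand-in for ApiToolProvider: only tenant scoping is shown
    __tablename__ = 'api_tool_provider_stub'
    id = Column(String, primary_key=True)
    tenant_id = Column(String, nullable=False)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([
        ApiToolProviderStub(id='p1', tenant_id='tenant-a'),
        ApiToolProviderStub(id='p2', tenant_id='tenant-b'),
    ])
    session.commit()
    # reads are always filtered by tenant_id, mirroring the service layer
    rows = session.scalars(
        select(ApiToolProviderStub).where(ApiToolProviderStub.tenant_id == 'tenant-a')
    ).all()
    assert [row.id for row in rows] == ['p1']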
coverage~=7.2.4
beautifulsoup4==4.12.2
flask~=2.3.2
Flask-SQLAlchemy~=3.0.3
flask~=3.0.1
Flask-SQLAlchemy~=3.0.5
SQLAlchemy~=1.4.28
flask-login==0.6.2
flask-migrate~=4.0.4
flask-restful==0.3.9
flask-session2==1.3.1
flask-cors==3.0.10
flask-login~=0.6.3
flask-migrate~=4.0.5
flask-restful~=0.3.10
flask-cors~=4.0.0
gunicorn~=21.2.0
gevent~=23.9.1
langchain==0.0.250
......@@ -25,7 +24,7 @@ cachetools~=5.3.0
weaviate-client~=3.21.0
mailchimp-transactional~=1.0.50
scikit-learn==1.2.2
sentry-sdk[flask]~=1.21.1
sentry-sdk[flask]~=1.39.2
sympy==1.12
jieba==0.42.1
celery==5.2.7
......@@ -48,10 +47,10 @@ dashscope[tokenizer]~=1.14.0
huggingface_hub~=0.16.4
transformers~=4.31.0
pandas==1.5.3
xinference-client~=0.6.4
xinference-client~=0.8.1
safetensors==0.3.2
zhipuai==1.0.7
werkzeug==2.3.8
werkzeug~=3.0.1
pymilvus==2.3.0
qdrant-client==1.6.4
cohere~=4.44
......
......@@ -12,7 +12,7 @@ from core.tools.provider.tool_provider import ToolProviderController
from core.tools.provider.api_tool_provider import ApiBasedToolProviderController
from core.tools.utils.parser import ApiBasedToolSchemaParser
from core.tools.utils.encoder import serialize_base_model_array, serialize_base_model_dict
from core.tools.utils.configration import ToolConfiguration
from core.tools.utils.configuration import ToolConfiguration
from core.tools.errors import ToolProviderCredentialValidationError, ToolProviderNotFoundError, ToolNotFoundError
from services.model_provider_service import ModelProviderService
......@@ -26,26 +26,26 @@ import json
class ToolManageService:
@staticmethod
def list_tool_providers(user_id: str, tanent_id: str):
def list_tool_providers(user_id: str, tenant_id: str):
"""
list tool providers
:return: the list of tool providers
"""
result = [provider.to_dict() for provider in ToolManager.user_list_providers(
user_id, tanent_id
user_id, tenant_id
)]
# add icon url prefix
for provider in result:
ToolManageService.repacket_provider(provider)
ToolManageService.repack_provider(provider)
return result
@staticmethod
def repacket_provider(provider: dict):
def repack_provider(provider: dict):
"""
repacket provider
repack provider
:param provider: the provider dict
"""
......@@ -290,7 +290,7 @@ class ToolManageService:
).first()
if provider is None:
raise ValueError(f'yout have not added provider {provider}')
raise ValueError(f'you have not added provider {provider}')
return json.loads(
serialize_base_model_array([
......@@ -341,25 +341,33 @@ class ToolManageService:
"""
update builtin tool provider
"""
# fetch the provider if it already exists
provider: BuiltinToolProvider = db.session.query(BuiltinToolProvider).filter(
BuiltinToolProvider.tenant_id == tenant_id,
BuiltinToolProvider.provider == provider_name,
).first()
try:
# get provider
provider_controller = ToolManager.get_builtin_provider(provider_name)
if not provider_controller.need_credentials:
raise ValueError(f'provider {provider_name} does not need credentials')
tool_configuration = ToolConfiguration(tenant_id=tenant_id, provider_controller=provider_controller)
# get original credentials if they exist
if provider is not None:
original_credentials = tool_configuration.decrypt_tool_credentials(provider.credentials)
masked_credentials = tool_configuration.mask_tool_credentials(original_credentials)
# if a credential is unchanged (still equals its masked form), restore the original value
for name, value in credentials.items():
if name in masked_credentials and value == masked_credentials[name]:
credentials[name] = original_credentials[name]
# validate credentials
provider_controller.validate_credentials(credentials)
# encrypt credentials
tool_configuration = ToolConfiguration(tenant_id=tenant_id, provider_controller=provider_controller)
credentials = tool_configuration.encrypt_tool_credentials(credentials)
except (ToolProviderNotFoundError, ToolNotFoundError, ToolProviderCredentialValidationError) as e:
raise ValueError(str(e))
# get if the provider exists
provider: BuiltinToolProvider = db.session.query(BuiltinToolProvider).filter(
BuiltinToolProvider.tenant_id == tenant_id,
BuiltinToolProvider.provider == provider_name,
).first()
if provider is None:
# create provider
provider = BuiltinToolProvider(
......@@ -444,7 +452,7 @@ class ToolManageService:
).first()
if provider is None:
raise ValueError(f'yout have not added provider {provider}')
raise ValueError(f'you have not added provider {provider}')
db.session.delete(provider)
db.session.commit()
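The reordered flow above fetches the stored provider before validation so masked credentials can be reconciled first: any value the user left untouched still equals its masked display form and is swapped back to the stored original. A minimal sketch of that reconciliation (the mask format is an assumption, not taken from the source):

def mask_value(value: str) -> str:
    # hypothetical mask: keep the first and last two characters
    if len(value) <= 4:
        return '*' * len(value)
    return value[:2] + '*' * (len(value) - 4) + value[-2:]

def merge_masked_credentials(submitted: dict, original: dict) -> dict:
    # a submitted value equal to its masked form was not edited,
    # so the stored original secret is kept instead
    masked = {name: mask_value(value) for name, value in original.items()}
    merged = dict(submitted)
    for name, value in submitted.items():
        if name in masked and value == masked[name]:
            merged[name] = original[name]
    return merged

# merge_masked_credentials({'api_key': 'sk*****ey'}, {'api_key': 'sk-my-key'})
# keeps 'sk-my-key' because the submitted value is still the mask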
......@@ -493,7 +501,7 @@ class ToolManageService:
).first()
if provider is None:
raise ValueError(f'yout have not added provider {provider}')
raise ValueError(f'you have not added provider {provider}')
db.session.delete(provider)
db.session.commit()
......@@ -521,10 +529,10 @@ class ToolManageService:
if schema_type not in [member.value for member in ApiProviderSchemaType]:
raise ValueError(f'invalid schema type {schema_type}')
if schema_type == ApiProviderSchemaType.OPENAPI.value:
tool_bundles = ApiBasedToolSchemaParser.parse_openapi_yaml_to_tool_bundle(schema)
else:
raise ValueError(f'invalid schema type {schema_type}')
try:
tool_bundles, _ = ApiBasedToolSchemaParser.auto_parse_to_tool_bundle(schema)
except Exception as e:
raise ValueError(f'invalid schema')
# get tool bundle
tool_bundle = next(filter(lambda tb: tb.operation_id == tool_name, tool_bundles), None)
......
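The parsing change above replaces the explicit OPENAPI-only branch with auto_parse_to_tool_bundle, which detects the schema flavor itself and returns the bundles together with the detected type. A rough sketch of such detection (assumes PyYAML is available; the real parser does considerably more than this):

import json

import yaml  # assumption: PyYAML, as commonly used for OpenAPI documents

def auto_detect_schema_type(schema: str) -> str:
    # accept JSON or YAML input, then branch on the version marker key
    try:
        loaded = json.loads(schema)
    except json.JSONDecodeError:
        loaded = yaml.safe_load(schema)
    if not isinstance(loaded, dict):
        raise ValueError('invalid schema')
    if 'openapi' in loaded:
        return 'openapi'
    if 'swagger' in loaded:
        return 'swagger'
    raise ValueError('invalid schema')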
......@@ -19,58 +19,86 @@ class MockXinferenceClass(object):
raise RuntimeError('404 Not Found')
if 'generate' == model_uid:
return RESTfulGenerateModelHandle(model_uid, base_url=self.base_url)
return RESTfulGenerateModelHandle(model_uid, base_url=self.base_url, auth_headers={})
if 'chat' == model_uid:
return RESTfulChatModelHandle(model_uid, base_url=self.base_url)
return RESTfulChatModelHandle(model_uid, base_url=self.base_url, auth_headers={})
if 'embedding' == model_uid:
return RESTfulEmbeddingModelHandle(model_uid, base_url=self.base_url)
return RESTfulEmbeddingModelHandle(model_uid, base_url=self.base_url, auth_headers={})
if 'rerank' == model_uid:
return RESTfulRerankModelHandle(model_uid, base_url=self.base_url)
return RESTfulRerankModelHandle(model_uid, base_url=self.base_url, auth_headers={})
raise RuntimeError('404 Not Found')
def get(self: Session, url: str, **kwargs):
if '/v1/models/' in url:
response = Response()
response = Response()
if 'v1/models/' in url:
# get model uid
model_uid = url.split('/')[-1]
if not re.match(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', model_uid) and \
model_uid not in ['generate', 'chat', 'embedding', 'rerank']:
response.status_code = 404
raise ConnectionError('404 Not Found')
return response
# check if url is valid
if not re.match(r'^(https?):\/\/[^\s\/$.?#].[^\s]*$', url):
response.status_code = 404
raise ConnectionError('404 Not Found')
return response
if model_uid in ['generate', 'chat']:
response.status_code = 200
response._content = b'''{
"model_type": "LLM",
"address": "127.0.0.1:43877",
"accelerators": [
"0",
"1"
],
"model_name": "chatglm3-6b",
"model_lang": [
"en"
],
"model_ability": [
"generate",
"chat"
],
"model_description": "latest chatglm3",
"model_format": "pytorch",
"model_size_in_billions": 7,
"quantization": "none",
"model_hub": "huggingface",
"revision": null,
"context_length": 2048,
"replica": 1
}'''
return response
elif model_uid == 'embedding':
response.status_code = 200
response._content = b'''{
"model_type": "embedding",
"address": "127.0.0.1:43877",
"accelerators": [
"0",
"1"
],
"model_name": "bge",
"model_lang": [
"en"
],
"revision": null,
"max_tokens": 512
}'''
return response
elif 'v1/cluster/auth' in url:
response.status_code = 200
response._content = b'''{
"model_type": "LLM",
"address": "127.0.0.1:43877",
"accelerators": [
"0",
"1"
],
"model_name": "chatglm3-6b",
"model_lang": [
"en"
],
"model_ability": [
"generate",
"chat"
],
"model_description": "latest chatglm3",
"model_format": "pytorch",
"model_size_in_billions": 7,
"quantization": "none",
"model_hub": "huggingface",
"revision": null,
"context_length": 2048,
"replica": 1
"auth": true
}'''
return response
def _check_cluster_authenticated(self):
self._cluster_authed = True
def rerank(self: RESTfulRerankModelHandle, documents: List[str], query: str, top_n: int) -> dict:
# check if self._model_uid is a valid uuid
if not re.match(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', self._model_uid) and \
......@@ -133,6 +161,7 @@ MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'
def setup_xinference_mock(request, monkeypatch: MonkeyPatch):
if MOCK:
monkeypatch.setattr(Client, 'get_model', MockXinferenceClass.get_chat_model)
monkeypatch.setattr(Client, '_check_cluster_authenticated', MockXinferenceClass._check_cluster_authenticated)
monkeypatch.setattr(Session, 'get', MockXinferenceClass.get)
monkeypatch.setattr(RESTfulEmbeddingModelHandle, 'create_embedding', MockXinferenceClass.create_embedding)
monkeypatch.setattr(RESTfulRerankModelHandle, 'rerank', MockXinferenceClass.rerank)
......
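The mock above works by monkeypatching requests.Session.get (and the client's handle lookups) so the xinference client never leaves the process. A minimal self-contained version of the same pattern (fixture name and payload are placeholders):

import json

import pytest
from requests import Response, Session

@pytest.fixture
def mock_session_get(monkeypatch: pytest.MonkeyPatch):
    def fake_get(self: Session, url: str, **kwargs):
        response = Response()
        response.status_code = 200
        # canned payload; the real mock branches on the url path instead
        response._content = json.dumps({'auth': True}).encode('utf-8')
        return response
    monkeypatch.setattr(Session, 'get', fake_get)

# any test that requests this fixture gets the canned response from every
# Session.get call, exactly as setup_xinference_mock wires things up above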
"""
LocalAI Embedding Interface is temporarily unavaliable due to
LocalAI Embedding Interface is temporarily unavailable because
we have not found a way to test it yet.
"""
\ No newline at end of file
......@@ -153,7 +153,7 @@ const NewAppDialog = ({ show, onSuccess, onClose }: NewAppDialogProps) => {
<div className={style.listItemHeading}>
<div className={style.listItemHeadingContent}>{t('app.newApp.chatApp')}</div>
</div>
<div className='shrink-0 flex items-center h-[18px] border border-indigo-300 px-1 rounded-[5px] text-xs font-medium text-indigo-600 uppercase'>{t('app.newApp.agentAssistant')}</div>
<div className='flex items-center h-[18px] border border-indigo-300 px-1 rounded-[5px] text-xs font-medium text-indigo-600 uppercase truncate'>{t('app.newApp.agentAssistant')}</div>
</div>
<div className={`${style.listItemDescription} ${style.noClip}`}>{t('app.newApp.chatAppIntro')}</div>
{/* <div className={classNames(style.listItemFooter, 'justify-end')}>
......
......@@ -132,6 +132,7 @@ const EditAnnotationModal: FC<Props> = ({
onRemove={() => {
onRemove()
setShowModal(false)
onHide()
}}
text={t('appDebug.feature.annotation.removeConfirm') as string}
/>
......
......@@ -48,7 +48,7 @@ const Popup: FC<PopupProps> = ({
>
<PortalToFollowElemTrigger onClick={() => setOpen(v => !v)}>
<div className='flex items-center px-2 max-w-[240px] h-7 bg-white rounded-lg'>
<FileIcon type={fileType} className='mr-1 w-4 h-4' />
<FileIcon type={fileType} className='shrink-0 mr-1 w-4 h-4' />
<div className='text-xs text-gray-600 truncate'>{data.documentName}</div>
</div>
</PortalToFollowElemTrigger>
......@@ -56,7 +56,7 @@ const Popup: FC<PopupProps> = ({
<div className='w-[360px] bg-gray-50 rounded-xl shadow-lg'>
<div className='px-4 pt-3 pb-2'>
<div className='flex items-center h-[18px]'>
<FileIcon type={fileType} className='mr-1 w-4 h-4' />
<FileIcon type={fileType} className='shrink-0 mr-1 w-4 h-4' />
<div className='text-xs font-medium text-gray-600 truncate'>{data.documentName}</div>
</div>
</div>
......
......@@ -43,7 +43,7 @@ const ConfigSelect: FC<IConfigSelectProps> = ({
<div className={`${s.inputWrap} relative`} key={index}>
<div className='handle flex items-center justify-center w-4 h-4 cursor-grab'>
<svg width="6" height="10" viewBox="0 0 6 10" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fillRule="evenodd" clipRule="evenodd" d="M1 2C1.55228 2 2 1.55228 2 1C2 0.447715 1.55228 0 1 0C0.447715 0 0 0.447715 0 1C0 1.55228 0.447715 2 1 2ZM1 6C1.55228 6 2 5.55228 2 5C2 4.44772 1.55228 4 1 4C0.447715 4 0 4.44772 0 5C0 5.55228 0.447715 6 1 6ZM6 1C6 1.55228 5.55228 2 5 2C4.44772 2 4 1.55228 4 1C4 0.447715 4.44772 0 5 0C5.55228 0 6 0.447715 6 1ZM5 6C5.55228 6 6 5.55228 6 5C6 4.44772 5.55228 4 5 4C4.44772 4 4 4.44772 4 5C4 5.55228 4.44772 6 5 6ZM2 9C2 9.55229 1.55228 10 1 10C0.447715 10 0 9.55229 0 9C0 8.44771 0.447715 8 1 8C1.55228 8 2 8.44771 2 9ZM5 10C5.55228 10 6 9.55229 6 9C6 8.44771 5.55228 8 5 8C4.44772 8 4 8.44771 4 9C4 9.55229 4.44772 10 5 10Z" fill="#98A2B3"/>
<path fillRule="evenodd" clipRule="evenodd" d="M1 2C1.55228 2 2 1.55228 2 1C2 0.447715 1.55228 0 1 0C0.447715 0 0 0.447715 0 1C0 1.55228 0.447715 2 1 2ZM1 6C1.55228 6 2 5.55228 2 5C2 4.44772 1.55228 4 1 4C0.447715 4 0 4.44772 0 5C0 5.55228 0.447715 6 1 6ZM6 1C6 1.55228 5.55228 2 5 2C4.44772 2 4 1.55228 4 1C4 0.447715 4.44772 0 5 0C5.55228 0 6 0.447715 6 1ZM5 6C5.55228 6 6 5.55228 6 5C6 4.44772 5.55228 4 5 4C4.44772 4 4 4.44772 4 5C4 5.55228 4.44772 6 5 6ZM2 9C2 9.55229 1.55228 10 1 10C0.447715 10 0 9.55229 0 9C0 8.44771 0.447715 8 1 8C1.55228 8 2 8.44771 2 9ZM5 10C5.55228 10 6 9.55229 6 9C6 8.44771 5.55228 8 5 8C4.44772 8 4 8.44771 4 9C4 9.55229 4.44772 10 5 10Z" fill="#98A2B3" />
</svg>
</div>
<input
......@@ -59,7 +59,7 @@ const ConfigSelect: FC<IConfigSelectProps> = ({
return item
}))
}}
className={`${s.input} w-full px-1.5 text-sm leading-9 text-gray-900 border-0 grow h-9 bg-transparent focus:outline-none cursor-pointer`}
className={'w-full pl-1.5 pr-8 text-sm leading-9 text-gray-900 border-0 grow h-9 bg-transparent focus:outline-none cursor-pointer'}
/>
<RemoveIcon
className={`${s.deleteBtn} absolute top-1/2 translate-y-[-50%] right-1.5 items-center justify-center w-6 h-6 rounded-md cursor-pointer hover:bg-[#FEE4E2]`}
......
......@@ -5,6 +5,7 @@ import { useTranslation } from 'react-i18next'
import cn from 'classnames'
import { useContext } from 'use-context-selector'
import produce from 'immer'
import { useFormattingChangedDispatcher } from '../../../debug/hooks'
import ChooseTool from './choose-tool'
import SettingBuiltInTool from './setting-built-in-tool'
import Panel from '@/app/components/app/configuration/base/feature-panel'
......@@ -27,6 +28,7 @@ const AgentTools: FC = () => {
const { t } = useTranslation()
const [isShowChooseTool, setIsShowChooseTool] = useState(false)
const { modelConfig, setModelConfig, collectionList } = useContext(ConfigContext)
const formattingChangedDispatcher = useFormattingChangedDispatcher()
const [currentTool, setCurrentTool] = useState<AgentToolWithMoreInfo>(null)
const [selectedProviderId, setSelectedProviderId] = useState<string | undefined>(undefined)
......@@ -49,6 +51,7 @@ const AgentTools: FC = () => {
})
setModelConfig(newModelConfig)
setIsShowSettingTool(false)
formattingChangedDispatcher()
}
return (
......@@ -141,6 +144,7 @@ const AgentTools: FC = () => {
draft.agentConfig.tools.splice(index, 1)
})
setModelConfig(newModelConfig)
formattingChangedDispatcher()
}}>
<Trash03 className='w-4 h-4 text-gray-500' />
</div>
......@@ -167,6 +171,7 @@ const AgentTools: FC = () => {
draft.agentConfig.tools.splice(index, 1)
})
setModelConfig(newModelConfig)
formattingChangedDispatcher()
}}>
<Trash03 className='w-4 h-4 text-gray-500' />
</div>
......@@ -183,6 +188,7 @@ const AgentTools: FC = () => {
(draft.agentConfig.tools[index] as any).enabled = enabled
})
setModelConfig(newModelConfig)
formattingChangedDispatcher()
}} />
</div>
</div>
......