Commit c8abf879 authored by takatost

refactor app
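
Rename the application-layer modules and classes: ApplicationQueueManager becomes AppQueueManager (now under core/app/app_queue_manager), the config validators move from core/apps/config_validators to core/app/validators, the assistant runners become agent runners (BaseAgentRunner, CotAgentRunner, FunctionCallAgentRunner), and feature classes such as DatasetRetrievalFeature, ExternalDataFetchFeature and OutputModerationHandler drop their Feature/Handler suffixes. Most call sites only need updated import paths and class names; a representative pair of lines from this diff:

from core.application_queue_manager import ApplicationQueueManager  # before
from core.app.app_queue_manager import AppQueueManager              # after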

parent 5a4964a4
......@@ -21,7 +21,7 @@ from controllers.console.app.error import (
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.application_queue_manager import ApplicationQueueManager
from core.app.app_queue_manager import AppQueueManager
from core.entities.application_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
......@@ -94,7 +94,7 @@ class CompletionMessageStopApi(Resource):
def post(self, app_model, task_id):
account = flask_login.current_user
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
return {'result': 'success'}, 200
......@@ -172,7 +172,7 @@ class ChatMessageStopApi(Resource):
def post(self, app_model, task_id):
account = flask_login.current_user
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
return {'result': 'success'}, 200
......
......@@ -11,7 +11,7 @@ from controllers.console.app.error import (
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.generator.llm_generator import LLMGenerator
from core.llm_generator.llm_generator import LLMGenerator
from core.model_runtime.errors.invoke import InvokeError
from libs.login import login_required
......
......@@ -21,7 +21,7 @@ from controllers.console.app.error import (
)
from controllers.console.explore.error import NotChatAppError, NotCompletionAppError
from controllers.console.explore.wraps import InstalledAppResource
from core.application_queue_manager import ApplicationQueueManager
from core.app.app_queue_manager import AppQueueManager
from core.entities.application_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
......@@ -90,7 +90,7 @@ class CompletionStopApi(InstalledAppResource):
if app_model.mode != 'completion':
raise NotCompletionAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
return {'result': 'success'}, 200
......@@ -154,7 +154,7 @@ class ChatStopApi(InstalledAppResource):
if app_model.mode != 'chat':
raise NotChatAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
return {'result': 'success'}, 200
......
......@@ -19,7 +19,7 @@ from controllers.service_api.app.error import (
ProviderQuotaExceededError,
)
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.application_queue_manager import ApplicationQueueManager
from core.app.app_queue_manager import AppQueueManager
from core.entities.application_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
......@@ -85,7 +85,7 @@ class CompletionStopApi(Resource):
if app_model.mode != 'completion':
raise AppUnavailableError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
return {'result': 'success'}, 200
......@@ -147,7 +147,7 @@ class ChatStopApi(Resource):
if app_model.mode != 'chat':
raise NotChatAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
return {'result': 'success'}, 200
......
......@@ -20,7 +20,7 @@ from controllers.web.error import (
ProviderQuotaExceededError,
)
from controllers.web.wraps import WebApiResource
from core.application_queue_manager import ApplicationQueueManager
from core.app.app_queue_manager import AppQueueManager
from core.entities.application_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
......@@ -84,7 +84,7 @@ class CompletionStopApi(WebApiResource):
if app_model.mode != 'completion':
raise NotCompletionAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
return {'result': 'success'}, 200
......@@ -144,7 +144,7 @@ class ChatStopApi(WebApiResource):
if app_model.mode != 'chat':
raise NotChatAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
return {'result': 'success'}, 200
......
......@@ -5,8 +5,8 @@ from datetime import datetime
from mimetypes import guess_extension
from typing import Optional, Union, cast
from core.app_runner.app_runner import AppRunner
from core.application_queue_manager import ApplicationQueueManager
from core.app.base_app_runner import AppRunner
from core.app.app_queue_manager import AppQueueManager
from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.entities.application_entities import (
......@@ -48,13 +48,13 @@ from models.tools import ToolConversationVariables
logger = logging.getLogger(__name__)
class BaseAssistantApplicationRunner(AppRunner):
class BaseAgentRunner(AppRunner):
def __init__(self, tenant_id: str,
application_generate_entity: ApplicationGenerateEntity,
app_orchestration_config: AppOrchestrationConfigEntity,
model_config: ModelConfigEntity,
config: AgentEntity,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
message: Message,
user_id: str,
memory: Optional[TokenBufferMemory] = None,
......
......@@ -3,9 +3,9 @@ import re
from collections.abc import Generator
from typing import Literal, Union
from core.application_queue_manager import PublishFrom
from core.app.app_queue_manager import PublishFrom
from core.entities.application_entities import AgentPromptEntity, AgentScratchpadUnit
from core.features.assistant_base_runner import BaseAssistantApplicationRunner
from core.agent.base_agent_runner import BaseAgentRunner
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
......@@ -262,7 +262,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
tool_call_args = json.loads(tool_call_args)
except json.JSONDecodeError:
pass
tool_response = tool_instance.invoke(
user_id=self.user_id,
tool_parameters=tool_call_args
......
......@@ -3,8 +3,8 @@ import logging
from collections.abc import Generator
from typing import Any, Union
from core.application_queue_manager import PublishFrom
from core.features.assistant_base_runner import BaseAssistantApplicationRunner
from core.app.app_queue_manager import PublishFrom
from core.agent.base_agent_runner import BaseAgentRunner
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
......@@ -26,7 +26,7 @@ from models.model import Conversation, Message, MessageAgentThought
logger = logging.getLogger(__name__)
class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
class FunctionCallAgentRunner(BaseAgentRunner):
def run(self, conversation: Conversation,
message: Message,
query: str,
......
from core.apps.config_validators.file_upload import FileUploadValidator
from core.apps.config_validators.moderation import ModerationValidator
from core.apps.config_validators.opening_statement import OpeningStatementValidator
from core.apps.config_validators.retriever_resource import RetrieverResourceValidator
from core.apps.config_validators.speech_to_text import SpeechToTextValidator
from core.apps.config_validators.suggested_questions import SuggestedQuestionsValidator
from core.apps.config_validators.text_to_speech import TextToSpeechValidator
from core.app.validators.file_upload import FileUploadValidator
from core.app.validators.moderation import ModerationValidator
from core.app.validators.opening_statement import OpeningStatementValidator
from core.app.validators.retriever_resource import RetrieverResourceValidator
from core.app.validators.speech_to_text import SpeechToTextValidator
from core.app.validators.suggested_questions import SuggestedQuestionsValidator
from core.app.validators.text_to_speech import TextToSpeechValidator
class AdvancedChatAppConfigValidator:
......
import logging
from typing import cast
from core.app_runner.app_runner import AppRunner
from core.application_queue_manager import ApplicationQueueManager, PublishFrom
from core.app.base_app_runner import AppRunner
from core.app.app_queue_manager import AppQueueManager, PublishFrom
from core.entities.application_entities import AgentEntity, ApplicationGenerateEntity, ModelConfigEntity
from core.features.assistant_cot_runner import AssistantCotApplicationRunner
from core.features.assistant_fc_runner import AssistantFunctionCallApplicationRunner
from core.agent.cot_agent_runner import CotAgentRunner
from core.agent.fc_agent_runner import FunctionCallAgentRunner
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMUsage
......@@ -19,12 +19,13 @@ from models.tools import ToolConversationVariables
logger = logging.getLogger(__name__)
class AssistantApplicationRunner(AppRunner):
class AgentChatAppRunner(AppRunner):
"""
Assistant Application Runner
Agent Application Runner
"""
def run(self, application_generate_entity: ApplicationGenerateEntity,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
conversation: Conversation,
message: Message) -> None:
"""
......@@ -197,7 +198,7 @@ class AssistantApplicationRunner(AppRunner):
# start agent runner
if agent_entity.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT:
assistant_cot_runner = AssistantCotApplicationRunner(
assistant_cot_runner = CotAgentRunner(
tenant_id=application_generate_entity.tenant_id,
application_generate_entity=application_generate_entity,
app_orchestration_config=app_orchestration_config,
......@@ -219,7 +220,7 @@ class AssistantApplicationRunner(AppRunner):
inputs=inputs,
)
elif agent_entity.strategy == AgentEntity.Strategy.FUNCTION_CALLING:
assistant_fc_runner = AssistantFunctionCallApplicationRunner(
assistant_fc_runner = FunctionCallAgentRunner(
tenant_id=application_generate_entity.tenant_id,
application_generate_entity=application_generate_entity,
app_orchestration_config=app_orchestration_config,
......
import uuid
from core.apps.config_validators.dataset import DatasetValidator
from core.entities.agent_entities import PlanningStrategy
from core.app.validators.dataset_retrieval import DatasetValidator
from core.app.validators.external_data_fetch import ExternalDataFetchValidator
from core.app.validators.file_upload import FileUploadValidator
from core.app.validators.model_validator import ModelValidator
from core.app.validators.moderation import ModerationValidator
from core.app.validators.opening_statement import OpeningStatementValidator
from core.app.validators.prompt import PromptValidator
from core.app.validators.retriever_resource import RetrieverResourceValidator
from core.app.validators.speech_to_text import SpeechToTextValidator
from core.app.validators.suggested_questions import SuggestedQuestionsValidator
from core.app.validators.text_to_speech import TextToSpeechValidator
from core.app.validators.user_input_form import UserInputFormValidator
from models.model import AppMode
OLD_TOOLS = ["dataset", "google_search", "web_reader", "wikipedia", "current_datetime"]
class AgentValidator:
class AgentChatAppConfigValidator:
@classmethod
def config_validate(cls, tenant_id: str, config: dict) -> dict:
"""
Validate for agent chat app model config
:param tenant_id: tenant id
:param config: app model config args
"""
app_mode = AppMode.AGENT_CHAT
related_config_keys = []
# model
config, current_related_config_keys = ModelValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
# user_input_form
config, current_related_config_keys = UserInputFormValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# external data tools validation
config, current_related_config_keys = ExternalDataFetchValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
# file upload validation
config, current_related_config_keys = FileUploadValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# prompt
config, current_related_config_keys = PromptValidator.validate_and_set_defaults(app_mode, config)
related_config_keys.extend(current_related_config_keys)
# agent_mode
config, current_related_config_keys = cls.validate_agent_mode_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
# opening_statement
config, current_related_config_keys = OpeningStatementValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# suggested_questions_after_answer
config, current_related_config_keys = SuggestedQuestionsValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# speech_to_text
config, current_related_config_keys = SpeechToTextValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# text_to_speech
config, current_related_config_keys = TextToSpeechValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# return retriever resource
config, current_related_config_keys = RetrieverResourceValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# moderation validation
config, current_related_config_keys = ModerationValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
related_config_keys = list(set(related_config_keys))
# Filter out extra parameters
filtered_config = {key: config.get(key) for key in related_config_keys}
return filtered_config
@classmethod
def validate_and_set_defaults(cls, tenant_id: str, config: dict) -> tuple[dict, list[str]]:
def validate_agent_mode_and_set_defaults(cls, tenant_id: str, config: dict) -> tuple[dict, list[str]]:
"""
Validate and set defaults for agent feature
Validate agent_mode and set defaults for agent feature
:param tenant_id: tenant ID
:param config: app model config args
......@@ -33,7 +113,8 @@ class AgentValidator:
if not config["agent_mode"].get("strategy"):
config["agent_mode"]["strategy"] = PlanningStrategy.ROUTER.value
if config["agent_mode"]["strategy"] not in [member.value for member in list(PlanningStrategy.__members__.values())]:
if config["agent_mode"]["strategy"] not in [member.value for member in
list(PlanningStrategy.__members__.values())]:
raise ValueError("strategy in agent_mode must be in the specified strategy list")
if not config["agent_mode"].get("tools"):
......
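
For reference, a minimal usage sketch of the relocated agent-chat validator; the tenant id and config dict below are placeholders, not values taken from this commit:

from core.app.agent_chat.config_validator import AgentChatAppConfigValidator

filtered_config = AgentChatAppConfigValidator.config_validate(
    tenant_id="tenant-123",        # placeholder tenant id
    config=raw_app_model_config,   # placeholder dict of app model config args
)
# only keys belonging to the validated features are kept, with defaults filled in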
This diff is collapsed.
......@@ -32,7 +32,7 @@ class PublishFrom(Enum):
TASK_PIPELINE = 2
class ApplicationQueueManager:
class AppQueueManager:
def __init__(self, task_id: str,
user_id: str,
invoke_from: InvokeFrom,
......@@ -50,7 +50,7 @@ class ApplicationQueueManager:
self._message_id = str(message_id)
user_prefix = 'account' if self._invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER] else 'end-user'
redis_client.setex(ApplicationQueueManager._generate_task_belong_cache_key(self._task_id), 1800, f"{user_prefix}-{self._user_id}")
redis_client.setex(AppQueueManager._generate_task_belong_cache_key(self._task_id), 1800, f"{user_prefix}-{self._user_id}")
q = queue.Queue()
......@@ -239,7 +239,7 @@ class ApplicationQueueManager:
Check if task is stopped
:return:
"""
stopped_cache_key = ApplicationQueueManager._generate_stopped_cache_key(self._task_id)
stopped_cache_key = AppQueueManager._generate_stopped_cache_key(self._task_id)
result = redis_client.get(stopped_cache_key)
if result is not None:
return True
......
......@@ -2,7 +2,7 @@ import time
from collections.abc import Generator
from typing import Optional, Union, cast
from core.application_queue_manager import ApplicationQueueManager, PublishFrom
from core.app.app_queue_manager import AppQueueManager, PublishFrom
from core.entities.application_entities import (
ApplicationGenerateEntity,
AppOrchestrationConfigEntity,
......@@ -11,10 +11,10 @@ from core.entities.application_entities import (
ModelConfigEntity,
PromptTemplateEntity,
)
from core.features.annotation_reply import AnnotationReplyFeature
from core.features.external_data_fetch import ExternalDataFetchFeature
from core.features.hosting_moderation import HostingModerationFeature
from core.features.moderation import ModerationFeature
from core.app.features.annotation_reply.annotation_reply import AnnotationReplyFeature
from core.external_data_tool.external_data_fetch import ExternalDataFetch
from core.app.features.hosting_moderation.hosting_moderation import HostingModerationFeature
from core.moderation.input_moderation import InputModeration
from core.file.file_obj import FileObj
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
......@@ -169,7 +169,7 @@ class AppRunner:
return prompt_messages, stop
def direct_output(self, queue_manager: ApplicationQueueManager,
def direct_output(self, queue_manager: AppQueueManager,
app_orchestration_config: AppOrchestrationConfigEntity,
prompt_messages: list,
text: str,
......@@ -210,7 +210,7 @@ class AppRunner:
)
def _handle_invoke_result(self, invoke_result: Union[LLMResult, Generator],
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
stream: bool,
agent: bool = False) -> None:
"""
......@@ -234,7 +234,7 @@ class AppRunner:
)
def _handle_invoke_result_direct(self, invoke_result: LLMResult,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
agent: bool) -> None:
"""
Handle invoke result direct
......@@ -248,7 +248,7 @@ class AppRunner:
)
def _handle_invoke_result_stream(self, invoke_result: Generator,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
agent: bool) -> None:
"""
Handle invoke result
......@@ -306,7 +306,7 @@ class AppRunner:
:param query: query
:return:
"""
moderation_feature = ModerationFeature()
moderation_feature = InputModeration()
return moderation_feature.check(
app_id=app_id,
tenant_id=tenant_id,
......@@ -316,7 +316,7 @@ class AppRunner:
)
def check_hosting_moderation(self, application_generate_entity: ApplicationGenerateEntity,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
prompt_messages: list[PromptMessage]) -> bool:
"""
Check hosting moderation
......@@ -358,7 +358,7 @@ class AppRunner:
:param query: the query
:return: the filled inputs
"""
external_data_fetch_feature = ExternalDataFetchFeature()
external_data_fetch_feature = ExternalDataFetch()
return external_data_fetch_feature.fetch(
tenant_id=tenant_id,
app_id=app_id,
......@@ -388,4 +388,4 @@ class AppRunner:
query=query,
user_id=user_id,
invoke_from=invoke_from
)
\ No newline at end of file
)
import logging
from typing import Optional
from core.app_runner.app_runner import AppRunner
from core.application_queue_manager import ApplicationQueueManager, PublishFrom
from core.app.base_app_runner import AppRunner
from core.app.app_queue_manager import AppQueueManager, PublishFrom
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.entities.application_entities import (
ApplicationGenerateEntity,
......@@ -10,7 +10,7 @@ from core.entities.application_entities import (
InvokeFrom,
ModelConfigEntity,
)
from core.features.dataset_retrieval.dataset_retrieval import DatasetRetrievalFeature
from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.moderation.base import ModerationException
......@@ -20,13 +20,13 @@ from models.model import App, AppMode, Conversation, Message
logger = logging.getLogger(__name__)
class BasicApplicationRunner(AppRunner):
class ChatAppRunner(AppRunner):
"""
Basic Application Runner
Chat Application Runner
"""
def run(self, application_generate_entity: ApplicationGenerateEntity,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
conversation: Conversation,
message: Message) -> None:
"""
......@@ -213,7 +213,7 @@ class BasicApplicationRunner(AppRunner):
def retrieve_dataset_context(self, tenant_id: str,
app_record: App,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
model_config: ModelConfigEntity,
dataset_config: DatasetEntity,
show_retrieve_source: bool,
......@@ -252,7 +252,7 @@ class BasicApplicationRunner(AppRunner):
and dataset_config.retrieve_config.query_variable):
query = inputs.get(dataset_config.retrieve_config.query_variable, "")
dataset_retrieval = DatasetRetrievalFeature()
dataset_retrieval = DatasetRetrieval()
return dataset_retrieval.retrieve(
tenant_id=tenant_id,
model_config=model_config,
......
from core.apps.config_validators.dataset import DatasetValidator
from core.apps.config_validators.external_data_tools import ExternalDataToolsValidator
from core.apps.config_validators.file_upload import FileUploadValidator
from core.apps.config_validators.model import ModelValidator
from core.apps.config_validators.moderation import ModerationValidator
from core.apps.config_validators.opening_statement import OpeningStatementValidator
from core.apps.config_validators.prompt import PromptValidator
from core.apps.config_validators.retriever_resource import RetrieverResourceValidator
from core.apps.config_validators.speech_to_text import SpeechToTextValidator
from core.apps.config_validators.suggested_questions import SuggestedQuestionsValidator
from core.apps.config_validators.text_to_speech import TextToSpeechValidator
from core.apps.config_validators.user_input_form import UserInputFormValidator
from core.app.validators.dataset_retrieval import DatasetValidator
from core.app.validators.external_data_fetch import ExternalDataFetchValidator
from core.app.validators.file_upload import FileUploadValidator
from core.app.validators.model_validator import ModelValidator
from core.app.validators.moderation import ModerationValidator
from core.app.validators.opening_statement import OpeningStatementValidator
from core.app.validators.prompt import PromptValidator
from core.app.validators.retriever_resource import RetrieverResourceValidator
from core.app.validators.speech_to_text import SpeechToTextValidator
from core.app.validators.suggested_questions import SuggestedQuestionsValidator
from core.app.validators.text_to_speech import TextToSpeechValidator
from core.app.validators.user_input_form import UserInputFormValidator
from models.model import AppMode
......@@ -35,7 +35,7 @@ class ChatAppConfigValidator:
related_config_keys.extend(current_related_config_keys)
# external data tools validation
config, current_related_config_keys = ExternalDataToolsValidator.validate_and_set_defaults(tenant_id, config)
config, current_related_config_keys = ExternalDataFetchValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
# file upload validation
......
This diff is collapsed.
from core.apps.config_validators.dataset import DatasetValidator
from core.apps.config_validators.external_data_tools import ExternalDataToolsValidator
from core.apps.config_validators.file_upload import FileUploadValidator
from core.apps.config_validators.model import ModelValidator
from core.apps.config_validators.moderation import ModerationValidator
from core.apps.config_validators.more_like_this import MoreLikeThisValidator
from core.apps.config_validators.prompt import PromptValidator
from core.apps.config_validators.text_to_speech import TextToSpeechValidator
from core.apps.config_validators.user_input_form import UserInputFormValidator
from core.app.validators.dataset_retrieval import DatasetValidator
from core.app.validators.external_data_fetch import ExternalDataFetchValidator
from core.app.validators.file_upload import FileUploadValidator
from core.app.validators.model_validator import ModelValidator
from core.app.validators.moderation import ModerationValidator
from core.app.validators.more_like_this import MoreLikeThisValidator
from core.app.validators.prompt import PromptValidator
from core.app.validators.text_to_speech import TextToSpeechValidator
from core.app.validators.user_input_form import UserInputFormValidator
from models.model import AppMode
......@@ -32,7 +32,7 @@ class CompletionAppConfigValidator:
related_config_keys.extend(current_related_config_keys)
# external data tools validation
config, current_related_config_keys = ExternalDataToolsValidator.validate_and_set_defaults(tenant_id, config)
config, current_related_config_keys = ExternalDataFetchValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
# file upload validation
......
......@@ -6,8 +6,8 @@ from typing import Optional, Union, cast
from pydantic import BaseModel
from core.app_runner.moderation_handler import ModerationRule, OutputModerationHandler
from core.application_queue_manager import ApplicationQueueManager, PublishFrom
from core.moderation.output_moderation import ModerationRule, OutputModeration
from core.app.app_queue_manager import AppQueueManager, PublishFrom
from core.entities.application_entities import ApplicationGenerateEntity, InvokeFrom
from core.entities.queue_entities import (
AnnotationReplyEvent,
......@@ -35,7 +35,7 @@ from core.model_runtime.entities.message_entities import (
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.prompt_template import PromptTemplateParser
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from core.tools.tool_file_manager import ToolFileManager
from events.message_event import message_was_created
from extensions.ext_database import db
......@@ -59,7 +59,7 @@ class GenerateTaskPipeline:
"""
def __init__(self, application_generate_entity: ApplicationGenerateEntity,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
conversation: Conversation,
message: Message) -> None:
"""
......@@ -625,7 +625,7 @@ class GenerateTaskPipeline:
return prompts
def _init_output_moderation(self) -> Optional[OutputModerationHandler]:
def _init_output_moderation(self) -> Optional[OutputModeration]:
"""
Init output moderation.
:return:
......@@ -634,7 +634,7 @@ class GenerateTaskPipeline:
sensitive_word_avoidance = app_orchestration_config_entity.sensitive_word_avoidance
if sensitive_word_avoidance:
return OutputModerationHandler(
return OutputModeration(
tenant_id=self._application_generate_entity.tenant_id,
app_id=self._application_generate_entity.app_id,
rule=ModerationRule(
......
......@@ -2,7 +2,7 @@
from core.external_data_tool.factory import ExternalDataToolFactory
class ExternalDataToolsValidator:
class ExternalDataFetchValidator:
@classmethod
def validate_and_set_defaults(cls, tenant_id: str, config: dict) -> tuple[dict, list[str]]:
"""
......
from core.apps.config_validators.file_upload import FileUploadValidator
from core.apps.config_validators.moderation import ModerationValidator
from core.apps.config_validators.text_to_speech import TextToSpeechValidator
from core.app.validators.file_upload import FileUploadValidator
from core.app.validators.moderation import ModerationValidator
from core.app.validators.text_to_speech import TextToSpeechValidator
class WorkflowAppConfigValidator:
......
from core.apps.config_validators.agent import AgentValidator
from core.apps.config_validators.external_data_tools import ExternalDataToolsValidator
from core.apps.config_validators.file_upload import FileUploadValidator
from core.apps.config_validators.model import ModelValidator
from core.apps.config_validators.moderation import ModerationValidator
from core.apps.config_validators.opening_statement import OpeningStatementValidator
from core.apps.config_validators.prompt import PromptValidator
from core.apps.config_validators.retriever_resource import RetrieverResourceValidator
from core.apps.config_validators.speech_to_text import SpeechToTextValidator
from core.apps.config_validators.suggested_questions import SuggestedQuestionsValidator
from core.apps.config_validators.text_to_speech import TextToSpeechValidator
from core.apps.config_validators.user_input_form import UserInputFormValidator
from models.model import AppMode
class AgentChatAppConfigValidator:
@classmethod
def config_validate(cls, tenant_id: str, config: dict) -> dict:
"""
Validate for agent chat app model config
:param tenant_id: tenant id
:param config: app model config args
"""
app_mode = AppMode.AGENT_CHAT
related_config_keys = []
# model
config, current_related_config_keys = ModelValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
# user_input_form
config, current_related_config_keys = UserInputFormValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# external data tools validation
config, current_related_config_keys = ExternalDataToolsValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
# file upload validation
config, current_related_config_keys = FileUploadValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# prompt
config, current_related_config_keys = PromptValidator.validate_and_set_defaults(app_mode, config)
related_config_keys.extend(current_related_config_keys)
# agent_mode
config, current_related_config_keys = AgentValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
# opening_statement
config, current_related_config_keys = OpeningStatementValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# suggested_questions_after_answer
config, current_related_config_keys = SuggestedQuestionsValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# speech_to_text
config, current_related_config_keys = SpeechToTextValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# text_to_speech
config, current_related_config_keys = TextToSpeechValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# return retriever resource
config, current_related_config_keys = RetrieverResourceValidator.validate_and_set_defaults(config)
related_config_keys.extend(current_related_config_keys)
# moderation validation
config, current_related_config_keys = ModerationValidator.validate_and_set_defaults(tenant_id, config)
related_config_keys.extend(current_related_config_keys)
related_config_keys = list(set(related_config_keys))
# Filter out extra parameters
filtered_config = {key: config.get(key) for key in related_config_keys}
return filtered_config
......@@ -7,7 +7,7 @@ from langchain.agents import openai_functions_agent, openai_functions_multi_agen
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult
from core.application_queue_manager import ApplicationQueueManager, PublishFrom
from core.app.app_queue_manager import AppQueueManager, PublishFrom
from core.callback_handler.entity.agent_loop import AgentLoop
from core.entities.application_entities import ModelConfigEntity
from core.model_runtime.entities.llm_entities import LLMResult as RuntimeLLMResult
......@@ -22,7 +22,7 @@ class AgentLoopGatherCallbackHandler(BaseCallbackHandler):
raise_error: bool = True
def __init__(self, model_config: ModelConfigEntity,
queue_manager: ApplicationQueueManager,
queue_manager: AppQueueManager,
message: Message,
message_chain: MessageChain) -> None:
"""Initialize callback handler."""
......
from core.application_queue_manager import ApplicationQueueManager, PublishFrom
from core.app.app_queue_manager import AppQueueManager, PublishFrom
from core.entities.application_entities import InvokeFrom
from core.rag.models.document import Document
from extensions.ext_database import db
......@@ -10,7 +10,7 @@ from models.model import DatasetRetrieverResource
class DatasetIndexToolCallbackHandler:
"""Callback handler for dataset tool."""
def __init__(self, queue_manager: ApplicationQueueManager,
def __init__(self, queue_manager: AppQueueManager,
app_id: str,
message_id: str,
user_id: str,
......
......@@ -11,7 +11,7 @@ from core.external_data_tool.factory import ExternalDataToolFactory
logger = logging.getLogger(__name__)
class ExternalDataFetchFeature:
class ExternalDataFetch:
def fetch(self, tenant_id: str,
app_id: str,
external_data_tools: list[ExternalDataVariableEntity],
......
......@@ -13,7 +13,7 @@ from sqlalchemy.orm.exc import ObjectDeletedError
from core.docstore.dataset_docstore import DatasetDocumentStore
from core.errors.error import ProviderTokenNotInitError
from core.generator.llm_generator import LLMGenerator
from core.llm_generator.llm_generator import LLMGenerator
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities.model_entities import ModelType, PriceType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
......
......@@ -7,10 +7,10 @@ from core.model_manager import ModelManager
from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.prompt.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
from core.prompt.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
from core.prompt.prompt_template import PromptTemplateParser
from core.prompt.prompts import CONVERSATION_TITLE_PROMPT, GENERATOR_QA_PROMPT
from core.llm_generator.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
from core.llm_generator.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from core.llm_generator.prompts import CONVERSATION_TITLE_PROMPT, GENERATOR_QA_PROMPT
class LLMGenerator:
......
......@@ -2,7 +2,7 @@ from typing import Any
from langchain.schema import BaseOutputParser, OutputParserException
from core.prompt.prompts import RULE_CONFIG_GENERATE_TEMPLATE
from core.llm_generator.prompts import RULE_CONFIG_GENERATE_TEMPLATE
from libs.json_in_md_parser import parse_and_check_json_markdown
......
......@@ -5,7 +5,7 @@ from typing import Any
from langchain.schema import BaseOutputParser
from core.model_runtime.errors.invoke import InvokeError
from core.prompt.prompts import SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT
from core.llm_generator.prompts import SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT
class SuggestedQuestionsAfterAnswerOutputParser(BaseOutputParser):
......
......@@ -7,7 +7,7 @@ from core.moderation.factory import ModerationFactory
logger = logging.getLogger(__name__)
class ModerationFeature:
class InputModeration:
def check(self, app_id: str,
tenant_id: str,
app_orchestration_config_entity: AppOrchestrationConfigEntity,
......
......@@ -6,7 +6,7 @@ from typing import Any, Optional
from flask import Flask, current_app
from pydantic import BaseModel
from core.application_queue_manager import PublishFrom
from core.app.app_queue_manager import PublishFrom
from core.moderation.base import ModerationAction, ModerationOutputsResult
from core.moderation.factory import ModerationFactory
......@@ -18,7 +18,7 @@ class ModerationRule(BaseModel):
config: dict[str, Any]
class OutputModerationHandler(BaseModel):
class OutputModeration(BaseModel):
DEFAULT_BUFFER_SIZE: int = 300
tenant_id: str
......
......@@ -15,7 +15,7 @@ from core.model_runtime.entities.message_entities import (
TextPromptMessageContent,
UserPromptMessage,
)
from core.prompt.prompt_template import PromptTemplateParser
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from core.prompt.prompt_transform import PromptTransform
from core.prompt.simple_prompt_transform import ModelMode
......
......@@ -15,7 +15,7 @@ from core.model_runtime.entities.message_entities import (
TextPromptMessageContent,
UserPromptMessage,
)
from core.prompt.prompt_template import PromptTemplateParser
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from core.prompt.prompt_transform import PromptTransform
from models.model import AppMode
......@@ -275,7 +275,7 @@ class SimplePromptTransform(PromptTransform):
return prompt_file_contents[prompt_file_name]
# Get the absolute path of the subdirectory
prompt_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'generate_prompts')
prompt_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'prompt_templates')
json_file_path = os.path.join(prompt_path, f'{prompt_file_name}.json')
# Open the JSON file and read its content
......
......@@ -9,7 +9,7 @@ import pandas as pd
from flask import Flask, current_app
from werkzeug.datastructures import FileStorage
from core.generator.llm_generator import LLMGenerator
from core.llm_generator.llm_generator import LLMGenerator
from core.rag.cleaner.clean_processor import CleanProcessor
from core.rag.datasource.retrieval_service import RetrievalService
from core.rag.datasource.vdb.vector_factory import Vector
......
......@@ -7,8 +7,8 @@ from langchain.schema.language_model import BaseLanguageModel
from core.entities.application_entities import ModelConfigEntity
from core.entities.message_entities import lc_messages_to_prompt_messages
from core.features.dataset_retrieval.agent.agent_llm_callback import AgentLLMCallback
from core.features.dataset_retrieval.agent.fake_llm import FakeLLM
from core.rag.retrieval.agent.agent_llm_callback import AgentLLMCallback
from core.rag.retrieval.agent.fake_llm import FakeLLM
from core.model_manager import ModelInstance
......
......@@ -12,7 +12,7 @@ from pydantic import root_validator
from core.entities.application_entities import ModelConfigEntity
from core.entities.message_entities import lc_messages_to_prompt_messages
from core.features.dataset_retrieval.agent.fake_llm import FakeLLM
from core.rag.retrieval.agent.fake_llm import FakeLLM
from core.model_manager import ModelInstance
from core.model_runtime.entities.message_entities import PromptMessageTool
......
......@@ -13,7 +13,7 @@ from langchain.schema import AgentAction, AgentFinish, OutputParserException
from langchain.tools import BaseTool
from core.entities.application_entities import ModelConfigEntity
from core.features.dataset_retrieval.agent.llm_chain import LLMChain
from core.rag.retrieval.agent.llm_chain import LLMChain
FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English.
......
......@@ -10,10 +10,10 @@ from pydantic import BaseModel, Extra
from core.entities.agent_entities import PlanningStrategy
from core.entities.application_entities import ModelConfigEntity
from core.entities.message_entities import prompt_messages_to_lc_messages
from core.features.dataset_retrieval.agent.agent_llm_callback import AgentLLMCallback
from core.features.dataset_retrieval.agent.multi_dataset_router_agent import MultiDatasetRouterAgent
from core.features.dataset_retrieval.agent.output_parser.structured_chat import StructuredChatOutputParser
from core.features.dataset_retrieval.agent.structed_multi_dataset_router_agent import StructuredMultiDatasetRouterAgent
from core.rag.retrieval.agent.agent_llm_callback import AgentLLMCallback
from core.rag.retrieval.agent.multi_dataset_router_agent import MultiDatasetRouterAgent
from core.rag.retrieval.agent.output_parser.structured_chat import StructuredChatOutputParser
from core.rag.retrieval.agent.structed_multi_dataset_router_agent import StructuredMultiDatasetRouterAgent
from core.helper import moderation
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.errors.invoke import InvokeError
......
......@@ -5,7 +5,7 @@ from langchain.tools import BaseTool
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.entities.agent_entities import PlanningStrategy
from core.entities.application_entities import DatasetEntity, DatasetRetrieveConfigEntity, InvokeFrom, ModelConfigEntity
from core.features.dataset_retrieval.agent_based_dataset_executor import AgentConfiguration, AgentExecutor
from core.rag.retrieval.agent_based_dataset_executor import AgentConfiguration, AgentExecutor
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.model_entities import ModelFeature
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
......@@ -15,7 +15,7 @@ from extensions.ext_database import db
from models.dataset import Dataset
class DatasetRetrievalFeature:
class DatasetRetrieval:
def retrieve(self, tenant_id: str,
model_config: ModelConfigEntity,
config: DatasetEntity,
......
......@@ -4,7 +4,7 @@ from langchain.tools import BaseTool
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.entities.application_entities import DatasetRetrieveConfigEntity, InvokeFrom
from core.features.dataset_retrieval.dataset_retrieval import DatasetRetrievalFeature
from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
from core.tools.entities.common_entities import I18nObject
from core.tools.entities.tool_entities import ToolDescription, ToolIdentity, ToolInvokeMessage, ToolParameter
from core.tools.tool.tool import Tool
......@@ -30,7 +30,7 @@ class DatasetRetrieverTool(Tool):
if retrieve_config is None:
return []
feature = DatasetRetrievalFeature()
feature = DatasetRetrieval()
# save original retrieve strategy, and set retrieve strategy to SINGLE
# Agent only support SINGLE mode
......
from core.generator.llm_generator import LLMGenerator
from core.llm_generator.llm_generator import LLMGenerator
from events.message_event import message_was_created
from extensions.ext_database import db
......
......@@ -310,22 +310,28 @@ class AppModelConfig(db.Model):
def from_model_config_dict(self, model_config: dict):
self.opening_statement = model_config['opening_statement']
self.suggested_questions = json.dumps(model_config['suggested_questions'])
self.suggested_questions_after_answer = json.dumps(model_config['suggested_questions_after_answer'])
self.suggested_questions = json.dumps(model_config['suggested_questions']) \
if model_config.get('suggested_questions') else None
self.suggested_questions_after_answer = json.dumps(model_config['suggested_questions_after_answer']) \
if model_config.get('suggested_questions_after_answer') else None
self.speech_to_text = json.dumps(model_config['speech_to_text']) \
if model_config.get('speech_to_text') else None
self.text_to_speech = json.dumps(model_config['text_to_speech']) \
if model_config.get('text_to_speech') else None
self.more_like_this = json.dumps(model_config['more_like_this'])
self.more_like_this = json.dumps(model_config['more_like_this']) \
if model_config.get('more_like_this') else None
self.sensitive_word_avoidance = json.dumps(model_config['sensitive_word_avoidance']) \
if model_config.get('sensitive_word_avoidance') else None
self.external_data_tools = json.dumps(model_config['external_data_tools']) \
if model_config.get('external_data_tools') else None
self.model = json.dumps(model_config['model'])
self.user_input_form = json.dumps(model_config['user_input_form'])
self.model = json.dumps(model_config['model']) \
if model_config.get('model') else None
self.user_input_form = json.dumps(model_config['user_input_form']) \
if model_config.get('user_input_form') else None
self.dataset_query_variable = model_config.get('dataset_query_variable')
self.pre_prompt = model_config['pre_prompt']
self.agent_mode = json.dumps(model_config['agent_mode'])
self.agent_mode = json.dumps(model_config['agent_mode']) \
if model_config.get('agent_mode') else None
self.retriever_resource = json.dumps(model_config['retriever_resource']) \
if model_config.get('retriever_resource') else None
self.prompt_type = model_config.get('prompt_type', 'simple')
......
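
With the new guards, from_model_config_dict treats most sections as optional and stores missing ones as None; a minimal sketch of the resulting behavior (field values are illustrative, and only fields visible in this hunk are shown):

config = AppModelConfig()
config.from_model_config_dict({
    'opening_statement': '',    # still read directly in this hunk, so it must be present
    'pre_prompt': '',           # likewise
    'model': {'provider': 'openai', 'name': 'gpt-4'},  # illustrative; json-encoded only because it is provided
})
# omitted optional sections such as agent_mode, more_like_this and suggested_questions are set to None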
import copy
from core.prompt.advanced_prompt_templates import (
from core.prompt.prompt_templates.advanced_prompt_templates import (
BAICHUAN_CHAT_APP_CHAT_PROMPT_CONFIG,
BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG,
BAICHUAN_COMPLETION_APP_CHAT_PROMPT_CONFIG,
......
from core.apps.app_config_validators.advanced_chat_app import AdvancedChatAppConfigValidator
from core.apps.app_config_validators.agent_chat_app import AgentChatAppConfigValidator
from core.apps.app_config_validators.chat_app import ChatAppConfigValidator
from core.apps.app_config_validators.completion_app import CompletionAppConfigValidator
from core.apps.app_config_validators.workflow_app import WorkflowAppConfigValidator
from core.app.advanced_chat.config_validator import AdvancedChatAppConfigValidator
from core.app.agent_chat.config_validator import AgentChatAppConfigValidator
from core.app.chat.config_validator import ChatAppConfigValidator
from core.app.completion.config_validator import CompletionAppConfigValidator
from core.app.workflow.config_validator import WorkflowAppConfigValidator
from models.model import AppMode
......
......@@ -4,8 +4,8 @@ from typing import Any, Union
from sqlalchemy import and_
from core.application_manager import ApplicationManager
from core.apps.config_validators.model import ModelValidator
from core.app.app_manager import AppManager
from core.app.validators.model_validator import ModelValidator
from core.entities.application_entities import InvokeFrom
from core.file.message_file_parser import MessageFileParser
from extensions.ext_database import db
......@@ -137,7 +137,7 @@ class CompletionService:
user
)
application_manager = ApplicationManager()
application_manager = AppManager()
return application_manager.generate(
tenant_id=app_model.tenant_id,
app_id=app_model.id,
......@@ -193,7 +193,7 @@ class CompletionService:
message.files, app_model_config
)
application_manager = ApplicationManager()
application_manager = AppManager()
return application_manager.generate(
tenant_id=app_model.tenant_id,
app_id=app_model.id,
......
from typing import Optional, Union
from core.generator.llm_generator import LLMGenerator
from core.llm_generator.llm_generator import LLMGenerator
from extensions.ext_database import db
from libs.infinite_scroll_pagination import InfiniteScrollPagination
from models.account import Account
......
import json
from typing import Optional, Union
from core.generator.llm_generator import LLMGenerator
from core.llm_generator.llm_generator import LLMGenerator
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
......
import json
from typing import Optional
from core.application_manager import ApplicationManager
from core.app.app_manager import AppManager
from core.entities.application_entities import (
DatasetEntity,
DatasetRetrieveConfigEntity,
......@@ -111,7 +111,7 @@ class WorkflowConverter:
new_app_mode = self._get_new_app_mode(app_model)
# convert app model config
application_manager = ApplicationManager()
application_manager = AppManager()
app_orchestration_config_entity = application_manager.convert_from_app_model_config_dict(
tenant_id=app_model.tenant_id,
app_model_config_dict=app_model_config.to_dict(),
......
......@@ -8,7 +8,7 @@ from core.file.file_obj import FileObj, FileType, FileTransferMethod
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import UserPromptMessage, AssistantPromptMessage, PromptMessageRole
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.prompt_template import PromptTemplateParser
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from models.model import Conversation
......