Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
D
dify
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
ai-tech
dify
Commits
655b34b7
Commit
655b34b7
authored
Mar 01, 2024
by
takatost
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
refactor apps
parent
e8b2cc73
Changes
108
Hide whitespace changes
Inline
Side-by-side
Showing
108 changed files
with
1985 additions
and
1664 deletions
+1985
-1664
audio.py
api/controllers/console/app/audio.py
+1
-1
completion.py
api/controllers/console/app/completion.py
+3
-3
conversation.py
api/controllers/console/app/conversation.py
+4
-4
message.py
api/controllers/console/app/message.py
+2
-2
statistic.py
api/controllers/console/app/statistic.py
+1
-1
completion.py
api/controllers/console/explore/completion.py
+1
-1
message.py
api/controllers/console/explore/message.py
+1
-1
completion.py
api/controllers/service_api/app/completion.py
+1
-1
completion.py
api/controllers/web/completion.py
+1
-1
message.py
api/controllers/web/message.py
+1
-1
base_agent_runner.py
api/core/agent/base_agent_runner.py
+22
-23
cot_agent_runner.py
api/core/agent/cot_agent_runner.py
+16
-14
entities.py
api/core/agent/entities.py
+61
-0
fc_agent_runner.py
api/core/agent/fc_agent_runner.py
+8
-6
__init__.py
api/core/app/app_config/__init__.py
+0
-0
base_app_config_manager.py
api/core/app/app_config/base_app_config_manager.py
+73
-0
__init__.py
api/core/app/app_config/common/__init__.py
+0
-0
__init__.py
...pp/app_config/common/sensitive_word_avoidance/__init__.py
+0
-0
manager.py
...app/app_config/common/sensitive_word_avoidance/manager.py
+16
-3
__init__.py
api/core/app/app_config/easy_ui_based_app/__init__.py
+0
-0
__init__.py
api/core/app/app_config/easy_ui_based_app/agent/__init__.py
+0
-0
manager.py
api/core/app/app_config/easy_ui_based_app/agent/manager.py
+79
-0
__init__.py
...core/app/app_config/easy_ui_based_app/dataset/__init__.py
+0
-0
manager.py
api/core/app/app_config/easy_ui_based_app/dataset/manager.py
+85
-2
__init__.py
...app/app_config/easy_ui_based_app/model_config/__init__.py
+0
-0
converter.py
...pp/app_config/easy_ui_based_app/model_config/converter.py
+104
-0
manager.py
.../app/app_config/easy_ui_based_app/model_config/manager.py
+33
-3
__init__.py
.../app_config/easy_ui_based_app/prompt_template/__init__.py
+0
-0
manager.py
...p/app_config/easy_ui_based_app/prompt_template/manager.py
+55
-4
__init__.py
...re/app/app_config/easy_ui_based_app/variables/__init__.py
+0
-0
manager.py
...ore/app/app_config/easy_ui_based_app/variables/manager.py
+184
-0
entities.py
api/core/app/app_config/entities.py
+34
-135
__init__.py
api/core/app/app_config/features/__init__.py
+0
-0
__init__.py
api/core/app/app_config/features/file_upload/__init__.py
+0
-0
manager.py
api/core/app/app_config/features/file_upload/manager.py
+25
-1
__init__.py
api/core/app/app_config/features/more_like_this/__init__.py
+0
-0
manager.py
api/core/app/app_config/features/more_like_this/manager.py
+14
-1
__init__.py
...ore/app/app_config/features/opening_statement/__init__.py
+0
-0
manager.py
...core/app/app_config/features/opening_statement/manager.py
+17
-1
__init__.py
...re/app/app_config/features/retrieval_resource/__init__.py
+0
-0
manager.py
...ore/app/app_config/features/retrieval_resource/manager.py
+9
-1
__init__.py
api/core/app/app_config/features/speech_to_text/__init__.py
+0
-0
manager.py
api/core/app/app_config/features/speech_to_text/manager.py
+14
-1
__init__.py
...fig/features/suggested_questions_after_answer/__init__.py
+0
-0
manager.py
...nfig/features/suggested_questions_after_answer/manager.py
+16
-2
__init__.py
api/core/app/app_config/features/text_to_speech/__init__.py
+0
-0
manager.py
api/core/app/app_config/features/text_to_speech/manager.py
+21
-1
__init__.py
api/core/app/app_config/workflow_ui_based_app/__init__.py
+0
-0
__init__.py
...pp/app_config/workflow_ui_based_app/variables/__init__.py
+0
-0
manager.py
...app/app_config/workflow_ui_based_app/variables/manager.py
+22
-0
app_manager.py
api/core/app/app_manager.py
+141
-55
app_orchestration_config_converter.py
api/core/app/app_orchestration_config_converter.py
+0
-421
app_queue_manager.py
api/core/app/app_queue_manager.py
+2
-2
__init__.py
api/core/app/apps/__init__.py
+0
-0
__init__.py
api/core/app/apps/advanced_chat/__init__.py
+0
-0
app_config_manager.py
api/core/app/apps/advanced_chat/app_config_manager.py
+94
-0
__init__.py
api/core/app/apps/agent_chat/__init__.py
+0
-0
app_config_manager.py
api/core/app/apps/agent_chat/app_config_manager.py
+86
-30
app_runner.py
api/core/app/apps/agent_chat/app_runner.py
+36
-33
base_app_runner.py
api/core/app/apps/base_app_runner.py
+16
-19
__init__.py
api/core/app/apps/chat/__init__.py
+0
-0
app_config_manager.py
api/core/app/apps/chat/app_config_manager.py
+135
-0
app_runner.py
api/core/app/apps/chat/app_runner.py
+31
-28
__init__.py
api/core/app/apps/completion/__init__.py
+0
-0
app_config_manager.py
api/core/app/apps/completion/app_config_manager.py
+118
-0
app_runner.py
api/core/app/apps/completion/app_runner.py
+28
-25
__init__.py
api/core/app/apps/workflow/__init__.py
+0
-0
app_config_manager.py
api/core/app/apps/workflow/app_config_manager.py
+71
-0
config_validator.py
api/core/app/chat/config_validator.py
+0
-82
config_validator.py
api/core/app/completion/config_validator.py
+0
-67
__init__.py
api/core/app/entities/__init__.py
+0
-0
app_invoke_entities.py
api/core/app/entities/app_invoke_entities.py
+111
-0
queue_entities.py
api/core/app/entities/queue_entities.py
+0
-0
annotation_reply.py
api/core/app/features/annotation_reply/annotation_reply.py
+1
-1
hosting_moderation.py
...ore/app/features/hosting_moderation/hosting_moderation.py
+3
-4
generate_task_pipeline.py
api/core/app/generate_task_pipeline.py
+11
-11
external_data_fetch.py
api/core/app/validators/external_data_fetch.py
+0
-39
agent_loop_gather_callback_handler.py
...re/callback_handler/agent_loop_gather_callback_handler.py
+0
-262
agent_loop.py
api/core/callback_handler/entity/agent_loop.py
+0
-23
index_tool_callback_handler.py
api/core/callback_handler/index_tool_callback_handler.py
+1
-1
external_data_fetch.py
api/core/external_data_tool/external_data_fetch.py
+1
-1
file_obj.py
api/core/file/file_obj.py
+3
-2
message_file_parser.py
api/core/file/message_file_parser.py
+17
-18
moderation.py
api/core/helper/moderation.py
+2
-2
token_buffer_memory.py
api/core/memory/token_buffer_memory.py
+23
-12
input_moderation.py
api/core/moderation/input_moderation.py
+5
-5
advanced_prompt_transform.py
api/core/prompt/advanced_prompt_transform.py
+6
-9
prompt_transform.py
api/core/prompt/prompt_transform.py
+3
-3
simple_prompt_transform.py
api/core/prompt/simple_prompt_transform.py
+6
-8
agent_llm_callback.py
api/core/rag/retrieval/agent/agent_llm_callback.py
+0
-101
llm_chain.py
api/core/rag/retrieval/agent/llm_chain.py
+2
-5
multi_dataset_router_agent.py
api/core/rag/retrieval/agent/multi_dataset_router_agent.py
+3
-3
structed_multi_dataset_router_agent.py
...ag/retrieval/agent/structed_multi_dataset_router_agent.py
+2
-2
agent_based_dataset_executor.py
api/core/rag/retrieval/agent_based_dataset_executor.py
+3
-5
dataset_retrieval.py
api/core/rag/retrieval/dataset_retrieval.py
+3
-2
dataset_retriever_tool.py
api/core/tools/tool/dataset_retriever_tool.py
+2
-1
deduct_quota_when_messaeg_created.py
...vents/event_handlers/deduct_quota_when_messaeg_created.py
+4
-4
update_provider_last_used_at_when_messaeg_created.py
...lers/update_provider_last_used_at_when_messaeg_created.py
+4
-4
model.py
api/models/model.py
+12
-0
workflow.py
api/models/workflow.py
+1
-1
app_model_config_service.py
api/services/app_model_config_service.py
+6
-6
completion_service.py
api/services/completion_service.py
+35
-112
workflow_converter.py
api/services/workflow/workflow_converter.py
+19
-27
workflow_service.py
api/services/workflow_service.py
+4
-4
test_advanced_prompt_transform.py
.../unit_tests/core/prompt/test_advanced_prompt_transform.py
+5
-5
test_prompt_transform.py
api/tests/unit_tests/core/prompt/test_prompt_transform.py
+1
-1
test_simple_prompt_transform.py
...ts/unit_tests/core/prompt/test_simple_prompt_transform.py
+3
-3
test_workflow_converter.py
...s/unit_tests/services/workflow/test_workflow_converter.py
+1
-1
No files found.
api/controllers/console/app/audio.py
View file @
655b34b7
...
@@ -37,7 +37,7 @@ class ChatMessageAudioApi(Resource):
...
@@ -37,7 +37,7 @@ class ChatMessageAudioApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
account_initialization_required
@
account_initialization_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
def
post
(
self
,
app_model
):
def
post
(
self
,
app_model
):
file
=
request
.
files
[
'file'
]
file
=
request
.
files
[
'file'
]
...
...
api/controllers/console/app/completion.py
View file @
655b34b7
...
@@ -22,7 +22,7 @@ from controllers.console.app.wraps import get_app_model
...
@@ -22,7 +22,7 @@ from controllers.console.app.wraps import get_app_model
from
controllers.console.setup
import
setup_required
from
controllers.console.setup
import
setup_required
from
controllers.console.wraps
import
account_initialization_required
from
controllers.console.wraps
import
account_initialization_required
from
core.app.app_queue_manager
import
AppQueueManager
from
core.app.app_queue_manager
import
AppQueueManager
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.model_runtime.errors.invoke
import
InvokeError
from
core.model_runtime.errors.invoke
import
InvokeError
from
libs.helper
import
uuid_value
from
libs.helper
import
uuid_value
...
@@ -103,7 +103,7 @@ class ChatMessageApi(Resource):
...
@@ -103,7 +103,7 @@ class ChatMessageApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
account_initialization_required
@
account_initialization_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
def
post
(
self
,
app_model
):
def
post
(
self
,
app_model
):
parser
=
reqparse
.
RequestParser
()
parser
=
reqparse
.
RequestParser
()
parser
.
add_argument
(
'inputs'
,
type
=
dict
,
required
=
True
,
location
=
'json'
)
parser
.
add_argument
(
'inputs'
,
type
=
dict
,
required
=
True
,
location
=
'json'
)
...
@@ -168,7 +168,7 @@ class ChatMessageStopApi(Resource):
...
@@ -168,7 +168,7 @@ class ChatMessageStopApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
account_initialization_required
@
account_initialization_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
def
post
(
self
,
app_model
,
task_id
):
def
post
(
self
,
app_model
,
task_id
):
account
=
flask_login
.
current_user
account
=
flask_login
.
current_user
...
...
api/controllers/console/app/conversation.py
View file @
655b34b7
...
@@ -112,7 +112,7 @@ class CompletionConversationDetailApi(Resource):
...
@@ -112,7 +112,7 @@ class CompletionConversationDetailApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
account_initialization_required
@
account_initialization_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
def
delete
(
self
,
app_model
,
conversation_id
):
def
delete
(
self
,
app_model
,
conversation_id
):
conversation_id
=
str
(
conversation_id
)
conversation_id
=
str
(
conversation_id
)
...
@@ -133,7 +133,7 @@ class ChatConversationApi(Resource):
...
@@ -133,7 +133,7 @@ class ChatConversationApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
account_initialization_required
@
account_initialization_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
@
marshal_with
(
conversation_with_summary_pagination_fields
)
@
marshal_with
(
conversation_with_summary_pagination_fields
)
def
get
(
self
,
app_model
):
def
get
(
self
,
app_model
):
parser
=
reqparse
.
RequestParser
()
parser
=
reqparse
.
RequestParser
()
...
@@ -218,7 +218,7 @@ class ChatConversationDetailApi(Resource):
...
@@ -218,7 +218,7 @@ class ChatConversationDetailApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
account_initialization_required
@
account_initialization_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
@
marshal_with
(
conversation_detail_fields
)
@
marshal_with
(
conversation_detail_fields
)
def
get
(
self
,
app_model
,
conversation_id
):
def
get
(
self
,
app_model
,
conversation_id
):
conversation_id
=
str
(
conversation_id
)
conversation_id
=
str
(
conversation_id
)
...
@@ -227,7 +227,7 @@ class ChatConversationDetailApi(Resource):
...
@@ -227,7 +227,7 @@ class ChatConversationDetailApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
@
account_initialization_required
@
account_initialization_required
def
delete
(
self
,
app_model
,
conversation_id
):
def
delete
(
self
,
app_model
,
conversation_id
):
conversation_id
=
str
(
conversation_id
)
conversation_id
=
str
(
conversation_id
)
...
...
api/controllers/console/app/message.py
View file @
655b34b7
...
@@ -42,7 +42,7 @@ class ChatMessageListApi(Resource):
...
@@ -42,7 +42,7 @@ class ChatMessageListApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
@
account_initialization_required
@
account_initialization_required
@
marshal_with
(
message_infinite_scroll_pagination_fields
)
@
marshal_with
(
message_infinite_scroll_pagination_fields
)
def
get
(
self
,
app_model
):
def
get
(
self
,
app_model
):
...
@@ -194,7 +194,7 @@ class MessageSuggestedQuestionApi(Resource):
...
@@ -194,7 +194,7 @@ class MessageSuggestedQuestionApi(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
account_initialization_required
@
account_initialization_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
def
get
(
self
,
app_model
,
message_id
):
def
get
(
self
,
app_model
,
message_id
):
message_id
=
str
(
message_id
)
message_id
=
str
(
message_id
)
...
...
api/controllers/console/app/statistic.py
View file @
655b34b7
...
@@ -203,7 +203,7 @@ class AverageSessionInteractionStatistic(Resource):
...
@@ -203,7 +203,7 @@ class AverageSessionInteractionStatistic(Resource):
@
setup_required
@
setup_required
@
login_required
@
login_required
@
account_initialization_required
@
account_initialization_required
@
get_app_model
(
mode
=
AppMode
.
CHAT
)
@
get_app_model
(
mode
=
[
AppMode
.
CHAT
,
AppMode
.
AGENT_CHAT
]
)
def
get
(
self
,
app_model
):
def
get
(
self
,
app_model
):
account
=
current_user
account
=
current_user
...
...
api/controllers/console/explore/completion.py
View file @
655b34b7
...
@@ -22,7 +22,7 @@ from controllers.console.app.error import (
...
@@ -22,7 +22,7 @@ from controllers.console.app.error import (
from
controllers.console.explore.error
import
NotChatAppError
,
NotCompletionAppError
from
controllers.console.explore.error
import
NotChatAppError
,
NotCompletionAppError
from
controllers.console.explore.wraps
import
InstalledAppResource
from
controllers.console.explore.wraps
import
InstalledAppResource
from
core.app.app_queue_manager
import
AppQueueManager
from
core.app.app_queue_manager
import
AppQueueManager
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.model_runtime.errors.invoke
import
InvokeError
from
core.model_runtime.errors.invoke
import
InvokeError
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
...
...
api/controllers/console/explore/message.py
View file @
655b34b7
...
@@ -24,7 +24,7 @@ from controllers.console.explore.error import (
...
@@ -24,7 +24,7 @@ from controllers.console.explore.error import (
NotCompletionAppError
,
NotCompletionAppError
,
)
)
from
controllers.console.explore.wraps
import
InstalledAppResource
from
controllers.console.explore.wraps
import
InstalledAppResource
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.model_runtime.errors.invoke
import
InvokeError
from
core.model_runtime.errors.invoke
import
InvokeError
from
fields.message_fields
import
message_infinite_scroll_pagination_fields
from
fields.message_fields
import
message_infinite_scroll_pagination_fields
...
...
api/controllers/service_api/app/completion.py
View file @
655b34b7
...
@@ -20,7 +20,7 @@ from controllers.service_api.app.error import (
...
@@ -20,7 +20,7 @@ from controllers.service_api.app.error import (
)
)
from
controllers.service_api.wraps
import
FetchUserArg
,
WhereisUserArg
,
validate_app_token
from
controllers.service_api.wraps
import
FetchUserArg
,
WhereisUserArg
,
validate_app_token
from
core.app.app_queue_manager
import
AppQueueManager
from
core.app.app_queue_manager
import
AppQueueManager
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.model_runtime.errors.invoke
import
InvokeError
from
core.model_runtime.errors.invoke
import
InvokeError
from
libs.helper
import
uuid_value
from
libs.helper
import
uuid_value
...
...
api/controllers/web/completion.py
View file @
655b34b7
...
@@ -21,7 +21,7 @@ from controllers.web.error import (
...
@@ -21,7 +21,7 @@ from controllers.web.error import (
)
)
from
controllers.web.wraps
import
WebApiResource
from
controllers.web.wraps
import
WebApiResource
from
core.app.app_queue_manager
import
AppQueueManager
from
core.app.app_queue_manager
import
AppQueueManager
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.model_runtime.errors.invoke
import
InvokeError
from
core.model_runtime.errors.invoke
import
InvokeError
from
libs.helper
import
uuid_value
from
libs.helper
import
uuid_value
...
...
api/controllers/web/message.py
View file @
655b34b7
...
@@ -21,7 +21,7 @@ from controllers.web.error import (
...
@@ -21,7 +21,7 @@ from controllers.web.error import (
ProviderQuotaExceededError
,
ProviderQuotaExceededError
,
)
)
from
controllers.web.wraps
import
WebApiResource
from
controllers.web.wraps
import
WebApiResource
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.model_runtime.errors.invoke
import
InvokeError
from
core.model_runtime.errors.invoke
import
InvokeError
from
fields.conversation_fields
import
message_file_fields
from
fields.conversation_fields
import
message_file_fields
...
...
api/core/agent/base_agent_runner.py
View file @
655b34b7
...
@@ -5,17 +5,15 @@ from datetime import datetime
...
@@ -5,17 +5,15 @@ from datetime import datetime
from
mimetypes
import
guess_extension
from
mimetypes
import
guess_extension
from
typing
import
Optional
,
Union
,
cast
from
typing
import
Optional
,
Union
,
cast
from
core.agent.entities
import
AgentEntity
,
AgentToolEntity
from
core.app.app_queue_manager
import
AppQueueManager
from
core.app.app_queue_manager
import
AppQueueManager
from
core.app.base_app_runner
import
AppRunner
from
core.app.apps.agent_chat.app_config_manager
import
AgentChatAppConfig
from
core.app.apps.base_app_runner
import
AppRunner
from
core.callback_handler.agent_tool_callback_handler
import
DifyAgentCallbackHandler
from
core.callback_handler.agent_tool_callback_handler
import
DifyAgentCallbackHandler
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.entities.application_entities
import
(
from
core.app.entities.app_invoke_entities
import
(
AgentEntity
,
EasyUIBasedAppGenerateEntity
,
AgentToolEntity
,
InvokeFrom
,
EasyUIBasedModelConfigEntity
,
ApplicationGenerateEntity
,
AppOrchestrationConfigEntity
,
InvokeFrom
,
ModelConfigEntity
,
)
)
from
core.file.message_file_parser
import
FileTransferMethod
from
core.file.message_file_parser
import
FileTransferMethod
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
...
@@ -50,9 +48,9 @@ logger = logging.getLogger(__name__)
...
@@ -50,9 +48,9 @@ logger = logging.getLogger(__name__)
class
BaseAgentRunner
(
AppRunner
):
class
BaseAgentRunner
(
AppRunner
):
def
__init__
(
self
,
tenant_id
:
str
,
def
__init__
(
self
,
tenant_id
:
str
,
application_generate_entity
:
Application
GenerateEntity
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
app_
orchestration_config
:
AppOrchestrationConfigEntity
,
app_
config
:
AgentChatAppConfig
,
model_config
:
ModelConfigEntity
,
model_config
:
EasyUIBased
ModelConfigEntity
,
config
:
AgentEntity
,
config
:
AgentEntity
,
queue_manager
:
AppQueueManager
,
queue_manager
:
AppQueueManager
,
message
:
Message
,
message
:
Message
,
...
@@ -66,7 +64,7 @@ class BaseAgentRunner(AppRunner):
...
@@ -66,7 +64,7 @@ class BaseAgentRunner(AppRunner):
"""
"""
Agent runner
Agent runner
:param tenant_id: tenant id
:param tenant_id: tenant id
:param app_
orchestration_config: app orchestration config
:param app_
config: app generate entity
:param model_config: model config
:param model_config: model config
:param config: dataset config
:param config: dataset config
:param queue_manager: queue manager
:param queue_manager: queue manager
...
@@ -78,7 +76,7 @@ class BaseAgentRunner(AppRunner):
...
@@ -78,7 +76,7 @@ class BaseAgentRunner(AppRunner):
"""
"""
self
.
tenant_id
=
tenant_id
self
.
tenant_id
=
tenant_id
self
.
application_generate_entity
=
application_generate_entity
self
.
application_generate_entity
=
application_generate_entity
self
.
app_
orchestration_config
=
app_orchestration
_config
self
.
app_
config
=
app
_config
self
.
model_config
=
model_config
self
.
model_config
=
model_config
self
.
config
=
config
self
.
config
=
config
self
.
queue_manager
=
queue_manager
self
.
queue_manager
=
queue_manager
...
@@ -97,16 +95,16 @@ class BaseAgentRunner(AppRunner):
...
@@ -97,16 +95,16 @@ class BaseAgentRunner(AppRunner):
# init dataset tools
# init dataset tools
hit_callback
=
DatasetIndexToolCallbackHandler
(
hit_callback
=
DatasetIndexToolCallbackHandler
(
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
app_id
=
self
.
app
lication_generate_entity
.
app_id
,
app_id
=
self
.
app
_config
.
app_id
,
message_id
=
message
.
id
,
message_id
=
message
.
id
,
user_id
=
user_id
,
user_id
=
user_id
,
invoke_from
=
self
.
application_generate_entity
.
invoke_from
,
invoke_from
=
self
.
application_generate_entity
.
invoke_from
,
)
)
self
.
dataset_tools
=
DatasetRetrieverTool
.
get_dataset_tools
(
self
.
dataset_tools
=
DatasetRetrieverTool
.
get_dataset_tools
(
tenant_id
=
tenant_id
,
tenant_id
=
tenant_id
,
dataset_ids
=
app_
orchestration_config
.
dataset
.
dataset_ids
if
app_orchestration
_config
.
dataset
else
[],
dataset_ids
=
app_
config
.
dataset
.
dataset_ids
if
app
_config
.
dataset
else
[],
retrieve_config
=
app_
orchestration_config
.
dataset
.
retrieve_config
if
app_orchestration
_config
.
dataset
else
None
,
retrieve_config
=
app_
config
.
dataset
.
retrieve_config
if
app
_config
.
dataset
else
None
,
return_resource
=
app_
orchestration_config
.
show_retrieve_source
,
return_resource
=
app_
config
.
additional_features
.
show_retrieve_source
,
invoke_from
=
application_generate_entity
.
invoke_from
,
invoke_from
=
application_generate_entity
.
invoke_from
,
hit_callback
=
hit_callback
hit_callback
=
hit_callback
)
)
...
@@ -123,14 +121,15 @@ class BaseAgentRunner(AppRunner):
...
@@ -123,14 +121,15 @@ class BaseAgentRunner(AppRunner):
else
:
else
:
self
.
stream_tool_call
=
False
self
.
stream_tool_call
=
False
def
_repack_app_orchestration_config
(
self
,
app_orchestration_config
:
AppOrchestrationConfigEntity
)
->
AppOrchestrationConfigEntity
:
def
_repack_app_generate_entity
(
self
,
app_generate_entity
:
EasyUIBasedAppGenerateEntity
)
\
->
EasyUIBasedAppGenerateEntity
:
"""
"""
Repack app
orchestration config
Repack app
generate entity
"""
"""
if
app_
orchestration
_config
.
prompt_template
.
simple_prompt_template
is
None
:
if
app_
generate_entity
.
app
_config
.
prompt_template
.
simple_prompt_template
is
None
:
app_
orchestration
_config
.
prompt_template
.
simple_prompt_template
=
''
app_
generate_entity
.
app
_config
.
prompt_template
.
simple_prompt_template
=
''
return
app_
orchestration_config
return
app_
generate_entity
def
_convert_tool_response_to_str
(
self
,
tool_response
:
list
[
ToolInvokeMessage
])
->
str
:
def
_convert_tool_response_to_str
(
self
,
tool_response
:
list
[
ToolInvokeMessage
])
->
str
:
"""
"""
...
@@ -156,7 +155,7 @@ class BaseAgentRunner(AppRunner):
...
@@ -156,7 +155,7 @@ class BaseAgentRunner(AppRunner):
"""
"""
tool_entity
=
ToolManager
.
get_tool_runtime
(
tool_entity
=
ToolManager
.
get_tool_runtime
(
provider_type
=
tool
.
provider_type
,
provider_name
=
tool
.
provider_id
,
tool_name
=
tool
.
tool_name
,
provider_type
=
tool
.
provider_type
,
provider_name
=
tool
.
provider_id
,
tool_name
=
tool
.
tool_name
,
tenant_id
=
self
.
app
lication_generate_entity
.
tenant_id
,
tenant_id
=
self
.
app
_config
.
tenant_id
,
agent_callback
=
self
.
agent_callback
agent_callback
=
self
.
agent_callback
)
)
tool_entity
.
load_variables
(
self
.
variables_pool
)
tool_entity
.
load_variables
(
self
.
variables_pool
)
...
...
api/core/agent/cot_agent_runner.py
View file @
655b34b7
...
@@ -5,7 +5,7 @@ from typing import Literal, Union
...
@@ -5,7 +5,7 @@ from typing import Literal, Union
from
core.agent.base_agent_runner
import
BaseAgentRunner
from
core.agent.base_agent_runner
import
BaseAgentRunner
from
core.app.app_queue_manager
import
PublishFrom
from
core.app.app_queue_manager
import
PublishFrom
from
core.
entities.application_
entities
import
AgentPromptEntity
,
AgentScratchpadUnit
from
core.
agent.
entities
import
AgentPromptEntity
,
AgentScratchpadUnit
from
core.model_runtime.entities.llm_entities
import
LLMResult
,
LLMResultChunk
,
LLMResultChunkDelta
,
LLMUsage
from
core.model_runtime.entities.llm_entities
import
LLMResult
,
LLMResultChunk
,
LLMResultChunkDelta
,
LLMUsage
from
core.model_runtime.entities.message_entities
import
(
from
core.model_runtime.entities.message_entities
import
(
AssistantPromptMessage
,
AssistantPromptMessage
,
...
@@ -36,32 +36,34 @@ class CotAgentRunner(BaseAgentRunner):
...
@@ -36,32 +36,34 @@ class CotAgentRunner(BaseAgentRunner):
"""
"""
Run Cot agent application
Run Cot agent application
"""
"""
app_
orchestration_config
=
self
.
app_orchestration_config
app_
generate_entity
=
self
.
application_generate_entity
self
.
_repack_app_
orchestration_config
(
app_orchestration_config
)
self
.
_repack_app_
generate_entity
(
app_generate_entity
)
agent_scratchpad
:
list
[
AgentScratchpadUnit
]
=
[]
agent_scratchpad
:
list
[
AgentScratchpadUnit
]
=
[]
self
.
_init_agent_scratchpad
(
agent_scratchpad
,
self
.
history_prompt_messages
)
self
.
_init_agent_scratchpad
(
agent_scratchpad
,
self
.
history_prompt_messages
)
# check model mode
# check model mode
if
self
.
app_orchestration_config
.
model_config
.
mode
==
"completion"
:
if
app_generate_entity
.
model_config
.
mode
==
"completion"
:
# TODO: stop words
# TODO: stop words
if
'Observation'
not
in
app_orchestration_config
.
model_config
.
stop
:
if
'Observation'
not
in
app_generate_entity
.
model_config
.
stop
:
app_orchestration_config
.
model_config
.
stop
.
append
(
'Observation'
)
app_generate_entity
.
model_config
.
stop
.
append
(
'Observation'
)
app_config
=
self
.
app_config
# override inputs
# override inputs
inputs
=
inputs
or
{}
inputs
=
inputs
or
{}
instruction
=
self
.
app_orchestration
_config
.
prompt_template
.
simple_prompt_template
instruction
=
app
_config
.
prompt_template
.
simple_prompt_template
instruction
=
self
.
_fill_in_inputs_from_external_data_tools
(
instruction
,
inputs
)
instruction
=
self
.
_fill_in_inputs_from_external_data_tools
(
instruction
,
inputs
)
iteration_step
=
1
iteration_step
=
1
max_iteration_steps
=
min
(
self
.
app_orchestration
_config
.
agent
.
max_iteration
,
5
)
+
1
max_iteration_steps
=
min
(
app
_config
.
agent
.
max_iteration
,
5
)
+
1
prompt_messages
=
self
.
history_prompt_messages
prompt_messages
=
self
.
history_prompt_messages
# convert tools into ModelRuntime Tool format
# convert tools into ModelRuntime Tool format
prompt_messages_tools
:
list
[
PromptMessageTool
]
=
[]
prompt_messages_tools
:
list
[
PromptMessageTool
]
=
[]
tool_instances
=
{}
tool_instances
=
{}
for
tool
in
self
.
app_orchestration_config
.
agent
.
tools
if
self
.
app_orchestration
_config
.
agent
else
[]:
for
tool
in
app_config
.
agent
.
tools
if
app
_config
.
agent
else
[]:
try
:
try
:
prompt_tool
,
tool_entity
=
self
.
_convert_tool_to_prompt_message_tool
(
tool
)
prompt_tool
,
tool_entity
=
self
.
_convert_tool_to_prompt_message_tool
(
tool
)
except
Exception
:
except
Exception
:
...
@@ -121,11 +123,11 @@ class CotAgentRunner(BaseAgentRunner):
...
@@ -121,11 +123,11 @@ class CotAgentRunner(BaseAgentRunner):
# update prompt messages
# update prompt messages
prompt_messages
=
self
.
_organize_cot_prompt_messages
(
prompt_messages
=
self
.
_organize_cot_prompt_messages
(
mode
=
app_
orchestration_config
.
model_config
.
mode
,
mode
=
app_
generate_entity
.
model_config
.
mode
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
tools
=
prompt_messages_tools
,
tools
=
prompt_messages_tools
,
agent_scratchpad
=
agent_scratchpad
,
agent_scratchpad
=
agent_scratchpad
,
agent_prompt_message
=
app_
orchestration_
config
.
agent
.
prompt
,
agent_prompt_message
=
app_config
.
agent
.
prompt
,
instruction
=
instruction
,
instruction
=
instruction
,
input
=
query
input
=
query
)
)
...
@@ -135,9 +137,9 @@ class CotAgentRunner(BaseAgentRunner):
...
@@ -135,9 +137,9 @@ class CotAgentRunner(BaseAgentRunner):
# invoke model
# invoke model
chunks
:
Generator
[
LLMResultChunk
,
None
,
None
]
=
model_instance
.
invoke_llm
(
chunks
:
Generator
[
LLMResultChunk
,
None
,
None
]
=
model_instance
.
invoke_llm
(
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
model_parameters
=
app_
orchestration_config
.
model_config
.
parameters
,
model_parameters
=
app_
generate_entity
.
model_config
.
parameters
,
tools
=
[],
tools
=
[],
stop
=
app_
orchestration_config
.
model_config
.
stop
,
stop
=
app_
generate_entity
.
model_config
.
stop
,
stream
=
True
,
stream
=
True
,
user
=
self
.
user_id
,
user
=
self
.
user_id
,
callbacks
=
[],
callbacks
=
[],
...
@@ -542,7 +544,7 @@ class CotAgentRunner(BaseAgentRunner):
...
@@ -542,7 +544,7 @@ class CotAgentRunner(BaseAgentRunner):
"""
"""
convert agent scratchpad list to str
convert agent scratchpad list to str
"""
"""
next_iteration
=
self
.
app_
orchestration_
config
.
agent
.
prompt
.
next_iteration
next_iteration
=
self
.
app_config
.
agent
.
prompt
.
next_iteration
result
=
''
result
=
''
for
scratchpad
in
agent_scratchpad
:
for
scratchpad
in
agent_scratchpad
:
...
...
api/core/agent/entities.py
0 → 100644
View file @
655b34b7
from
enum
import
Enum
from
typing
import
Literal
,
Any
,
Union
,
Optional
from
pydantic
import
BaseModel
class AgentToolEntity(BaseModel):
    """
    Agent Tool Entity.

    Identifies one tool an agent may invoke: its source, its provider,
    its name, and preset parameter values.
    """
    # tool source: "builtin" (shipped with the platform) or "api" (user-defined)
    provider_type: Literal["builtin", "api"]
    # identifier of the tool provider
    provider_id: str
    # name of the tool within that provider
    tool_name: str
    # preset parameters merged into each invocation (pydantic copies the default)
    tool_parameters: dict[str, Any] = {}
class AgentPromptEntity(BaseModel):
    """
    Agent Prompt Entity.

    ReAct-style prompt pair: one prompt for the first reasoning step and
    one template for each subsequent iteration.
    """
    # prompt used for the first model call
    first_prompt: str
    # prompt template used for every following iteration
    next_iteration: str
class AgentScratchpadUnit(BaseModel):
    """
    Agent Scratchpad Unit.

    One step of an agent's chain-of-thought: the model response, the
    parsed thought/action, and the resulting observation.
    """
    class Action(BaseModel):
        """
        Action Entity.

        A parsed tool invocation: the action name and its input payload.
        """
        action_name: str
        # either a parsed dict or the raw string (presumably when parsing failed)
        action_input: Union[dict, str]

    # model response text for this step, if any
    agent_response: Optional[str] = None
    # parsed "thought" portion of the response
    thought: Optional[str] = None
    # raw "action" portion of the response, before parsing
    action_str: Optional[str] = None
    # output observed after executing the action (e.g. tool result)
    observation: Optional[str] = None
    # parsed action, when action_str could be parsed
    action: Optional[Action] = None
class AgentEntity(BaseModel):
    """
    Agent Entity.

    Configuration for one agent run: the model, the reasoning strategy,
    the optional ReAct prompt pair, the tool list, and the iteration cap.
    """
    class Strategy(Enum):
        """
        Agent Strategy.
        """
        CHAIN_OF_THOUGHT = 'chain-of-thought'
        FUNCTION_CALLING = 'function-calling'

    # model provider name
    provider: str
    # model name
    model: str
    strategy: Strategy
    prompt: Optional[AgentPromptEntity] = None
    # FIX: was annotated `list[AgentToolEntity] = None` — a non-optional list
    # with a None default. Declare it Optional so the annotation matches the
    # default; None remains the default, so callers are unaffected.
    tools: Optional[list[AgentToolEntity]] = None
    # hard cap on reasoning iterations (callers clamp further, see runners)
    max_iteration: int = 5
api/core/agent/fc_agent_runner.py
View file @
655b34b7
...
@@ -34,9 +34,11 @@ class FunctionCallAgentRunner(BaseAgentRunner):
...
@@ -34,9 +34,11 @@ class FunctionCallAgentRunner(BaseAgentRunner):
"""
"""
Run FunctionCall agent application
Run FunctionCall agent application
"""
"""
app_
orchestration_config
=
self
.
app_orchestration_config
app_
generate_entity
=
self
.
application_generate_entity
prompt_template
=
self
.
app_orchestration_config
.
prompt_template
.
simple_prompt_template
or
''
app_config
=
self
.
app_config
prompt_template
=
app_config
.
prompt_template
.
simple_prompt_template
or
''
prompt_messages
=
self
.
history_prompt_messages
prompt_messages
=
self
.
history_prompt_messages
prompt_messages
=
self
.
organize_prompt_messages
(
prompt_messages
=
self
.
organize_prompt_messages
(
prompt_template
=
prompt_template
,
prompt_template
=
prompt_template
,
...
@@ -47,7 +49,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
...
@@ -47,7 +49,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
# convert tools into ModelRuntime Tool format
# convert tools into ModelRuntime Tool format
prompt_messages_tools
:
list
[
PromptMessageTool
]
=
[]
prompt_messages_tools
:
list
[
PromptMessageTool
]
=
[]
tool_instances
=
{}
tool_instances
=
{}
for
tool
in
self
.
app_orchestration_config
.
agent
.
tools
if
self
.
app_orchestration
_config
.
agent
else
[]:
for
tool
in
app_config
.
agent
.
tools
if
app
_config
.
agent
else
[]:
try
:
try
:
prompt_tool
,
tool_entity
=
self
.
_convert_tool_to_prompt_message_tool
(
tool
)
prompt_tool
,
tool_entity
=
self
.
_convert_tool_to_prompt_message_tool
(
tool
)
except
Exception
:
except
Exception
:
...
@@ -67,7 +69,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
...
@@ -67,7 +69,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
tool_instances
[
dataset_tool
.
identity
.
name
]
=
dataset_tool
tool_instances
[
dataset_tool
.
identity
.
name
]
=
dataset_tool
iteration_step
=
1
iteration_step
=
1
max_iteration_steps
=
min
(
app_
orchestration_
config
.
agent
.
max_iteration
,
5
)
+
1
max_iteration_steps
=
min
(
app_config
.
agent
.
max_iteration
,
5
)
+
1
# continue to run until there is not any tool call
# continue to run until there is not any tool call
function_call_state
=
True
function_call_state
=
True
...
@@ -110,9 +112,9 @@ class FunctionCallAgentRunner(BaseAgentRunner):
...
@@ -110,9 +112,9 @@ class FunctionCallAgentRunner(BaseAgentRunner):
# invoke model
# invoke model
chunks
:
Union
[
Generator
[
LLMResultChunk
,
None
,
None
],
LLMResult
]
=
model_instance
.
invoke_llm
(
chunks
:
Union
[
Generator
[
LLMResultChunk
,
None
,
None
],
LLMResult
]
=
model_instance
.
invoke_llm
(
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
model_parameters
=
app_
orchestration_config
.
model_config
.
parameters
,
model_parameters
=
app_
generate_entity
.
model_config
.
parameters
,
tools
=
prompt_messages_tools
,
tools
=
prompt_messages_tools
,
stop
=
app_
orchestration_config
.
model_config
.
stop
,
stop
=
app_
generate_entity
.
model_config
.
stop
,
stream
=
self
.
stream_tool_call
,
stream
=
self
.
stream_tool_call
,
user
=
self
.
user_id
,
user
=
self
.
user_id
,
callbacks
=
[],
callbacks
=
[],
...
...
api/core/app/a
dvanced_chat
/__init__.py
→
api/core/app/a
pp_config
/__init__.py
View file @
655b34b7
File moved
api/core/app/app_config/base_app_config_manager.py
0 → 100644
View file @
655b34b7
from
typing
import
Union
,
Optional
from
core.app.app_config.entities
import
AppAdditionalFeatures
,
EasyUIBasedAppModelConfigFrom
from
core.app.app_config.features.file_upload.manager
import
FileUploadConfigManager
from
core.app.app_config.features.more_like_this.manager
import
MoreLikeThisConfigManager
from
core.app.app_config.features.opening_statement.manager
import
OpeningStatementConfigManager
from
core.app.app_config.features.retrieval_resource.manager
import
RetrievalResourceConfigManager
from
core.app.app_config.features.speech_to_text.manager
import
SpeechToTextConfigManager
from
core.app.app_config.features.suggested_questions_after_answer.manager
import
\
SuggestedQuestionsAfterAnswerConfigManager
from
core.app.app_config.features.text_to_speech.manager
import
TextToSpeechConfigManager
from
models.model
import
AppModelConfig
class BaseAppConfigManager:
    """Shared helpers for converting stored app model configs into entities."""

    @classmethod
    def convert_to_config_dict(cls, config_from: EasyUIBasedAppModelConfigFrom,
                               app_model_config: Union[AppModelConfig, dict],
                               config_dict: Optional[dict] = None) -> dict:
        """
        Convert app model config to config dict.

        :param config_from: where the app model config comes from
        :param app_model_config: app model config (ORM object or dict)
        :param config_dict: pre-built config dict, returned as-is when the
            config comes from request args
        :return: config dict
        """
        if config_from == EasyUIBasedAppModelConfigFrom.ARGS:
            # caller already supplied the dict; use it unchanged
            return config_dict
        # otherwise rebuild the dict from the stored model config
        return app_model_config.to_dict().copy()

    @classmethod
    def convert_features(cls, config_dict: dict) -> AppAdditionalFeatures:
        """
        Convert the feature sections of an app config dict into an
        AppAdditionalFeatures entity.

        :param config_dict: app config dict
        """
        # work on a copy so per-feature converters cannot mutate the caller's dict
        source = config_dict.copy()

        features = AppAdditionalFeatures()
        features.show_retrieve_source = RetrievalResourceConfigManager.convert(config=source)
        features.file_upload = FileUploadConfigManager.convert(config=source)

        opening, suggested = OpeningStatementConfigManager.convert(config=source)
        features.opening_statement = opening
        features.suggested_questions = suggested

        features.suggested_questions_after_answer = \
            SuggestedQuestionsAfterAnswerConfigManager.convert(config=source)
        features.more_like_this = MoreLikeThisConfigManager.convert(config=source)
        features.speech_to_text = SpeechToTextConfigManager.convert(config=source)
        features.text_to_speech = TextToSpeechConfigManager.convert(config=source)

        return features
api/core/app/a
gent_chat
/__init__.py
→
api/core/app/a
pp_config/common
/__init__.py
View file @
655b34b7
File moved
api/core/app/
chat
/__init__.py
→
api/core/app/
app_config/common/sensitive_word_avoidance
/__init__.py
View file @
655b34b7
File moved
api/core/app/
validators/moderation
.py
→
api/core/app/
app_config/common/sensitive_word_avoidance/manager
.py
View file @
655b34b7
import
logging
from
typing
import
Optional
from
core.app.app_config.entities
import
SensitiveWordAvoidanceEntity
from
core.moderation.factory
import
ModerationFactory
from
core.moderation.factory
import
ModerationFactory
logger
=
logging
.
getLogger
(
__name__
)
class
SensitiveWordAvoidanceConfigManager
:
@
classmethod
def
convert
(
cls
,
config
:
dict
)
->
Optional
[
SensitiveWordAvoidanceEntity
]:
sensitive_word_avoidance_dict
=
config
.
get
(
'sensitive_word_avoidance'
)
if
not
sensitive_word_avoidance_dict
:
return
None
if
'enabled'
in
sensitive_word_avoidance_dict
and
sensitive_word_avoidance_dict
[
'enabled'
]:
return
SensitiveWordAvoidanceEntity
(
type
=
sensitive_word_avoidance_dict
.
get
(
'type'
),
config
=
sensitive_word_avoidance_dict
.
get
(
'config'
),
)
else
:
return
None
class
ModerationValidator
:
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
tenant_id
,
config
:
dict
,
only_structure_validate
:
bool
=
False
)
\
def
validate_and_set_defaults
(
cls
,
tenant_id
,
config
:
dict
,
only_structure_validate
:
bool
=
False
)
\
->
tuple
[
dict
,
list
[
str
]]:
->
tuple
[
dict
,
list
[
str
]]:
...
...
api/core/app/
completion
/__init__.py
→
api/core/app/
app_config/easy_ui_based_app
/__init__.py
View file @
655b34b7
File moved
api/core/app/
validators
/__init__.py
→
api/core/app/
app_config/easy_ui_based_app/agent
/__init__.py
View file @
655b34b7
File moved
api/core/app/app_config/easy_ui_based_app/agent/manager.py
0 → 100644
View file @
655b34b7
from
typing
import
Optional
from
core.agent.entities
import
AgentEntity
,
AgentPromptEntity
,
AgentToolEntity
from
core.tools.prompt.template
import
REACT_PROMPT_TEMPLATES
class
AgentConfigManager
:
@
classmethod
def
convert
(
cls
,
config
:
dict
)
->
Optional
[
AgentEntity
]:
"""
Convert model config to model config
:param config: model config args
"""
if
'agent_mode'
in
config
and
config
[
'agent_mode'
]
\
and
'enabled'
in
config
[
'agent_mode'
]
\
and
config
[
'agent_mode'
][
'enabled'
]:
agent_dict
=
config
.
get
(
'agent_mode'
,
{})
agent_strategy
=
agent_dict
.
get
(
'strategy'
,
'cot'
)
if
agent_strategy
==
'function_call'
:
strategy
=
AgentEntity
.
Strategy
.
FUNCTION_CALLING
elif
agent_strategy
==
'cot'
or
agent_strategy
==
'react'
:
strategy
=
AgentEntity
.
Strategy
.
CHAIN_OF_THOUGHT
else
:
# old configs, try to detect default strategy
if
config
[
'model'
][
'provider'
]
==
'openai'
:
strategy
=
AgentEntity
.
Strategy
.
FUNCTION_CALLING
else
:
strategy
=
AgentEntity
.
Strategy
.
CHAIN_OF_THOUGHT
agent_tools
=
[]
for
tool
in
agent_dict
.
get
(
'tools'
,
[]):
keys
=
tool
.
keys
()
if
len
(
keys
)
>=
4
:
if
"enabled"
not
in
tool
or
not
tool
[
"enabled"
]:
continue
agent_tool_properties
=
{
'provider_type'
:
tool
[
'provider_type'
],
'provider_id'
:
tool
[
'provider_id'
],
'tool_name'
:
tool
[
'tool_name'
],
'tool_parameters'
:
tool
[
'tool_parameters'
]
if
'tool_parameters'
in
tool
else
{}
}
agent_tools
.
append
(
AgentToolEntity
(
**
agent_tool_properties
))
if
'strategy'
in
config
[
'agent_mode'
]
and
\
config
[
'agent_mode'
][
'strategy'
]
not
in
[
'react_router'
,
'router'
]:
agent_prompt
=
agent_dict
.
get
(
'prompt'
,
None
)
or
{}
# check model mode
model_mode
=
config
.
get
(
'model'
,
{})
.
get
(
'mode'
,
'completion'
)
if
model_mode
==
'completion'
:
agent_prompt_entity
=
AgentPromptEntity
(
first_prompt
=
agent_prompt
.
get
(
'first_prompt'
,
REACT_PROMPT_TEMPLATES
[
'english'
][
'completion'
][
'prompt'
]),
next_iteration
=
agent_prompt
.
get
(
'next_iteration'
,
REACT_PROMPT_TEMPLATES
[
'english'
][
'completion'
][
'agent_scratchpad'
]),
)
else
:
agent_prompt_entity
=
AgentPromptEntity
(
first_prompt
=
agent_prompt
.
get
(
'first_prompt'
,
REACT_PROMPT_TEMPLATES
[
'english'
][
'chat'
][
'prompt'
]),
next_iteration
=
agent_prompt
.
get
(
'next_iteration'
,
REACT_PROMPT_TEMPLATES
[
'english'
][
'chat'
][
'agent_scratchpad'
]),
)
return
AgentEntity
(
provider
=
config
[
'model'
][
'provider'
],
model
=
config
[
'model'
][
'name'
],
strategy
=
strategy
,
prompt
=
agent_prompt_entity
,
tools
=
agent_tools
,
max_iteration
=
agent_dict
.
get
(
'max_iteration'
,
5
)
)
return
None
api/core/app/
workflow
/__init__.py
→
api/core/app/
app_config/easy_ui_based_app/dataset
/__init__.py
View file @
655b34b7
File moved
api/core/app/
validators/dataset_retrieval
.py
→
api/core/app/
app_config/easy_ui_based_app/dataset/manager
.py
View file @
655b34b7
import
uuid
from
typing
import
Optional
from
core.app.app_config.entities
import
DatasetEntity
,
DatasetRetrieveConfigEntity
from
core.entities.agent_entities
import
PlanningStrategy
from
core.entities.agent_entities
import
PlanningStrategy
from
models.model
import
AppMode
from
models.model
import
AppMode
from
services.dataset_service
import
DatasetService
from
services.dataset_service
import
DatasetService
class
DatasetValidator
:
class
DatasetConfigManager
:
    @classmethod
    def convert(cls, config: dict) -> Optional[DatasetEntity]:
        """
        Convert the dataset-related sections of an app config dict into a
        DatasetEntity.

        Dataset ids are collected from two places: the new-style
        `dataset_configs.datasets` list, and old-style single-key `dataset`
        tool entries under `agent_mode.tools`.

        :param config: model config args
        :return: DatasetEntity, or None when no enabled dataset is configured
        """
        dataset_ids = []
        # new-style dataset list under dataset_configs
        if 'datasets' in config.get('dataset_configs', {}):
            datasets = config.get('dataset_configs', {}).get('datasets', {
                'strategy': 'router',
                'datasets': []
            })

            for dataset in datasets.get('datasets', []):
                keys = list(dataset.keys())
                # each entry must be wrapped as {'dataset': {...}}
                if len(keys) == 0 or keys[0] != 'dataset':
                    continue

                dataset = dataset['dataset']

                if 'enabled' not in dataset or not dataset['enabled']:
                    continue

                dataset_id = dataset.get('id', None)
                if dataset_id:
                    dataset_ids.append(dataset_id)

        # old-style dataset tools under agent_mode
        if 'agent_mode' in config and config['agent_mode'] \
                and 'enabled' in config['agent_mode'] \
                and config['agent_mode']['enabled']:
            agent_dict = config.get('agent_mode', {})

            for tool in agent_dict.get('tools', []):
                keys = tool.keys()
                if len(keys) == 1:
                    # old standard
                    key = list(tool.keys())[0]
                    if key != 'dataset':
                        continue

                    tool_item = tool[key]

                    if "enabled" not in tool_item or not tool_item["enabled"]:
                        continue

                    dataset_id = tool_item['id']
                    dataset_ids.append(dataset_id)

        if len(dataset_ids) == 0:
            return None

        # dataset configs
        dataset_configs = config.get('dataset_configs', {'retrieval_model': 'single'})
        query_variable = config.get('dataset_query_variable')

        if dataset_configs['retrieval_model'] == 'single':
            return DatasetEntity(
                dataset_ids=dataset_ids,
                retrieve_config=DatasetRetrieveConfigEntity(
                    query_variable=query_variable,
                    retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.value_of(
                        dataset_configs['retrieval_model']
                    )
                )
            )
        else:
            # multi-dataset retrieval carries extra ranking knobs
            return DatasetEntity(
                dataset_ids=dataset_ids,
                retrieve_config=DatasetRetrieveConfigEntity(
                    query_variable=query_variable,
                    retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.value_of(
                        dataset_configs['retrieval_model']
                    ),
                    top_k=dataset_configs.get('top_k'),
                    score_threshold=dataset_configs.get('score_threshold'),
                    reranking_model=dataset_configs.get('reranking_model')
                )
            )
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
tenant_id
:
str
,
app_mode
:
AppMode
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
tenant_id
:
str
,
app_mode
:
AppMode
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
...
api/core/app/app_config/easy_ui_based_app/model_config/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/app_config/easy_ui_based_app/model_config/converter.py
0 → 100644
View file @
655b34b7
from
typing
import
cast
from
core.app.app_config.entities
import
EasyUIBasedAppConfig
from
core.app.entities.app_invoke_entities
import
EasyUIBasedModelConfigEntity
from
core.entities.model_entities
import
ModelStatus
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.model_runtime.entities.model_entities
import
ModelType
from
core.model_runtime.model_providers.__base.large_language_model
import
LargeLanguageModel
from
core.provider_manager
import
ProviderManager
class EasyUIBasedModelConfigEntityConverter:
    """Converts an EasyUI app config's model section into a runtime model config entity."""

    @classmethod
    def convert(cls, app_config: EasyUIBasedAppConfig, skip_check: bool = False) \
            -> EasyUIBasedModelConfigEntity:
        """
        Convert app model config dict to entity.

        :param app_config: app config
        :param skip_check: when True, tolerate missing credentials/model
        :raises ProviderTokenNotInitError: provider token not init error
        :return: app orchestration config entity
        """
        model_config = app_config.model

        provider_manager = ProviderManager()
        provider_model_bundle = provider_manager.get_provider_model_bundle(
            tenant_id=app_config.tenant_id,
            provider=model_config.provider,
            model_type=ModelType.LLM
        )

        provider_name = provider_model_bundle.configuration.provider.provider
        model_name = model_config.model

        model_type_instance = cast(LargeLanguageModel, provider_model_bundle.model_type_instance)

        # check model credentials
        model_credentials = provider_model_bundle.configuration.get_current_credentials(
            model_type=ModelType.LLM,
            model=model_config.model
        )

        if model_credentials is None:
            if not skip_check:
                raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.")
            model_credentials = {}

        if not skip_check:
            # check the model is configured, permitted and within quota
            provider_model = provider_model_bundle.configuration.get_provider_model(
                model=model_config.model,
                model_type=ModelType.LLM
            )

            if provider_model is None:
                model_name = model_config.model
                raise ValueError(f"Model {model_name} not exist.")

            if provider_model.status == ModelStatus.NO_CONFIGURE:
                raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.")
            elif provider_model.status == ModelStatus.NO_PERMISSION:
                raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} currently not support.")
            elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
                raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model parameters
        # NOTE(review): 'stop' is popped from the shared parameters dict,
        # mutating app_config.model.parameters in place — presumably
        # intentional upstream; confirm before changing.
        completion_params = model_config.parameters
        stop = completion_params.pop('stop') if 'stop' in completion_params else []

        # resolve model mode, falling back to the schema-reported mode
        model_mode = model_config.mode
        if not model_mode:
            model_mode = model_type_instance.get_model_mode(
                model=model_config.model,
                credentials=model_credentials
            ).value

        model_schema = model_type_instance.get_model_schema(model_config.model, model_credentials)

        if not skip_check and not model_schema:
            raise ValueError(f"Model {model_name} not exist.")

        return EasyUIBasedModelConfigEntity(
            provider=model_config.provider,
            model=model_config.model,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )
api/core/app/
validators/model_validato
r.py
→
api/core/app/
app_config/easy_ui_based_app/model_config/manage
r.py
View file @
655b34b7
from
core.app.app_config.entities
import
ModelConfigEntity
from
core.model_runtime.entities.model_entities
import
Model
PropertyKey
,
ModelType
from
core.model_runtime.entities.model_entities
import
Model
Type
,
ModelPropertyKey
from
core.model_runtime.model_providers
import
model_provider_factory
from
core.model_runtime.model_providers
import
model_provider_factory
from
core.provider_manager
import
ProviderManager
from
core.provider_manager
import
ProviderManager
class
ModelValidator
:
class
ModelConfigManager
:
@
classmethod
def
convert
(
cls
,
config
:
dict
)
->
ModelConfigEntity
:
"""
Convert model config to model config
:param config: model config args
"""
# model config
model_config
=
config
.
get
(
'model'
)
if
not
model_config
:
raise
ValueError
(
"model is required"
)
completion_params
=
model_config
.
get
(
'completion_params'
)
stop
=
[]
if
'stop'
in
completion_params
:
stop
=
completion_params
[
'stop'
]
del
completion_params
[
'stop'
]
# get model mode
model_mode
=
model_config
.
get
(
'mode'
)
return
ModelConfigEntity
(
provider
=
config
[
'model'
][
'provider'
],
model
=
config
[
'model'
][
'name'
],
mode
=
model_mode
,
parameters
=
completion_params
,
stop
=
stop
,
)
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
...
api/core/app/app_config/easy_ui_based_app/prompt_template/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/prompt
.py
→
api/core/app/
app_config/easy_ui_based_app/prompt_template/manager
.py
View file @
655b34b7
from
core.app.app_config.entities
import
PromptTemplateEntity
,
\
from
core.entities.application_entities
import
PromptTemplateEntity
AdvancedChatPromptTemplateEntity
,
AdvancedCompletionPromptTemplateEntity
from
core.model_runtime.entities.message_entities
import
PromptMessageRole
from
core.prompt.simple_prompt_transform
import
ModelMode
from
core.prompt.simple_prompt_transform
import
ModelMode
from
models.model
import
AppMode
from
models.model
import
AppMode
class
PromptValidator
:
class
PromptTemplateConfigManager
:
@
classmethod
def
convert
(
cls
,
config
:
dict
)
->
PromptTemplateEntity
:
if
not
config
.
get
(
"prompt_type"
):
raise
ValueError
(
"prompt_type is required"
)
prompt_type
=
PromptTemplateEntity
.
PromptType
.
value_of
(
config
[
'prompt_type'
])
if
prompt_type
==
PromptTemplateEntity
.
PromptType
.
SIMPLE
:
simple_prompt_template
=
config
.
get
(
"pre_prompt"
,
""
)
return
PromptTemplateEntity
(
prompt_type
=
prompt_type
,
simple_prompt_template
=
simple_prompt_template
)
else
:
advanced_chat_prompt_template
=
None
chat_prompt_config
=
config
.
get
(
"chat_prompt_config"
,
{})
if
chat_prompt_config
:
chat_prompt_messages
=
[]
for
message
in
chat_prompt_config
.
get
(
"prompt"
,
[]):
chat_prompt_messages
.
append
({
"text"
:
message
[
"text"
],
"role"
:
PromptMessageRole
.
value_of
(
message
[
"role"
])
})
advanced_chat_prompt_template
=
AdvancedChatPromptTemplateEntity
(
messages
=
chat_prompt_messages
)
advanced_completion_prompt_template
=
None
completion_prompt_config
=
config
.
get
(
"completion_prompt_config"
,
{})
if
completion_prompt_config
:
completion_prompt_template_params
=
{
'prompt'
:
completion_prompt_config
[
'prompt'
][
'text'
],
}
if
'conversation_histories_role'
in
completion_prompt_config
:
completion_prompt_template_params
[
'role_prefix'
]
=
{
'user'
:
completion_prompt_config
[
'conversation_histories_role'
][
'user_prefix'
],
'assistant'
:
completion_prompt_config
[
'conversation_histories_role'
][
'assistant_prefix'
]
}
advanced_completion_prompt_template
=
AdvancedCompletionPromptTemplateEntity
(
**
completion_prompt_template_params
)
return
PromptTemplateEntity
(
prompt_type
=
prompt_type
,
advanced_chat_prompt_template
=
advanced_chat_prompt_template
,
advanced_completion_prompt_template
=
advanced_completion_prompt_template
)
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
app_mode
:
AppMode
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
app_mode
:
AppMode
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
@@ -83,4 +134,4 @@ class PromptValidator:
...
@@ -83,4 +134,4 @@ class PromptValidator:
if
not
isinstance
(
config
[
"post_prompt"
],
str
):
if
not
isinstance
(
config
[
"post_prompt"
],
str
):
raise
ValueError
(
"post_prompt must be of string type"
)
raise
ValueError
(
"post_prompt must be of string type"
)
return
config
return
config
\ No newline at end of file
api/core/app/app_config/easy_ui_based_app/variables/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/user_input_form
.py
→
api/core/app/
app_config/easy_ui_based_app/variables/manager
.py
View file @
655b34b7
import
re
import
re
from
typing
import
Tuple
from
core.app.app_config.entities
import
VariableEntity
,
ExternalDataVariableEntity
from
core.external_data_tool.factory
import
ExternalDataToolFactory
class
BasicVariablesConfigManager
:
    @classmethod
    def convert(cls, config: dict) -> Tuple[list[VariableEntity], list[ExternalDataVariableEntity]]:
        """
        Convert the user-input-form sections of a model config dict into
        variable entities and external-data-variable entities.

        :param config: model config args
        :return: (variables, external_data_variables)
        """
        external_data_variables = []
        variables = []

        # old external_data_tools
        external_data_tools = config.get('external_data_tools', [])
        for external_data_tool in external_data_tools:
            if 'enabled' not in external_data_tool or not external_data_tool['enabled']:
                continue

            external_data_variables.append(
                ExternalDataVariableEntity(
                    variable=external_data_tool['variable'],
                    type=external_data_tool['type'],
                    config=external_data_tool['config']
                )
            )

        # variables and external_data_tools
        # each user_input_form entry is a single-key dict: {type: {...spec}}
        for variable in config.get('user_input_form', []):
            typ = list(variable.keys())[0]
            if typ == 'external_data_tool':
                val = variable[typ]
                external_data_variables.append(
                    ExternalDataVariableEntity(
                        variable=val['variable'],
                        type=val['type'],
                        config=val['config']
                    )
                )
            elif typ in [
                VariableEntity.Type.TEXT_INPUT.value,
                VariableEntity.Type.PARAGRAPH.value,
                VariableEntity.Type.NUMBER.value,
            ]:
                variables.append(
                    VariableEntity(
                        type=VariableEntity.Type.value_of(typ),
                        variable=variable[typ].get('variable'),
                        description=variable[typ].get('description'),
                        label=variable[typ].get('label'),
                        required=variable[typ].get('required', False),
                        max_length=variable[typ].get('max_length'),
                        default=variable[typ].get('default'),
                    )
                )
            elif typ == VariableEntity.Type.SELECT.value:
                # select variables carry options instead of max_length
                variables.append(
                    VariableEntity(
                        type=VariableEntity.Type.SELECT,
                        variable=variable[typ].get('variable'),
                        description=variable[typ].get('description'),
                        label=variable[typ].get('label'),
                        required=variable[typ].get('required', False),
                        options=variable[typ].get('options'),
                        default=variable[typ].get('default'),
                    )
                )

        return variables, external_data_variables
@
classmethod
def
validate_and_set_defaults
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
Validate and set defaults for user input form
:param tenant_id: workspace id
:param config: app model config args
"""
related_config_keys
=
[]
config
,
current_related_config_keys
=
cls
.
validate_variables_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
config
,
current_related_config_keys
=
cls
.
validate_external_data_tools_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
return
config
,
related_config_keys
class
UserInputFormValidator
:
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_
variables_
and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
Validate and set defaults for user input form
Validate and set defaults for user input form
...
@@ -59,3 +147,38 @@ class UserInputFormValidator:
...
@@ -59,3 +147,38 @@ class UserInputFormValidator:
raise
ValueError
(
"default value in user_input_form must be in the options list"
)
raise
ValueError
(
"default value in user_input_form must be in the options list"
)
return
config
,
[
"user_input_form"
]
return
config
,
[
"user_input_form"
]
@
classmethod
def
validate_external_data_tools_and_set_defaults
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
Validate and set defaults for external data fetch feature
:param tenant_id: workspace id
:param config: app model config args
"""
if
not
config
.
get
(
"external_data_tools"
):
config
[
"external_data_tools"
]
=
[]
if
not
isinstance
(
config
[
"external_data_tools"
],
list
):
raise
ValueError
(
"external_data_tools must be of list type"
)
for
tool
in
config
[
"external_data_tools"
]:
if
"enabled"
not
in
tool
or
not
tool
[
"enabled"
]:
tool
[
"enabled"
]
=
False
if
not
tool
[
"enabled"
]:
continue
if
"type"
not
in
tool
or
not
tool
[
"type"
]:
raise
ValueError
(
"external_data_tools[].type is required"
)
typ
=
tool
[
"type"
]
config
=
tool
[
"config"
]
ExternalDataToolFactory
.
validate_config
(
name
=
typ
,
tenant_id
=
tenant_id
,
config
=
config
)
return
config
,
[
"external_data_tools"
]
\ No newline at end of file
api/core/
entities/application_
entities.py
→
api/core/
app/app_config/
entities.py
View file @
655b34b7
from
enum
import
Enum
from
enum
import
Enum
from
typing
import
Any
,
Literal
,
Optional
,
Union
from
typing
import
Any
,
Optional
from
pydantic
import
BaseModel
from
pydantic
import
BaseModel
from
core.entities.provider_configuration
import
ProviderModelBundle
from
core.file.file_obj
import
FileObj
from
core.model_runtime.entities.message_entities
import
PromptMessageRole
from
core.model_runtime.entities.message_entities
import
PromptMessageRole
from
core.model_runtime.entities.model_entities
import
AIModelEntity
from
models.model
import
AppMode
class
ModelConfigEntity
(
BaseModel
):
class
ModelConfigEntity
(
BaseModel
):
...
@@ -15,10 +13,7 @@ class ModelConfigEntity(BaseModel):
...
@@ -15,10 +13,7 @@ class ModelConfigEntity(BaseModel):
"""
"""
provider
:
str
provider
:
str
model
:
str
model
:
str
model_schema
:
Optional
[
AIModelEntity
]
=
None
mode
:
Optional
[
str
]
=
None
mode
:
str
provider_model_bundle
:
ProviderModelBundle
credentials
:
dict
[
str
,
Any
]
=
{}
parameters
:
dict
[
str
,
Any
]
=
{}
parameters
:
dict
[
str
,
Any
]
=
{}
stop
:
list
[
str
]
=
[]
stop
:
list
[
str
]
=
[]
...
@@ -194,149 +189,53 @@ class FileUploadEntity(BaseModel):
...
@@ -194,149 +189,53 @@ class FileUploadEntity(BaseModel):
image_config
:
Optional
[
dict
[
str
,
Any
]]
=
None
image_config
:
Optional
[
dict
[
str
,
Any
]]
=
None
class
AgentToolEntity
(
BaseModel
):
class
AppAdditionalFeatures
(
BaseModel
):
"""
Agent Tool Entity.
"""
provider_type
:
Literal
[
"builtin"
,
"api"
]
provider_id
:
str
tool_name
:
str
tool_parameters
:
dict
[
str
,
Any
]
=
{}
class
AgentPromptEntity
(
BaseModel
):
"""
Agent Prompt Entity.
"""
first_prompt
:
str
next_iteration
:
str
class
AgentScratchpadUnit
(
BaseModel
):
"""
Agent First Prompt Entity.
"""
class
Action
(
BaseModel
):
"""
Action Entity.
"""
action_name
:
str
action_input
:
Union
[
dict
,
str
]
agent_response
:
Optional
[
str
]
=
None
thought
:
Optional
[
str
]
=
None
action_str
:
Optional
[
str
]
=
None
observation
:
Optional
[
str
]
=
None
action
:
Optional
[
Action
]
=
None
class
AgentEntity
(
BaseModel
):
"""
Agent Entity.
"""
class
Strategy
(
Enum
):
"""
Agent Strategy.
"""
CHAIN_OF_THOUGHT
=
'chain-of-thought'
FUNCTION_CALLING
=
'function-calling'
provider
:
str
model
:
str
strategy
:
Strategy
prompt
:
Optional
[
AgentPromptEntity
]
=
None
tools
:
list
[
AgentToolEntity
]
=
None
max_iteration
:
int
=
5
class
AppOrchestrationConfigEntity
(
BaseModel
):
"""
App Orchestration Config Entity.
"""
model_config
:
ModelConfigEntity
prompt_template
:
PromptTemplateEntity
variables
:
list
[
VariableEntity
]
=
[]
external_data_variables
:
list
[
ExternalDataVariableEntity
]
=
[]
agent
:
Optional
[
AgentEntity
]
=
None
# features
dataset
:
Optional
[
DatasetEntity
]
=
None
file_upload
:
Optional
[
FileUploadEntity
]
=
None
file_upload
:
Optional
[
FileUploadEntity
]
=
None
opening_statement
:
Optional
[
str
]
=
None
opening_statement
:
Optional
[
str
]
=
None
suggested_questions
:
list
[
str
]
=
[]
suggested_questions_after_answer
:
bool
=
False
suggested_questions_after_answer
:
bool
=
False
show_retrieve_source
:
bool
=
False
show_retrieve_source
:
bool
=
False
more_like_this
:
bool
=
False
more_like_this
:
bool
=
False
speech_to_text
:
bool
=
False
speech_to_text
:
bool
=
False
text_to_speech
:
Optional
[
TextToSpeechEntity
]
=
None
text_to_speech
:
Optional
[
TextToSpeechEntity
]
=
None
sensitive_word_avoidance
:
Optional
[
SensitiveWordAvoidanceEntity
]
=
None
class InvokeFrom(Enum):
    """
    Invoke From.

    Identifies which surface triggered an app invocation.
    """
    SERVICE_API = 'service-api'
    WEB_APP = 'web-app'
    EXPLORE = 'explore'
    DEBUGGER = 'debugger'

    @classmethod
    def value_of(cls, value: str) -> 'InvokeFrom':
        """
        Get value of given mode.

        :param value: mode value
        :return: mode
        :raises ValueError: if no member has the given value
        """
        for mode in cls:
            if mode.value == value:
                return mode
        raise ValueError(f'invalid invoke from value {value}')

    def to_source(self) -> str:
        """
        Get source of invoke from.

        Maps the invoke surface to the `from_source` label recorded on
        messages/logs.

        :return: source
        """
        if self == InvokeFrom.WEB_APP:
            return 'web_app'
        elif self == InvokeFrom.DEBUGGER:
            return 'dev'
        elif self == InvokeFrom.EXPLORE:
            return 'explore_app'
        elif self == InvokeFrom.SERVICE_API:
            return 'api'

        # defensive default; unreachable for current members
        return 'dev'


class AppConfig(BaseModel):
    """
    Application Config Entity.

    Base config shared by all app kinds.
    """
    tenant_id: str
    app_id: str
    app_mode: AppMode
    additional_features: AppAdditionalFeatures
    variables: list[VariableEntity] = []
    sensitive_word_avoidance: Optional[SensitiveWordAvoidanceEntity] = None
class EasyUIBasedAppModelConfigFrom(Enum):
    """
    App Model Config From.

    Where the effective model config of an easy-UI app was taken from:
    request arguments, the app's latest saved config, or the config
    pinned to a specific conversation.
    """
    ARGS = 'args'
    APP_LATEST_CONFIG = 'app-latest-config'
    CONVERSATION_SPECIFIC_CONFIG = 'conversation-specific-config'
class EasyUIBasedAppConfig(AppConfig):
    """
    Easy UI Based App Config Entity.

    Config for apps driven by the easy-UI model config (chat / agent-chat /
    completion). The span in the source interleaved the deleted
    ApplicationGenerateEntity with these two new classes; this keeps the
    post-refactor definitions.
    """
    # where the effective config came from (args / latest / conversation)
    app_model_config_from: EasyUIBasedAppModelConfigFrom
    app_model_config_id: str
    # raw config dict, kept for persistence/override bookkeeping
    app_model_config_dict: dict
    model: ModelConfigEntity
    prompt_template: PromptTemplateEntity
    dataset: Optional[DatasetEntity] = None
    external_data_variables: list[ExternalDataVariableEntity] = []


class WorkflowUIBasedAppConfig(AppConfig):
    """
    Workflow UI Based App Config Entity.
    """
    workflow_id: str
api/core/app/app_config/features/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/app_config/features/file_upload/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/file_upload
.py
→
api/core/app/
app_config/features/file_upload/manager
.py
View file @
655b34b7
from
typing
import
Optional
from
core.app.app_config.entities
import
FileUploadEntity
class FileUploadConfigManager:
    @classmethod
    def convert(cls, config: dict) -> Optional[FileUploadEntity]:
        """
        Build a FileUploadEntity from an app model config dict.

        :param config: app model config dict
        :return: FileUploadEntity when image upload is enabled, otherwise None
        """
        file_upload_dict = config.get('file_upload')
        if not file_upload_dict:
            return None

        image_dict = file_upload_dict.get('image')
        if not image_dict:
            return None

        # feature must be explicitly enabled
        if not image_dict.get('enabled'):
            return None

        return FileUploadEntity(
            image_config={
                'number_limits': image_dict['number_limits'],
                'detail': image_dict['detail'],
                'transfer_methods': image_dict['transfer_methods']
            }
        )
class
FileUploadValidator
:
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
...
api/core/app/app_config/features/more_like_this/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/more_like_this
.py
→
api/core/app/
app_config/features/more_like_this/manager
.py
View file @
655b34b7
class MoreLikeThisConfigManager:
    @classmethod
    def convert(cls, config: dict) -> bool:
        """
        Extract whether the "more like this" feature is enabled
        from an app model config dict.

        :param config: app model config dict
        :return: True when config['more_like_this']['enabled'] is truthy
        """
        more_like_this_dict = config.get('more_like_this')
        if more_like_this_dict:
            # dict.get replaces the membership-check-then-index idiom
            return bool(more_like_this_dict.get('enabled', False))
        return False
class
MoreLikeThisValidator
:
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
...
api/core/app/app_config/features/opening_statement/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/opening_statement
.py
→
api/core/app/
app_config/features/opening_statement/manager
.py
View file @
655b34b7
from
typing
import
Tuple
class
OpeningStatementValidator
:
class OpeningStatementConfigManager:
    @classmethod
    def convert(cls, config: dict) -> tuple[str, list]:
        """
        Extract the opening statement and its suggested questions
        from an app model config dict.

        :param config: app model config dict
        :return: (opening_statement, suggested_questions) as stored in the config
        """
        # opening statement
        opening_statement = config.get('opening_statement')

        # suggested questions shown alongside the opening statement
        suggested_questions_list = config.get('suggested_questions')

        # builtin `tuple[...]` replaces legacy typing.Tuple, matching the
        # builtin generics (list[...]) used elsewhere in this codebase
        return opening_statement, suggested_questions_list
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
...
api/core/app/app_config/features/retrieval_resource/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/retriever_resource
.py
→
api/core/app/
app_config/features/retrieval_resource/manager
.py
View file @
655b34b7
class RetrievalResourceConfigManager:
    @classmethod
    def convert(cls, config: dict) -> bool:
        """
        Extract whether retrieval sources should be shown to the user,
        from an app model config dict.

        :param config: app model config dict
        :return: True when config['retriever_resource']['enabled'] is truthy
        """
        retriever_resource_dict = config.get('retriever_resource')
        if retriever_resource_dict:
            # dict.get replaces the membership-check-then-index idiom
            return bool(retriever_resource_dict.get('enabled', False))
        return False
class
RetrieverResourceValidator
:
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
...
api/core/app/app_config/features/speech_to_text/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/speech_to_text
.py
→
api/core/app/
app_config/features/speech_to_text/manager
.py
View file @
655b34b7
class SpeechToTextConfigManager:
    @classmethod
    def convert(cls, config: dict) -> bool:
        """
        Extract whether speech-to-text is enabled from an app model config dict.

        :param config: app model config dict
        :return: True when config['speech_to_text']['enabled'] is truthy
        """
        speech_to_text_dict = config.get('speech_to_text')
        if speech_to_text_dict:
            # dict.get replaces the membership-check-then-index idiom
            return bool(speech_to_text_dict.get('enabled', False))
        return False
class
SpeechToTextValidator
:
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
...
api/core/app/app_config/features/suggested_questions_after_answer/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/suggested_questions
.py
→
api/core/app/
app_config/features/suggested_questions_after_answer/manager
.py
View file @
655b34b7
class SuggestedQuestionsAfterAnswerConfigManager:
    @classmethod
    def convert(cls, config: dict) -> bool:
        """
        Extract whether suggested-questions-after-answer is enabled
        from an app model config dict.

        :param config: app model config dict
        :return: True when config['suggested_questions_after_answer']['enabled'] is truthy
        """
        suggested_dict = config.get('suggested_questions_after_answer')
        if suggested_dict:
            # dict.get replaces the membership-check-then-index idiom
            return bool(suggested_dict.get('enabled', False))
        return False
class
SuggestedQuestionsValidator
:
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
@@ -16,7 +29,8 @@ class SuggestedQuestionsValidator:
...
@@ -16,7 +29,8 @@ class SuggestedQuestionsValidator:
if
not
isinstance
(
config
[
"suggested_questions_after_answer"
],
dict
):
if
not
isinstance
(
config
[
"suggested_questions_after_answer"
],
dict
):
raise
ValueError
(
"suggested_questions_after_answer must be of dict type"
)
raise
ValueError
(
"suggested_questions_after_answer must be of dict type"
)
if
"enabled"
not
in
config
[
"suggested_questions_after_answer"
]
or
not
config
[
"suggested_questions_after_answer"
][
"enabled"
]:
if
"enabled"
not
in
config
[
"suggested_questions_after_answer"
]
or
not
\
config
[
"suggested_questions_after_answer"
][
"enabled"
]:
config
[
"suggested_questions_after_answer"
][
"enabled"
]
=
False
config
[
"suggested_questions_after_answer"
][
"enabled"
]
=
False
if
not
isinstance
(
config
[
"suggested_questions_after_answer"
][
"enabled"
],
bool
):
if
not
isinstance
(
config
[
"suggested_questions_after_answer"
][
"enabled"
],
bool
):
...
...
api/core/app/app_config/features/text_to_speech/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
validators/text_to_speech
.py
→
api/core/app/
app_config/features/text_to_speech/manager
.py
View file @
655b34b7
from typing import Optional

from core.app.app_config.entities import TextToSpeechEntity
class
TextToSpeechValidator
:
class TextToSpeechConfigManager:
    @classmethod
    def convert(cls, config: dict) -> Optional[TextToSpeechEntity]:
        """
        Extract the text-to-speech feature config from an app model config dict.

        Fix: the original declared `-> bool` yet returned either a
        TextToSpeechEntity or False; False would also fail validation of the
        Optional[TextToSpeechEntity] field this feeds. Return the entity when
        enabled, otherwise None.

        :param config: app model config dict
        :return: TextToSpeechEntity when enabled, otherwise None
        """
        text_to_speech_dict = config.get('text_to_speech')
        if text_to_speech_dict and text_to_speech_dict.get('enabled'):
            return TextToSpeechEntity(
                enabled=text_to_speech_dict.get('enabled'),
                voice=text_to_speech_dict.get('voice'),
                language=text_to_speech_dict.get('language'),
            )
        return None
@
classmethod
@
classmethod
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
def
validate_and_set_defaults
(
cls
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
"""
...
...
api/core/app/app_config/workflow_ui_based_app/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/app_config/workflow_ui_based_app/variables/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/app_config/workflow_ui_based_app/variables/manager.py
0 → 100644
View file @
655b34b7
from
core.app.app_config.entities
import
VariableEntity
from
models.workflow
import
Workflow
class WorkflowVariablesConfigManager:
    @classmethod
    def convert(cls, workflow: Workflow) -> list[VariableEntity]:
        """
        Convert workflow start variables to variables.

        Reads the start node's user input form off the workflow and wraps
        each entry as a VariableEntity.

        :param workflow: workflow instance
        """
        # the start node declares the user-facing input form
        user_input_form = workflow.user_input_form()

        return [VariableEntity(**item) for item in user_input_form]
api/core/app/app_manager.py
View file @
655b34b7
...
@@ -8,13 +8,18 @@ from typing import Any, Optional, Union, cast
...
@@ -8,13 +8,18 @@ from typing import Any, Optional, Union, cast
from
flask
import
Flask
,
current_app
from
flask
import
Flask
,
current_app
from
pydantic
import
ValidationError
from
pydantic
import
ValidationError
from
core.app.agent_chat.app_runner
import
AgentChatAppRunner
from
core.app.app_config.easy_ui_based_app.model_config.converter
import
EasyUIBasedModelConfigEntityConverter
from
core.app.app_orchestration_config_converter
import
AppOrchestrationConfigConverter
from
core.app.app_config.entities
import
EasyUIBasedAppModelConfigFrom
,
EasyUIBasedAppConfig
,
VariableEntity
from
core.app.apps.agent_chat.app_config_manager
import
AgentChatAppConfigManager
from
core.app.apps.agent_chat.app_runner
import
AgentChatAppRunner
from
core.app.app_queue_manager
import
AppQueueManager
,
ConversationTaskStoppedException
,
PublishFrom
from
core.app.app_queue_manager
import
AppQueueManager
,
ConversationTaskStoppedException
,
PublishFrom
from
core.app.chat.app_runner
import
ChatAppRunner
from
core.app.apps.chat.app_config_manager
import
ChatAppConfigManager
from
core.app.apps.chat.app_runner
import
ChatAppRunner
from
core.app.apps.completion.app_config_manager
import
CompletionAppConfigManager
from
core.app.apps.completion.app_runner
import
CompletionAppRunner
from
core.app.generate_task_pipeline
import
GenerateTaskPipeline
from
core.app.generate_task_pipeline
import
GenerateTaskPipeline
from
core.
entities.application
_entities
import
(
from
core.
app.entities.app_invoke
_entities
import
(
Application
GenerateEntity
,
EasyUIBasedApp
GenerateEntity
,
InvokeFrom
,
InvokeFrom
,
)
)
from
core.file.file_obj
import
FileObj
from
core.file.file_obj
import
FileObj
...
@@ -23,24 +28,19 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
...
@@ -23,24 +28,19 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
from
core.prompt.utils.prompt_template_parser
import
PromptTemplateParser
from
core.prompt.utils.prompt_template_parser
import
PromptTemplateParser
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
from
models.account
import
Account
from
models.account
import
Account
from
models.model
import
App
,
Conversation
,
EndUser
,
Message
,
MessageFile
from
models.model
import
App
,
Conversation
,
EndUser
,
Message
,
MessageFile
,
AppMode
,
AppModelConfig
logger
=
logging
.
getLogger
(
__name__
)
logger
=
logging
.
getLogger
(
__name__
)
class
AppManager
:
class
EasyUIBasedAppManager
:
"""
This class is responsible for managing application
"""
def
generate
(
self
,
tenant_id
:
str
,
def
generate
(
self
,
app_model
:
App
,
app_id
:
str
,
app_model_config
:
AppModelConfig
,
app_model_config_id
:
str
,
app_model_config_dict
:
dict
,
app_model_config_override
:
bool
,
user
:
Union
[
Account
,
EndUser
],
user
:
Union
[
Account
,
EndUser
],
invoke_from
:
InvokeFrom
,
invoke_from
:
InvokeFrom
,
inputs
:
dict
[
str
,
str
],
inputs
:
dict
[
str
,
str
],
app_model_config_dict
:
Optional
[
dict
]
=
None
,
query
:
Optional
[
str
]
=
None
,
query
:
Optional
[
str
]
=
None
,
files
:
Optional
[
list
[
FileObj
]]
=
None
,
files
:
Optional
[
list
[
FileObj
]]
=
None
,
conversation
:
Optional
[
Conversation
]
=
None
,
conversation
:
Optional
[
Conversation
]
=
None
,
...
@@ -50,14 +50,12 @@ class AppManager:
...
@@ -50,14 +50,12 @@ class AppManager:
"""
"""
Generate App response.
Generate App response.
:param tenant_id: workspace ID
:param app_model: App
:param app_id: app ID
:param app_model_config: app model config
:param app_model_config_id: app model config id
:param app_model_config_dict: app model config dict
:param app_model_config_override: app model config override
:param user: account or end user
:param user: account or end user
:param invoke_from: invoke from source
:param invoke_from: invoke from source
:param inputs: inputs
:param inputs: inputs
:param app_model_config_dict: app model config dict
:param query: query
:param query: query
:param files: file obj list
:param files: file obj list
:param conversation: conversation
:param conversation: conversation
...
@@ -67,20 +65,21 @@ class AppManager:
...
@@ -67,20 +65,21 @@ class AppManager:
# init task id
# init task id
task_id
=
str
(
uuid
.
uuid4
())
task_id
=
str
(
uuid
.
uuid4
())
# convert to app config
app_config
=
self
.
convert_to_app_config
(
app_model
=
app_model
,
app_model_config
=
app_model_config
,
app_model_config_dict
=
app_model_config_dict
,
conversation
=
conversation
)
# init application generate entity
# init application generate entity
application_generate_entity
=
Application
GenerateEntity
(
application_generate_entity
=
EasyUIBasedApp
GenerateEntity
(
task_id
=
task_id
,
task_id
=
task_id
,
tenant_id
=
tenant_id
,
app_config
=
app_config
,
app_id
=
app_id
,
model_config
=
EasyUIBasedModelConfigEntityConverter
.
convert
(
app_config
),
app_model_config_id
=
app_model_config_id
,
app_model_config_dict
=
app_model_config_dict
,
app_orchestration_config_entity
=
AppOrchestrationConfigConverter
.
convert_from_app_model_config_dict
(
tenant_id
=
tenant_id
,
app_model_config_dict
=
app_model_config_dict
),
app_model_config_override
=
app_model_config_override
,
conversation_id
=
conversation
.
id
if
conversation
else
None
,
conversation_id
=
conversation
.
id
if
conversation
else
None
,
inputs
=
conversation
.
inputs
if
conversation
else
inputs
,
inputs
=
conversation
.
inputs
if
conversation
else
self
.
_get_cleaned_inputs
(
inputs
,
app_config
)
,
query
=
query
.
replace
(
'
\x00
'
,
''
)
if
query
else
None
,
query
=
query
.
replace
(
'
\x00
'
,
''
)
if
query
else
None
,
files
=
files
if
files
else
[],
files
=
files
if
files
else
[],
user_id
=
user
.
id
,
user_id
=
user
.
id
,
...
@@ -89,7 +88,7 @@ class AppManager:
...
@@ -89,7 +88,7 @@ class AppManager:
extras
=
extras
extras
=
extras
)
)
if
not
stream
and
application_generate_entity
.
app_
orchestration_config_entity
.
agent
:
if
not
stream
and
application_generate_entity
.
app_
config
.
app_mode
==
AppMode
.
AGENT_CHAT
:
raise
ValueError
(
"Agent app is not supported in blocking mode."
)
raise
ValueError
(
"Agent app is not supported in blocking mode."
)
# init generate records
# init generate records
...
@@ -128,8 +127,85 @@ class AppManager:
...
@@ -128,8 +127,85 @@ class AppManager:
stream
=
stream
stream
=
stream
)
)
def
convert_to_app_config
(
self
,
app_model
:
App
,
app_model_config
:
AppModelConfig
,
app_model_config_dict
:
Optional
[
dict
]
=
None
,
conversation
:
Optional
[
Conversation
]
=
None
)
->
EasyUIBasedAppConfig
:
if
app_model_config_dict
:
config_from
=
EasyUIBasedAppModelConfigFrom
.
ARGS
elif
conversation
:
config_from
=
EasyUIBasedAppModelConfigFrom
.
CONVERSATION_SPECIFIC_CONFIG
else
:
config_from
=
EasyUIBasedAppModelConfigFrom
.
APP_LATEST_CONFIG
app_mode
=
AppMode
.
value_of
(
app_model
.
mode
)
if
app_mode
==
AppMode
.
AGENT_CHAT
or
app_model
.
is_agent
:
app_model
.
mode
=
AppMode
.
AGENT_CHAT
.
value
app_config
=
AgentChatAppConfigManager
.
config_convert
(
app_model
=
app_model
,
config_from
=
config_from
,
app_model_config
=
app_model_config
,
config_dict
=
app_model_config_dict
)
elif
app_mode
==
AppMode
.
CHAT
:
app_config
=
ChatAppConfigManager
.
config_convert
(
app_model
=
app_model
,
config_from
=
config_from
,
app_model_config
=
app_model_config
,
config_dict
=
app_model_config_dict
)
elif
app_mode
==
AppMode
.
COMPLETION
:
app_config
=
CompletionAppConfigManager
.
config_convert
(
app_model
=
app_model
,
config_from
=
config_from
,
app_model_config
=
app_model_config
,
config_dict
=
app_model_config_dict
)
else
:
raise
ValueError
(
"Invalid app mode"
)
return
app_config
def
_get_cleaned_inputs
(
self
,
user_inputs
:
dict
,
app_config
:
EasyUIBasedAppConfig
):
if
user_inputs
is
None
:
user_inputs
=
{}
filtered_inputs
=
{}
# Filter input variables from form configuration, handle required fields, default values, and option values
variables
=
app_config
.
variables
for
variable_config
in
variables
:
variable
=
variable_config
.
variable
if
variable
not
in
user_inputs
or
not
user_inputs
[
variable
]:
if
variable_config
.
required
:
raise
ValueError
(
f
"{variable} is required in input form"
)
else
:
filtered_inputs
[
variable
]
=
variable_config
.
default
if
variable_config
.
default
is
not
None
else
""
continue
value
=
user_inputs
[
variable
]
if
value
:
if
not
isinstance
(
value
,
str
):
raise
ValueError
(
f
"{variable} in input form must be a string"
)
if
variable_config
.
type
==
VariableEntity
.
Type
.
SELECT
:
options
=
variable_config
.
options
if
variable_config
.
options
is
not
None
else
[]
if
value
not
in
options
:
raise
ValueError
(
f
"{variable} in input form must be one of the following: {options}"
)
else
:
if
variable_config
.
max_length
is
not
None
:
max_length
=
variable_config
.
max_length
if
len
(
value
)
>
max_length
:
raise
ValueError
(
f
'{variable} in input form must be less than {max_length} characters'
)
filtered_inputs
[
variable
]
=
value
.
replace
(
'
\x00
'
,
''
)
if
value
else
None
return
filtered_inputs
def
_generate_worker
(
self
,
flask_app
:
Flask
,
def
_generate_worker
(
self
,
flask_app
:
Flask
,
application_generate_entity
:
Application
GenerateEntity
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
queue_manager
:
AppQueueManager
,
queue_manager
:
AppQueueManager
,
conversation_id
:
str
,
conversation_id
:
str
,
message_id
:
str
)
->
None
:
message_id
:
str
)
->
None
:
...
@@ -148,7 +224,7 @@ class AppManager:
...
@@ -148,7 +224,7 @@ class AppManager:
conversation
=
self
.
_get_conversation
(
conversation_id
)
conversation
=
self
.
_get_conversation
(
conversation_id
)
message
=
self
.
_get_message
(
message_id
)
message
=
self
.
_get_message
(
message_id
)
if
application_generate_entity
.
app_
orchestration_config_entity
.
agent
:
if
application_generate_entity
.
app_
config
.
app_mode
==
AppMode
.
AGENT_CHAT
:
# agent app
# agent app
runner
=
AgentChatAppRunner
()
runner
=
AgentChatAppRunner
()
runner
.
run
(
runner
.
run
(
...
@@ -157,8 +233,8 @@ class AppManager:
...
@@ -157,8 +233,8 @@ class AppManager:
conversation
=
conversation
,
conversation
=
conversation
,
message
=
message
message
=
message
)
)
el
se
:
el
if
application_generate_entity
.
app_config
.
app_mode
==
AppMode
.
CHAT
:
#
basic
app
#
chatbot
app
runner
=
ChatAppRunner
()
runner
=
ChatAppRunner
()
runner
.
run
(
runner
.
run
(
application_generate_entity
=
application_generate_entity
,
application_generate_entity
=
application_generate_entity
,
...
@@ -166,6 +242,16 @@ class AppManager:
...
@@ -166,6 +242,16 @@ class AppManager:
conversation
=
conversation
,
conversation
=
conversation
,
message
=
message
message
=
message
)
)
elif
application_generate_entity
.
app_config
.
app_mode
==
AppMode
.
COMPLETION
:
# completion app
runner
=
CompletionAppRunner
()
runner
.
run
(
application_generate_entity
=
application_generate_entity
,
queue_manager
=
queue_manager
,
message
=
message
)
else
:
raise
ValueError
(
"Invalid app mode"
)
except
ConversationTaskStoppedException
:
except
ConversationTaskStoppedException
:
pass
pass
except
InvokeAuthorizationError
:
except
InvokeAuthorizationError
:
...
@@ -184,7 +270,7 @@ class AppManager:
...
@@ -184,7 +270,7 @@ class AppManager:
finally
:
finally
:
db
.
session
.
remove
()
db
.
session
.
remove
()
def
_handle_response
(
self
,
application_generate_entity
:
Application
GenerateEntity
,
def
_handle_response
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
queue_manager
:
AppQueueManager
,
queue_manager
:
AppQueueManager
,
conversation
:
Conversation
,
conversation
:
Conversation
,
message
:
Message
,
message
:
Message
,
...
@@ -217,24 +303,24 @@ class AppManager:
...
@@ -217,24 +303,24 @@ class AppManager:
finally
:
finally
:
db
.
session
.
remove
()
db
.
session
.
remove
()
def
_init_generate_records
(
self
,
application_generate_entity
:
Application
GenerateEntity
)
\
def
_init_generate_records
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
)
\
->
tuple
[
Conversation
,
Message
]:
->
tuple
[
Conversation
,
Message
]:
"""
"""
Initialize generate records
Initialize generate records
:param application_generate_entity: application generate entity
:param application_generate_entity: application generate entity
:return:
:return:
"""
"""
app_orchestration_config_entity
=
application_generate_entity
.
app_orchestration_config_entity
model_type_instance
=
application_generate_entity
.
model_config
.
provider_model_bundle
.
model_type_instance
model_type_instance
=
app_orchestration_config_entity
.
model_config
.
provider_model_bundle
.
model_type_instance
model_type_instance
=
cast
(
LargeLanguageModel
,
model_type_instance
)
model_type_instance
=
cast
(
LargeLanguageModel
,
model_type_instance
)
model_schema
=
model_type_instance
.
get_model_schema
(
model_schema
=
model_type_instance
.
get_model_schema
(
model
=
app
_orchestration_config
_entity
.
model_config
.
model
,
model
=
app
lication_generate
_entity
.
model_config
.
model
,
credentials
=
app
_orchestration_config
_entity
.
model_config
.
credentials
credentials
=
app
lication_generate
_entity
.
model_config
.
credentials
)
)
app_config
=
application_generate_entity
.
app_config
app_record
=
(
db
.
session
.
query
(
App
)
app_record
=
(
db
.
session
.
query
(
App
)
.
filter
(
App
.
id
==
app
lication_generate_entity
.
app_id
)
.
first
())
.
filter
(
App
.
id
==
app
_config
.
app_id
)
.
first
())
app_mode
=
app_record
.
mode
app_mode
=
app_record
.
mode
...
@@ -249,8 +335,8 @@ class AppManager:
...
@@ -249,8 +335,8 @@ class AppManager:
account_id
=
application_generate_entity
.
user_id
account_id
=
application_generate_entity
.
user_id
override_model_configs
=
None
override_model_configs
=
None
if
app
lication_generate_entity
.
app_model_config_override
:
if
app
_config
.
app_model_config_from
==
EasyUIBasedAppModelConfigFrom
.
ARGS
:
override_model_configs
=
app
lication_generate_entity
.
app_model_config_dict
override_model_configs
=
app
_config
.
app_model_config_dict
introduction
=
''
introduction
=
''
if
app_mode
==
'chat'
:
if
app_mode
==
'chat'
:
...
@@ -260,9 +346,9 @@ class AppManager:
...
@@ -260,9 +346,9 @@ class AppManager:
if
not
application_generate_entity
.
conversation_id
:
if
not
application_generate_entity
.
conversation_id
:
conversation
=
Conversation
(
conversation
=
Conversation
(
app_id
=
app_record
.
id
,
app_id
=
app_record
.
id
,
app_model_config_id
=
app
lication_generate_entity
.
app_model_config_id
,
app_model_config_id
=
app
_config
.
app_model_config_id
,
model_provider
=
app
_orchestration_config
_entity
.
model_config
.
provider
,
model_provider
=
app
lication_generate
_entity
.
model_config
.
provider
,
model_id
=
app
_orchestration_config
_entity
.
model_config
.
model
,
model_id
=
app
lication_generate
_entity
.
model_config
.
model
,
override_model_configs
=
json
.
dumps
(
override_model_configs
)
if
override_model_configs
else
None
,
override_model_configs
=
json
.
dumps
(
override_model_configs
)
if
override_model_configs
else
None
,
mode
=
app_mode
,
mode
=
app_mode
,
name
=
'New conversation'
,
name
=
'New conversation'
,
...
@@ -291,8 +377,8 @@ class AppManager:
...
@@ -291,8 +377,8 @@ class AppManager:
message
=
Message
(
message
=
Message
(
app_id
=
app_record
.
id
,
app_id
=
app_record
.
id
,
model_provider
=
app
_orchestration_config
_entity
.
model_config
.
provider
,
model_provider
=
app
lication_generate
_entity
.
model_config
.
provider
,
model_id
=
app
_orchestration_config
_entity
.
model_config
.
model
,
model_id
=
app
lication_generate
_entity
.
model_config
.
model
,
override_model_configs
=
json
.
dumps
(
override_model_configs
)
if
override_model_configs
else
None
,
override_model_configs
=
json
.
dumps
(
override_model_configs
)
if
override_model_configs
else
None
,
conversation_id
=
conversation
.
id
,
conversation_id
=
conversation
.
id
,
inputs
=
application_generate_entity
.
inputs
,
inputs
=
application_generate_entity
.
inputs
,
...
@@ -311,7 +397,7 @@ class AppManager:
...
@@ -311,7 +397,7 @@ class AppManager:
from_source
=
from_source
,
from_source
=
from_source
,
from_end_user_id
=
end_user_id
,
from_end_user_id
=
end_user_id
,
from_account_id
=
account_id
,
from_account_id
=
account_id
,
agent_based
=
app_
orchestration_config_entity
.
agent
is
not
None
agent_based
=
app_
config
.
app_mode
==
AppMode
.
AGENT_CHAT
,
)
)
db
.
session
.
add
(
message
)
db
.
session
.
add
(
message
)
...
@@ -333,14 +419,14 @@ class AppManager:
...
@@ -333,14 +419,14 @@ class AppManager:
return
conversation
,
message
return
conversation
,
message
def
_get_conversation_introduction
(
self
,
application_generate_entity
:
Application
GenerateEntity
)
->
str
:
def
_get_conversation_introduction
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
)
->
str
:
"""
"""
Get conversation introduction
Get conversation introduction
:param application_generate_entity: application generate entity
:param application_generate_entity: application generate entity
:return: conversation introduction
:return: conversation introduction
"""
"""
app_
orchestration_config_entity
=
application_generate_entity
.
app_orchestration_config_entity
app_
config
=
application_generate_entity
.
app_config
introduction
=
app_
orchestration_config_entity
.
opening_statement
introduction
=
app_
config
.
additional_features
.
opening_statement
if
introduction
:
if
introduction
:
try
:
try
:
...
...
api/core/app/app_orchestration_config_converter.py
deleted
100644 → 0
View file @
e8b2cc73
from
typing
import
cast
from
core.entities.application_entities
import
(
AdvancedChatPromptTemplateEntity
,
AdvancedCompletionPromptTemplateEntity
,
AgentEntity
,
AgentPromptEntity
,
AgentToolEntity
,
AppOrchestrationConfigEntity
,
DatasetEntity
,
DatasetRetrieveConfigEntity
,
ExternalDataVariableEntity
,
FileUploadEntity
,
ModelConfigEntity
,
PromptTemplateEntity
,
SensitiveWordAvoidanceEntity
,
TextToSpeechEntity
,
VariableEntity
,
)
from
core.entities.model_entities
import
ModelStatus
from
core.errors.error
import
ModelCurrentlyNotSupportError
,
ProviderTokenNotInitError
,
QuotaExceededError
from
core.model_runtime.entities.message_entities
import
PromptMessageRole
from
core.model_runtime.entities.model_entities
import
ModelType
from
core.model_runtime.model_providers.__base.large_language_model
import
LargeLanguageModel
from
core.provider_manager
import
ProviderManager
from
core.tools.prompt.template
import
REACT_PROMPT_TEMPLATES
class
AppOrchestrationConfigConverter
:
@
classmethod
def
convert_from_app_model_config_dict
(
cls
,
tenant_id
:
str
,
app_model_config_dict
:
dict
,
skip_check
:
bool
=
False
)
\
->
AppOrchestrationConfigEntity
:
"""
Convert app model config dict to entity.
:param tenant_id: tenant ID
:param app_model_config_dict: app model config dict
:param skip_check: skip check
:raises ProviderTokenNotInitError: provider token not init error
:return: app orchestration config entity
"""
properties
=
{}
copy_app_model_config_dict
=
app_model_config_dict
.
copy
()
provider_manager
=
ProviderManager
()
provider_model_bundle
=
provider_manager
.
get_provider_model_bundle
(
tenant_id
=
tenant_id
,
provider
=
copy_app_model_config_dict
[
'model'
][
'provider'
],
model_type
=
ModelType
.
LLM
)
provider_name
=
provider_model_bundle
.
configuration
.
provider
.
provider
model_name
=
copy_app_model_config_dict
[
'model'
][
'name'
]
model_type_instance
=
provider_model_bundle
.
model_type_instance
model_type_instance
=
cast
(
LargeLanguageModel
,
model_type_instance
)
# check model credentials
model_credentials
=
provider_model_bundle
.
configuration
.
get_current_credentials
(
model_type
=
ModelType
.
LLM
,
model
=
copy_app_model_config_dict
[
'model'
][
'name'
]
)
if
model_credentials
is
None
:
if
not
skip_check
:
raise
ProviderTokenNotInitError
(
f
"Model {model_name} credentials is not initialized."
)
else
:
model_credentials
=
{}
if
not
skip_check
:
# check model
provider_model
=
provider_model_bundle
.
configuration
.
get_provider_model
(
model
=
copy_app_model_config_dict
[
'model'
][
'name'
],
model_type
=
ModelType
.
LLM
)
if
provider_model
is
None
:
model_name
=
copy_app_model_config_dict
[
'model'
][
'name'
]
raise
ValueError
(
f
"Model {model_name} not exist."
)
if
provider_model
.
status
==
ModelStatus
.
NO_CONFIGURE
:
raise
ProviderTokenNotInitError
(
f
"Model {model_name} credentials is not initialized."
)
elif
provider_model
.
status
==
ModelStatus
.
NO_PERMISSION
:
raise
ModelCurrentlyNotSupportError
(
f
"Dify Hosted OpenAI {model_name} currently not support."
)
elif
provider_model
.
status
==
ModelStatus
.
QUOTA_EXCEEDED
:
raise
QuotaExceededError
(
f
"Model provider {provider_name} quota exceeded."
)
# model config
completion_params
=
copy_app_model_config_dict
[
'model'
]
.
get
(
'completion_params'
)
stop
=
[]
if
'stop'
in
completion_params
:
stop
=
completion_params
[
'stop'
]
del
completion_params
[
'stop'
]
# get model mode
model_mode
=
copy_app_model_config_dict
[
'model'
]
.
get
(
'mode'
)
if
not
model_mode
:
mode_enum
=
model_type_instance
.
get_model_mode
(
model
=
copy_app_model_config_dict
[
'model'
][
'name'
],
credentials
=
model_credentials
)
model_mode
=
mode_enum
.
value
model_schema
=
model_type_instance
.
get_model_schema
(
copy_app_model_config_dict
[
'model'
][
'name'
],
model_credentials
)
if
not
skip_check
and
not
model_schema
:
raise
ValueError
(
f
"Model {model_name} not exist."
)
properties
[
'model_config'
]
=
ModelConfigEntity
(
provider
=
copy_app_model_config_dict
[
'model'
][
'provider'
],
model
=
copy_app_model_config_dict
[
'model'
][
'name'
],
model_schema
=
model_schema
,
mode
=
model_mode
,
provider_model_bundle
=
provider_model_bundle
,
credentials
=
model_credentials
,
parameters
=
completion_params
,
stop
=
stop
,
)
# prompt template
prompt_type
=
PromptTemplateEntity
.
PromptType
.
value_of
(
copy_app_model_config_dict
[
'prompt_type'
])
if
prompt_type
==
PromptTemplateEntity
.
PromptType
.
SIMPLE
:
simple_prompt_template
=
copy_app_model_config_dict
.
get
(
"pre_prompt"
,
""
)
properties
[
'prompt_template'
]
=
PromptTemplateEntity
(
prompt_type
=
prompt_type
,
simple_prompt_template
=
simple_prompt_template
)
else
:
advanced_chat_prompt_template
=
None
chat_prompt_config
=
copy_app_model_config_dict
.
get
(
"chat_prompt_config"
,
{})
if
chat_prompt_config
:
chat_prompt_messages
=
[]
for
message
in
chat_prompt_config
.
get
(
"prompt"
,
[]):
chat_prompt_messages
.
append
({
"text"
:
message
[
"text"
],
"role"
:
PromptMessageRole
.
value_of
(
message
[
"role"
])
})
advanced_chat_prompt_template
=
AdvancedChatPromptTemplateEntity
(
messages
=
chat_prompt_messages
)
advanced_completion_prompt_template
=
None
completion_prompt_config
=
copy_app_model_config_dict
.
get
(
"completion_prompt_config"
,
{})
if
completion_prompt_config
:
completion_prompt_template_params
=
{
'prompt'
:
completion_prompt_config
[
'prompt'
][
'text'
],
}
if
'conversation_histories_role'
in
completion_prompt_config
:
completion_prompt_template_params
[
'role_prefix'
]
=
{
'user'
:
completion_prompt_config
[
'conversation_histories_role'
][
'user_prefix'
],
'assistant'
:
completion_prompt_config
[
'conversation_histories_role'
][
'assistant_prefix'
]
}
advanced_completion_prompt_template
=
AdvancedCompletionPromptTemplateEntity
(
**
completion_prompt_template_params
)
properties
[
'prompt_template'
]
=
PromptTemplateEntity
(
prompt_type
=
prompt_type
,
advanced_chat_prompt_template
=
advanced_chat_prompt_template
,
advanced_completion_prompt_template
=
advanced_completion_prompt_template
)
# external data variables
properties
[
'external_data_variables'
]
=
[]
# old external_data_tools
external_data_tools
=
copy_app_model_config_dict
.
get
(
'external_data_tools'
,
[])
for
external_data_tool
in
external_data_tools
:
if
'enabled'
not
in
external_data_tool
or
not
external_data_tool
[
'enabled'
]:
continue
properties
[
'external_data_variables'
]
.
append
(
ExternalDataVariableEntity
(
variable
=
external_data_tool
[
'variable'
],
type
=
external_data_tool
[
'type'
],
config
=
external_data_tool
[
'config'
]
)
)
properties
[
'variables'
]
=
[]
# variables and external_data_tools
for
variable
in
copy_app_model_config_dict
.
get
(
'user_input_form'
,
[]):
typ
=
list
(
variable
.
keys
())[
0
]
if
typ
==
'external_data_tool'
:
val
=
variable
[
typ
]
properties
[
'external_data_variables'
]
.
append
(
ExternalDataVariableEntity
(
variable
=
val
[
'variable'
],
type
=
val
[
'type'
],
config
=
val
[
'config'
]
)
)
elif
typ
in
[
VariableEntity
.
Type
.
TEXT_INPUT
.
value
,
VariableEntity
.
Type
.
PARAGRAPH
.
value
,
VariableEntity
.
Type
.
NUMBER
.
value
,
]:
properties
[
'variables'
]
.
append
(
VariableEntity
(
type
=
VariableEntity
.
Type
.
value_of
(
typ
),
variable
=
variable
[
typ
]
.
get
(
'variable'
),
description
=
variable
[
typ
]
.
get
(
'description'
),
label
=
variable
[
typ
]
.
get
(
'label'
),
required
=
variable
[
typ
]
.
get
(
'required'
,
False
),
max_length
=
variable
[
typ
]
.
get
(
'max_length'
),
default
=
variable
[
typ
]
.
get
(
'default'
),
)
)
elif
typ
==
VariableEntity
.
Type
.
SELECT
.
value
:
properties
[
'variables'
]
.
append
(
VariableEntity
(
type
=
VariableEntity
.
Type
.
SELECT
,
variable
=
variable
[
typ
]
.
get
(
'variable'
),
description
=
variable
[
typ
]
.
get
(
'description'
),
label
=
variable
[
typ
]
.
get
(
'label'
),
required
=
variable
[
typ
]
.
get
(
'required'
,
False
),
options
=
variable
[
typ
]
.
get
(
'options'
),
default
=
variable
[
typ
]
.
get
(
'default'
),
)
)
# show retrieve source
show_retrieve_source
=
False
retriever_resource_dict
=
copy_app_model_config_dict
.
get
(
'retriever_resource'
)
if
retriever_resource_dict
:
if
'enabled'
in
retriever_resource_dict
and
retriever_resource_dict
[
'enabled'
]:
show_retrieve_source
=
True
properties
[
'show_retrieve_source'
]
=
show_retrieve_source
dataset_ids
=
[]
if
'datasets'
in
copy_app_model_config_dict
.
get
(
'dataset_configs'
,
{}):
datasets
=
copy_app_model_config_dict
.
get
(
'dataset_configs'
,
{})
.
get
(
'datasets'
,
{
'strategy'
:
'router'
,
'datasets'
:
[]
})
for
dataset
in
datasets
.
get
(
'datasets'
,
[]):
keys
=
list
(
dataset
.
keys
())
if
len
(
keys
)
==
0
or
keys
[
0
]
!=
'dataset'
:
continue
dataset
=
dataset
[
'dataset'
]
if
'enabled'
not
in
dataset
or
not
dataset
[
'enabled'
]:
continue
dataset_id
=
dataset
.
get
(
'id'
,
None
)
if
dataset_id
:
dataset_ids
.
append
(
dataset_id
)
if
'agent_mode'
in
copy_app_model_config_dict
and
copy_app_model_config_dict
[
'agent_mode'
]
\
and
'enabled'
in
copy_app_model_config_dict
[
'agent_mode'
]
\
and
copy_app_model_config_dict
[
'agent_mode'
][
'enabled'
]:
agent_dict
=
copy_app_model_config_dict
.
get
(
'agent_mode'
,
{})
agent_strategy
=
agent_dict
.
get
(
'strategy'
,
'cot'
)
if
agent_strategy
==
'function_call'
:
strategy
=
AgentEntity
.
Strategy
.
FUNCTION_CALLING
elif
agent_strategy
==
'cot'
or
agent_strategy
==
'react'
:
strategy
=
AgentEntity
.
Strategy
.
CHAIN_OF_THOUGHT
else
:
# old configs, try to detect default strategy
if
copy_app_model_config_dict
[
'model'
][
'provider'
]
==
'openai'
:
strategy
=
AgentEntity
.
Strategy
.
FUNCTION_CALLING
else
:
strategy
=
AgentEntity
.
Strategy
.
CHAIN_OF_THOUGHT
agent_tools
=
[]
for
tool
in
agent_dict
.
get
(
'tools'
,
[]):
keys
=
tool
.
keys
()
if
len
(
keys
)
>=
4
:
if
"enabled"
not
in
tool
or
not
tool
[
"enabled"
]:
continue
agent_tool_properties
=
{
'provider_type'
:
tool
[
'provider_type'
],
'provider_id'
:
tool
[
'provider_id'
],
'tool_name'
:
tool
[
'tool_name'
],
'tool_parameters'
:
tool
[
'tool_parameters'
]
if
'tool_parameters'
in
tool
else
{}
}
agent_tools
.
append
(
AgentToolEntity
(
**
agent_tool_properties
))
elif
len
(
keys
)
==
1
:
# old standard
key
=
list
(
tool
.
keys
())[
0
]
if
key
!=
'dataset'
:
continue
tool_item
=
tool
[
key
]
if
"enabled"
not
in
tool_item
or
not
tool_item
[
"enabled"
]:
continue
dataset_id
=
tool_item
[
'id'
]
dataset_ids
.
append
(
dataset_id
)
if
'strategy'
in
copy_app_model_config_dict
[
'agent_mode'
]
and
\
copy_app_model_config_dict
[
'agent_mode'
][
'strategy'
]
not
in
[
'react_router'
,
'router'
]:
agent_prompt
=
agent_dict
.
get
(
'prompt'
,
None
)
or
{}
# check model mode
model_mode
=
copy_app_model_config_dict
.
get
(
'model'
,
{})
.
get
(
'mode'
,
'completion'
)
if
model_mode
==
'completion'
:
agent_prompt_entity
=
AgentPromptEntity
(
first_prompt
=
agent_prompt
.
get
(
'first_prompt'
,
REACT_PROMPT_TEMPLATES
[
'english'
][
'completion'
][
'prompt'
]),
next_iteration
=
agent_prompt
.
get
(
'next_iteration'
,
REACT_PROMPT_TEMPLATES
[
'english'
][
'completion'
][
'agent_scratchpad'
]),
)
else
:
agent_prompt_entity
=
AgentPromptEntity
(
first_prompt
=
agent_prompt
.
get
(
'first_prompt'
,
REACT_PROMPT_TEMPLATES
[
'english'
][
'chat'
][
'prompt'
]),
next_iteration
=
agent_prompt
.
get
(
'next_iteration'
,
REACT_PROMPT_TEMPLATES
[
'english'
][
'chat'
][
'agent_scratchpad'
]),
)
properties
[
'agent'
]
=
AgentEntity
(
provider
=
properties
[
'model_config'
]
.
provider
,
model
=
properties
[
'model_config'
]
.
model
,
strategy
=
strategy
,
prompt
=
agent_prompt_entity
,
tools
=
agent_tools
,
max_iteration
=
agent_dict
.
get
(
'max_iteration'
,
5
)
)
if
len
(
dataset_ids
)
>
0
:
# dataset configs
dataset_configs
=
copy_app_model_config_dict
.
get
(
'dataset_configs'
,
{
'retrieval_model'
:
'single'
})
query_variable
=
copy_app_model_config_dict
.
get
(
'dataset_query_variable'
)
if
dataset_configs
[
'retrieval_model'
]
==
'single'
:
properties
[
'dataset'
]
=
DatasetEntity
(
dataset_ids
=
dataset_ids
,
retrieve_config
=
DatasetRetrieveConfigEntity
(
query_variable
=
query_variable
,
retrieve_strategy
=
DatasetRetrieveConfigEntity
.
RetrieveStrategy
.
value_of
(
dataset_configs
[
'retrieval_model'
]
)
)
)
else
:
properties
[
'dataset'
]
=
DatasetEntity
(
dataset_ids
=
dataset_ids
,
retrieve_config
=
DatasetRetrieveConfigEntity
(
query_variable
=
query_variable
,
retrieve_strategy
=
DatasetRetrieveConfigEntity
.
RetrieveStrategy
.
value_of
(
dataset_configs
[
'retrieval_model'
]
),
top_k
=
dataset_configs
.
get
(
'top_k'
),
score_threshold
=
dataset_configs
.
get
(
'score_threshold'
),
reranking_model
=
dataset_configs
.
get
(
'reranking_model'
)
)
)
# file upload
file_upload_dict
=
copy_app_model_config_dict
.
get
(
'file_upload'
)
if
file_upload_dict
:
if
'image'
in
file_upload_dict
and
file_upload_dict
[
'image'
]:
if
'enabled'
in
file_upload_dict
[
'image'
]
and
file_upload_dict
[
'image'
][
'enabled'
]:
properties
[
'file_upload'
]
=
FileUploadEntity
(
image_config
=
{
'number_limits'
:
file_upload_dict
[
'image'
][
'number_limits'
],
'detail'
:
file_upload_dict
[
'image'
][
'detail'
],
'transfer_methods'
:
file_upload_dict
[
'image'
][
'transfer_methods'
]
}
)
# opening statement
properties
[
'opening_statement'
]
=
copy_app_model_config_dict
.
get
(
'opening_statement'
)
# suggested questions after answer
suggested_questions_after_answer_dict
=
copy_app_model_config_dict
.
get
(
'suggested_questions_after_answer'
)
if
suggested_questions_after_answer_dict
:
if
'enabled'
in
suggested_questions_after_answer_dict
and
suggested_questions_after_answer_dict
[
'enabled'
]:
properties
[
'suggested_questions_after_answer'
]
=
True
# more like this
more_like_this_dict
=
copy_app_model_config_dict
.
get
(
'more_like_this'
)
if
more_like_this_dict
:
if
'enabled'
in
more_like_this_dict
and
more_like_this_dict
[
'enabled'
]:
properties
[
'more_like_this'
]
=
True
# speech to text
speech_to_text_dict
=
copy_app_model_config_dict
.
get
(
'speech_to_text'
)
if
speech_to_text_dict
:
if
'enabled'
in
speech_to_text_dict
and
speech_to_text_dict
[
'enabled'
]:
properties
[
'speech_to_text'
]
=
True
# text to speech
text_to_speech_dict
=
copy_app_model_config_dict
.
get
(
'text_to_speech'
)
if
text_to_speech_dict
:
if
'enabled'
in
text_to_speech_dict
and
text_to_speech_dict
[
'enabled'
]:
properties
[
'text_to_speech'
]
=
TextToSpeechEntity
(
enabled
=
text_to_speech_dict
.
get
(
'enabled'
),
voice
=
text_to_speech_dict
.
get
(
'voice'
),
language
=
text_to_speech_dict
.
get
(
'language'
),
)
# sensitive word avoidance
sensitive_word_avoidance_dict
=
copy_app_model_config_dict
.
get
(
'sensitive_word_avoidance'
)
if
sensitive_word_avoidance_dict
:
if
'enabled'
in
sensitive_word_avoidance_dict
and
sensitive_word_avoidance_dict
[
'enabled'
]:
properties
[
'sensitive_word_avoidance'
]
=
SensitiveWordAvoidanceEntity
(
type
=
sensitive_word_avoidance_dict
.
get
(
'type'
),
config
=
sensitive_word_avoidance_dict
.
get
(
'config'
),
)
return
AppOrchestrationConfigEntity
(
**
properties
)
api/core/app/app_queue_manager.py
View file @
655b34b7
...
@@ -6,8 +6,8 @@ from typing import Any
...
@@ -6,8 +6,8 @@ from typing import Any
from
sqlalchemy.orm
import
DeclarativeMeta
from
sqlalchemy.orm
import
DeclarativeMeta
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.entities.queue_entities
import
(
from
core.
app.
entities.queue_entities
import
(
AnnotationReplyEvent
,
AnnotationReplyEvent
,
AppQueueEvent
,
AppQueueEvent
,
QueueAgentMessageEvent
,
QueueAgentMessageEvent
,
...
...
api/core/app/apps/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/apps/advanced_chat/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/a
dvanced_chat/config_validato
r.py
→
api/core/app/a
pps/advanced_chat/app_config_manage
r.py
View file @
655b34b7
from
core.app.validators.file_upload
import
FileUploadValidator
from
core.app.app_config.base_app_config_manager
import
BaseAppConfigManager
from
core.app.validators.moderation
import
ModerationValidator
from
core.app.app_config.common.sensitive_word_avoidance.manager
import
SensitiveWordAvoidanceConfigManager
from
core.app.validators.opening_statement
import
OpeningStatementValidator
from
core.app.app_config.entities
import
WorkflowUIBasedAppConfig
from
core.app.validators.retriever_resource
import
RetrieverResourceValidator
from
core.app.app_config.features.file_upload.manager
import
FileUploadConfigManager
from
core.app.validators.speech_to_text
import
SpeechToTextValidator
from
core.app.app_config.features.opening_statement.manager
import
OpeningStatementConfigManager
from
core.app.validators.suggested_questions
import
SuggestedQuestionsValidator
from
core.app.app_config.features.retrieval_resource.manager
import
RetrievalResourceConfigManager
from
core.app.validators.text_to_speech
import
TextToSpeechValidator
from
core.app.app_config.features.speech_to_text.manager
import
SpeechToTextConfigManager
from
core.app.app_config.features.suggested_questions_after_answer.manager
import
\
SuggestedQuestionsAfterAnswerConfigManager
from
core.app.app_config.features.text_to_speech.manager
import
TextToSpeechConfigManager
from
core.app.app_config.workflow_ui_based_app.variables.manager
import
WorkflowVariablesConfigManager
from
models.model
import
AppMode
,
App
from
models.workflow
import
Workflow
class
AdvancedChatAppConfigValidator
:
class
AdvancedChatAppConfig
(
WorkflowUIBasedAppConfig
):
"""
Advanced Chatbot App Config Entity.
"""
pass
class
AdvancedChatAppConfigManager
(
BaseAppConfigManager
):
@
classmethod
def
config_convert
(
cls
,
app_model
:
App
,
workflow
:
Workflow
)
->
AdvancedChatAppConfig
:
features_dict
=
workflow
.
features_dict
app_config
=
AdvancedChatAppConfig
(
tenant_id
=
app_model
.
tenant_id
,
app_id
=
app_model
.
id
,
app_mode
=
AppMode
.
value_of
(
app_model
.
mode
),
workflow_id
=
workflow
.
id
,
sensitive_word_avoidance
=
SensitiveWordAvoidanceConfigManager
.
convert
(
config
=
features_dict
),
variables
=
WorkflowVariablesConfigManager
.
convert
(
workflow
=
workflow
),
additional_features
=
cls
.
convert_features
(
features_dict
)
)
return
app_config
@
classmethod
@
classmethod
def
config_validate
(
cls
,
tenant_id
:
str
,
config
:
dict
,
only_structure_validate
:
bool
=
False
)
->
dict
:
def
config_validate
(
cls
,
tenant_id
:
str
,
config
:
dict
,
only_structure_validate
:
bool
=
False
)
->
dict
:
"""
"""
...
@@ -20,31 +53,32 @@ class AdvancedChatAppConfigValidator:
...
@@ -20,31 +53,32 @@ class AdvancedChatAppConfigValidator:
related_config_keys
=
[]
related_config_keys
=
[]
# file upload validation
# file upload validation
config
,
current_related_config_keys
=
FileUpload
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
FileUpload
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# opening_statement
# opening_statement
config
,
current_related_config_keys
=
OpeningStatement
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
OpeningStatement
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# suggested_questions_after_answer
# suggested_questions_after_answer
config
,
current_related_config_keys
=
SuggestedQuestionsValidator
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
SuggestedQuestionsAfterAnswerConfigManager
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# speech_to_text
# speech_to_text
config
,
current_related_config_keys
=
SpeechToText
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
SpeechToText
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# text_to_speech
# text_to_speech
config
,
current_related_config_keys
=
TextToSpeech
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
TextToSpeech
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# return retriever resource
# return retriever resource
config
,
current_related_config_keys
=
Retriev
erResourceValidato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
Retriev
alResourceConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# moderation validation
# moderation validation
config
,
current_related_config_keys
=
ModerationValidato
r
.
validate_and_set_defaults
(
config
,
current_related_config_keys
=
SensitiveWordAvoidanceConfigManage
r
.
validate_and_set_defaults
(
tenant_id
=
tenant_id
,
tenant_id
=
tenant_id
,
config
=
config
,
config
=
config
,
only_structure_validate
=
only_structure_validate
only_structure_validate
=
only_structure_validate
...
@@ -57,3 +91,4 @@ class AdvancedChatAppConfigValidator:
...
@@ -57,3 +91,4 @@ class AdvancedChatAppConfigValidator:
filtered_config
=
{
key
:
config
.
get
(
key
)
for
key
in
related_config_keys
}
filtered_config
=
{
key
:
config
.
get
(
key
)
for
key
in
related_config_keys
}
return
filtered_config
return
filtered_config
api/core/app/apps/agent_chat/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/a
gent_chat/config_validato
r.py
→
api/core/app/a
pps/agent_chat/app_config_manage
r.py
View file @
655b34b7
import
uuid
import
uuid
from
typing
import
Optional
from
core.app.validators.dataset_retrieval
import
DatasetValidator
from
core.app.validators.external_data_fetch
import
ExternalDataFetchValidator
from
core.agent.entities
import
AgentEntity
from
core.app.validators.file_upload
import
FileUploadValidator
from
core.app.app_config.base_app_config_manager
import
BaseAppConfigManager
from
core.app.validators.model_validator
import
ModelValidator
from
core.app.app_config.easy_ui_based_app.agent.manager
import
AgentConfigManager
from
core.app.validators.moderation
import
ModerationValidator
from
core.app.app_config.easy_ui_based_app.dataset.manager
import
DatasetConfigManager
from
core.app.validators.opening_statement
import
OpeningStatementValidator
from
core.app.app_config.easy_ui_based_app.model_config.manager
import
ModelConfigManager
from
core.app.validators.prompt
import
PromptValidator
from
core.app.app_config.easy_ui_based_app.prompt_template.manager
import
PromptTemplateConfigManager
from
core.app.validators.retriever_resource
import
RetrieverResourceValidator
from
core.app.app_config.easy_ui_based_app.variables.manager
import
BasicVariablesConfigManager
from
core.app.validators.speech_to_text
import
SpeechToTextValidator
from
core.app.app_config.common.sensitive_word_avoidance.manager
import
SensitiveWordAvoidanceConfigManager
from
core.app.validators.suggested_questions
import
SuggestedQuestionsValidator
from
core.app.app_config.entities
import
EasyUIBasedAppConfig
,
EasyUIBasedAppModelConfigFrom
,
DatasetEntity
from
core.app.validators.text_to_speech
import
TextToSpeechValidator
from
core.app.app_config.features.file_upload.manager
import
FileUploadConfigManager
from
core.app.validators.user_input_form
import
UserInputFormValidator
from
core.app.app_config.features.opening_statement.manager
import
OpeningStatementConfigManager
from
core.app.app_config.features.retrieval_resource.manager
import
RetrievalResourceConfigManager
from
core.app.app_config.features.speech_to_text.manager
import
SpeechToTextConfigManager
from
core.app.app_config.features.suggested_questions_after_answer.manager
import
\
SuggestedQuestionsAfterAnswerConfigManager
from
core.app.app_config.features.text_to_speech.manager
import
TextToSpeechConfigManager
from
core.entities.agent_entities
import
PlanningStrategy
from
core.entities.agent_entities
import
PlanningStrategy
from
models.model
import
AppMode
from
models.model
import
AppMode
,
App
,
AppModelConfig
OLD_TOOLS
=
[
"dataset"
,
"google_search"
,
"web_reader"
,
"wikipedia"
,
"current_datetime"
]
OLD_TOOLS
=
[
"dataset"
,
"google_search"
,
"web_reader"
,
"wikipedia"
,
"current_datetime"
]
class
AgentChatAppConfigValidator
:
class
AgentChatAppConfig
(
EasyUIBasedAppConfig
):
"""
Agent Chatbot App Config Entity.
"""
agent
:
Optional
[
AgentEntity
]
=
None
class
AgentChatAppConfigManager
(
BaseAppConfigManager
):
@
classmethod
def
config_convert
(
cls
,
app_model
:
App
,
config_from
:
EasyUIBasedAppModelConfigFrom
,
app_model_config
:
AppModelConfig
,
config_dict
:
Optional
[
dict
]
=
None
)
->
AgentChatAppConfig
:
"""
Convert app model config to agent chat app config
:param app_model: app model
:param config_from: app model config from
:param app_model_config: app model config
:param config_dict: app model config dict
:return:
"""
config_dict
=
cls
.
convert_to_config_dict
(
config_from
,
app_model_config
,
config_dict
)
app_config
=
AgentChatAppConfig
(
tenant_id
=
app_model
.
tenant_id
,
app_id
=
app_model
.
id
,
app_mode
=
AppMode
.
value_of
(
app_model
.
mode
),
app_model_config_from
=
config_from
,
app_model_config_id
=
app_model_config
.
id
,
app_model_config_dict
=
config_dict
,
model
=
ModelConfigManager
.
convert
(
config
=
config_dict
),
prompt_template
=
PromptTemplateConfigManager
.
convert
(
config
=
config_dict
),
sensitive_word_avoidance
=
SensitiveWordAvoidanceConfigManager
.
convert
(
config
=
config_dict
),
dataset
=
DatasetConfigManager
.
convert
(
config
=
config_dict
),
agent
=
AgentConfigManager
.
convert
(
config
=
config_dict
),
additional_features
=
cls
.
convert_features
(
config_dict
)
)
app_config
.
variables
,
app_config
.
external_data_variables
=
BasicVariablesConfigManager
.
convert
(
config
=
config_dict
)
return
app_config
@
classmethod
@
classmethod
def
config_validate
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
dict
:
def
config_validate
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
dict
:
"""
"""
...
@@ -32,23 +90,19 @@ class AgentChatAppConfigValidator:
...
@@ -32,23 +90,19 @@ class AgentChatAppConfigValidator:
related_config_keys
=
[]
related_config_keys
=
[]
# model
# model
config
,
current_related_config_keys
=
Model
Validato
r
.
validate_and_set_defaults
(
tenant_id
,
config
)
config
,
current_related_config_keys
=
Model
ConfigManage
r
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# user_input_form
# user_input_form
config
,
current_related_config_keys
=
UserInputFormValidator
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
BasicVariablesConfigManager
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# external data tools validation
config
,
current_related_config_keys
=
ExternalDataFetchValidator
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# file upload validation
# file upload validation
config
,
current_related_config_keys
=
FileUpload
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
FileUpload
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# prompt
# prompt
config
,
current_related_config_keys
=
Prompt
Validato
r
.
validate_and_set_defaults
(
app_mode
,
config
)
config
,
current_related_config_keys
=
Prompt
TemplateConfigManage
r
.
validate_and_set_defaults
(
app_mode
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# agent_mode
# agent_mode
...
@@ -56,27 +110,29 @@ class AgentChatAppConfigValidator:
...
@@ -56,27 +110,29 @@ class AgentChatAppConfigValidator:
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# opening_statement
# opening_statement
config
,
current_related_config_keys
=
OpeningStatement
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
OpeningStatement
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# suggested_questions_after_answer
# suggested_questions_after_answer
config
,
current_related_config_keys
=
SuggestedQuestionsValidator
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
SuggestedQuestionsAfterAnswerConfigManager
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# speech_to_text
# speech_to_text
config
,
current_related_config_keys
=
SpeechToText
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
SpeechToText
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# text_to_speech
# text_to_speech
config
,
current_related_config_keys
=
TextToSpeech
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
TextToSpeech
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# return retriever resource
# return retriever resource
config
,
current_related_config_keys
=
Retriev
erResourceValidato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
Retriev
alResourceConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# moderation validation
# moderation validation
config
,
current_related_config_keys
=
ModerationValidator
.
validate_and_set_defaults
(
tenant_id
,
config
)
config
,
current_related_config_keys
=
SensitiveWordAvoidanceConfigManager
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
=
list
(
set
(
related_config_keys
))
related_config_keys
=
list
(
set
(
related_config_keys
))
...
@@ -143,7 +199,7 @@ class AgentChatAppConfigValidator:
...
@@ -143,7 +199,7 @@ class AgentChatAppConfigValidator:
except
ValueError
:
except
ValueError
:
raise
ValueError
(
"id in dataset must be of UUID type"
)
raise
ValueError
(
"id in dataset must be of UUID type"
)
if
not
Dataset
Validato
r
.
is_dataset_exists
(
tenant_id
,
tool_item
[
"id"
]):
if
not
Dataset
ConfigManage
r
.
is_dataset_exists
(
tenant_id
,
tool_item
[
"id"
]):
raise
ValueError
(
"Dataset ID does not exist, please check your permission."
)
raise
ValueError
(
"Dataset ID does not exist, please check your permission."
)
else
:
else
:
# latest style, use key-value pair
# latest style, use key-value pair
...
...
api/core/app/agent_chat/app_runner.py
→
api/core/app/a
pps/a
gent_chat/app_runner.py
View file @
655b34b7
...
@@ -2,10 +2,12 @@ import logging
...
@@ -2,10 +2,12 @@ import logging
from
typing
import
cast
from
typing
import
cast
from
core.agent.cot_agent_runner
import
CotAgentRunner
from
core.agent.cot_agent_runner
import
CotAgentRunner
from
core.agent.entities
import
AgentEntity
from
core.agent.fc_agent_runner
import
FunctionCallAgentRunner
from
core.agent.fc_agent_runner
import
FunctionCallAgentRunner
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.app.base_app_runner
import
AppRunner
from
core.app.apps.agent_chat.app_config_manager
import
AgentChatAppConfig
from
core.entities.application_entities
import
AgentEntity
,
ApplicationGenerateEntity
,
ModelConfigEntity
from
core.app.apps.base_app_runner
import
AppRunner
from
core.app.entities.app_invoke_entities
import
EasyUIBasedAppGenerateEntity
,
EasyUIBasedModelConfigEntity
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_manager
import
ModelInstance
from
core.model_manager
import
ModelInstance
from
core.model_runtime.entities.llm_entities
import
LLMUsage
from
core.model_runtime.entities.llm_entities
import
LLMUsage
...
@@ -24,7 +26,7 @@ class AgentChatAppRunner(AppRunner):
...
@@ -24,7 +26,7 @@ class AgentChatAppRunner(AppRunner):
"""
"""
Agent Application Runner
Agent Application Runner
"""
"""
def
run
(
self
,
application_generate_entity
:
Application
GenerateEntity
,
def
run
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
queue_manager
:
AppQueueManager
,
queue_manager
:
AppQueueManager
,
conversation
:
Conversation
,
conversation
:
Conversation
,
message
:
Message
)
->
None
:
message
:
Message
)
->
None
:
...
@@ -36,12 +38,13 @@ class AgentChatAppRunner(AppRunner):
...
@@ -36,12 +38,13 @@ class AgentChatAppRunner(AppRunner):
:param message: message
:param message: message
:return:
:return:
"""
"""
app_record
=
db
.
session
.
query
(
App
)
.
filter
(
App
.
id
==
application_generate_entity
.
app_id
)
.
first
()
app_config
=
application_generate_entity
.
app_config
app_config
=
cast
(
AgentChatAppConfig
,
app_config
)
app_record
=
db
.
session
.
query
(
App
)
.
filter
(
App
.
id
==
app_config
.
app_id
)
.
first
()
if
not
app_record
:
if
not
app_record
:
raise
ValueError
(
"App not found"
)
raise
ValueError
(
"App not found"
)
app_orchestration_config
=
application_generate_entity
.
app_orchestration_config_entity
inputs
=
application_generate_entity
.
inputs
inputs
=
application_generate_entity
.
inputs
query
=
application_generate_entity
.
query
query
=
application_generate_entity
.
query
files
=
application_generate_entity
.
files
files
=
application_generate_entity
.
files
...
@@ -53,8 +56,8 @@ class AgentChatAppRunner(AppRunner):
...
@@ -53,8 +56,8 @@ class AgentChatAppRunner(AppRunner):
# Not Include: memory, external data, dataset context
# Not Include: memory, external data, dataset context
self
.
get_pre_calculate_rest_tokens
(
self
.
get_pre_calculate_rest_tokens
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
query
=
query
...
@@ -64,22 +67,22 @@ class AgentChatAppRunner(AppRunner):
...
@@ -64,22 +67,22 @@ class AgentChatAppRunner(AppRunner):
if
application_generate_entity
.
conversation_id
:
if
application_generate_entity
.
conversation_id
:
# get memory of conversation (read-only)
# get memory of conversation (read-only)
model_instance
=
ModelInstance
(
model_instance
=
ModelInstance
(
provider_model_bundle
=
app
_orchestration_config
.
model_config
.
provider_model_bundle
,
provider_model_bundle
=
app
lication_generate_entity
.
model_config
.
provider_model_bundle
,
model
=
app
_orchestration_config
.
model_config
.
model
model
=
app
lication_generate_entity
.
model_config
.
model
)
)
memory
=
TokenBufferMemory
(
memory
=
TokenBufferMemory
(
conversation
=
conversation
,
conversation
=
conversation
,
model_instance
=
model_instance
model_instance
=
model_instance
)
)
# organize all inputs and template to prompt messages
# organize all inputs and template to prompt messages
# Include: prompt template, inputs, query(optional), files(optional)
# Include: prompt template, inputs, query(optional), files(optional)
# memory(optional)
# memory(optional)
prompt_messages
,
_
=
self
.
organize_prompt_messages
(
prompt_messages
,
_
=
self
.
organize_prompt_messages
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
,
query
=
query
,
...
@@ -91,15 +94,15 @@ class AgentChatAppRunner(AppRunner):
...
@@ -91,15 +94,15 @@ class AgentChatAppRunner(AppRunner):
# process sensitive_word_avoidance
# process sensitive_word_avoidance
_
,
inputs
,
query
=
self
.
moderation_for_inputs
(
_
,
inputs
,
query
=
self
.
moderation_for_inputs
(
app_id
=
app_record
.
id
,
app_id
=
app_record
.
id
,
tenant_id
=
app
lication_generate_entity
.
tenant_id
,
tenant_id
=
app
_config
.
tenant_id
,
app_
orchestration_config_entity
=
app_orchestration_config
,
app_
generate_entity
=
application_generate_entity
,
inputs
=
inputs
,
inputs
=
inputs
,
query
=
query
,
query
=
query
,
)
)
except
ModerationException
as
e
:
except
ModerationException
as
e
:
self
.
direct_output
(
self
.
direct_output
(
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
app_
orchestration_config
=
app_orchestration_config
,
app_
generate_entity
=
application_generate_entity
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
text
=
str
(
e
),
text
=
str
(
e
),
stream
=
application_generate_entity
.
stream
stream
=
application_generate_entity
.
stream
...
@@ -123,7 +126,7 @@ class AgentChatAppRunner(AppRunner):
...
@@ -123,7 +126,7 @@ class AgentChatAppRunner(AppRunner):
)
)
self
.
direct_output
(
self
.
direct_output
(
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
app_
orchestration_config
=
app_orchestration_config
,
app_
generate_entity
=
application_generate_entity
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
text
=
annotation_reply
.
content
,
text
=
annotation_reply
.
content
,
stream
=
application_generate_entity
.
stream
stream
=
application_generate_entity
.
stream
...
@@ -131,7 +134,7 @@ class AgentChatAppRunner(AppRunner):
...
@@ -131,7 +134,7 @@ class AgentChatAppRunner(AppRunner):
return
return
# fill in variable inputs from external data tools if exists
# fill in variable inputs from external data tools if exists
external_data_tools
=
app_
orchestration_
config
.
external_data_variables
external_data_tools
=
app_config
.
external_data_variables
if
external_data_tools
:
if
external_data_tools
:
inputs
=
self
.
fill_in_inputs_from_external_data_tools
(
inputs
=
self
.
fill_in_inputs_from_external_data_tools
(
tenant_id
=
app_record
.
tenant_id
,
tenant_id
=
app_record
.
tenant_id
,
...
@@ -146,8 +149,8 @@ class AgentChatAppRunner(AppRunner):
...
@@ -146,8 +149,8 @@ class AgentChatAppRunner(AppRunner):
# memory(optional), external data, dataset context(optional)
# memory(optional), external data, dataset context(optional)
prompt_messages
,
_
=
self
.
organize_prompt_messages
(
prompt_messages
,
_
=
self
.
organize_prompt_messages
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
,
query
=
query
,
...
@@ -164,25 +167,25 @@ class AgentChatAppRunner(AppRunner):
...
@@ -164,25 +167,25 @@ class AgentChatAppRunner(AppRunner):
if
hosting_moderation_result
:
if
hosting_moderation_result
:
return
return
agent_entity
=
app_
orchestration_
config
.
agent
agent_entity
=
app_config
.
agent
# load tool variables
# load tool variables
tool_conversation_variables
=
self
.
_load_tool_variables
(
conversation_id
=
conversation
.
id
,
tool_conversation_variables
=
self
.
_load_tool_variables
(
conversation_id
=
conversation
.
id
,
user_id
=
application_generate_entity
.
user_id
,
user_id
=
application_generate_entity
.
user_id
,
tenant_id
=
app
lication_generate_entity
.
tenant_id
)
tenant_id
=
app
_config
.
tenant_id
)
# convert db variables to tool variables
# convert db variables to tool variables
tool_variables
=
self
.
_convert_db_variables_to_tool_variables
(
tool_conversation_variables
)
tool_variables
=
self
.
_convert_db_variables_to_tool_variables
(
tool_conversation_variables
)
# init model instance
# init model instance
model_instance
=
ModelInstance
(
model_instance
=
ModelInstance
(
provider_model_bundle
=
app
_orchestration_config
.
model_config
.
provider_model_bundle
,
provider_model_bundle
=
app
lication_generate_entity
.
model_config
.
provider_model_bundle
,
model
=
app
_orchestration_config
.
model_config
.
model
model
=
app
lication_generate_entity
.
model_config
.
model
)
)
prompt_message
,
_
=
self
.
organize_prompt_messages
(
prompt_message
,
_
=
self
.
organize_prompt_messages
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
,
query
=
query
,
...
@@ -199,10 +202,10 @@ class AgentChatAppRunner(AppRunner):
...
@@ -199,10 +202,10 @@ class AgentChatAppRunner(AppRunner):
# start agent runner
# start agent runner
if
agent_entity
.
strategy
==
AgentEntity
.
Strategy
.
CHAIN_OF_THOUGHT
:
if
agent_entity
.
strategy
==
AgentEntity
.
Strategy
.
CHAIN_OF_THOUGHT
:
assistant_cot_runner
=
CotAgentRunner
(
assistant_cot_runner
=
CotAgentRunner
(
tenant_id
=
app
lication_generate_entity
.
tenant_id
,
tenant_id
=
app
_config
.
tenant_id
,
application_generate_entity
=
application_generate_entity
,
application_generate_entity
=
application_generate_entity
,
app_
orchestration_config
=
app_orchestration
_config
,
app_
config
=
app
_config
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
config
=
agent_entity
,
config
=
agent_entity
,
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
message
=
message
,
message
=
message
,
...
@@ -221,10 +224,10 @@ class AgentChatAppRunner(AppRunner):
...
@@ -221,10 +224,10 @@ class AgentChatAppRunner(AppRunner):
)
)
elif
agent_entity
.
strategy
==
AgentEntity
.
Strategy
.
FUNCTION_CALLING
:
elif
agent_entity
.
strategy
==
AgentEntity
.
Strategy
.
FUNCTION_CALLING
:
assistant_fc_runner
=
FunctionCallAgentRunner
(
assistant_fc_runner
=
FunctionCallAgentRunner
(
tenant_id
=
app
lication_generate_entity
.
tenant_id
,
tenant_id
=
app
_config
.
tenant_id
,
application_generate_entity
=
application_generate_entity
,
application_generate_entity
=
application_generate_entity
,
app_
orchestration_config
=
app_orchestration
_config
,
app_
config
=
app
_config
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
config
=
agent_entity
,
config
=
agent_entity
,
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
message
=
message
,
message
=
message
,
...
@@ -285,7 +288,7 @@ class AgentChatAppRunner(AppRunner):
...
@@ -285,7 +288,7 @@ class AgentChatAppRunner(AppRunner):
'pool'
:
db_variables
.
variables
'pool'
:
db_variables
.
variables
})
})
def
_get_usage_of_all_agent_thoughts
(
self
,
model_config
:
ModelConfigEntity
,
def
_get_usage_of_all_agent_thoughts
(
self
,
model_config
:
EasyUIBased
ModelConfigEntity
,
message
:
Message
)
->
LLMUsage
:
message
:
Message
)
->
LLMUsage
:
"""
"""
Get usage of all agent thoughts
Get usage of all agent thoughts
...
...
api/core/app/base_app_runner.py
→
api/core/app/
apps/
base_app_runner.py
View file @
655b34b7
...
@@ -2,16 +2,13 @@ import time
...
@@ -2,16 +2,13 @@ import time
from
collections.abc
import
Generator
from
collections.abc
import
Generator
from
typing
import
Optional
,
Union
,
cast
from
typing
import
Optional
,
Union
,
cast
from
core.app.app_config.entities
import
PromptTemplateEntity
,
ExternalDataVariableEntity
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.app.features.annotation_reply.annotation_reply
import
AnnotationReplyFeature
from
core.app.features.annotation_reply.annotation_reply
import
AnnotationReplyFeature
from
core.app.features.hosting_moderation.hosting_moderation
import
HostingModerationFeature
from
core.app.features.hosting_moderation.hosting_moderation
import
HostingModerationFeature
from
core.entities.application_entities
import
(
from
core.app.entities.app_invoke_entities
import
(
ApplicationGenerateEntity
,
EasyUIBasedAppGenerateEntity
,
AppOrchestrationConfigEntity
,
InvokeFrom
,
EasyUIBasedModelConfigEntity
,
ExternalDataVariableEntity
,
InvokeFrom
,
ModelConfigEntity
,
PromptTemplateEntity
,
)
)
from
core.external_data_tool.external_data_fetch
import
ExternalDataFetch
from
core.external_data_tool.external_data_fetch
import
ExternalDataFetch
from
core.file.file_obj
import
FileObj
from
core.file.file_obj
import
FileObj
...
@@ -29,7 +26,7 @@ from models.model import App, AppMode, Message, MessageAnnotation
...
@@ -29,7 +26,7 @@ from models.model import App, AppMode, Message, MessageAnnotation
class
AppRunner
:
class
AppRunner
:
def
get_pre_calculate_rest_tokens
(
self
,
app_record
:
App
,
def
get_pre_calculate_rest_tokens
(
self
,
app_record
:
App
,
model_config
:
ModelConfigEntity
,
model_config
:
EasyUIBased
ModelConfigEntity
,
prompt_template_entity
:
PromptTemplateEntity
,
prompt_template_entity
:
PromptTemplateEntity
,
inputs
:
dict
[
str
,
str
],
inputs
:
dict
[
str
,
str
],
files
:
list
[
FileObj
],
files
:
list
[
FileObj
],
...
@@ -85,7 +82,7 @@ class AppRunner:
...
@@ -85,7 +82,7 @@ class AppRunner:
return
rest_tokens
return
rest_tokens
def
recale_llm_max_tokens
(
self
,
model_config
:
ModelConfigEntity
,
def
recale_llm_max_tokens
(
self
,
model_config
:
EasyUIBased
ModelConfigEntity
,
prompt_messages
:
list
[
PromptMessage
]):
prompt_messages
:
list
[
PromptMessage
]):
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
model_type_instance
=
model_config
.
provider_model_bundle
.
model_type_instance
model_type_instance
=
model_config
.
provider_model_bundle
.
model_type_instance
...
@@ -121,7 +118,7 @@ class AppRunner:
...
@@ -121,7 +118,7 @@ class AppRunner:
model_config
.
parameters
[
parameter_rule
.
name
]
=
max_tokens
model_config
.
parameters
[
parameter_rule
.
name
]
=
max_tokens
def
organize_prompt_messages
(
self
,
app_record
:
App
,
def
organize_prompt_messages
(
self
,
app_record
:
App
,
model_config
:
ModelConfigEntity
,
model_config
:
EasyUIBased
ModelConfigEntity
,
prompt_template_entity
:
PromptTemplateEntity
,
prompt_template_entity
:
PromptTemplateEntity
,
inputs
:
dict
[
str
,
str
],
inputs
:
dict
[
str
,
str
],
files
:
list
[
FileObj
],
files
:
list
[
FileObj
],
...
@@ -170,7 +167,7 @@ class AppRunner:
...
@@ -170,7 +167,7 @@ class AppRunner:
return
prompt_messages
,
stop
return
prompt_messages
,
stop
def
direct_output
(
self
,
queue_manager
:
AppQueueManager
,
def
direct_output
(
self
,
queue_manager
:
AppQueueManager
,
app_
orchestration_config
:
AppOrchestrationConfig
Entity
,
app_
generate_entity
:
EasyUIBasedAppGenerate
Entity
,
prompt_messages
:
list
,
prompt_messages
:
list
,
text
:
str
,
text
:
str
,
stream
:
bool
,
stream
:
bool
,
...
@@ -178,7 +175,7 @@ class AppRunner:
...
@@ -178,7 +175,7 @@ class AppRunner:
"""
"""
Direct output
Direct output
:param queue_manager: application queue manager
:param queue_manager: application queue manager
:param app_
orchestration_config: app orchestration config
:param app_
generate_entity: app generate entity
:param prompt_messages: prompt messages
:param prompt_messages: prompt messages
:param text: text
:param text: text
:param stream: stream
:param stream: stream
...
@@ -189,7 +186,7 @@ class AppRunner:
...
@@ -189,7 +186,7 @@ class AppRunner:
index
=
0
index
=
0
for
token
in
text
:
for
token
in
text
:
queue_manager
.
publish_chunk_message
(
LLMResultChunk
(
queue_manager
.
publish_chunk_message
(
LLMResultChunk
(
model
=
app_
orchestration_config
.
model_config
.
model
,
model
=
app_
generate_entity
.
model_config
.
model
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
delta
=
LLMResultChunkDelta
(
delta
=
LLMResultChunkDelta
(
index
=
index
,
index
=
index
,
...
@@ -201,7 +198,7 @@ class AppRunner:
...
@@ -201,7 +198,7 @@ class AppRunner:
queue_manager
.
publish_message_end
(
queue_manager
.
publish_message_end
(
llm_result
=
LLMResult
(
llm_result
=
LLMResult
(
model
=
app_
orchestration_config
.
model_config
.
model
,
model
=
app_
generate_entity
.
model_config
.
model
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
message
=
AssistantPromptMessage
(
content
=
text
),
message
=
AssistantPromptMessage
(
content
=
text
),
usage
=
usage
if
usage
else
LLMUsage
.
empty_usage
()
usage
=
usage
if
usage
else
LLMUsage
.
empty_usage
()
...
@@ -294,14 +291,14 @@ class AppRunner:
...
@@ -294,14 +291,14 @@ class AppRunner:
def
moderation_for_inputs
(
self
,
app_id
:
str
,
def
moderation_for_inputs
(
self
,
app_id
:
str
,
tenant_id
:
str
,
tenant_id
:
str
,
app_
orchestration_config_entity
:
AppOrchestrationConfig
Entity
,
app_
generate_entity
:
EasyUIBasedAppGenerate
Entity
,
inputs
:
dict
,
inputs
:
dict
,
query
:
str
)
->
tuple
[
bool
,
dict
,
str
]:
query
:
str
)
->
tuple
[
bool
,
dict
,
str
]:
"""
"""
Process sensitive_word_avoidance.
Process sensitive_word_avoidance.
:param app_id: app id
:param app_id: app id
:param tenant_id: tenant id
:param tenant_id: tenant id
:param app_
orchestration_config_entity: app orchestration config
entity
:param app_
generate_entity: app generate
entity
:param inputs: inputs
:param inputs: inputs
:param query: query
:param query: query
:return:
:return:
...
@@ -310,12 +307,12 @@ class AppRunner:
...
@@ -310,12 +307,12 @@ class AppRunner:
return
moderation_feature
.
check
(
return
moderation_feature
.
check
(
app_id
=
app_id
,
app_id
=
app_id
,
tenant_id
=
tenant_id
,
tenant_id
=
tenant_id
,
app_
orchestration_config_entity
=
app_orchestration_config_entity
,
app_
config
=
app_generate_entity
.
app_config
,
inputs
=
inputs
,
inputs
=
inputs
,
query
=
query
,
query
=
query
,
)
)
def
check_hosting_moderation
(
self
,
application_generate_entity
:
Application
GenerateEntity
,
def
check_hosting_moderation
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
queue_manager
:
AppQueueManager
,
queue_manager
:
AppQueueManager
,
prompt_messages
:
list
[
PromptMessage
])
->
bool
:
prompt_messages
:
list
[
PromptMessage
])
->
bool
:
"""
"""
...
@@ -334,7 +331,7 @@ class AppRunner:
...
@@ -334,7 +331,7 @@ class AppRunner:
if
moderation_result
:
if
moderation_result
:
self
.
direct_output
(
self
.
direct_output
(
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
app_
orchestration_config
=
application_generate_entity
.
app_orchestration_config
_entity
,
app_
generate_entity
=
application_generate
_entity
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
text
=
"I apologize for any confusion, "
\
text
=
"I apologize for any confusion, "
\
"but I'm an AI assistant to be helpful, harmless, and honest."
,
"but I'm an AI assistant to be helpful, harmless, and honest."
,
...
...
api/core/app/apps/chat/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/apps/chat/app_config_manager.py
0 → 100644
View file @
655b34b7
from
typing
import
Optional
from
core.app.app_config.base_app_config_manager
import
BaseAppConfigManager
from
core.app.app_config.easy_ui_based_app.dataset.manager
import
DatasetConfigManager
from
core.app.app_config.easy_ui_based_app.model_config.manager
import
ModelConfigManager
from
core.app.app_config.easy_ui_based_app.prompt_template.manager
import
PromptTemplateConfigManager
from
core.app.app_config.easy_ui_based_app.variables.manager
import
BasicVariablesConfigManager
from
core.app.app_config.common.sensitive_word_avoidance.manager
import
SensitiveWordAvoidanceConfigManager
from
core.app.app_config.entities
import
EasyUIBasedAppConfig
,
EasyUIBasedAppModelConfigFrom
from
core.app.app_config.features.file_upload.manager
import
FileUploadConfigManager
from
core.app.app_config.features.opening_statement.manager
import
OpeningStatementConfigManager
from
core.app.app_config.features.retrieval_resource.manager
import
RetrievalResourceConfigManager
from
core.app.app_config.features.speech_to_text.manager
import
SpeechToTextConfigManager
from
core.app.app_config.features.suggested_questions_after_answer.manager
import
\
SuggestedQuestionsAfterAnswerConfigManager
from
core.app.app_config.features.text_to_speech.manager
import
TextToSpeechConfigManager
from
models.model
import
AppMode
,
App
,
AppModelConfig
class ChatAppConfig(EasyUIBasedAppConfig):
    """Configuration entity for a chatbot ("chat" mode) application.

    Inherits every field and behavior from EasyUIBasedAppConfig unchanged;
    the subclass exists only so chat apps carry a distinct config type.
    """
class ChatAppConfigManager(BaseAppConfigManager):
    """Builds and validates app-model configuration for chat ("chat" mode) apps."""

    @classmethod
    def config_convert(cls, app_model: App,
                       config_from: EasyUIBasedAppModelConfigFrom,
                       app_model_config: AppModelConfig,
                       config_dict: Optional[dict] = None) -> ChatAppConfig:
        """
        Convert app model config to chat app config

        :param app_model: app model record
        :param config_from: origin of the app model config
        :param app_model_config: app model config record
        :param config_dict: optional raw config dict taking precedence over the record
        :return: fully populated ChatAppConfig
        """
        # Normalize the possible config sources into a single dict.
        config_dict = cls.convert_to_config_dict(config_from, app_model_config, config_dict)

        chat_config = ChatAppConfig(
            tenant_id=app_model.tenant_id,
            app_id=app_model.id,
            app_mode=AppMode.value_of(app_model.mode),
            app_model_config_from=config_from,
            app_model_config_id=app_model_config.id,
            app_model_config_dict=config_dict,
            model=ModelConfigManager.convert(config=config_dict),
            prompt_template=PromptTemplateConfigManager.convert(config=config_dict),
            sensitive_word_avoidance=SensitiveWordAvoidanceConfigManager.convert(config=config_dict),
            dataset=DatasetConfigManager.convert(config=config_dict),
            additional_features=cls.convert_features(config_dict)
        )

        # Variables and external data variables are produced by a single converter.
        chat_config.variables, chat_config.external_data_variables = \
            BasicVariablesConfigManager.convert(config=config_dict)

        return chat_config

    @classmethod
    def config_validate(cls, tenant_id: str, config: dict) -> dict:
        """
        Validate for chat app model config

        :param tenant_id: tenant id
        :param config: app model config args
        :return: config filtered down to the keys each validator declared
        """
        app_mode = AppMode.CHAT

        # Each entry validates one config section and returns (config, related_keys).
        validators = [
            lambda c: ModelConfigManager.validate_and_set_defaults(tenant_id, c),               # model
            lambda c: BasicVariablesConfigManager.validate_and_set_defaults(tenant_id, c),      # user_input_form
            FileUploadConfigManager.validate_and_set_defaults,                                  # file upload
            lambda c: PromptTemplateConfigManager.validate_and_set_defaults(app_mode, c),       # prompt
            lambda c: DatasetConfigManager.validate_and_set_defaults(tenant_id, app_mode, c),   # dataset_query_variable
            OpeningStatementConfigManager.validate_and_set_defaults,                            # opening_statement
            SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults,               # suggested_questions_after_answer
            SpeechToTextConfigManager.validate_and_set_defaults,                                # speech_to_text
            TextToSpeechConfigManager.validate_and_set_defaults,                                # text_to_speech
            RetrievalResourceConfigManager.validate_and_set_defaults,                           # retriever resource
            lambda c: SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(tenant_id, c),  # moderation
        ]

        related_config_keys: set = set()
        for validate in validators:
            config, current_keys = validate(config)
            related_config_keys.update(current_keys)

        # Filter out extra parameters: keep only keys some validator declared.
        return {key: config.get(key) for key in related_config_keys}
api/core/app/chat/app_runner.py
→
api/core/app/
apps/
chat/app_runner.py
View file @
655b34b7
import
logging
import
logging
from
typing
import
cast
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.app.base_app_runner
import
AppRunner
from
core.app.apps.chat.app_config_manager
import
ChatAppConfig
from
core.app.apps.base_app_runner
import
AppRunner
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.
entities.application
_entities
import
(
from
core.
app.entities.app_invoke
_entities
import
(
Application
GenerateEntity
,
EasyUIBasedApp
GenerateEntity
,
)
)
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_manager
import
ModelInstance
from
core.model_manager
import
ModelInstance
...
@@ -21,7 +23,7 @@ class ChatAppRunner(AppRunner):
...
@@ -21,7 +23,7 @@ class ChatAppRunner(AppRunner):
Chat Application Runner
Chat Application Runner
"""
"""
def
run
(
self
,
application_generate_entity
:
Application
GenerateEntity
,
def
run
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
queue_manager
:
AppQueueManager
,
queue_manager
:
AppQueueManager
,
conversation
:
Conversation
,
conversation
:
Conversation
,
message
:
Message
)
->
None
:
message
:
Message
)
->
None
:
...
@@ -33,12 +35,13 @@ class ChatAppRunner(AppRunner):
...
@@ -33,12 +35,13 @@ class ChatAppRunner(AppRunner):
:param message: message
:param message: message
:return:
:return:
"""
"""
app_record
=
db
.
session
.
query
(
App
)
.
filter
(
App
.
id
==
application_generate_entity
.
app_id
)
.
first
()
app_config
=
application_generate_entity
.
app_config
app_config
=
cast
(
ChatAppConfig
,
app_config
)
app_record
=
db
.
session
.
query
(
App
)
.
filter
(
App
.
id
==
app_config
.
app_id
)
.
first
()
if
not
app_record
:
if
not
app_record
:
raise
ValueError
(
"App not found"
)
raise
ValueError
(
"App not found"
)
app_orchestration_config
=
application_generate_entity
.
app_orchestration_config_entity
inputs
=
application_generate_entity
.
inputs
inputs
=
application_generate_entity
.
inputs
query
=
application_generate_entity
.
query
query
=
application_generate_entity
.
query
files
=
application_generate_entity
.
files
files
=
application_generate_entity
.
files
...
@@ -50,8 +53,8 @@ class ChatAppRunner(AppRunner):
...
@@ -50,8 +53,8 @@ class ChatAppRunner(AppRunner):
# Not Include: memory, external data, dataset context
# Not Include: memory, external data, dataset context
self
.
get_pre_calculate_rest_tokens
(
self
.
get_pre_calculate_rest_tokens
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
query
=
query
...
@@ -61,8 +64,8 @@ class ChatAppRunner(AppRunner):
...
@@ -61,8 +64,8 @@ class ChatAppRunner(AppRunner):
if
application_generate_entity
.
conversation_id
:
if
application_generate_entity
.
conversation_id
:
# get memory of conversation (read-only)
# get memory of conversation (read-only)
model_instance
=
ModelInstance
(
model_instance
=
ModelInstance
(
provider_model_bundle
=
app
_orchestration_config
.
model_config
.
provider_model_bundle
,
provider_model_bundle
=
app
lication_generate_entity
.
model_config
.
provider_model_bundle
,
model
=
app
_orchestration_config
.
model_config
.
model
model
=
app
lication_generate_entity
.
model_config
.
model
)
)
memory
=
TokenBufferMemory
(
memory
=
TokenBufferMemory
(
...
@@ -75,8 +78,8 @@ class ChatAppRunner(AppRunner):
...
@@ -75,8 +78,8 @@ class ChatAppRunner(AppRunner):
# memory(optional)
# memory(optional)
prompt_messages
,
stop
=
self
.
organize_prompt_messages
(
prompt_messages
,
stop
=
self
.
organize_prompt_messages
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
,
query
=
query
,
...
@@ -88,15 +91,15 @@ class ChatAppRunner(AppRunner):
...
@@ -88,15 +91,15 @@ class ChatAppRunner(AppRunner):
# process sensitive_word_avoidance
# process sensitive_word_avoidance
_
,
inputs
,
query
=
self
.
moderation_for_inputs
(
_
,
inputs
,
query
=
self
.
moderation_for_inputs
(
app_id
=
app_record
.
id
,
app_id
=
app_record
.
id
,
tenant_id
=
app
lication_generate_entity
.
tenant_id
,
tenant_id
=
app
_config
.
tenant_id
,
app_
orchestration_config_entity
=
app_orchestration_config
,
app_
generate_entity
=
application_generate_entity
,
inputs
=
inputs
,
inputs
=
inputs
,
query
=
query
,
query
=
query
,
)
)
except
ModerationException
as
e
:
except
ModerationException
as
e
:
self
.
direct_output
(
self
.
direct_output
(
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
app_
orchestration_config
=
app_orchestration_config
,
app_
generate_entity
=
application_generate_entity
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
text
=
str
(
e
),
text
=
str
(
e
),
stream
=
application_generate_entity
.
stream
stream
=
application_generate_entity
.
stream
...
@@ -120,7 +123,7 @@ class ChatAppRunner(AppRunner):
...
@@ -120,7 +123,7 @@ class ChatAppRunner(AppRunner):
)
)
self
.
direct_output
(
self
.
direct_output
(
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
app_
orchestration_config
=
app_orchestration_config
,
app_
generate_entity
=
application_generate_entity
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
text
=
annotation_reply
.
content
,
text
=
annotation_reply
.
content
,
stream
=
application_generate_entity
.
stream
stream
=
application_generate_entity
.
stream
...
@@ -128,7 +131,7 @@ class ChatAppRunner(AppRunner):
...
@@ -128,7 +131,7 @@ class ChatAppRunner(AppRunner):
return
return
# fill in variable inputs from external data tools if exists
# fill in variable inputs from external data tools if exists
external_data_tools
=
app_
orchestration_
config
.
external_data_variables
external_data_tools
=
app_config
.
external_data_variables
if
external_data_tools
:
if
external_data_tools
:
inputs
=
self
.
fill_in_inputs_from_external_data_tools
(
inputs
=
self
.
fill_in_inputs_from_external_data_tools
(
tenant_id
=
app_record
.
tenant_id
,
tenant_id
=
app_record
.
tenant_id
,
...
@@ -140,7 +143,7 @@ class ChatAppRunner(AppRunner):
...
@@ -140,7 +143,7 @@ class ChatAppRunner(AppRunner):
# get context from datasets
# get context from datasets
context
=
None
context
=
None
if
app_
orchestration_config
.
dataset
and
app_orchestration
_config
.
dataset
.
dataset_ids
:
if
app_
config
.
dataset
and
app
_config
.
dataset
.
dataset_ids
:
hit_callback
=
DatasetIndexToolCallbackHandler
(
hit_callback
=
DatasetIndexToolCallbackHandler
(
queue_manager
,
queue_manager
,
app_record
.
id
,
app_record
.
id
,
...
@@ -152,11 +155,11 @@ class ChatAppRunner(AppRunner):
...
@@ -152,11 +155,11 @@ class ChatAppRunner(AppRunner):
dataset_retrieval
=
DatasetRetrieval
()
dataset_retrieval
=
DatasetRetrieval
()
context
=
dataset_retrieval
.
retrieve
(
context
=
dataset_retrieval
.
retrieve
(
tenant_id
=
app_record
.
tenant_id
,
tenant_id
=
app_record
.
tenant_id
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
config
=
app_
orchestration_
config
.
dataset
,
config
=
app_config
.
dataset
,
query
=
query
,
query
=
query
,
invoke_from
=
application_generate_entity
.
invoke_from
,
invoke_from
=
application_generate_entity
.
invoke_from
,
show_retrieve_source
=
app_
orchestration_config
.
show_retrieve_source
,
show_retrieve_source
=
app_
config
.
additional_features
.
show_retrieve_source
,
hit_callback
=
hit_callback
,
hit_callback
=
hit_callback
,
memory
=
memory
memory
=
memory
)
)
...
@@ -166,8 +169,8 @@ class ChatAppRunner(AppRunner):
...
@@ -166,8 +169,8 @@ class ChatAppRunner(AppRunner):
# memory(optional), external data, dataset context(optional)
# memory(optional), external data, dataset context(optional)
prompt_messages
,
stop
=
self
.
organize_prompt_messages
(
prompt_messages
,
stop
=
self
.
organize_prompt_messages
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
,
query
=
query
,
...
@@ -187,19 +190,19 @@ class ChatAppRunner(AppRunner):
...
@@ -187,19 +190,19 @@ class ChatAppRunner(AppRunner):
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self
.
recale_llm_max_tokens
(
self
.
recale_llm_max_tokens
(
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_messages
=
prompt_messages
prompt_messages
=
prompt_messages
)
)
# Invoke model
# Invoke model
model_instance
=
ModelInstance
(
model_instance
=
ModelInstance
(
provider_model_bundle
=
app
_orchestration_config
.
model_config
.
provider_model_bundle
,
provider_model_bundle
=
app
lication_generate_entity
.
model_config
.
provider_model_bundle
,
model
=
app
_orchestration_config
.
model_config
.
model
model
=
app
lication_generate_entity
.
model_config
.
model
)
)
invoke_result
=
model_instance
.
invoke_llm
(
invoke_result
=
model_instance
.
invoke_llm
(
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
model_parameters
=
app
_orchestration_config
.
model_config
.
parameters
,
model_parameters
=
app
lication_generate_entity
.
model_config
.
parameters
,
stop
=
stop
,
stop
=
stop
,
stream
=
application_generate_entity
.
stream
,
stream
=
application_generate_entity
.
stream
,
user
=
application_generate_entity
.
user_id
,
user
=
application_generate_entity
.
user_id
,
...
...
api/core/app/apps/completion/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/apps/completion/app_config_manager.py
0 → 100644
View file @
655b34b7
from
typing
import
Optional
from
core.app.app_config.base_app_config_manager
import
BaseAppConfigManager
from
core.app.app_config.easy_ui_based_app.dataset.manager
import
DatasetConfigManager
from
core.app.app_config.easy_ui_based_app.model_config.manager
import
ModelConfigManager
from
core.app.app_config.easy_ui_based_app.prompt_template.manager
import
PromptTemplateConfigManager
from
core.app.app_config.easy_ui_based_app.variables.manager
import
BasicVariablesConfigManager
from
core.app.app_config.common.sensitive_word_avoidance.manager
import
SensitiveWordAvoidanceConfigManager
from
core.app.app_config.entities
import
EasyUIBasedAppConfig
,
EasyUIBasedAppModelConfigFrom
from
core.app.app_config.features.file_upload.manager
import
FileUploadConfigManager
from
core.app.app_config.features.more_like_this.manager
import
MoreLikeThisConfigManager
from
core.app.app_config.features.text_to_speech.manager
import
TextToSpeechConfigManager
from
models.model
import
AppMode
,
App
,
AppModelConfig
class CompletionAppConfig(EasyUIBasedAppConfig):
    """
    Completion App Config Entity.

    Marker subclass of EasyUIBasedAppConfig for completion-mode apps; it adds
    no fields of its own and exists so completion app configs can be typed
    distinctly from other easy-UI based app configs.
    """
    pass
class CompletionAppConfigManager(BaseAppConfigManager):
    @classmethod
    def config_convert(cls, app_model: App,
                       config_from: EasyUIBasedAppModelConfigFrom,
                       app_model_config: AppModelConfig,
                       config_dict: Optional[dict] = None) -> CompletionAppConfig:
        """
        Convert app model config to completion app config
        :param app_model: app model
        :param config_from: app model config from
        :param app_model_config: app model config
        :param config_dict: app model config dict
        :return: assembled completion app config entity
        """
        config_dict = cls.convert_to_config_dict(config_from, app_model_config, config_dict)

        # Parse each sub-config out of the raw dict via its dedicated manager.
        model_entity = ModelConfigManager.convert(config=config_dict)
        prompt_template_entity = PromptTemplateConfigManager.convert(config=config_dict)
        moderation_entity = SensitiveWordAvoidanceConfigManager.convert(config=config_dict)
        dataset_entity = DatasetConfigManager.convert(config=config_dict)

        app_config = CompletionAppConfig(
            tenant_id=app_model.tenant_id,
            app_id=app_model.id,
            app_mode=AppMode.value_of(app_model.mode),
            app_model_config_from=config_from,
            app_model_config_id=app_model_config.id,
            app_model_config_dict=config_dict,
            model=model_entity,
            prompt_template=prompt_template_entity,
            sensitive_word_avoidance=moderation_entity,
            dataset=dataset_entity,
            additional_features=cls.convert_features(config_dict)
        )

        # Variables and external data variables come from the same manager and
        # are assigned after construction, mirroring the entity's two fields.
        variables, external_data_variables = BasicVariablesConfigManager.convert(config=config_dict)
        app_config.variables = variables
        app_config.external_data_variables = external_data_variables

        return app_config

    @classmethod
    def config_validate(cls, tenant_id: str, config: dict) -> dict:
        """
        Validate for completion app model config

        Runs every section validator in turn; each one normalizes its section
        in `config` and reports which top-level keys it owns. Only owned keys
        survive into the returned config.

        :param tenant_id: tenant id
        :param config: app model config args
        :return: filtered config containing only validated keys
        """
        app_mode = AppMode.COMPLETION

        # Ordered list of section validators; each takes the evolving config
        # and returns (updated_config, related_config_keys).
        validators = [
            # model
            lambda cfg: ModelConfigManager.validate_and_set_defaults(tenant_id, cfg),
            # user_input_form
            lambda cfg: BasicVariablesConfigManager.validate_and_set_defaults(tenant_id, cfg),
            # file upload validation
            lambda cfg: FileUploadConfigManager.validate_and_set_defaults(cfg),
            # prompt
            lambda cfg: PromptTemplateConfigManager.validate_and_set_defaults(app_mode, cfg),
            # dataset_query_variable
            lambda cfg: DatasetConfigManager.validate_and_set_defaults(tenant_id, app_mode, cfg),
            # text_to_speech
            lambda cfg: TextToSpeechConfigManager.validate_and_set_defaults(cfg),
            # more_like_this
            lambda cfg: MoreLikeThisConfigManager.validate_and_set_defaults(cfg),
            # moderation validation
            lambda cfg: SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(tenant_id, cfg),
        ]

        related_config_keys = []
        for validate in validators:
            config, current_related_config_keys = validate(config)
            related_config_keys.extend(current_related_config_keys)

        # Filter out extra parameters (deduplicate owned keys first).
        return {key: config.get(key) for key in set(related_config_keys)}
api/core/app/completion/app_runner.py
→
api/core/app/
apps/
completion/app_runner.py
View file @
655b34b7
import
logging
import
logging
from
typing
import
cast
from
core.app.app_queue_manager
import
AppQueueManager
from
core.app.app_queue_manager
import
AppQueueManager
from
core.app.base_app_runner
import
AppRunner
from
core.app.apps.completion.app_config_manager
import
CompletionAppConfig
from
core.app.apps.base_app_runner
import
AppRunner
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.
entities.application
_entities
import
(
from
core.
app.entities.app_invoke
_entities
import
(
Application
GenerateEntity
,
EasyUIBasedApp
GenerateEntity
,
)
)
from
core.model_manager
import
ModelInstance
from
core.model_manager
import
ModelInstance
from
core.moderation.base
import
ModerationException
from
core.moderation.base
import
ModerationException
...
@@ -20,7 +22,7 @@ class CompletionAppRunner(AppRunner):
...
@@ -20,7 +22,7 @@ class CompletionAppRunner(AppRunner):
Completion Application Runner
Completion Application Runner
"""
"""
def
run
(
self
,
application_generate_entity
:
Application
GenerateEntity
,
def
run
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
queue_manager
:
AppQueueManager
,
queue_manager
:
AppQueueManager
,
message
:
Message
)
->
None
:
message
:
Message
)
->
None
:
"""
"""
...
@@ -30,12 +32,13 @@ class CompletionAppRunner(AppRunner):
...
@@ -30,12 +32,13 @@ class CompletionAppRunner(AppRunner):
:param message: message
:param message: message
:return:
:return:
"""
"""
app_record
=
db
.
session
.
query
(
App
)
.
filter
(
App
.
id
==
application_generate_entity
.
app_id
)
.
first
()
app_config
=
application_generate_entity
.
app_config
app_config
=
cast
(
CompletionAppConfig
,
app_config
)
app_record
=
db
.
session
.
query
(
App
)
.
filter
(
App
.
id
==
app_config
.
app_id
)
.
first
()
if
not
app_record
:
if
not
app_record
:
raise
ValueError
(
"App not found"
)
raise
ValueError
(
"App not found"
)
app_orchestration_config
=
application_generate_entity
.
app_orchestration_config_entity
inputs
=
application_generate_entity
.
inputs
inputs
=
application_generate_entity
.
inputs
query
=
application_generate_entity
.
query
query
=
application_generate_entity
.
query
files
=
application_generate_entity
.
files
files
=
application_generate_entity
.
files
...
@@ -47,8 +50,8 @@ class CompletionAppRunner(AppRunner):
...
@@ -47,8 +50,8 @@ class CompletionAppRunner(AppRunner):
# Not Include: memory, external data, dataset context
# Not Include: memory, external data, dataset context
self
.
get_pre_calculate_rest_tokens
(
self
.
get_pre_calculate_rest_tokens
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
query
=
query
...
@@ -58,8 +61,8 @@ class CompletionAppRunner(AppRunner):
...
@@ -58,8 +61,8 @@ class CompletionAppRunner(AppRunner):
# Include: prompt template, inputs, query(optional), files(optional)
# Include: prompt template, inputs, query(optional), files(optional)
prompt_messages
,
stop
=
self
.
organize_prompt_messages
(
prompt_messages
,
stop
=
self
.
organize_prompt_messages
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
query
=
query
...
@@ -70,15 +73,15 @@ class CompletionAppRunner(AppRunner):
...
@@ -70,15 +73,15 @@ class CompletionAppRunner(AppRunner):
# process sensitive_word_avoidance
# process sensitive_word_avoidance
_
,
inputs
,
query
=
self
.
moderation_for_inputs
(
_
,
inputs
,
query
=
self
.
moderation_for_inputs
(
app_id
=
app_record
.
id
,
app_id
=
app_record
.
id
,
tenant_id
=
app
lication_generate_entity
.
tenant_id
,
tenant_id
=
app
_config
.
tenant_id
,
app_
orchestration_config_entity
=
app_orchestration_config
,
app_
generate_entity
=
application_generate_entity
,
inputs
=
inputs
,
inputs
=
inputs
,
query
=
query
,
query
=
query
,
)
)
except
ModerationException
as
e
:
except
ModerationException
as
e
:
self
.
direct_output
(
self
.
direct_output
(
queue_manager
=
queue_manager
,
queue_manager
=
queue_manager
,
app_
orchestration_config
=
app_orchestration_config
,
app_
generate_entity
=
application_generate_entity
,
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
text
=
str
(
e
),
text
=
str
(
e
),
stream
=
application_generate_entity
.
stream
stream
=
application_generate_entity
.
stream
...
@@ -86,7 +89,7 @@ class CompletionAppRunner(AppRunner):
...
@@ -86,7 +89,7 @@ class CompletionAppRunner(AppRunner):
return
return
# fill in variable inputs from external data tools if exists
# fill in variable inputs from external data tools if exists
external_data_tools
=
app_
orchestration_
config
.
external_data_variables
external_data_tools
=
app_config
.
external_data_variables
if
external_data_tools
:
if
external_data_tools
:
inputs
=
self
.
fill_in_inputs_from_external_data_tools
(
inputs
=
self
.
fill_in_inputs_from_external_data_tools
(
tenant_id
=
app_record
.
tenant_id
,
tenant_id
=
app_record
.
tenant_id
,
...
@@ -98,7 +101,7 @@ class CompletionAppRunner(AppRunner):
...
@@ -98,7 +101,7 @@ class CompletionAppRunner(AppRunner):
# get context from datasets
# get context from datasets
context
=
None
context
=
None
if
app_
orchestration_config
.
dataset
and
app_orchestration
_config
.
dataset
.
dataset_ids
:
if
app_
config
.
dataset
and
app
_config
.
dataset
.
dataset_ids
:
hit_callback
=
DatasetIndexToolCallbackHandler
(
hit_callback
=
DatasetIndexToolCallbackHandler
(
queue_manager
,
queue_manager
,
app_record
.
id
,
app_record
.
id
,
...
@@ -107,18 +110,18 @@ class CompletionAppRunner(AppRunner):
...
@@ -107,18 +110,18 @@ class CompletionAppRunner(AppRunner):
application_generate_entity
.
invoke_from
application_generate_entity
.
invoke_from
)
)
dataset_config
=
app_
orchestration_
config
.
dataset
dataset_config
=
app_config
.
dataset
if
dataset_config
and
dataset_config
.
retrieve_config
.
query_variable
:
if
dataset_config
and
dataset_config
.
retrieve_config
.
query_variable
:
query
=
inputs
.
get
(
dataset_config
.
retrieve_config
.
query_variable
,
""
)
query
=
inputs
.
get
(
dataset_config
.
retrieve_config
.
query_variable
,
""
)
dataset_retrieval
=
DatasetRetrieval
()
dataset_retrieval
=
DatasetRetrieval
()
context
=
dataset_retrieval
.
retrieve
(
context
=
dataset_retrieval
.
retrieve
(
tenant_id
=
app_record
.
tenant_id
,
tenant_id
=
app_record
.
tenant_id
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
config
=
dataset_config
,
config
=
dataset_config
,
query
=
query
,
query
=
query
,
invoke_from
=
application_generate_entity
.
invoke_from
,
invoke_from
=
application_generate_entity
.
invoke_from
,
show_retrieve_source
=
app_
orchestration_config
.
show_retrieve_source
,
show_retrieve_source
=
app_
config
.
additional_features
.
show_retrieve_source
,
hit_callback
=
hit_callback
hit_callback
=
hit_callback
)
)
...
@@ -127,8 +130,8 @@ class CompletionAppRunner(AppRunner):
...
@@ -127,8 +130,8 @@ class CompletionAppRunner(AppRunner):
# memory(optional), external data, dataset context(optional)
# memory(optional), external data, dataset context(optional)
prompt_messages
,
stop
=
self
.
organize_prompt_messages
(
prompt_messages
,
stop
=
self
.
organize_prompt_messages
(
app_record
=
app_record
,
app_record
=
app_record
,
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_template_entity
=
app_
orchestration_
config
.
prompt_template
,
prompt_template_entity
=
app_config
.
prompt_template
,
inputs
=
inputs
,
inputs
=
inputs
,
files
=
files
,
files
=
files
,
query
=
query
,
query
=
query
,
...
@@ -147,19 +150,19 @@ class CompletionAppRunner(AppRunner):
...
@@ -147,19 +150,19 @@ class CompletionAppRunner(AppRunner):
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self
.
recale_llm_max_tokens
(
self
.
recale_llm_max_tokens
(
model_config
=
app
_orchestration_config
.
model_config
,
model_config
=
app
lication_generate_entity
.
model_config
,
prompt_messages
=
prompt_messages
prompt_messages
=
prompt_messages
)
)
# Invoke model
# Invoke model
model_instance
=
ModelInstance
(
model_instance
=
ModelInstance
(
provider_model_bundle
=
app
_orchestration_config
.
model_config
.
provider_model_bundle
,
provider_model_bundle
=
app
lication_generate_entity
.
model_config
.
provider_model_bundle
,
model
=
app
_orchestration_config
.
model_config
.
model
model
=
app
lication_generate_entity
.
model_config
.
model
)
)
invoke_result
=
model_instance
.
invoke_llm
(
invoke_result
=
model_instance
.
invoke_llm
(
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
model_parameters
=
app
_orchestration_config
.
model_config
.
parameters
,
model_parameters
=
app
lication_generate_entity
.
model_config
.
parameters
,
stop
=
stop
,
stop
=
stop
,
stream
=
application_generate_entity
.
stream
,
stream
=
application_generate_entity
.
stream
,
user
=
application_generate_entity
.
user_id
,
user
=
application_generate_entity
.
user_id
,
...
...
api/core/app/apps/workflow/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/
workflow/config_validato
r.py
→
api/core/app/
apps/workflow/app_config_manage
r.py
View file @
655b34b7
from
core.app.validators.file_upload
import
FileUploadValidator
from
core.app.app_config.base_app_config_manager
import
BaseAppConfigManager
from
core.app.validators.moderation
import
ModerationValidator
from
core.app.app_config.common.sensitive_word_avoidance.manager
import
SensitiveWordAvoidanceConfigManager
from
core.app.validators.text_to_speech
import
TextToSpeechValidator
from
core.app.app_config.entities
import
WorkflowUIBasedAppConfig
from
core.app.app_config.features.file_upload.manager
import
FileUploadConfigManager
from
core.app.app_config.features.text_to_speech.manager
import
TextToSpeechConfigManager
from
core.app.app_config.workflow_ui_based_app.variables.manager
import
WorkflowVariablesConfigManager
from
models.model
import
AppMode
,
App
from
models.workflow
import
Workflow
class
WorkflowAppConfigValidator
:
class WorkflowAppConfig(WorkflowUIBasedAppConfig):
    """
    Workflow App Config Entity.

    Marker subclass of WorkflowUIBasedAppConfig for workflow-mode apps; it
    adds no fields of its own.
    """
    pass
class WorkflowAppConfigManager(BaseAppConfigManager):
    @classmethod
    def config_convert(cls, app_model: App, workflow: Workflow) -> WorkflowAppConfig:
        """
        Convert an app model plus its workflow into a WorkflowAppConfig.

        :param app_model: app record supplying tenant id, app id and mode
        :param workflow: workflow record supplying its id, variables and
            features dict
        :return: assembled workflow app config entity
        """
        features_dict = workflow.features_dict

        app_config = WorkflowAppConfig(
            tenant_id=app_model.tenant_id,
            app_id=app_model.id,
            app_mode=AppMode.value_of(app_model.mode),
            workflow_id=workflow.id,
            # moderation settings live in the workflow's features dict
            sensitive_word_avoidance=SensitiveWordAvoidanceConfigManager.convert(
                config=features_dict
            ),
            # user input variables are derived from the workflow itself,
            # not from the features dict
            variables=WorkflowVariablesConfigManager.convert(
                workflow=workflow
            ),
            additional_features=cls.convert_features(features_dict)
        )

        return app_config
@
classmethod
@
classmethod
def
config_validate
(
cls
,
tenant_id
:
str
,
config
:
dict
,
only_structure_validate
:
bool
=
False
)
->
dict
:
def
config_validate
(
cls
,
tenant_id
:
str
,
config
:
dict
,
only_structure_validate
:
bool
=
False
)
->
dict
:
"""
"""
...
@@ -16,15 +48,15 @@ class WorkflowAppConfigValidator:
...
@@ -16,15 +48,15 @@ class WorkflowAppConfigValidator:
related_config_keys
=
[]
related_config_keys
=
[]
# file upload validation
# file upload validation
config
,
current_related_config_keys
=
FileUpload
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
FileUpload
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# text_to_speech
# text_to_speech
config
,
current_related_config_keys
=
TextToSpeech
Validato
r
.
validate_and_set_defaults
(
config
)
config
,
current_related_config_keys
=
TextToSpeech
ConfigManage
r
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
.
extend
(
current_related_config_keys
)
# moderation validation
# moderation validation
config
,
current_related_config_keys
=
ModerationValidato
r
.
validate_and_set_defaults
(
config
,
current_related_config_keys
=
SensitiveWordAvoidanceConfigManage
r
.
validate_and_set_defaults
(
tenant_id
=
tenant_id
,
tenant_id
=
tenant_id
,
config
=
config
,
config
=
config
,
only_structure_validate
=
only_structure_validate
only_structure_validate
=
only_structure_validate
...
...
api/core/app/chat/config_validator.py
deleted
100644 → 0
View file @
e8b2cc73
from
core.app.validators.dataset_retrieval
import
DatasetValidator
from
core.app.validators.external_data_fetch
import
ExternalDataFetchValidator
from
core.app.validators.file_upload
import
FileUploadValidator
from
core.app.validators.model_validator
import
ModelValidator
from
core.app.validators.moderation
import
ModerationValidator
from
core.app.validators.opening_statement
import
OpeningStatementValidator
from
core.app.validators.prompt
import
PromptValidator
from
core.app.validators.retriever_resource
import
RetrieverResourceValidator
from
core.app.validators.speech_to_text
import
SpeechToTextValidator
from
core.app.validators.suggested_questions
import
SuggestedQuestionsValidator
from
core.app.validators.text_to_speech
import
TextToSpeechValidator
from
core.app.validators.user_input_form
import
UserInputFormValidator
from
models.model
import
AppMode
class
ChatAppConfigValidator
:
@
classmethod
def
config_validate
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
dict
:
"""
Validate for chat app model config
:param tenant_id: tenant id
:param config: app model config args
"""
app_mode
=
AppMode
.
CHAT
related_config_keys
=
[]
# model
config
,
current_related_config_keys
=
ModelValidator
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# user_input_form
config
,
current_related_config_keys
=
UserInputFormValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# external data tools validation
config
,
current_related_config_keys
=
ExternalDataFetchValidator
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# file upload validation
config
,
current_related_config_keys
=
FileUploadValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# prompt
config
,
current_related_config_keys
=
PromptValidator
.
validate_and_set_defaults
(
app_mode
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# dataset_query_variable
config
,
current_related_config_keys
=
DatasetValidator
.
validate_and_set_defaults
(
tenant_id
,
app_mode
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# opening_statement
config
,
current_related_config_keys
=
OpeningStatementValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# suggested_questions_after_answer
config
,
current_related_config_keys
=
SuggestedQuestionsValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# speech_to_text
config
,
current_related_config_keys
=
SpeechToTextValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# text_to_speech
config
,
current_related_config_keys
=
TextToSpeechValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# return retriever resource
config
,
current_related_config_keys
=
RetrieverResourceValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# moderation validation
config
,
current_related_config_keys
=
ModerationValidator
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
=
list
(
set
(
related_config_keys
))
# Filter out extra parameters
filtered_config
=
{
key
:
config
.
get
(
key
)
for
key
in
related_config_keys
}
return
filtered_config
api/core/app/completion/config_validator.py
deleted
100644 → 0
View file @
e8b2cc73
from
core.app.validators.dataset_retrieval
import
DatasetValidator
from
core.app.validators.external_data_fetch
import
ExternalDataFetchValidator
from
core.app.validators.file_upload
import
FileUploadValidator
from
core.app.validators.model_validator
import
ModelValidator
from
core.app.validators.moderation
import
ModerationValidator
from
core.app.validators.more_like_this
import
MoreLikeThisValidator
from
core.app.validators.prompt
import
PromptValidator
from
core.app.validators.text_to_speech
import
TextToSpeechValidator
from
core.app.validators.user_input_form
import
UserInputFormValidator
from
models.model
import
AppMode
class
CompletionAppConfigValidator
:
@
classmethod
def
config_validate
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
dict
:
"""
Validate for completion app model config
:param tenant_id: tenant id
:param config: app model config args
"""
app_mode
=
AppMode
.
COMPLETION
related_config_keys
=
[]
# model
config
,
current_related_config_keys
=
ModelValidator
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# user_input_form
config
,
current_related_config_keys
=
UserInputFormValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# external data tools validation
config
,
current_related_config_keys
=
ExternalDataFetchValidator
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# file upload validation
config
,
current_related_config_keys
=
FileUploadValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# prompt
config
,
current_related_config_keys
=
PromptValidator
.
validate_and_set_defaults
(
app_mode
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# dataset_query_variable
config
,
current_related_config_keys
=
DatasetValidator
.
validate_and_set_defaults
(
tenant_id
,
app_mode
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# text_to_speech
config
,
current_related_config_keys
=
TextToSpeechValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# more_like_this
config
,
current_related_config_keys
=
MoreLikeThisValidator
.
validate_and_set_defaults
(
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
# moderation validation
config
,
current_related_config_keys
=
ModerationValidator
.
validate_and_set_defaults
(
tenant_id
,
config
)
related_config_keys
.
extend
(
current_related_config_keys
)
related_config_keys
=
list
(
set
(
related_config_keys
))
# Filter out extra parameters
filtered_config
=
{
key
:
config
.
get
(
key
)
for
key
in
related_config_keys
}
return
filtered_config
api/core/app/entities/__init__.py
0 → 100644
View file @
655b34b7
api/core/app/entities/app_invoke_entities.py
0 → 100644
View file @
655b34b7
from
enum
import
Enum
from
typing
import
Any
,
Optional
from
pydantic
import
BaseModel
from
core.app.app_config.entities
import
EasyUIBasedAppConfig
,
WorkflowUIBasedAppConfig
from
core.entities.provider_configuration
import
ProviderModelBundle
from
core.file.file_obj
import
FileObj
from
core.model_runtime.entities.model_entities
import
AIModelEntity
class InvokeFrom(Enum):
    """
    Invoke From.

    Identifies which surface triggered an app invocation.
    """
    SERVICE_API = 'service-api'
    WEB_APP = 'web-app'
    EXPLORE = 'explore'
    DEBUGGER = 'debugger'

    @classmethod
    def value_of(cls, value: str) -> 'InvokeFrom':
        """
        Get value of given mode.

        :param value: mode value
        :return: mode
        :raises ValueError: if no member matches `value`
        """
        match = next((member for member in cls if member.value == value), None)
        if match is None:
            raise ValueError(f'invalid invoke from value {value}')
        return match

    def to_source(self) -> str:
        """
        Get source of invoke from.

        :return: source label; defaults to 'dev' for unknown members
        """
        # Mapping form of the original if/elif chain; DEBUGGER and the
        # fallback both resolve to 'dev', exactly as before.
        sources = {
            InvokeFrom.WEB_APP: 'web_app',
            InvokeFrom.DEBUGGER: 'dev',
            InvokeFrom.EXPLORE: 'explore_app',
            InvokeFrom.SERVICE_API: 'api',
        }
        return sources.get(self, 'dev')
class EasyUIBasedModelConfigEntity(BaseModel):
    """
    Model Config Entity.

    Resolved model configuration for an easy-UI based app invocation:
    which provider/model to call and with which runtime parameters.
    """
    # provider identifier
    provider: str
    # model name
    model: str
    # model schema from the model runtime
    model_schema: AIModelEntity
    # model mode — NOTE(review): presumably 'chat'/'completion'; confirm
    mode: str
    # bundle carrying provider configuration needed to invoke the model
    provider_model_bundle: ProviderModelBundle
    # resolved provider credentials
    credentials: dict[str, Any] = {}
    # model invocation parameters (e.g. temperature, max_tokens) —
    # NOTE(review): presumed; keys are whatever the model runtime accepts
    parameters: dict[str, Any] = {}
    # stop sequences for generation
    stop: list[str] = []
class EasyUIBasedAppGenerateEntity(BaseModel):
    """
    EasyUI Based Application Generate Entity.

    Carries everything needed for one generate invocation of an easy-UI
    based app: config, resolved model config, user inputs and invoke context.
    """
    task_id: str

    # app config
    app_config: EasyUIBasedAppConfig
    # resolved model config for this invocation.
    # NOTE(review): `model_config` is a reserved attribute name in pydantic v2;
    # fine under pydantic v1 — revisit on upgrade.
    model_config: EasyUIBasedModelConfigEntity

    # conversation to continue; None — NOTE(review): presumably starts a new
    # conversation; confirm against callers
    conversation_id: Optional[str] = None
    # user-provided variable inputs
    inputs: dict[str, str]
    query: Optional[str] = None
    files: list[FileObj] = []
    user_id: str

    # extras
    # whether to stream the response
    stream: bool
    # which surface triggered the invocation
    invoke_from: InvokeFrom

    # extra parameters, like: auto_generate_conversation_name
    extras: dict[str, Any] = {}
class WorkflowUIBasedAppGenerateEntity(BaseModel):
    """
    Workflow UI Based Application Generate Entity.

    Carries everything needed for one generate invocation of a workflow
    based app: config, user inputs and invoke context. Unlike the easy-UI
    entity it has no model config — the workflow defines its own nodes.
    """
    task_id: str

    # app config
    app_config: WorkflowUIBasedAppConfig

    # user-provided variable inputs
    inputs: dict[str, str]
    files: list[FileObj] = []
    user_id: str

    # extras
    # whether to stream the response
    stream: bool
    # which surface triggered the invocation
    invoke_from: InvokeFrom

    # extra parameters
    extras: dict[str, Any] = {}
class AdvancedChatAppGenerateEntity(WorkflowUIBasedAppGenerateEntity):
    """
    Advanced Chat Application Generate Entity.

    Extends the workflow-based generate entity with chat-specific fields:
    a conversation to continue and the user's query.
    """
    # conversation to continue; None — NOTE(review): presumably starts a new
    # conversation; confirm against callers
    conversation_id: Optional[str] = None
    # user query text (required, unlike the easy-UI entity's optional query)
    query: str
api/core/entities/queue_entities.py
→
api/core/
app/
entities/queue_entities.py
View file @
655b34b7
File moved
api/core/app/features/annotation_reply/annotation_reply.py
View file @
655b34b7
import
logging
import
logging
from
typing
import
Optional
from
typing
import
Optional
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.rag.datasource.vdb.vector_factory
import
Vector
from
core.rag.datasource.vdb.vector_factory
import
Vector
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
from
models.dataset
import
Dataset
from
models.dataset
import
Dataset
...
...
api/core/app/features/hosting_moderation/hosting_moderation.py
View file @
655b34b7
import
logging
import
logging
from
core.
entities.application_entities
import
Application
GenerateEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBasedApp
GenerateEntity
from
core.helper
import
moderation
from
core.helper
import
moderation
from
core.model_runtime.entities.message_entities
import
PromptMessage
from
core.model_runtime.entities.message_entities
import
PromptMessage
...
@@ -8,7 +8,7 @@ logger = logging.getLogger(__name__)
...
@@ -8,7 +8,7 @@ logger = logging.getLogger(__name__)
class
HostingModerationFeature
:
class
HostingModerationFeature
:
def
check
(
self
,
application_generate_entity
:
Application
GenerateEntity
,
def
check
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
prompt_messages
:
list
[
PromptMessage
])
->
bool
:
prompt_messages
:
list
[
PromptMessage
])
->
bool
:
"""
"""
Check hosting moderation
Check hosting moderation
...
@@ -16,8 +16,7 @@ class HostingModerationFeature:
...
@@ -16,8 +16,7 @@ class HostingModerationFeature:
:param prompt_messages: prompt messages
:param prompt_messages: prompt messages
:return:
:return:
"""
"""
app_orchestration_config
=
application_generate_entity
.
app_orchestration_config_entity
model_config
=
application_generate_entity
.
model_config
model_config
=
app_orchestration_config
.
model_config
text
=
""
text
=
""
for
prompt_message
in
prompt_messages
:
for
prompt_message
in
prompt_messages
:
...
...
api/core/app/generate_task_pipeline.py
View file @
655b34b7
...
@@ -7,8 +7,8 @@ from typing import Optional, Union, cast
...
@@ -7,8 +7,8 @@ from typing import Optional, Union, cast
from
pydantic
import
BaseModel
from
pydantic
import
BaseModel
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.
entities.application_entities
import
Application
GenerateEntity
,
InvokeFrom
from
core.
app.entities.app_invoke_entities
import
EasyUIBasedApp
GenerateEntity
,
InvokeFrom
from
core.entities.queue_entities
import
(
from
core.
app.
entities.queue_entities
import
(
AnnotationReplyEvent
,
AnnotationReplyEvent
,
QueueAgentMessageEvent
,
QueueAgentMessageEvent
,
QueueAgentThoughtEvent
,
QueueAgentThoughtEvent
,
...
@@ -58,7 +58,7 @@ class GenerateTaskPipeline:
...
@@ -58,7 +58,7 @@ class GenerateTaskPipeline:
GenerateTaskPipeline is a class that generate stream output and state management for Application.
GenerateTaskPipeline is a class that generate stream output and state management for Application.
"""
"""
def
__init__
(
self
,
application_generate_entity
:
Application
GenerateEntity
,
def
__init__
(
self
,
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
,
queue_manager
:
AppQueueManager
,
queue_manager
:
AppQueueManager
,
conversation
:
Conversation
,
conversation
:
Conversation
,
message
:
Message
)
->
None
:
message
:
Message
)
->
None
:
...
@@ -75,7 +75,7 @@ class GenerateTaskPipeline:
...
@@ -75,7 +75,7 @@ class GenerateTaskPipeline:
self
.
_message
=
message
self
.
_message
=
message
self
.
_task_state
=
TaskState
(
self
.
_task_state
=
TaskState
(
llm_result
=
LLMResult
(
llm_result
=
LLMResult
(
model
=
self
.
_application_generate_entity
.
app_orchestration_config_entity
.
model_config
.
model
,
model
=
self
.
_application_generate_entity
.
model_config
.
model
,
prompt_messages
=
[],
prompt_messages
=
[],
message
=
AssistantPromptMessage
(
content
=
""
),
message
=
AssistantPromptMessage
(
content
=
""
),
usage
=
LLMUsage
.
empty_usage
()
usage
=
LLMUsage
.
empty_usage
()
...
@@ -123,7 +123,7 @@ class GenerateTaskPipeline:
...
@@ -123,7 +123,7 @@ class GenerateTaskPipeline:
if
isinstance
(
event
,
QueueMessageEndEvent
):
if
isinstance
(
event
,
QueueMessageEndEvent
):
self
.
_task_state
.
llm_result
=
event
.
llm_result
self
.
_task_state
.
llm_result
=
event
.
llm_result
else
:
else
:
model_config
=
self
.
_application_generate_entity
.
app_orchestration_config_entity
.
model_config
model_config
=
self
.
_application_generate_entity
.
model_config
model
=
model_config
.
model
model
=
model_config
.
model
model_type_instance
=
model_config
.
provider_model_bundle
.
model_type_instance
model_type_instance
=
model_config
.
provider_model_bundle
.
model_type_instance
model_type_instance
=
cast
(
LargeLanguageModel
,
model_type_instance
)
model_type_instance
=
cast
(
LargeLanguageModel
,
model_type_instance
)
...
@@ -206,7 +206,7 @@ class GenerateTaskPipeline:
...
@@ -206,7 +206,7 @@ class GenerateTaskPipeline:
if
isinstance
(
event
,
QueueMessageEndEvent
):
if
isinstance
(
event
,
QueueMessageEndEvent
):
self
.
_task_state
.
llm_result
=
event
.
llm_result
self
.
_task_state
.
llm_result
=
event
.
llm_result
else
:
else
:
model_config
=
self
.
_application_generate_entity
.
app_orchestration_config_entity
.
model_config
model_config
=
self
.
_application_generate_entity
.
model_config
model
=
model_config
.
model
model
=
model_config
.
model
model_type_instance
=
model_config
.
provider_model_bundle
.
model_type_instance
model_type_instance
=
model_config
.
provider_model_bundle
.
model_type_instance
model_type_instance
=
cast
(
LargeLanguageModel
,
model_type_instance
)
model_type_instance
=
cast
(
LargeLanguageModel
,
model_type_instance
)
...
@@ -561,7 +561,7 @@ class GenerateTaskPipeline:
...
@@ -561,7 +561,7 @@ class GenerateTaskPipeline:
:return:
:return:
"""
"""
prompts
=
[]
prompts
=
[]
if
self
.
_application_generate_entity
.
app_orchestration_config_entity
.
model_config
.
mode
==
'chat'
:
if
self
.
_application_generate_entity
.
model_config
.
mode
==
'chat'
:
for
prompt_message
in
prompt_messages
:
for
prompt_message
in
prompt_messages
:
if
prompt_message
.
role
==
PromptMessageRole
.
USER
:
if
prompt_message
.
role
==
PromptMessageRole
.
USER
:
role
=
'user'
role
=
'user'
...
@@ -630,13 +630,13 @@ class GenerateTaskPipeline:
...
@@ -630,13 +630,13 @@ class GenerateTaskPipeline:
Init output moderation.
Init output moderation.
:return:
:return:
"""
"""
app_
orchestration_config_entity
=
self
.
_application_generate_entity
.
app_orchestration_config_entity
app_
config
=
self
.
_application_generate_entity
.
app_config
sensitive_word_avoidance
=
app_
orchestration_config_entity
.
sensitive_word_avoidance
sensitive_word_avoidance
=
app_
config
.
sensitive_word_avoidance
if
sensitive_word_avoidance
:
if
sensitive_word_avoidance
:
return
OutputModeration
(
return
OutputModeration
(
tenant_id
=
self
.
_application_generate_entity
.
tenant_id
,
tenant_id
=
app_config
.
tenant_id
,
app_id
=
self
.
_application_generate_entity
.
app_id
,
app_id
=
app_config
.
app_id
,
rule
=
ModerationRule
(
rule
=
ModerationRule
(
type
=
sensitive_word_avoidance
.
type
,
type
=
sensitive_word_avoidance
.
type
,
config
=
sensitive_word_avoidance
.
config
config
=
sensitive_word_avoidance
.
config
...
...
api/core/app/validators/external_data_fetch.py
deleted
100644 → 0
View file @
e8b2cc73
from
core.external_data_tool.factory
import
ExternalDataToolFactory
class
ExternalDataFetchValidator
:
@
classmethod
def
validate_and_set_defaults
(
cls
,
tenant_id
:
str
,
config
:
dict
)
->
tuple
[
dict
,
list
[
str
]]:
"""
Validate and set defaults for external data fetch feature
:param tenant_id: workspace id
:param config: app model config args
"""
if
not
config
.
get
(
"external_data_tools"
):
config
[
"external_data_tools"
]
=
[]
if
not
isinstance
(
config
[
"external_data_tools"
],
list
):
raise
ValueError
(
"external_data_tools must be of list type"
)
for
tool
in
config
[
"external_data_tools"
]:
if
"enabled"
not
in
tool
or
not
tool
[
"enabled"
]:
tool
[
"enabled"
]
=
False
if
not
tool
[
"enabled"
]:
continue
if
"type"
not
in
tool
or
not
tool
[
"type"
]:
raise
ValueError
(
"external_data_tools[].type is required"
)
typ
=
tool
[
"type"
]
config
=
tool
[
"config"
]
ExternalDataToolFactory
.
validate_config
(
name
=
typ
,
tenant_id
=
tenant_id
,
config
=
config
)
return
config
,
[
"external_data_tools"
]
api/core/callback_handler/agent_loop_gather_callback_handler.py
deleted
100644 → 0
View file @
e8b2cc73
import
json
import
logging
import
time
from
typing
import
Any
,
Optional
,
Union
,
cast
from
langchain.agents
import
openai_functions_agent
,
openai_functions_multi_agent
from
langchain.callbacks.base
import
BaseCallbackHandler
from
langchain.schema
import
AgentAction
,
AgentFinish
,
BaseMessage
,
LLMResult
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.callback_handler.entity.agent_loop
import
AgentLoop
from
core.entities.application_entities
import
ModelConfigEntity
from
core.model_runtime.entities.llm_entities
import
LLMResult
as
RuntimeLLMResult
from
core.model_runtime.entities.message_entities
import
AssistantPromptMessage
,
PromptMessage
,
UserPromptMessage
from
core.model_runtime.model_providers.__base.large_language_model
import
LargeLanguageModel
from
extensions.ext_database
import
db
from
models.model
import
Message
,
MessageAgentThought
,
MessageChain
class
AgentLoopGatherCallbackHandler
(
BaseCallbackHandler
):
"""Callback Handler that prints to std out."""
raise_error
:
bool
=
True
def
__init__
(
self
,
model_config
:
ModelConfigEntity
,
queue_manager
:
AppQueueManager
,
message
:
Message
,
message_chain
:
MessageChain
)
->
None
:
"""Initialize callback handler."""
self
.
model_config
=
model_config
self
.
queue_manager
=
queue_manager
self
.
message
=
message
self
.
message_chain
=
message_chain
model_type_instance
=
self
.
model_config
.
provider_model_bundle
.
model_type_instance
self
.
model_type_instance
=
cast
(
LargeLanguageModel
,
model_type_instance
)
self
.
_agent_loops
=
[]
self
.
_current_loop
=
None
self
.
_message_agent_thought
=
None
@
property
def
agent_loops
(
self
)
->
list
[
AgentLoop
]:
return
self
.
_agent_loops
def
clear_agent_loops
(
self
)
->
None
:
self
.
_agent_loops
=
[]
self
.
_current_loop
=
None
self
.
_message_agent_thought
=
None
@
property
def
always_verbose
(
self
)
->
bool
:
"""Whether to call verbose callbacks even if verbose is False."""
return
True
@
property
def
ignore_chain
(
self
)
->
bool
:
"""Whether to ignore chain callbacks."""
return
True
def
on_llm_before_invoke
(
self
,
prompt_messages
:
list
[
PromptMessage
])
->
None
:
if
not
self
.
_current_loop
:
# Agent start with a LLM query
self
.
_current_loop
=
AgentLoop
(
position
=
len
(
self
.
_agent_loops
)
+
1
,
prompt
=
"
\n
"
.
join
([
prompt_message
.
content
for
prompt_message
in
prompt_messages
]),
status
=
'llm_started'
,
started_at
=
time
.
perf_counter
()
)
def
on_llm_after_invoke
(
self
,
result
:
RuntimeLLMResult
)
->
None
:
if
self
.
_current_loop
and
self
.
_current_loop
.
status
==
'llm_started'
:
self
.
_current_loop
.
status
=
'llm_end'
if
result
.
usage
:
self
.
_current_loop
.
prompt_tokens
=
result
.
usage
.
prompt_tokens
else
:
self
.
_current_loop
.
prompt_tokens
=
self
.
model_type_instance
.
get_num_tokens
(
model
=
self
.
model_config
.
model
,
credentials
=
self
.
model_config
.
credentials
,
prompt_messages
=
[
UserPromptMessage
(
content
=
self
.
_current_loop
.
prompt
)]
)
completion_message
=
result
.
message
if
completion_message
.
tool_calls
:
self
.
_current_loop
.
completion
\
=
json
.
dumps
({
'function_call'
:
completion_message
.
tool_calls
})
else
:
self
.
_current_loop
.
completion
=
completion_message
.
content
if
result
.
usage
:
self
.
_current_loop
.
completion_tokens
=
result
.
usage
.
completion_tokens
else
:
self
.
_current_loop
.
completion_tokens
=
self
.
model_type_instance
.
get_num_tokens
(
model
=
self
.
model_config
.
model
,
credentials
=
self
.
model_config
.
credentials
,
prompt_messages
=
[
AssistantPromptMessage
(
content
=
self
.
_current_loop
.
completion
)]
)
def
on_chat_model_start
(
self
,
serialized
:
dict
[
str
,
Any
],
messages
:
list
[
list
[
BaseMessage
]],
**
kwargs
:
Any
)
->
Any
:
pass
def
on_llm_start
(
self
,
serialized
:
dict
[
str
,
Any
],
prompts
:
list
[
str
],
**
kwargs
:
Any
)
->
None
:
pass
def
on_llm_end
(
self
,
response
:
LLMResult
,
**
kwargs
:
Any
)
->
None
:
"""Do nothing."""
pass
def
on_llm_error
(
self
,
error
:
Union
[
Exception
,
KeyboardInterrupt
],
**
kwargs
:
Any
)
->
None
:
logging
.
debug
(
"Agent on_llm_error:
%
s"
,
error
)
self
.
_agent_loops
=
[]
self
.
_current_loop
=
None
self
.
_message_agent_thought
=
None
def
on_tool_start
(
self
,
serialized
:
dict
[
str
,
Any
],
input_str
:
str
,
**
kwargs
:
Any
,
)
->
None
:
"""Do nothing."""
# kwargs={'color': 'green', 'llm_prefix': 'Thought:', 'observation_prefix': 'Observation: '}
# input_str='action-input'
# serialized={'description': 'A search engine. Useful for when you need to answer questions about current events. Input should be a search query.', 'name': 'Search'}
pass
def
on_agent_action
(
self
,
action
:
AgentAction
,
color
:
Optional
[
str
]
=
None
,
**
kwargs
:
Any
)
->
Any
:
"""Run on agent action."""
tool
=
action
.
tool
tool_input
=
json
.
dumps
({
"query"
:
action
.
tool_input
}
if
isinstance
(
action
.
tool_input
,
str
)
else
action
.
tool_input
)
completion
=
None
if
isinstance
(
action
,
openai_functions_agent
.
base
.
_FunctionsAgentAction
)
\
or
isinstance
(
action
,
openai_functions_multi_agent
.
base
.
_FunctionsAgentAction
):
thought
=
action
.
log
.
strip
()
completion
=
json
.
dumps
({
'function_call'
:
action
.
message_log
[
0
]
.
additional_kwargs
[
'function_call'
]})
else
:
action_name_position
=
action
.
log
.
index
(
"Action:"
)
if
action
.
log
else
-
1
thought
=
action
.
log
[:
action_name_position
]
.
strip
()
if
action
.
log
else
''
if
self
.
_current_loop
and
self
.
_current_loop
.
status
==
'llm_end'
:
self
.
_current_loop
.
status
=
'agent_action'
self
.
_current_loop
.
thought
=
thought
self
.
_current_loop
.
tool_name
=
tool
self
.
_current_loop
.
tool_input
=
tool_input
if
completion
is
not
None
:
self
.
_current_loop
.
completion
=
completion
self
.
_message_agent_thought
=
self
.
_init_agent_thought
()
def
on_tool_end
(
self
,
output
:
str
,
color
:
Optional
[
str
]
=
None
,
observation_prefix
:
Optional
[
str
]
=
None
,
llm_prefix
:
Optional
[
str
]
=
None
,
**
kwargs
:
Any
,
)
->
None
:
"""If not the final action, print out observation."""
# kwargs={'name': 'Search'}
# llm_prefix='Thought:'
# observation_prefix='Observation: '
# output='53 years'
if
self
.
_current_loop
and
self
.
_current_loop
.
status
==
'agent_action'
and
output
and
output
!=
'None'
:
self
.
_current_loop
.
status
=
'tool_end'
self
.
_current_loop
.
tool_output
=
output
self
.
_current_loop
.
completed
=
True
self
.
_current_loop
.
completed_at
=
time
.
perf_counter
()
self
.
_current_loop
.
latency
=
self
.
_current_loop
.
completed_at
-
self
.
_current_loop
.
started_at
self
.
_complete_agent_thought
(
self
.
_message_agent_thought
)
self
.
_agent_loops
.
append
(
self
.
_current_loop
)
self
.
_current_loop
=
None
self
.
_message_agent_thought
=
None
def
on_tool_error
(
self
,
error
:
Union
[
Exception
,
KeyboardInterrupt
],
**
kwargs
:
Any
)
->
None
:
"""Do nothing."""
logging
.
debug
(
"Agent on_tool_error:
%
s"
,
error
)
self
.
_agent_loops
=
[]
self
.
_current_loop
=
None
self
.
_message_agent_thought
=
None
def
on_agent_finish
(
self
,
finish
:
AgentFinish
,
**
kwargs
:
Any
)
->
Any
:
"""Run on agent end."""
# Final Answer
if
self
.
_current_loop
and
(
self
.
_current_loop
.
status
==
'llm_end'
or
self
.
_current_loop
.
status
==
'agent_action'
):
self
.
_current_loop
.
status
=
'agent_finish'
self
.
_current_loop
.
completed
=
True
self
.
_current_loop
.
completed_at
=
time
.
perf_counter
()
self
.
_current_loop
.
latency
=
self
.
_current_loop
.
completed_at
-
self
.
_current_loop
.
started_at
self
.
_current_loop
.
thought
=
'[DONE]'
self
.
_message_agent_thought
=
self
.
_init_agent_thought
()
self
.
_complete_agent_thought
(
self
.
_message_agent_thought
)
self
.
_agent_loops
.
append
(
self
.
_current_loop
)
self
.
_current_loop
=
None
self
.
_message_agent_thought
=
None
elif
not
self
.
_current_loop
and
self
.
_agent_loops
:
self
.
_agent_loops
[
-
1
]
.
status
=
'agent_finish'
def
_init_agent_thought
(
self
)
->
MessageAgentThought
:
message_agent_thought
=
MessageAgentThought
(
message_id
=
self
.
message
.
id
,
message_chain_id
=
self
.
message_chain
.
id
,
position
=
self
.
_current_loop
.
position
,
thought
=
self
.
_current_loop
.
thought
,
tool
=
self
.
_current_loop
.
tool_name
,
tool_input
=
self
.
_current_loop
.
tool_input
,
message
=
self
.
_current_loop
.
prompt
,
message_price_unit
=
0
,
answer
=
self
.
_current_loop
.
completion
,
answer_price_unit
=
0
,
created_by_role
=
(
'account'
if
self
.
message
.
from_source
==
'console'
else
'end_user'
),
created_by
=
(
self
.
message
.
from_account_id
if
self
.
message
.
from_source
==
'console'
else
self
.
message
.
from_end_user_id
)
)
db
.
session
.
add
(
message_agent_thought
)
db
.
session
.
commit
()
self
.
queue_manager
.
publish_agent_thought
(
message_agent_thought
,
PublishFrom
.
APPLICATION_MANAGER
)
return
message_agent_thought
def
_complete_agent_thought
(
self
,
message_agent_thought
:
MessageAgentThought
)
->
None
:
loop_message_tokens
=
self
.
_current_loop
.
prompt_tokens
loop_answer_tokens
=
self
.
_current_loop
.
completion_tokens
# transform usage
llm_usage
=
self
.
model_type_instance
.
_calc_response_usage
(
self
.
model_config
.
model
,
self
.
model_config
.
credentials
,
loop_message_tokens
,
loop_answer_tokens
)
message_agent_thought
.
observation
=
self
.
_current_loop
.
tool_output
message_agent_thought
.
tool_process_data
=
''
# currently not support
message_agent_thought
.
message_token
=
loop_message_tokens
message_agent_thought
.
message_unit_price
=
llm_usage
.
prompt_unit_price
message_agent_thought
.
message_price_unit
=
llm_usage
.
prompt_price_unit
message_agent_thought
.
answer_token
=
loop_answer_tokens
message_agent_thought
.
answer_unit_price
=
llm_usage
.
completion_unit_price
message_agent_thought
.
answer_price_unit
=
llm_usage
.
completion_price_unit
message_agent_thought
.
latency
=
self
.
_current_loop
.
latency
message_agent_thought
.
tokens
=
self
.
_current_loop
.
prompt_tokens
+
self
.
_current_loop
.
completion_tokens
message_agent_thought
.
total_price
=
llm_usage
.
total_price
message_agent_thought
.
currency
=
llm_usage
.
currency
db
.
session
.
commit
()
api/core/callback_handler/entity/agent_loop.py
deleted
100644 → 0
View file @
e8b2cc73
from
pydantic
import
BaseModel
class
AgentLoop
(
BaseModel
):
position
:
int
=
1
thought
:
str
=
None
tool_name
:
str
=
None
tool_input
:
str
=
None
tool_output
:
str
=
None
prompt
:
str
=
None
prompt_tokens
:
int
=
0
completion
:
str
=
None
completion_tokens
:
int
=
0
latency
:
float
=
None
status
:
str
=
'llm_started'
completed
:
bool
=
False
started_at
:
float
=
None
completed_at
:
float
=
None
\ No newline at end of file
api/core/callback_handler/index_tool_callback_handler.py
View file @
655b34b7
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.app.app_queue_manager
import
AppQueueManager
,
PublishFrom
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.rag.models.document
import
Document
from
core.rag.models.document
import
Document
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
from
models.dataset
import
DatasetQuery
,
DocumentSegment
from
models.dataset
import
DatasetQuery
,
DocumentSegment
...
...
api/core/external_data_tool/external_data_fetch.py
View file @
655b34b7
...
@@ -5,7 +5,7 @@ from typing import Optional
...
@@ -5,7 +5,7 @@ from typing import Optional
from
flask
import
Flask
,
current_app
from
flask
import
Flask
,
current_app
from
core.
entities.application_
entities
import
ExternalDataVariableEntity
from
core.
app.app_config.
entities
import
ExternalDataVariableEntity
from
core.external_data_tool.factory
import
ExternalDataToolFactory
from
core.external_data_tool.factory
import
ExternalDataToolFactory
logger
=
logging
.
getLogger
(
__name__
)
logger
=
logging
.
getLogger
(
__name__
)
...
...
api/core/file/file_obj.py
View file @
655b34b7
...
@@ -3,6 +3,7 @@ from typing import Optional
...
@@ -3,6 +3,7 @@ from typing import Optional
from
pydantic
import
BaseModel
from
pydantic
import
BaseModel
from
core.app.app_config.entities
import
FileUploadEntity
from
core.file.upload_file_parser
import
UploadFileParser
from
core.file.upload_file_parser
import
UploadFileParser
from
core.model_runtime.entities.message_entities
import
ImagePromptMessageContent
from
core.model_runtime.entities.message_entities
import
ImagePromptMessageContent
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
...
@@ -50,7 +51,7 @@ class FileObj(BaseModel):
...
@@ -50,7 +51,7 @@ class FileObj(BaseModel):
transfer_method
:
FileTransferMethod
transfer_method
:
FileTransferMethod
url
:
Optional
[
str
]
url
:
Optional
[
str
]
upload_file_id
:
Optional
[
str
]
upload_file_id
:
Optional
[
str
]
file_
config
:
dict
file_
upload_entity
:
FileUploadEntity
@
property
@
property
def
data
(
self
)
->
Optional
[
str
]:
def
data
(
self
)
->
Optional
[
str
]:
...
@@ -63,7 +64,7 @@ class FileObj(BaseModel):
...
@@ -63,7 +64,7 @@ class FileObj(BaseModel):
@
property
@
property
def
prompt_message_content
(
self
)
->
ImagePromptMessageContent
:
def
prompt_message_content
(
self
)
->
ImagePromptMessageContent
:
if
self
.
type
==
FileType
.
IMAGE
:
if
self
.
type
==
FileType
.
IMAGE
:
image_config
=
self
.
file_
config
.
get
(
'image'
)
image_config
=
self
.
file_
upload_entity
.
image_config
return
ImagePromptMessageContent
(
return
ImagePromptMessageContent
(
data
=
self
.
data
,
data
=
self
.
data
,
...
...
api/core/file/message_file_parser.py
View file @
655b34b7
from
typing
import
Optional
,
Union
from
typing
import
Union
import
requests
import
requests
from
core.app.app_config.entities
import
FileUploadEntity
from
core.file.file_obj
import
FileBelongsTo
,
FileObj
,
FileTransferMethod
,
FileType
from
core.file.file_obj
import
FileBelongsTo
,
FileObj
,
FileTransferMethod
,
FileType
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
from
models.account
import
Account
from
models.account
import
Account
from
models.model
import
AppModelConfig
,
EndUser
,
MessageFile
,
UploadFile
from
models.model
import
EndUser
,
MessageFile
,
UploadFile
from
services.file_service
import
IMAGE_EXTENSIONS
from
services.file_service
import
IMAGE_EXTENSIONS
...
@@ -15,18 +16,16 @@ class MessageFileParser:
...
@@ -15,18 +16,16 @@ class MessageFileParser:
self
.
tenant_id
=
tenant_id
self
.
tenant_id
=
tenant_id
self
.
app_id
=
app_id
self
.
app_id
=
app_id
def
validate_and_transform_files_arg
(
self
,
files
:
list
[
dict
],
app_model_config
:
AppModelConfig
,
def
validate_and_transform_files_arg
(
self
,
files
:
list
[
dict
],
file_upload_entity
:
FileUploadEntity
,
user
:
Union
[
Account
,
EndUser
])
->
list
[
FileObj
]:
user
:
Union
[
Account
,
EndUser
])
->
list
[
FileObj
]:
"""
"""
validate and transform files arg
validate and transform files arg
:param files:
:param files:
:param
app_model_config
:
:param
file_upload_entity
:
:param user:
:param user:
:return:
:return:
"""
"""
file_upload_config
=
app_model_config
.
file_upload_dict
for
file
in
files
:
for
file
in
files
:
if
not
isinstance
(
file
,
dict
):
if
not
isinstance
(
file
,
dict
):
raise
ValueError
(
'Invalid file format, must be dict'
)
raise
ValueError
(
'Invalid file format, must be dict'
)
...
@@ -45,17 +44,17 @@ class MessageFileParser:
...
@@ -45,17 +44,17 @@ class MessageFileParser:
raise
ValueError
(
'Missing file upload_file_id'
)
raise
ValueError
(
'Missing file upload_file_id'
)
# transform files to file objs
# transform files to file objs
type_file_objs
=
self
.
_to_file_objs
(
files
,
file_upload_
config
)
type_file_objs
=
self
.
_to_file_objs
(
files
,
file_upload_
entity
)
# validate files
# validate files
new_files
=
[]
new_files
=
[]
for
file_type
,
file_objs
in
type_file_objs
.
items
():
for
file_type
,
file_objs
in
type_file_objs
.
items
():
if
file_type
==
FileType
.
IMAGE
:
if
file_type
==
FileType
.
IMAGE
:
# parse and validate files
# parse and validate files
image_config
=
file_upload_
config
.
get
(
'image'
)
image_config
=
file_upload_
entity
.
image_config
# check if image file feature is enabled
# check if image file feature is enabled
if
not
image_config
[
'enabled'
]
:
if
not
image_config
:
continue
continue
# Validate number of files
# Validate number of files
...
@@ -96,27 +95,27 @@ class MessageFileParser:
...
@@ -96,27 +95,27 @@ class MessageFileParser:
# return all file objs
# return all file objs
return
new_files
return
new_files
def
transform_message_files
(
self
,
files
:
list
[
MessageFile
],
file_upload_
config
:
Optional
[
dict
]
)
->
list
[
FileObj
]:
def
transform_message_files
(
self
,
files
:
list
[
MessageFile
],
file_upload_
entity
:
FileUploadEntity
)
->
list
[
FileObj
]:
"""
"""
transform message files
transform message files
:param files:
:param files:
:param file_upload_
config
:
:param file_upload_
entity
:
:return:
:return:
"""
"""
# transform files to file objs
# transform files to file objs
type_file_objs
=
self
.
_to_file_objs
(
files
,
file_upload_
config
)
type_file_objs
=
self
.
_to_file_objs
(
files
,
file_upload_
entity
)
# return all file objs
# return all file objs
return
[
file_obj
for
file_objs
in
type_file_objs
.
values
()
for
file_obj
in
file_objs
]
return
[
file_obj
for
file_objs
in
type_file_objs
.
values
()
for
file_obj
in
file_objs
]
def
_to_file_objs
(
self
,
files
:
list
[
Union
[
dict
,
MessageFile
]],
def
_to_file_objs
(
self
,
files
:
list
[
Union
[
dict
,
MessageFile
]],
file_upload_
config
:
dict
)
->
dict
[
FileType
,
list
[
FileObj
]]:
file_upload_
entity
:
FileUploadEntity
)
->
dict
[
FileType
,
list
[
FileObj
]]:
"""
"""
transform files to file objs
transform files to file objs
:param files:
:param files:
:param file_upload_
config
:
:param file_upload_
entity
:
:return:
:return:
"""
"""
type_file_objs
:
dict
[
FileType
,
list
[
FileObj
]]
=
{
type_file_objs
:
dict
[
FileType
,
list
[
FileObj
]]
=
{
...
@@ -133,7 +132,7 @@ class MessageFileParser:
...
@@ -133,7 +132,7 @@ class MessageFileParser:
if
file
.
belongs_to
==
FileBelongsTo
.
ASSISTANT
.
value
:
if
file
.
belongs_to
==
FileBelongsTo
.
ASSISTANT
.
value
:
continue
continue
file_obj
=
self
.
_to_file_obj
(
file
,
file_upload_
config
)
file_obj
=
self
.
_to_file_obj
(
file
,
file_upload_
entity
)
if
file_obj
.
type
not
in
type_file_objs
:
if
file_obj
.
type
not
in
type_file_objs
:
continue
continue
...
@@ -141,7 +140,7 @@ class MessageFileParser:
...
@@ -141,7 +140,7 @@ class MessageFileParser:
return
type_file_objs
return
type_file_objs
def
_to_file_obj
(
self
,
file
:
Union
[
dict
,
MessageFile
],
file_upload_
config
:
dict
)
->
FileObj
:
def
_to_file_obj
(
self
,
file
:
Union
[
dict
,
MessageFile
],
file_upload_
entity
:
FileUploadEntity
)
->
FileObj
:
"""
"""
transform file to file obj
transform file to file obj
...
@@ -156,7 +155,7 @@ class MessageFileParser:
...
@@ -156,7 +155,7 @@ class MessageFileParser:
transfer_method
=
transfer_method
,
transfer_method
=
transfer_method
,
url
=
file
.
get
(
'url'
)
if
transfer_method
==
FileTransferMethod
.
REMOTE_URL
else
None
,
url
=
file
.
get
(
'url'
)
if
transfer_method
==
FileTransferMethod
.
REMOTE_URL
else
None
,
upload_file_id
=
file
.
get
(
'upload_file_id'
)
if
transfer_method
==
FileTransferMethod
.
LOCAL_FILE
else
None
,
upload_file_id
=
file
.
get
(
'upload_file_id'
)
if
transfer_method
==
FileTransferMethod
.
LOCAL_FILE
else
None
,
file_
config
=
file_upload_config
file_
upload_entity
=
file_upload_entity
)
)
else
:
else
:
return
FileObj
(
return
FileObj
(
...
@@ -166,7 +165,7 @@ class MessageFileParser:
...
@@ -166,7 +165,7 @@ class MessageFileParser:
transfer_method
=
FileTransferMethod
.
value_of
(
file
.
transfer_method
),
transfer_method
=
FileTransferMethod
.
value_of
(
file
.
transfer_method
),
url
=
file
.
url
,
url
=
file
.
url
,
upload_file_id
=
file
.
upload_file_id
or
None
,
upload_file_id
=
file
.
upload_file_id
or
None
,
file_
config
=
file_upload_config
file_
upload_entity
=
file_upload_entity
)
)
def
_check_image_remote_url
(
self
,
url
):
def
_check_image_remote_url
(
self
,
url
):
...
...
api/core/helper/moderation.py
View file @
655b34b7
import
logging
import
logging
import
random
import
random
from
core.
entities.application_entities
import
ModelConfigEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBased
ModelConfigEntity
from
core.model_runtime.errors.invoke
import
InvokeBadRequestError
from
core.model_runtime.errors.invoke
import
InvokeBadRequestError
from
core.model_runtime.model_providers.openai.moderation.moderation
import
OpenAIModerationModel
from
core.model_runtime.model_providers.openai.moderation.moderation
import
OpenAIModerationModel
from
extensions.ext_hosting_provider
import
hosting_configuration
from
extensions.ext_hosting_provider
import
hosting_configuration
...
@@ -10,7 +10,7 @@ from models.provider import ProviderType
...
@@ -10,7 +10,7 @@ from models.provider import ProviderType
logger
=
logging
.
getLogger
(
__name__
)
logger
=
logging
.
getLogger
(
__name__
)
def
check_moderation
(
model_config
:
ModelConfigEntity
,
text
:
str
)
->
bool
:
def
check_moderation
(
model_config
:
EasyUIBased
ModelConfigEntity
,
text
:
str
)
->
bool
:
moderation_config
=
hosting_configuration
.
moderation_config
moderation_config
=
hosting_configuration
.
moderation_config
if
(
moderation_config
and
moderation_config
.
enabled
is
True
if
(
moderation_config
and
moderation_config
.
enabled
is
True
and
'openai'
in
hosting_configuration
.
provider_map
and
'openai'
in
hosting_configuration
.
provider_map
...
...
api/core/memory/token_buffer_memory.py
View file @
655b34b7
from
core.app.app_config.entities
import
FileUploadEntity
from
core.app.app_config.features.file_upload.manager
import
FileUploadConfigManager
from
core.file.message_file_parser
import
MessageFileParser
from
core.file.message_file_parser
import
MessageFileParser
from
core.model_manager
import
ModelInstance
from
core.model_manager
import
ModelInstance
from
core.model_runtime.entities.message_entities
import
(
from
core.model_runtime.entities.message_entities
import
(
...
@@ -43,18 +45,27 @@ class TokenBufferMemory:
...
@@ -43,18 +45,27 @@ class TokenBufferMemory:
for
message
in
messages
:
for
message
in
messages
:
files
=
message
.
message_files
files
=
message
.
message_files
if
files
:
if
files
:
file_objs
=
message_file_parser
.
transform_message_files
(
if
self
.
conversation
.
mode
not
in
[
AppMode
.
ADVANCED_CHAT
.
value
,
AppMode
.
WORKFLOW
.
value
]:
files
,
file_upload_entity
=
FileUploadConfigManager
.
convert
(
message
.
app_model_config
.
to_dict
())
message
.
app_model_config
.
file_upload_dict
else
:
if
self
.
conversation
.
mode
not
in
[
AppMode
.
ADVANCED_CHAT
.
value
,
AppMode
.
WORKFLOW
.
value
]
file_upload_entity
=
FileUploadConfigManager
.
convert
(
message
.
workflow_run
.
workflow
.
features_dict
)
else
message
.
workflow_run
.
workflow
.
features_dict
.
get
(
'file_upload'
,
{})
)
if
file_upload_entity
:
file_objs
=
message_file_parser
.
transform_message_files
(
prompt_message_contents
=
[
TextPromptMessageContent
(
data
=
message
.
query
)]
files
,
for
file_obj
in
file_objs
:
file_upload_entity
prompt_message_contents
.
append
(
file_obj
.
prompt_message_content
)
)
else
:
prompt_messages
.
append
(
UserPromptMessage
(
content
=
prompt_message_contents
))
file_objs
=
[]
if
not
file_objs
:
prompt_messages
.
append
(
UserPromptMessage
(
content
=
message
.
query
))
else
:
prompt_message_contents
=
[
TextPromptMessageContent
(
data
=
message
.
query
)]
for
file_obj
in
file_objs
:
prompt_message_contents
.
append
(
file_obj
.
prompt_message_content
)
prompt_messages
.
append
(
UserPromptMessage
(
content
=
prompt_message_contents
))
else
:
else
:
prompt_messages
.
append
(
UserPromptMessage
(
content
=
message
.
query
))
prompt_messages
.
append
(
UserPromptMessage
(
content
=
message
.
query
))
...
...
api/core/moderation/input_moderation.py
View file @
655b34b7
import
logging
import
logging
from
core.
entities.application_entities
import
AppOrchestrationConfigEntity
from
core.
app.app_config.entities
import
AppConfig
from
core.moderation.base
import
ModerationAction
,
ModerationException
from
core.moderation.base
import
ModerationAction
,
ModerationException
from
core.moderation.factory
import
ModerationFactory
from
core.moderation.factory
import
ModerationFactory
...
@@ -10,22 +10,22 @@ logger = logging.getLogger(__name__)
...
@@ -10,22 +10,22 @@ logger = logging.getLogger(__name__)
class
InputModeration
:
class
InputModeration
:
def
check
(
self
,
app_id
:
str
,
def
check
(
self
,
app_id
:
str
,
tenant_id
:
str
,
tenant_id
:
str
,
app_
orchestration_config_entity
:
AppOrchestrationConfigEntity
,
app_
config
:
AppConfig
,
inputs
:
dict
,
inputs
:
dict
,
query
:
str
)
->
tuple
[
bool
,
dict
,
str
]:
query
:
str
)
->
tuple
[
bool
,
dict
,
str
]:
"""
"""
Process sensitive_word_avoidance.
Process sensitive_word_avoidance.
:param app_id: app id
:param app_id: app id
:param tenant_id: tenant id
:param tenant_id: tenant id
:param app_
orchestration_config_entity: app orchestration config entity
:param app_
config: app config
:param inputs: inputs
:param inputs: inputs
:param query: query
:param query: query
:return:
:return:
"""
"""
if
not
app_
orchestration_config_entity
.
sensitive_word_avoidance
:
if
not
app_
config
.
sensitive_word_avoidance
:
return
False
,
inputs
,
query
return
False
,
inputs
,
query
sensitive_word_avoidance_config
=
app_
orchestration_config_entity
.
sensitive_word_avoidance
sensitive_word_avoidance_config
=
app_
config
.
sensitive_word_avoidance
moderation_type
=
sensitive_word_avoidance_config
.
type
moderation_type
=
sensitive_word_avoidance_config
.
type
moderation_factory
=
ModerationFactory
(
moderation_factory
=
ModerationFactory
(
...
...
api/core/prompt/advanced_prompt_transform.py
View file @
655b34b7
from
typing
import
Optional
from
typing
import
Optional
from
core.entities.application_entities
import
(
from
core.app.app_config.entities
import
PromptTemplateEntity
,
AdvancedCompletionPromptTemplateEntity
AdvancedCompletionPromptTemplateEntity
,
from
core.app.entities.app_invoke_entities
import
EasyUIBasedModelConfigEntity
ModelConfigEntity
,
PromptTemplateEntity
,
)
from
core.file.file_obj
import
FileObj
from
core.file.file_obj
import
FileObj
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_runtime.entities.message_entities
import
(
from
core.model_runtime.entities.message_entities
import
(
...
@@ -31,7 +28,7 @@ class AdvancedPromptTransform(PromptTransform):
...
@@ -31,7 +28,7 @@ class AdvancedPromptTransform(PromptTransform):
files
:
list
[
FileObj
],
files
:
list
[
FileObj
],
context
:
Optional
[
str
],
context
:
Optional
[
str
],
memory
:
Optional
[
TokenBufferMemory
],
memory
:
Optional
[
TokenBufferMemory
],
model_config
:
ModelConfigEntity
)
->
list
[
PromptMessage
]:
model_config
:
EasyUIBased
ModelConfigEntity
)
->
list
[
PromptMessage
]:
prompt_messages
=
[]
prompt_messages
=
[]
model_mode
=
ModelMode
.
value_of
(
model_config
.
mode
)
model_mode
=
ModelMode
.
value_of
(
model_config
.
mode
)
...
@@ -65,7 +62,7 @@ class AdvancedPromptTransform(PromptTransform):
...
@@ -65,7 +62,7 @@ class AdvancedPromptTransform(PromptTransform):
files
:
list
[
FileObj
],
files
:
list
[
FileObj
],
context
:
Optional
[
str
],
context
:
Optional
[
str
],
memory
:
Optional
[
TokenBufferMemory
],
memory
:
Optional
[
TokenBufferMemory
],
model_config
:
ModelConfigEntity
)
->
list
[
PromptMessage
]:
model_config
:
EasyUIBased
ModelConfigEntity
)
->
list
[
PromptMessage
]:
"""
"""
Get completion model prompt messages.
Get completion model prompt messages.
"""
"""
...
@@ -113,7 +110,7 @@ class AdvancedPromptTransform(PromptTransform):
...
@@ -113,7 +110,7 @@ class AdvancedPromptTransform(PromptTransform):
files
:
list
[
FileObj
],
files
:
list
[
FileObj
],
context
:
Optional
[
str
],
context
:
Optional
[
str
],
memory
:
Optional
[
TokenBufferMemory
],
memory
:
Optional
[
TokenBufferMemory
],
model_config
:
ModelConfigEntity
)
->
list
[
PromptMessage
]:
model_config
:
EasyUIBased
ModelConfigEntity
)
->
list
[
PromptMessage
]:
"""
"""
Get chat model prompt messages.
Get chat model prompt messages.
"""
"""
...
@@ -202,7 +199,7 @@ class AdvancedPromptTransform(PromptTransform):
...
@@ -202,7 +199,7 @@ class AdvancedPromptTransform(PromptTransform):
role_prefix
:
AdvancedCompletionPromptTemplateEntity
.
RolePrefixEntity
,
role_prefix
:
AdvancedCompletionPromptTemplateEntity
.
RolePrefixEntity
,
prompt_template
:
PromptTemplateParser
,
prompt_template
:
PromptTemplateParser
,
prompt_inputs
:
dict
,
prompt_inputs
:
dict
,
model_config
:
ModelConfigEntity
)
->
dict
:
model_config
:
EasyUIBased
ModelConfigEntity
)
->
dict
:
if
'#histories#'
in
prompt_template
.
variable_keys
:
if
'#histories#'
in
prompt_template
.
variable_keys
:
if
memory
:
if
memory
:
inputs
=
{
'#histories#'
:
''
,
**
prompt_inputs
}
inputs
=
{
'#histories#'
:
''
,
**
prompt_inputs
}
...
...
api/core/prompt/prompt_transform.py
View file @
655b34b7
from
typing
import
Optional
,
cast
from
typing
import
Optional
,
cast
from
core.
entities.application_entities
import
ModelConfigEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBased
ModelConfigEntity
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_runtime.entities.message_entities
import
PromptMessage
from
core.model_runtime.entities.message_entities
import
PromptMessage
from
core.model_runtime.entities.model_entities
import
ModelPropertyKey
from
core.model_runtime.entities.model_entities
import
ModelPropertyKey
...
@@ -10,14 +10,14 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
...
@@ -10,14 +10,14 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
class
PromptTransform
:
class
PromptTransform
:
def
_append_chat_histories
(
self
,
memory
:
TokenBufferMemory
,
def
_append_chat_histories
(
self
,
memory
:
TokenBufferMemory
,
prompt_messages
:
list
[
PromptMessage
],
prompt_messages
:
list
[
PromptMessage
],
model_config
:
ModelConfigEntity
)
->
list
[
PromptMessage
]:
model_config
:
EasyUIBased
ModelConfigEntity
)
->
list
[
PromptMessage
]:
rest_tokens
=
self
.
_calculate_rest_token
(
prompt_messages
,
model_config
)
rest_tokens
=
self
.
_calculate_rest_token
(
prompt_messages
,
model_config
)
histories
=
self
.
_get_history_messages_list_from_memory
(
memory
,
rest_tokens
)
histories
=
self
.
_get_history_messages_list_from_memory
(
memory
,
rest_tokens
)
prompt_messages
.
extend
(
histories
)
prompt_messages
.
extend
(
histories
)
return
prompt_messages
return
prompt_messages
def
_calculate_rest_token
(
self
,
prompt_messages
:
list
[
PromptMessage
],
model_config
:
ModelConfigEntity
)
->
int
:
def
_calculate_rest_token
(
self
,
prompt_messages
:
list
[
PromptMessage
],
model_config
:
EasyUIBased
ModelConfigEntity
)
->
int
:
rest_tokens
=
2000
rest_tokens
=
2000
model_context_tokens
=
model_config
.
model_schema
.
model_properties
.
get
(
ModelPropertyKey
.
CONTEXT_SIZE
)
model_context_tokens
=
model_config
.
model_schema
.
model_properties
.
get
(
ModelPropertyKey
.
CONTEXT_SIZE
)
...
...
api/core/prompt/simple_prompt_transform.py
View file @
655b34b7
...
@@ -3,10 +3,8 @@ import json
...
@@ -3,10 +3,8 @@ import json
import
os
import
os
from
typing
import
Optional
from
typing
import
Optional
from
core.entities.application_entities
import
(
from
core.app.app_config.entities
import
PromptTemplateEntity
ModelConfigEntity
,
from
core.app.entities.app_invoke_entities
import
EasyUIBasedModelConfigEntity
PromptTemplateEntity
,
)
from
core.file.file_obj
import
FileObj
from
core.file.file_obj
import
FileObj
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_runtime.entities.message_entities
import
(
from
core.model_runtime.entities.message_entities
import
(
...
@@ -54,7 +52,7 @@ class SimplePromptTransform(PromptTransform):
...
@@ -54,7 +52,7 @@ class SimplePromptTransform(PromptTransform):
files
:
list
[
FileObj
],
files
:
list
[
FileObj
],
context
:
Optional
[
str
],
context
:
Optional
[
str
],
memory
:
Optional
[
TokenBufferMemory
],
memory
:
Optional
[
TokenBufferMemory
],
model_config
:
ModelConfigEntity
)
->
\
model_config
:
EasyUIBased
ModelConfigEntity
)
->
\
tuple
[
list
[
PromptMessage
],
Optional
[
list
[
str
]]]:
tuple
[
list
[
PromptMessage
],
Optional
[
list
[
str
]]]:
model_mode
=
ModelMode
.
value_of
(
model_config
.
mode
)
model_mode
=
ModelMode
.
value_of
(
model_config
.
mode
)
if
model_mode
==
ModelMode
.
CHAT
:
if
model_mode
==
ModelMode
.
CHAT
:
...
@@ -83,7 +81,7 @@ class SimplePromptTransform(PromptTransform):
...
@@ -83,7 +81,7 @@ class SimplePromptTransform(PromptTransform):
return
prompt_messages
,
stops
return
prompt_messages
,
stops
def
get_prompt_str_and_rules
(
self
,
app_mode
:
AppMode
,
def
get_prompt_str_and_rules
(
self
,
app_mode
:
AppMode
,
model_config
:
ModelConfigEntity
,
model_config
:
EasyUIBased
ModelConfigEntity
,
pre_prompt
:
str
,
pre_prompt
:
str
,
inputs
:
dict
,
inputs
:
dict
,
query
:
Optional
[
str
]
=
None
,
query
:
Optional
[
str
]
=
None
,
...
@@ -164,7 +162,7 @@ class SimplePromptTransform(PromptTransform):
...
@@ -164,7 +162,7 @@ class SimplePromptTransform(PromptTransform):
context
:
Optional
[
str
],
context
:
Optional
[
str
],
files
:
list
[
FileObj
],
files
:
list
[
FileObj
],
memory
:
Optional
[
TokenBufferMemory
],
memory
:
Optional
[
TokenBufferMemory
],
model_config
:
ModelConfigEntity
)
\
model_config
:
EasyUIBased
ModelConfigEntity
)
\
->
tuple
[
list
[
PromptMessage
],
Optional
[
list
[
str
]]]:
->
tuple
[
list
[
PromptMessage
],
Optional
[
list
[
str
]]]:
prompt_messages
=
[]
prompt_messages
=
[]
...
@@ -202,7 +200,7 @@ class SimplePromptTransform(PromptTransform):
...
@@ -202,7 +200,7 @@ class SimplePromptTransform(PromptTransform):
context
:
Optional
[
str
],
context
:
Optional
[
str
],
files
:
list
[
FileObj
],
files
:
list
[
FileObj
],
memory
:
Optional
[
TokenBufferMemory
],
memory
:
Optional
[
TokenBufferMemory
],
model_config
:
ModelConfigEntity
)
\
model_config
:
EasyUIBased
ModelConfigEntity
)
\
->
tuple
[
list
[
PromptMessage
],
Optional
[
list
[
str
]]]:
->
tuple
[
list
[
PromptMessage
],
Optional
[
list
[
str
]]]:
# get prompt
# get prompt
prompt
,
prompt_rules
=
self
.
get_prompt_str_and_rules
(
prompt
,
prompt_rules
=
self
.
get_prompt_str_and_rules
(
...
...
api/core/rag/retrieval/agent/agent_llm_callback.py
deleted
100644 → 0
View file @
e8b2cc73
import
logging
from
typing
import
Optional
from
core.callback_handler.agent_loop_gather_callback_handler
import
AgentLoopGatherCallbackHandler
from
core.model_runtime.callbacks.base_callback
import
Callback
from
core.model_runtime.entities.llm_entities
import
LLMResult
,
LLMResultChunk
from
core.model_runtime.entities.message_entities
import
PromptMessage
,
PromptMessageTool
from
core.model_runtime.model_providers.__base.ai_model
import
AIModel
logger
=
logging
.
getLogger
(
__name__
)
class
AgentLLMCallback
(
Callback
):
def
__init__
(
self
,
agent_callback
:
AgentLoopGatherCallbackHandler
)
->
None
:
self
.
agent_callback
=
agent_callback
def
on_before_invoke
(
self
,
llm_instance
:
AIModel
,
model
:
str
,
credentials
:
dict
,
prompt_messages
:
list
[
PromptMessage
],
model_parameters
:
dict
,
tools
:
Optional
[
list
[
PromptMessageTool
]]
=
None
,
stop
:
Optional
[
list
[
str
]]
=
None
,
stream
:
bool
=
True
,
user
:
Optional
[
str
]
=
None
)
->
None
:
"""
Before invoke callback
:param llm_instance: LLM instance
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
"""
self
.
agent_callback
.
on_llm_before_invoke
(
prompt_messages
=
prompt_messages
)
def
on_new_chunk
(
self
,
llm_instance
:
AIModel
,
chunk
:
LLMResultChunk
,
model
:
str
,
credentials
:
dict
,
prompt_messages
:
list
[
PromptMessage
],
model_parameters
:
dict
,
tools
:
Optional
[
list
[
PromptMessageTool
]]
=
None
,
stop
:
Optional
[
list
[
str
]]
=
None
,
stream
:
bool
=
True
,
user
:
Optional
[
str
]
=
None
):
"""
On new chunk callback
:param llm_instance: LLM instance
:param chunk: chunk
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
"""
pass
def
on_after_invoke
(
self
,
llm_instance
:
AIModel
,
result
:
LLMResult
,
model
:
str
,
credentials
:
dict
,
prompt_messages
:
list
[
PromptMessage
],
model_parameters
:
dict
,
tools
:
Optional
[
list
[
PromptMessageTool
]]
=
None
,
stop
:
Optional
[
list
[
str
]]
=
None
,
stream
:
bool
=
True
,
user
:
Optional
[
str
]
=
None
)
->
None
:
"""
After invoke callback
:param llm_instance: LLM instance
:param result: result
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
"""
self
.
agent_callback
.
on_llm_after_invoke
(
result
=
result
)
def
on_invoke_error
(
self
,
llm_instance
:
AIModel
,
ex
:
Exception
,
model
:
str
,
credentials
:
dict
,
prompt_messages
:
list
[
PromptMessage
],
model_parameters
:
dict
,
tools
:
Optional
[
list
[
PromptMessageTool
]]
=
None
,
stop
:
Optional
[
list
[
str
]]
=
None
,
stream
:
bool
=
True
,
user
:
Optional
[
str
]
=
None
)
->
None
:
"""
Invoke error callback
:param llm_instance: LLM instance
:param ex: exception
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
"""
self
.
agent_callback
.
on_llm_error
(
error
=
ex
)
api/core/rag/retrieval/agent/llm_chain.py
View file @
655b34b7
...
@@ -5,19 +5,17 @@ from langchain.callbacks.manager import CallbackManagerForChainRun
...
@@ -5,19 +5,17 @@ from langchain.callbacks.manager import CallbackManagerForChainRun
from
langchain.schema
import
Generation
,
LLMResult
from
langchain.schema
import
Generation
,
LLMResult
from
langchain.schema.language_model
import
BaseLanguageModel
from
langchain.schema.language_model
import
BaseLanguageModel
from
core.
entities.application_entities
import
ModelConfigEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBased
ModelConfigEntity
from
core.entities.message_entities
import
lc_messages_to_prompt_messages
from
core.entities.message_entities
import
lc_messages_to_prompt_messages
from
core.model_manager
import
ModelInstance
from
core.model_manager
import
ModelInstance
from
core.rag.retrieval.agent.agent_llm_callback
import
AgentLLMCallback
from
core.rag.retrieval.agent.fake_llm
import
FakeLLM
from
core.rag.retrieval.agent.fake_llm
import
FakeLLM
class
LLMChain
(
LCLLMChain
):
class
LLMChain
(
LCLLMChain
):
model_config
:
ModelConfigEntity
model_config
:
EasyUIBased
ModelConfigEntity
"""The language model instance to use."""
"""The language model instance to use."""
llm
:
BaseLanguageModel
=
FakeLLM
(
response
=
""
)
llm
:
BaseLanguageModel
=
FakeLLM
(
response
=
""
)
parameters
:
dict
[
str
,
Any
]
=
{}
parameters
:
dict
[
str
,
Any
]
=
{}
agent_llm_callback
:
Optional
[
AgentLLMCallback
]
=
None
def
generate
(
def
generate
(
self
,
self
,
...
@@ -38,7 +36,6 @@ class LLMChain(LCLLMChain):
...
@@ -38,7 +36,6 @@ class LLMChain(LCLLMChain):
prompt_messages
=
prompt_messages
,
prompt_messages
=
prompt_messages
,
stream
=
False
,
stream
=
False
,
stop
=
stop
,
stop
=
stop
,
callbacks
=
[
self
.
agent_llm_callback
]
if
self
.
agent_llm_callback
else
None
,
model_parameters
=
self
.
parameters
model_parameters
=
self
.
parameters
)
)
...
...
api/core/rag/retrieval/agent/multi_dataset_router_agent.py
View file @
655b34b7
...
@@ -10,7 +10,7 @@ from langchain.schema import AgentAction, AgentFinish, AIMessage, SystemMessage
...
@@ -10,7 +10,7 @@ from langchain.schema import AgentAction, AgentFinish, AIMessage, SystemMessage
from
langchain.tools
import
BaseTool
from
langchain.tools
import
BaseTool
from
pydantic
import
root_validator
from
pydantic
import
root_validator
from
core.
entities.application_entities
import
ModelConfigEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBased
ModelConfigEntity
from
core.entities.message_entities
import
lc_messages_to_prompt_messages
from
core.entities.message_entities
import
lc_messages_to_prompt_messages
from
core.model_manager
import
ModelInstance
from
core.model_manager
import
ModelInstance
from
core.model_runtime.entities.message_entities
import
PromptMessageTool
from
core.model_runtime.entities.message_entities
import
PromptMessageTool
...
@@ -21,7 +21,7 @@ class MultiDatasetRouterAgent(OpenAIFunctionsAgent):
...
@@ -21,7 +21,7 @@ class MultiDatasetRouterAgent(OpenAIFunctionsAgent):
"""
"""
An Multi Dataset Retrieve Agent driven by Router.
An Multi Dataset Retrieve Agent driven by Router.
"""
"""
model_config
:
ModelConfigEntity
model_config
:
EasyUIBased
ModelConfigEntity
class
Config
:
class
Config
:
"""Configuration for this pydantic object."""
"""Configuration for this pydantic object."""
...
@@ -156,7 +156,7 @@ class MultiDatasetRouterAgent(OpenAIFunctionsAgent):
...
@@ -156,7 +156,7 @@ class MultiDatasetRouterAgent(OpenAIFunctionsAgent):
@
classmethod
@
classmethod
def
from_llm_and_tools
(
def
from_llm_and_tools
(
cls
,
cls
,
model_config
:
ModelConfigEntity
,
model_config
:
EasyUIBased
ModelConfigEntity
,
tools
:
Sequence
[
BaseTool
],
tools
:
Sequence
[
BaseTool
],
callback_manager
:
Optional
[
BaseCallbackManager
]
=
None
,
callback_manager
:
Optional
[
BaseCallbackManager
]
=
None
,
extra_prompt_messages
:
Optional
[
list
[
BaseMessagePromptTemplate
]]
=
None
,
extra_prompt_messages
:
Optional
[
list
[
BaseMessagePromptTemplate
]]
=
None
,
...
...
api/core/rag/retrieval/agent/structed_multi_dataset_router_agent.py
View file @
655b34b7
...
@@ -12,7 +12,7 @@ from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, Sy
...
@@ -12,7 +12,7 @@ from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, Sy
from
langchain.schema
import
AgentAction
,
AgentFinish
,
OutputParserException
from
langchain.schema
import
AgentAction
,
AgentFinish
,
OutputParserException
from
langchain.tools
import
BaseTool
from
langchain.tools
import
BaseTool
from
core.
entities.application_entities
import
ModelConfigEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBased
ModelConfigEntity
from
core.rag.retrieval.agent.llm_chain
import
LLMChain
from
core.rag.retrieval.agent.llm_chain
import
LLMChain
FORMAT_INSTRUCTIONS
=
"""Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
FORMAT_INSTRUCTIONS
=
"""Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
...
@@ -206,7 +206,7 @@ Thought: {agent_scratchpad}
...
@@ -206,7 +206,7 @@ Thought: {agent_scratchpad}
@
classmethod
@
classmethod
def
from_llm_and_tools
(
def
from_llm_and_tools
(
cls
,
cls
,
model_config
:
ModelConfigEntity
,
model_config
:
EasyUIBased
ModelConfigEntity
,
tools
:
Sequence
[
BaseTool
],
tools
:
Sequence
[
BaseTool
],
callback_manager
:
Optional
[
BaseCallbackManager
]
=
None
,
callback_manager
:
Optional
[
BaseCallbackManager
]
=
None
,
output_parser
:
Optional
[
AgentOutputParser
]
=
None
,
output_parser
:
Optional
[
AgentOutputParser
]
=
None
,
...
...
api/core/rag/retrieval/agent_based_dataset_executor.py
View file @
655b34b7
...
@@ -7,13 +7,12 @@ from langchain.callbacks.manager import Callbacks
...
@@ -7,13 +7,12 @@ from langchain.callbacks.manager import Callbacks
from
langchain.tools
import
BaseTool
from
langchain.tools
import
BaseTool
from
pydantic
import
BaseModel
,
Extra
from
pydantic
import
BaseModel
,
Extra
from
core.app.entities.app_invoke_entities
import
EasyUIBasedModelConfigEntity
from
core.entities.agent_entities
import
PlanningStrategy
from
core.entities.agent_entities
import
PlanningStrategy
from
core.entities.application_entities
import
ModelConfigEntity
from
core.entities.message_entities
import
prompt_messages_to_lc_messages
from
core.entities.message_entities
import
prompt_messages_to_lc_messages
from
core.helper
import
moderation
from
core.helper
import
moderation
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_runtime.errors.invoke
import
InvokeError
from
core.model_runtime.errors.invoke
import
InvokeError
from
core.rag.retrieval.agent.agent_llm_callback
import
AgentLLMCallback
from
core.rag.retrieval.agent.multi_dataset_router_agent
import
MultiDatasetRouterAgent
from
core.rag.retrieval.agent.multi_dataset_router_agent
import
MultiDatasetRouterAgent
from
core.rag.retrieval.agent.output_parser.structured_chat
import
StructuredChatOutputParser
from
core.rag.retrieval.agent.output_parser.structured_chat
import
StructuredChatOutputParser
from
core.rag.retrieval.agent.structed_multi_dataset_router_agent
import
StructuredMultiDatasetRouterAgent
from
core.rag.retrieval.agent.structed_multi_dataset_router_agent
import
StructuredMultiDatasetRouterAgent
...
@@ -23,15 +22,14 @@ from core.tools.tool.dataset_retriever.dataset_retriever_tool import DatasetRetr
...
@@ -23,15 +22,14 @@ from core.tools.tool.dataset_retriever.dataset_retriever_tool import DatasetRetr
class
AgentConfiguration
(
BaseModel
):
class
AgentConfiguration
(
BaseModel
):
strategy
:
PlanningStrategy
strategy
:
PlanningStrategy
model_config
:
ModelConfigEntity
model_config
:
EasyUIBased
ModelConfigEntity
tools
:
list
[
BaseTool
]
tools
:
list
[
BaseTool
]
summary_model_config
:
Optional
[
ModelConfigEntity
]
=
None
summary_model_config
:
Optional
[
EasyUIBased
ModelConfigEntity
]
=
None
memory
:
Optional
[
TokenBufferMemory
]
=
None
memory
:
Optional
[
TokenBufferMemory
]
=
None
callbacks
:
Callbacks
=
None
callbacks
:
Callbacks
=
None
max_iterations
:
int
=
6
max_iterations
:
int
=
6
max_execution_time
:
Optional
[
float
]
=
None
max_execution_time
:
Optional
[
float
]
=
None
early_stopping_method
:
str
=
"generate"
early_stopping_method
:
str
=
"generate"
agent_llm_callback
:
Optional
[
AgentLLMCallback
]
=
None
# `generate` will continue to complete the last inference after reaching the iteration limit or request time limit
# `generate` will continue to complete the last inference after reaching the iteration limit or request time limit
class
Config
:
class
Config
:
...
...
api/core/rag/retrieval/dataset_retrieval.py
View file @
655b34b7
...
@@ -2,9 +2,10 @@ from typing import Optional, cast
...
@@ -2,9 +2,10 @@ from typing import Optional, cast
from
langchain.tools
import
BaseTool
from
langchain.tools
import
BaseTool
from
core.app.app_config.entities
import
DatasetEntity
,
DatasetRetrieveConfigEntity
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.entities.agent_entities
import
PlanningStrategy
from
core.entities.agent_entities
import
PlanningStrategy
from
core.
entities.application_entities
import
DatasetEntity
,
DatasetRetrieveConfigEntity
,
InvokeFrom
,
ModelConfigEntity
from
core.
app.entities.app_invoke_entities
import
InvokeFrom
,
EasyUIBased
ModelConfigEntity
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_runtime.entities.model_entities
import
ModelFeature
from
core.model_runtime.entities.model_entities
import
ModelFeature
from
core.model_runtime.model_providers.__base.large_language_model
import
LargeLanguageModel
from
core.model_runtime.model_providers.__base.large_language_model
import
LargeLanguageModel
...
@@ -17,7 +18,7 @@ from models.dataset import Dataset
...
@@ -17,7 +18,7 @@ from models.dataset import Dataset
class
DatasetRetrieval
:
class
DatasetRetrieval
:
def
retrieve
(
self
,
tenant_id
:
str
,
def
retrieve
(
self
,
tenant_id
:
str
,
model_config
:
ModelConfigEntity
,
model_config
:
EasyUIBased
ModelConfigEntity
,
config
:
DatasetEntity
,
config
:
DatasetEntity
,
query
:
str
,
query
:
str
,
invoke_from
:
InvokeFrom
,
invoke_from
:
InvokeFrom
,
...
...
api/core/tools/tool/dataset_retriever_tool.py
View file @
655b34b7
...
@@ -2,8 +2,9 @@ from typing import Any
...
@@ -2,8 +2,9 @@ from typing import Any
from
langchain.tools
import
BaseTool
from
langchain.tools
import
BaseTool
from
core.app.app_config.entities
import
DatasetRetrieveConfigEntity
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.callback_handler.index_tool_callback_handler
import
DatasetIndexToolCallbackHandler
from
core.
entities.application_entities
import
DatasetRetrieveConfigEntity
,
InvokeFrom
from
core.
app.entities.app_invoke_entities
import
InvokeFrom
from
core.rag.retrieval.dataset_retrieval
import
DatasetRetrieval
from
core.rag.retrieval.dataset_retrieval
import
DatasetRetrieval
from
core.tools.entities.common_entities
import
I18nObject
from
core.tools.entities.common_entities
import
I18nObject
from
core.tools.entities.tool_entities
import
ToolDescription
,
ToolIdentity
,
ToolInvokeMessage
,
ToolParameter
from
core.tools.entities.tool_entities
import
ToolDescription
,
ToolIdentity
,
ToolInvokeMessage
,
ToolParameter
...
...
api/events/event_handlers/deduct_quota_when_messaeg_created.py
View file @
655b34b7
from
core.
entities.application_entities
import
Application
GenerateEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBasedApp
GenerateEntity
from
core.entities.provider_entities
import
QuotaUnit
from
core.entities.provider_entities
import
QuotaUnit
from
events.message_event
import
message_was_created
from
events.message_event
import
message_was_created
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
...
@@ -8,9 +8,9 @@ from models.provider import Provider, ProviderType
...
@@ -8,9 +8,9 @@ from models.provider import Provider, ProviderType
@
message_was_created
.
connect
@
message_was_created
.
connect
def
handle
(
sender
,
**
kwargs
):
def
handle
(
sender
,
**
kwargs
):
message
=
sender
message
=
sender
application_generate_entity
:
Application
GenerateEntity
=
kwargs
.
get
(
'application_generate_entity'
)
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
=
kwargs
.
get
(
'application_generate_entity'
)
model_config
=
application_generate_entity
.
app_orchestration_config_entity
.
model_config
model_config
=
application_generate_entity
.
model_config
provider_model_bundle
=
model_config
.
provider_model_bundle
provider_model_bundle
=
model_config
.
provider_model_bundle
provider_configuration
=
provider_model_bundle
.
configuration
provider_configuration
=
provider_model_bundle
.
configuration
...
@@ -43,7 +43,7 @@ def handle(sender, **kwargs):
...
@@ -43,7 +43,7 @@ def handle(sender, **kwargs):
if
used_quota
is
not
None
:
if
used_quota
is
not
None
:
db
.
session
.
query
(
Provider
)
.
filter
(
db
.
session
.
query
(
Provider
)
.
filter
(
Provider
.
tenant_id
==
application_generate_entity
.
tenant_id
,
Provider
.
tenant_id
==
application_generate_entity
.
app_config
.
tenant_id
,
Provider
.
provider_name
==
model_config
.
provider
,
Provider
.
provider_name
==
model_config
.
provider
,
Provider
.
provider_type
==
ProviderType
.
SYSTEM
.
value
,
Provider
.
provider_type
==
ProviderType
.
SYSTEM
.
value
,
Provider
.
quota_type
==
system_configuration
.
current_quota_type
.
value
,
Provider
.
quota_type
==
system_configuration
.
current_quota_type
.
value
,
...
...
api/events/event_handlers/update_provider_last_used_at_when_messaeg_created.py
View file @
655b34b7
from
datetime
import
datetime
from
datetime
import
datetime
from
core.
entities.application_entities
import
Application
GenerateEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBasedApp
GenerateEntity
from
events.message_event
import
message_was_created
from
events.message_event
import
message_was_created
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
from
models.provider
import
Provider
from
models.provider
import
Provider
...
@@ -9,10 +9,10 @@ from models.provider import Provider
...
@@ -9,10 +9,10 @@ from models.provider import Provider
@
message_was_created
.
connect
@
message_was_created
.
connect
def
handle
(
sender
,
**
kwargs
):
def
handle
(
sender
,
**
kwargs
):
message
=
sender
message
=
sender
application_generate_entity
:
Application
GenerateEntity
=
kwargs
.
get
(
'application_generate_entity'
)
application_generate_entity
:
EasyUIBasedApp
GenerateEntity
=
kwargs
.
get
(
'application_generate_entity'
)
db
.
session
.
query
(
Provider
)
.
filter
(
db
.
session
.
query
(
Provider
)
.
filter
(
Provider
.
tenant_id
==
application_generate_entity
.
tenant_id
,
Provider
.
tenant_id
==
application_generate_entity
.
app_config
.
tenant_id
,
Provider
.
provider_name
==
application_generate_entity
.
app_orchestration_config_entity
.
model_config
.
provider
Provider
.
provider_name
==
application_generate_entity
.
model_config
.
provider
)
.
update
({
'last_used'
:
datetime
.
utcnow
()})
)
.
update
({
'last_used'
:
datetime
.
utcnow
()})
db
.
session
.
commit
()
db
.
session
.
commit
()
api/models/model.py
View file @
655b34b7
...
@@ -105,6 +105,18 @@ class App(db.Model):
...
@@ -105,6 +105,18 @@ class App(db.Model):
tenant
=
db
.
session
.
query
(
Tenant
)
.
filter
(
Tenant
.
id
==
self
.
tenant_id
)
.
first
()
tenant
=
db
.
session
.
query
(
Tenant
)
.
filter
(
Tenant
.
id
==
self
.
tenant_id
)
.
first
()
return
tenant
return
tenant
@
property
def
is_agent
(
self
)
->
bool
:
app_model_config
=
self
.
app_model_config
if
not
app_model_config
:
return
False
if
not
app_model_config
.
agent_mode
:
return
False
if
self
.
app_model_config
.
agent_mode_dict
.
get
(
'enabled'
,
False
)
\
and
self
.
app_model_config
.
agent_mode_dict
.
get
(
'strategy'
,
''
)
in
[
'function_call'
,
'react'
]:
return
True
return
False
@
property
@
property
def
deleted_tools
(
self
)
->
list
:
def
deleted_tools
(
self
)
->
list
:
# get agent mode tools
# get agent mode tools
...
...
api/models/workflow.py
View file @
655b34b7
...
@@ -129,7 +129,7 @@ class Workflow(db.Model):
...
@@ -129,7 +129,7 @@ class Workflow(db.Model):
def
features_dict
(
self
):
def
features_dict
(
self
):
return
self
.
features
if
not
self
.
features
else
json
.
loads
(
self
.
features
)
return
self
.
features
if
not
self
.
features
else
json
.
loads
(
self
.
features
)
def
user_input_form
(
self
):
def
user_input_form
(
self
)
->
list
:
# get start node from graph
# get start node from graph
if
not
self
.
graph
:
if
not
self
.
graph
:
return
[]
return
[]
...
...
api/services/app_model_config_service.py
View file @
655b34b7
from
core.app.a
gent_chat.config_validator
import
AgentChatAppConfigValidato
r
from
core.app.a
pps.agent_chat.app_config_manager
import
AgentChatAppConfigManage
r
from
core.app.
chat.config_validator
import
ChatAppConfigValidato
r
from
core.app.
apps.chat.app_config_manager
import
ChatAppConfigManage
r
from
core.app.
completion.config_validator
import
CompletionAppConfigValidato
r
from
core.app.
apps.completion.app_config_manager
import
CompletionAppConfigManage
r
from
models.model
import
AppMode
from
models.model
import
AppMode
...
@@ -9,10 +9,10 @@ class AppModelConfigService:
...
@@ -9,10 +9,10 @@ class AppModelConfigService:
@
classmethod
@
classmethod
def
validate_configuration
(
cls
,
tenant_id
:
str
,
config
:
dict
,
app_mode
:
AppMode
)
->
dict
:
def
validate_configuration
(
cls
,
tenant_id
:
str
,
config
:
dict
,
app_mode
:
AppMode
)
->
dict
:
if
app_mode
==
AppMode
.
CHAT
:
if
app_mode
==
AppMode
.
CHAT
:
return
ChatAppConfig
Validato
r
.
config_validate
(
tenant_id
,
config
)
return
ChatAppConfig
Manage
r
.
config_validate
(
tenant_id
,
config
)
elif
app_mode
==
AppMode
.
AGENT_CHAT
:
elif
app_mode
==
AppMode
.
AGENT_CHAT
:
return
AgentChatAppConfig
Validato
r
.
config_validate
(
tenant_id
,
config
)
return
AgentChatAppConfig
Manage
r
.
config_validate
(
tenant_id
,
config
)
elif
app_mode
==
AppMode
.
COMPLETION
:
elif
app_mode
==
AppMode
.
COMPLETION
:
return
CompletionAppConfig
Validato
r
.
config_validate
(
tenant_id
,
config
)
return
CompletionAppConfig
Manage
r
.
config_validate
(
tenant_id
,
config
)
else
:
else
:
raise
ValueError
(
f
"Invalid app mode: {app_mode}"
)
raise
ValueError
(
f
"Invalid app mode: {app_mode}"
)
api/services/completion_service.py
View file @
655b34b7
...
@@ -4,9 +4,9 @@ from typing import Any, Union
...
@@ -4,9 +4,9 @@ from typing import Any, Union
from
sqlalchemy
import
and_
from
sqlalchemy
import
and_
from
core.app.app_
manager
import
App
Manager
from
core.app.app_
config.features.file_upload.manager
import
FileUploadConfig
Manager
from
core.app.
validators.model_validator
import
ModelValidato
r
from
core.app.
app_manager
import
EasyUIBasedAppManage
r
from
core.
entities.application
_entities
import
InvokeFrom
from
core.
app.entities.app_invoke
_entities
import
InvokeFrom
from
core.file.message_file_parser
import
MessageFileParser
from
core.file.message_file_parser
import
MessageFileParser
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
from
models.model
import
Account
,
App
,
AppMode
,
AppModelConfig
,
Conversation
,
EndUser
,
Message
from
models.model
import
Account
,
App
,
AppMode
,
AppModelConfig
,
Conversation
,
EndUser
,
Message
...
@@ -30,7 +30,7 @@ class CompletionService:
...
@@ -30,7 +30,7 @@ class CompletionService:
auto_generate_name
=
args
[
'auto_generate_name'
]
\
auto_generate_name
=
args
[
'auto_generate_name'
]
\
if
'auto_generate_name'
in
args
else
True
if
'auto_generate_name'
in
args
else
True
if
app_model
.
mode
!=
'completion'
:
if
app_model
.
mode
!=
AppMode
.
COMPLETION
.
value
:
if
not
query
:
if
not
query
:
raise
ValueError
(
'query is required'
)
raise
ValueError
(
'query is required'
)
...
@@ -43,6 +43,7 @@ class CompletionService:
...
@@ -43,6 +43,7 @@ class CompletionService:
conversation_id
=
args
[
'conversation_id'
]
if
'conversation_id'
in
args
else
None
conversation_id
=
args
[
'conversation_id'
]
if
'conversation_id'
in
args
else
None
conversation
=
None
conversation
=
None
app_model_config_dict
=
None
if
conversation_id
:
if
conversation_id
:
conversation_filter
=
[
conversation_filter
=
[
Conversation
.
id
==
args
[
'conversation_id'
],
Conversation
.
id
==
args
[
'conversation_id'
],
...
@@ -63,42 +64,13 @@ class CompletionService:
...
@@ -63,42 +64,13 @@ class CompletionService:
if
conversation
.
status
!=
'normal'
:
if
conversation
.
status
!=
'normal'
:
raise
ConversationCompletedError
()
raise
ConversationCompletedError
()
if
not
conversation
.
override_model_configs
:
app_model_config
=
db
.
session
.
query
(
AppModelConfig
)
.
filter
(
app_model_config
=
db
.
session
.
query
(
AppModelConfig
)
.
filter
(
AppModelConfig
.
id
==
conversation
.
app_model_config_id
,
AppModelConfig
.
id
==
conversation
.
app_model_config_id
,
AppModelConfig
.
app_id
==
app_model
.
id
AppModelConfig
.
app_id
==
app_model
.
id
)
.
first
()
)
.
first
()
if
not
app_model_config
:
if
not
app_model_config
:
raise
AppModelConfigBrokenError
()
raise
AppModelConfigBrokenError
()
else
:
conversation_override_model_configs
=
json
.
loads
(
conversation
.
override_model_configs
)
app_model_config
=
AppModelConfig
(
id
=
conversation
.
app_model_config_id
,
app_id
=
app_model
.
id
,
)
app_model_config
=
app_model_config
.
from_model_config_dict
(
conversation_override_model_configs
)
if
is_model_config_override
:
# build new app model config
if
'model'
not
in
args
[
'model_config'
]:
raise
ValueError
(
'model_config.model is required'
)
if
'completion_params'
not
in
args
[
'model_config'
][
'model'
]:
raise
ValueError
(
'model_config.model.completion_params is required'
)
completion_params
=
ModelValidator
.
validate_model_completion_params
(
cp
=
args
[
'model_config'
][
'model'
][
'completion_params'
]
)
app_model_config_model
=
app_model_config
.
model_dict
app_model_config_model
[
'completion_params'
]
=
completion_params
app_model_config
.
retriever_resource
=
json
.
dumps
({
'enabled'
:
True
})
app_model_config
=
app_model_config
.
copy
()
app_model_config
.
model
=
json
.
dumps
(
app_model_config_model
)
else
:
else
:
if
app_model
.
app_model_config_id
is
None
:
if
app_model
.
app_model_config_id
is
None
:
raise
AppModelConfigBrokenError
()
raise
AppModelConfigBrokenError
()
...
@@ -113,37 +85,29 @@ class CompletionService:
...
@@ -113,37 +85,29 @@ class CompletionService:
raise
Exception
(
"Only account can override model config"
)
raise
Exception
(
"Only account can override model config"
)
# validate config
# validate config
model_config
=
AppModelConfigService
.
validate_configuration
(
app_model_config_dict
=
AppModelConfigService
.
validate_configuration
(
tenant_id
=
app_model
.
tenant_id
,
tenant_id
=
app_model
.
tenant_id
,
config
=
args
[
'model_config'
],
config
=
args
[
'model_config'
],
app_mode
=
AppMode
.
value_of
(
app_model
.
mode
)
app_mode
=
AppMode
.
value_of
(
app_model
.
mode
)
)
)
app_model_config
=
AppModelConfig
(
id
=
app_model_config
.
id
,
app_id
=
app_model
.
id
,
)
app_model_config
=
app_model_config
.
from_model_config_dict
(
model_config
)
# clean input by app_model_config form rules
inputs
=
cls
.
get_cleaned_inputs
(
inputs
,
app_model_config
)
# parse files
# parse files
message_file_parser
=
MessageFileParser
(
tenant_id
=
app_model
.
tenant_id
,
app_id
=
app_model
.
id
)
message_file_parser
=
MessageFileParser
(
tenant_id
=
app_model
.
tenant_id
,
app_id
=
app_model
.
id
)
file_objs
=
message_file_parser
.
validate_and_transform_files_arg
(
file_upload_entity
=
FileUploadConfigManager
.
convert
(
app_model_config_dict
or
app_model_config
.
to_dict
())
files
,
if
file_upload_entity
:
app_model_config
,
file_objs
=
message_file_parser
.
validate_and_transform_files_arg
(
user
files
,
)
file_upload_entity
,
user
)
else
:
file_objs
=
[]
application_manager
=
AppManager
()
application_manager
=
EasyUIBased
AppManager
()
return
application_manager
.
generate
(
return
application_manager
.
generate
(
tenant_id
=
app_model
.
tenant_id
,
app_model
=
app_model
,
app_id
=
app_model
.
id
,
app_model_config
=
app_model_config
,
app_model_config_id
=
app_model_config
.
id
,
app_model_config_dict
=
app_model_config_dict
,
app_model_config_dict
=
app_model_config
.
to_dict
(),
app_model_config_override
=
is_model_config_override
,
user
=
user
,
user
=
user
,
invoke_from
=
invoke_from
,
invoke_from
=
invoke_from
,
inputs
=
inputs
,
inputs
=
inputs
,
...
@@ -189,17 +153,19 @@ class CompletionService:
...
@@ -189,17 +153,19 @@ class CompletionService:
# parse files
# parse files
message_file_parser
=
MessageFileParser
(
tenant_id
=
app_model
.
tenant_id
,
app_id
=
app_model
.
id
)
message_file_parser
=
MessageFileParser
(
tenant_id
=
app_model
.
tenant_id
,
app_id
=
app_model
.
id
)
file_objs
=
message_file_parser
.
transform_message_files
(
file_upload_entity
=
FileUploadConfigManager
.
convert
(
current_app_model_config
.
to_dict
())
message
.
files
,
app_model_config
if
file_upload_entity
:
)
file_objs
=
message_file_parser
.
transform_message_files
(
message
.
files
,
file_upload_entity
)
else
:
file_objs
=
[]
application_manager
=
AppManager
()
application_manager
=
EasyUIBased
AppManager
()
return
application_manager
.
generate
(
return
application_manager
.
generate
(
tenant_id
=
app_model
.
tenant_id
,
app_model
=
app_model
,
app_id
=
app_model
.
id
,
app_model_config
=
current_app_model_config
,
app_model_config_id
=
app_model_config
.
id
,
app_model_config_dict
=
app_model_config
.
to_dict
(),
app_model_config_dict
=
app_model_config
.
to_dict
(),
app_model_config_override
=
True
,
user
=
user
,
user
=
user
,
invoke_from
=
invoke_from
,
invoke_from
=
invoke_from
,
inputs
=
message
.
inputs
,
inputs
=
message
.
inputs
,
...
@@ -212,46 +178,3 @@ class CompletionService:
...
@@ -212,46 +178,3 @@ class CompletionService:
}
}
)
)
@
classmethod
def
get_cleaned_inputs
(
cls
,
user_inputs
:
dict
,
app_model_config
:
AppModelConfig
):
if
user_inputs
is
None
:
user_inputs
=
{}
filtered_inputs
=
{}
# Filter input variables from form configuration, handle required fields, default values, and option values
input_form_config
=
app_model_config
.
user_input_form_list
for
config
in
input_form_config
:
input_config
=
list
(
config
.
values
())[
0
]
variable
=
input_config
[
"variable"
]
input_type
=
list
(
config
.
keys
())[
0
]
if
variable
not
in
user_inputs
or
not
user_inputs
[
variable
]:
if
input_type
==
"external_data_tool"
:
continue
if
"required"
in
input_config
and
input_config
[
"required"
]:
raise
ValueError
(
f
"{variable} is required in input form"
)
else
:
filtered_inputs
[
variable
]
=
input_config
[
"default"
]
if
"default"
in
input_config
else
""
continue
value
=
user_inputs
[
variable
]
if
value
:
if
not
isinstance
(
value
,
str
):
raise
ValueError
(
f
"{variable} in input form must be a string"
)
if
input_type
==
"select"
:
options
=
input_config
[
"options"
]
if
"options"
in
input_config
else
[]
if
value
not
in
options
:
raise
ValueError
(
f
"{variable} in input form must be one of the following: {options}"
)
else
:
if
'max_length'
in
input_config
:
max_length
=
input_config
[
'max_length'
]
if
len
(
value
)
>
max_length
:
raise
ValueError
(
f
'{variable} in input form must be less than {max_length} characters'
)
filtered_inputs
[
variable
]
=
value
.
replace
(
'
\x00
'
,
''
)
if
value
else
None
return
filtered_inputs
api/services/workflow/workflow_converter.py
View file @
655b34b7
import
json
import
json
from
typing
import
Optional
from
typing
import
Optional
from
core.app.app_manager
import
AppManager
from
core.app.app_config.entities
import
VariableEntity
,
ExternalDataVariableEntity
,
DatasetEntity
,
\
from
core.entities.application_entities
import
(
DatasetRetrieveConfigEntity
,
ModelConfigEntity
,
PromptTemplateEntity
,
FileUploadEntity
DatasetEntity
,
from
core.app.app_manager
import
EasyUIBasedAppManager
DatasetRetrieveConfigEntity
,
ExternalDataVariableEntity
,
FileUploadEntity
,
ModelConfigEntity
,
PromptTemplateEntity
,
VariableEntity
,
)
from
core.helper
import
encrypter
from
core.helper
import
encrypter
from
core.model_runtime.entities.llm_entities
import
LLMMode
from
core.model_runtime.entities.llm_entities
import
LLMMode
from
core.model_runtime.utils.encoders
import
jsonable_encoder
from
core.model_runtime.utils.encoders
import
jsonable_encoder
...
@@ -36,7 +29,7 @@ class WorkflowConverter:
...
@@ -36,7 +29,7 @@ class WorkflowConverter:
- basic mode of chatbot app
- basic mode of chatbot app
-
advanced mode of assistan
t app
-
expert mode of chatbo
t app
- completion app
- completion app
...
@@ -86,14 +79,11 @@ class WorkflowConverter:
...
@@ -86,14 +79,11 @@ class WorkflowConverter:
# get new app mode
# get new app mode
new_app_mode
=
self
.
_get_new_app_mode
(
app_model
)
new_app_mode
=
self
.
_get_new_app_mode
(
app_model
)
app_model_config_dict
=
app_model_config
.
to_dict
()
# convert app model config
# convert app model config
application_manager
=
AppManager
()
application_manager
=
EasyUIBasedAppManager
()
app_orchestration_config_entity
=
application_manager
.
convert_from_app_model_config_dict
(
app_config
=
application_manager
.
convert_to_app_config
(
tenant_id
=
app_model
.
tenant_id
,
app_model
=
app_model
,
app_model_config_dict
=
app_model_config_dict
,
app_model_config
=
app_model_config
skip_check
=
True
)
)
# init workflow graph
# init workflow graph
...
@@ -113,27 +103,27 @@ class WorkflowConverter:
...
@@ -113,27 +103,27 @@ class WorkflowConverter:
# convert to start node
# convert to start node
start_node
=
self
.
_convert_to_start_node
(
start_node
=
self
.
_convert_to_start_node
(
variables
=
app_
orchestration_config_entity
.
variables
variables
=
app_
config
.
variables
)
)
graph
[
'nodes'
]
.
append
(
start_node
)
graph
[
'nodes'
]
.
append
(
start_node
)
# convert to http request node
# convert to http request node
if
app_
orchestration_config_entity
.
external_data_variables
:
if
app_
config
.
external_data_variables
:
http_request_nodes
=
self
.
_convert_to_http_request_node
(
http_request_nodes
=
self
.
_convert_to_http_request_node
(
app_model
=
app_model
,
app_model
=
app_model
,
variables
=
app_
orchestration_config_entity
.
variables
,
variables
=
app_
config
.
variables
,
external_data_variables
=
app_
orchestration_config_entity
.
external_data_variables
external_data_variables
=
app_
config
.
external_data_variables
)
)
for
http_request_node
in
http_request_nodes
:
for
http_request_node
in
http_request_nodes
:
graph
=
self
.
_append_node
(
graph
,
http_request_node
)
graph
=
self
.
_append_node
(
graph
,
http_request_node
)
# convert to knowledge retrieval node
# convert to knowledge retrieval node
if
app_
orchestration_config_entity
.
dataset
:
if
app_
config
.
dataset
:
knowledge_retrieval_node
=
self
.
_convert_to_knowledge_retrieval_node
(
knowledge_retrieval_node
=
self
.
_convert_to_knowledge_retrieval_node
(
new_app_mode
=
new_app_mode
,
new_app_mode
=
new_app_mode
,
dataset_config
=
app_
orchestration_config_entity
.
dataset
dataset_config
=
app_
config
.
dataset
)
)
if
knowledge_retrieval_node
:
if
knowledge_retrieval_node
:
...
@@ -143,9 +133,9 @@ class WorkflowConverter:
...
@@ -143,9 +133,9 @@ class WorkflowConverter:
llm_node
=
self
.
_convert_to_llm_node
(
llm_node
=
self
.
_convert_to_llm_node
(
new_app_mode
=
new_app_mode
,
new_app_mode
=
new_app_mode
,
graph
=
graph
,
graph
=
graph
,
model_config
=
app_
orchestration_config_entity
.
model_config
,
model_config
=
app_
config
.
model
,
prompt_template
=
app_
orchestration_config_entity
.
prompt_template
,
prompt_template
=
app_
config
.
prompt_template
,
file_upload
=
app_
orchestration_config_entity
.
file_upload
file_upload
=
app_
config
.
additional_features
.
file_upload
)
)
graph
=
self
.
_append_node
(
graph
,
llm_node
)
graph
=
self
.
_append_node
(
graph
,
llm_node
)
...
@@ -155,6 +145,8 @@ class WorkflowConverter:
...
@@ -155,6 +145,8 @@ class WorkflowConverter:
graph
=
self
.
_append_node
(
graph
,
end_node
)
graph
=
self
.
_append_node
(
graph
,
end_node
)
app_model_config_dict
=
app_config
.
app_model_config_dict
# features
# features
if
new_app_mode
==
AppMode
.
ADVANCED_CHAT
:
if
new_app_mode
==
AppMode
.
ADVANCED_CHAT
:
features
=
{
features
=
{
...
...
api/services/workflow_service.py
View file @
655b34b7
...
@@ -2,8 +2,8 @@ import json
...
@@ -2,8 +2,8 @@ import json
from
datetime
import
datetime
from
datetime
import
datetime
from
typing
import
Optional
from
typing
import
Optional
from
core.app.a
dvanced_chat.config_validator
import
AdvancedChatAppConfigValidato
r
from
core.app.a
pps.advanced_chat.app_config_manager
import
AdvancedChatAppConfigManage
r
from
core.app.
workflow.config_validator
import
WorkflowAppConfigValidato
r
from
core.app.
apps.workflow.app_config_manager
import
WorkflowAppConfigManage
r
from
extensions.ext_database
import
db
from
extensions.ext_database
import
db
from
models.account
import
Account
from
models.account
import
Account
from
models.model
import
App
,
AppMode
from
models.model
import
App
,
AppMode
...
@@ -162,13 +162,13 @@ class WorkflowService:
...
@@ -162,13 +162,13 @@ class WorkflowService:
def
validate_features_structure
(
self
,
app_model
:
App
,
features
:
dict
)
->
dict
:
def
validate_features_structure
(
self
,
app_model
:
App
,
features
:
dict
)
->
dict
:
if
app_model
.
mode
==
AppMode
.
ADVANCED_CHAT
.
value
:
if
app_model
.
mode
==
AppMode
.
ADVANCED_CHAT
.
value
:
return
AdvancedChatAppConfig
Validato
r
.
config_validate
(
return
AdvancedChatAppConfig
Manage
r
.
config_validate
(
tenant_id
=
app_model
.
tenant_id
,
tenant_id
=
app_model
.
tenant_id
,
config
=
features
,
config
=
features
,
only_structure_validate
=
True
only_structure_validate
=
True
)
)
elif
app_model
.
mode
==
AppMode
.
WORKFLOW
.
value
:
elif
app_model
.
mode
==
AppMode
.
WORKFLOW
.
value
:
return
WorkflowAppConfig
Validato
r
.
config_validate
(
return
WorkflowAppConfig
Manage
r
.
config_validate
(
tenant_id
=
app_model
.
tenant_id
,
tenant_id
=
app_model
.
tenant_id
,
config
=
features
,
config
=
features
,
only_structure_validate
=
True
only_structure_validate
=
True
...
...
api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py
View file @
655b34b7
...
@@ -2,8 +2,8 @@ from unittest.mock import MagicMock
...
@@ -2,8 +2,8 @@ from unittest.mock import MagicMock
import
pytest
import
pytest
from
core.
entities.application_
entities
import
PromptTemplateEntity
,
AdvancedCompletionPromptTemplateEntity
,
\
from
core.
app.app_config.
entities
import
PromptTemplateEntity
,
AdvancedCompletionPromptTemplateEntity
,
\
ModelConfigEntity
,
AdvancedChatPromptTemplateEntity
,
AdvancedChatMessageEntity
ModelConfigEntity
,
AdvancedChatPromptTemplateEntity
,
AdvancedChatMessageEntity
,
FileUploadEntity
from
core.file.file_obj
import
FileObj
,
FileType
,
FileTransferMethod
from
core.file.file_obj
import
FileObj
,
FileType
,
FileTransferMethod
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_runtime.entities.message_entities
import
UserPromptMessage
,
AssistantPromptMessage
,
PromptMessageRole
from
core.model_runtime.entities.message_entities
import
UserPromptMessage
,
AssistantPromptMessage
,
PromptMessageRole
...
@@ -137,11 +137,11 @@ def test__get_chat_model_prompt_messages_with_files_no_memory(get_chat_model_arg
...
@@ -137,11 +137,11 @@ def test__get_chat_model_prompt_messages_with_files_no_memory(get_chat_model_arg
type
=
FileType
.
IMAGE
,
type
=
FileType
.
IMAGE
,
transfer_method
=
FileTransferMethod
.
REMOTE_URL
,
transfer_method
=
FileTransferMethod
.
REMOTE_URL
,
url
=
"https://example.com/image1.jpg"
,
url
=
"https://example.com/image1.jpg"
,
file_
config
=
{
file_
upload_entity
=
FileUploadEntity
(
"image"
:
{
image_config
=
{
"detail"
:
"high"
,
"detail"
:
"high"
,
}
}
}
)
)
)
]
]
...
...
api/tests/unit_tests/core/prompt/test_prompt_transform.py
View file @
655b34b7
from
unittest.mock
import
MagicMock
from
unittest.mock
import
MagicMock
from
core.
entities.application_
entities
import
ModelConfigEntity
from
core.
app.app_config.
entities
import
ModelConfigEntity
from
core.entities.provider_configuration
import
ProviderModelBundle
from
core.entities.provider_configuration
import
ProviderModelBundle
from
core.model_runtime.entities.message_entities
import
UserPromptMessage
from
core.model_runtime.entities.message_entities
import
UserPromptMessage
from
core.model_runtime.entities.model_entities
import
ModelPropertyKey
,
AIModelEntity
,
ParameterRule
from
core.model_runtime.entities.model_entities
import
ModelPropertyKey
,
AIModelEntity
,
ParameterRule
...
...
api/tests/unit_tests/core/prompt/test_simple_prompt_transform.py
View file @
655b34b7
from
unittest.mock
import
MagicMock
from
unittest.mock
import
MagicMock
from
core.
entities.application_entities
import
ModelConfigEntity
from
core.
app.entities.app_invoke_entities
import
EasyUIBased
ModelConfigEntity
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.memory.token_buffer_memory
import
TokenBufferMemory
from
core.model_runtime.entities.message_entities
import
UserPromptMessage
,
AssistantPromptMessage
from
core.model_runtime.entities.message_entities
import
UserPromptMessage
,
AssistantPromptMessage
from
core.prompt.simple_prompt_transform
import
SimplePromptTransform
from
core.prompt.simple_prompt_transform
import
SimplePromptTransform
...
@@ -139,7 +139,7 @@ def test_get_common_chat_app_prompt_template_with_p():
...
@@ -139,7 +139,7 @@ def test_get_common_chat_app_prompt_template_with_p():
def
test__get_chat_model_prompt_messages
():
def
test__get_chat_model_prompt_messages
():
model_config_mock
=
MagicMock
(
spec
=
ModelConfigEntity
)
model_config_mock
=
MagicMock
(
spec
=
EasyUIBased
ModelConfigEntity
)
model_config_mock
.
provider
=
'openai'
model_config_mock
.
provider
=
'openai'
model_config_mock
.
model
=
'gpt-4'
model_config_mock
.
model
=
'gpt-4'
...
@@ -191,7 +191,7 @@ def test__get_chat_model_prompt_messages():
...
@@ -191,7 +191,7 @@ def test__get_chat_model_prompt_messages():
def
test__get_completion_model_prompt_messages
():
def
test__get_completion_model_prompt_messages
():
model_config_mock
=
MagicMock
(
spec
=
ModelConfigEntity
)
model_config_mock
=
MagicMock
(
spec
=
EasyUIBased
ModelConfigEntity
)
model_config_mock
.
provider
=
'openai'
model_config_mock
.
provider
=
'openai'
model_config_mock
.
model
=
'gpt-3.5-turbo-instruct'
model_config_mock
.
model
=
'gpt-3.5-turbo-instruct'
...
...
api/tests/unit_tests/services/workflow/test_workflow_converter.py
View file @
655b34b7
...
@@ -4,7 +4,7 @@ from unittest.mock import MagicMock
...
@@ -4,7 +4,7 @@ from unittest.mock import MagicMock
import
pytest
import
pytest
from
core.
entities.application_
entities
import
VariableEntity
,
ExternalDataVariableEntity
,
DatasetEntity
,
\
from
core.
app.app_config.
entities
import
VariableEntity
,
ExternalDataVariableEntity
,
DatasetEntity
,
\
DatasetRetrieveConfigEntity
,
ModelConfigEntity
,
PromptTemplateEntity
,
AdvancedChatPromptTemplateEntity
,
\
DatasetRetrieveConfigEntity
,
ModelConfigEntity
,
PromptTemplateEntity
,
AdvancedChatPromptTemplateEntity
,
\
AdvancedChatMessageEntity
,
AdvancedCompletionPromptTemplateEntity
AdvancedChatMessageEntity
,
AdvancedCompletionPromptTemplateEntity
from
core.helper
import
encrypter
from
core.helper
import
encrypter
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment