ai-tech / dify · Commits

Unverified commit 3b357f51, authored Feb 01, 2024 by Yeuoly, committed by GitHub on Feb 01, 2024
fix: first agent latency (#2334)
Parent: 09acf215
Showing 1 changed file with 6 additions and 1 deletion.

api/core/features/assistant_fc_runner.py  (+6 / -1)
@@ -97,7 +97,6 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 tool_input='',
                 messages_ids=message_file_ids
             )
-            self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)

             # recale llm max tokens
             self.recale_llm_max_tokens(self.model_config, prompt_messages)
@@ -124,7 +123,11 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
             current_llm_usage = None

             if self.stream_tool_call:
+                is_first_chunk = True
                 for chunk in chunks:
+                    if is_first_chunk:
+                        self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+                        is_first_chunk = False
                     # check if there is any tool call
                     if self.check_tool_calls(chunk):
                         function_call_state = True
@@ -183,6 +186,8 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 if not result.message.content:
                     result.message.content = ''

+                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+
                 yield LLMResultChunk(
                     model=model_instance.model,
                     prompt_messages=result.prompt_messages,
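For readers skimming the diff, here is a minimal, self-contained sketch of the pattern this commit applies: the agent thought is no longer published before the model is invoked, but only once the first streamed chunk (or, in the blocking branch, the final result) is available, which is presumably what addresses the reported first-agent latency. Only the is_first_chunk gating and the publish_agent_thought / PublishFrom.APPLICATION_MANAGER names mirror the diff above; the QueueManager, stream_chunks, and run_streaming scaffolding below is hypothetical and exists only to make the control flow runnable.

# Sketch of the "publish on first chunk" pattern from the diff above.
# Scaffolding names are assumptions, not dify's real classes.
from enum import Enum
from typing import Iterable


class PublishFrom(Enum):
    APPLICATION_MANAGER = 1


class QueueManager:
    def publish_agent_thought(self, agent_thought: dict, source: PublishFrom) -> None:
        # In dify this would push an event onto the app queue; here we just print.
        print(f"published agent thought {agent_thought['id']} from {source.name}")


def stream_chunks() -> Iterable[str]:
    # Stand-in for the LLM chunk stream returned by the model invocation.
    yield from ["Hel", "lo", " world"]


def run_streaming(queue_manager: QueueManager, agent_thought: dict) -> str:
    """Publish the agent thought only after the first chunk arrives, so the
    thought event goes out alongside real model output rather than before
    the model has produced anything."""
    answer = ""
    is_first_chunk = True
    for chunk in stream_chunks():
        if is_first_chunk:
            queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
            is_first_chunk = False
        answer += chunk
    return answer


if __name__ == "__main__":
    print(run_streaming(QueueManager(), {"id": "thought-1"}))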