ai-tech / dify · Commits

Commit 7b738e04
authored Mar 04, 2024 by takatost
fix typo
parent 3f6c1724
Showing 5 changed files with 5 additions and 5 deletions:
api/core/agent/cot_agent_runner.py            +1 -1
api/core/agent/fc_agent_runner.py             +1 -1
api/core/app/apps/base_app_runner.py          +1 -1
api/core/app/apps/chat/app_runner.py          +1 -1
api/core/app/apps/completion/app_runner.py    +1 -1
api/core/agent/cot_agent_runner.py
...
@@ -134,7 +134,7 @@ class CotAgentRunner(BaseAgentRunner):
                 input=query
             )
-            # recalc llm max tokens
+            # recale llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
...
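Note: the unchanged context line above annotates the streamed result as
Generator[LLMResultChunk, None, None], i.e. it yields LLMResultChunk values,
accepts nothing via send(), and returns nothing. A minimal, self-contained
sketch of consuming such a stream follows; the LLMResultChunk stub and
fake_invoke_llm are illustrative stand-ins, not dify's real classes or API.

from collections.abc import Generator
from dataclasses import dataclass

@dataclass
class LLMResultChunk:
    # Illustrative stand-in for dify's chunk type, not the real class.
    delta_text: str

def fake_invoke_llm(prompt: str) -> Generator[LLMResultChunk, None, None]:
    # Yield the response piece by piece, as a streaming backend would.
    for piece in ("Hello", ", ", "world"):
        yield LLMResultChunk(delta_text=piece)

chunks: Generator[LLMResultChunk, None, None] = fake_invoke_llm("hi")
print("".join(chunk.delta_text for chunk in chunks))  # -> Hello, world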
api/core/agent/fc_agent_runner.py
...
@@ -107,7 +107,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
                 messages_ids=message_file_ids
             )
-            # recalc llm max tokens
+            # recale llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
...
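Unlike the CoT runner, the result here is typed
Union[Generator[LLMResultChunk, None, None], LLMResult]: the call may stream
chunks or return a single blocking result. A hedged sketch of handling both
shapes (the two dataclasses below are illustrative stand-ins, not dify's real
types):

from collections.abc import Generator
from dataclasses import dataclass
from typing import Union

@dataclass
class LLMResultChunk:
    delta_text: str

@dataclass
class LLMResult:
    text: str

def collect_text(result: Union[Generator[LLMResultChunk, None, None], LLMResult]) -> str:
    if isinstance(result, LLMResult):
        # Blocking mode: the full answer arrives at once.
        return result.text
    # Streaming mode: concatenate the deltas as they arrive.
    return "".join(chunk.delta_text for chunk in result)

print(collect_text(LLMResult(text="42")))  # -> 42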
api/core/app/apps/base_app_runner.py
...
@@ -84,7 +84,7 @@ class AppRunner:
         return rest_tokens

-    def recale_llm_max_tokens(self, model_config: ModelConfigWithCredentialsEntity,
+    def recalc_llm_max_tokens(self, model_config: ModelConfigWithCredentialsEntity,
                               prompt_messages: list[PromptMessage]):
         # recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
         model_type_instance = model_config.provider_model_bundle.model_type_instance
...
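The in-diff comment states what the renamed method does: shrink max_tokens when
prompt tokens plus the completion budget would exceed the model's token limit.
A minimal sketch of that clamping rule under assumed names (clamp_max_tokens,
prompt_tokens, and context_limit are hypothetical, not dify's signature):

def clamp_max_tokens(prompt_tokens: int, max_tokens: int, context_limit: int) -> int:
    # Recalculate max_tokens if prompt_tokens + max_tokens exceeds the limit.
    if prompt_tokens + max_tokens > context_limit:
        # Leave whatever room the prompt has not consumed, never below zero.
        return max(context_limit - prompt_tokens, 0)
    return max_tokens

# A 4096-token context with a 3900-token prompt leaves at most 196 tokens
# for the completion, regardless of the requested max_tokens.
print(clamp_max_tokens(prompt_tokens=3900, max_tokens=512, context_limit=4096))  # -> 196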
api/core/app/apps/chat/app_runner.py
...
@@ -189,7 +189,7 @@ class ChatAppRunner(AppRunner):
             return

         # Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
-        self.recale_llm_max_tokens(
+        self.recalc_llm_max_tokens(
             model_config=application_generate_entity.model_config,
             prompt_messages=prompt_messages
         )
...
api/core/app/apps/completion/app_runner.py
...
@@ -149,7 +149,7 @@ class CompletionAppRunner(AppRunner):
             return

         # Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
-        self.recale_llm_max_tokens(
+        self.recalc_llm_max_tokens(
             model_config=application_generate_entity.model_config,
             prompt_messages=prompt_messages
         )
...