Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
D
dify
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
ai-tech
dify
Commits
167d8574
Commit
167d8574
authored
Jul 21, 2023
by
John Wang
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
fix: agent response format nouns error in claude model
parent
58d6b9be
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
69 additions
and
88 deletions
+69
-88
structured_chat.py
api/core/agent/agent/structured_chat.py
+69
-2
agent_builder.py
api/core/agent/agent_builder.py
+0
-86
No files found.
api/core/agent/agent/structured_chat.py
View file @
167d8574
from
typing
import
List
,
Tuple
,
Any
,
Union
from
typing
import
List
,
Tuple
,
Any
,
Union
,
Sequence
,
Optional
from
langchain.agents
import
StructuredChatAgent
from
langchain
import
BasePromptTemplate
from
langchain.agents
import
StructuredChatAgent
,
AgentOutputParser
,
Agent
from
langchain.agents.structured_chat.base
import
HUMAN_MESSAGE_TEMPLATE
from
langchain.base_language
import
BaseLanguageModel
from
langchain.callbacks.base
import
BaseCallbackManager
from
langchain.callbacks.manager
import
Callbacks
from
langchain.memory.summary
import
SummarizerMixin
from
langchain.schema
import
AgentAction
,
AgentFinish
,
AIMessage
,
HumanMessage
from
langchain.tools
import
BaseTool
from
langchain.agents.structured_chat.prompt
import
PREFIX
,
SUFFIX
from
core.agent.agent.calc_token_mixin
import
CalcTokenMixin
,
ExceededLLMTokensLimitError
# Prompt-template fragment that tells the LLM to emit exactly one $JSON_BLOB
# per action. The sentence pinning the nouns "Thought" / "Action" /
# "Action Input" / "Final Answer" to English is the point of this commit
# ("fix: agent response format nouns error in claude model") — presumably the
# output parser keys on those English section headers; verify against parser.
# NOTE(review): reconstructed from an extracted page; blank-line spacing and
# inner indentation of the original template may differ — confirm in repo.
FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English.
Valid "action" values: "Final Answer" or {tool_names}
Provide only ONE action per $JSON_BLOB, as shown:
```
{{{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}}}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
```
$JSON_BLOB
```
Observation: action result
... (repeat Thought/Action/Observation N times)
Thought: I know what to respond
Action:
```
{{{{
"action": "Final Answer",
"action_input": "Final response to human"
}}}}
```"""
class
AutoSummarizingStructuredChatAgent
(
StructuredChatAgent
,
CalcTokenMixin
):
moving_summary_buffer
:
str
=
""
moving_summary_index
:
int
=
0
...
...
@@ -82,3 +120,32 @@ class AutoSummarizingStructuredChatAgent(StructuredChatAgent, CalcTokenMixin):
kwargs
[
"chat_history"
]
.
append
(
AIMessage
(
content
=
self
.
moving_summary_buffer
))
return
self
.
get_full_inputs
([
intermediate_steps
[
-
1
]],
**
kwargs
)
@classmethod
def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        memory_prompts: Optional[List[BasePromptTemplate]] = None,
        **kwargs: Any,
) -> Agent:
    """Construct the agent through the parent classmethod.

    A pure pass-through to ``StructuredChatAgent.from_llm_and_tools``;
    the only difference from the parent is the default for
    ``format_instructions``, which is this module's FORMAT_INSTRUCTIONS
    (the variant requiring English section nouns) instead of the stock
    langchain text.
    """
    # Collect the explicitly named arguments once, then splat them
    # together with any extra keyword arguments — forwarded unchanged.
    forwarded = dict(
        llm=llm,
        tools=tools,
        callback_manager=callback_manager,
        output_parser=output_parser,
        prefix=prefix,
        suffix=suffix,
        human_message_template=human_message_template,
        format_instructions=format_instructions,
        input_variables=input_variables,
        memory_prompts=memory_prompts,
    )
    return super().from_llm_and_tools(**forwarded, **kwargs)
api/core/agent/agent_builder.py
deleted
100644 → 0
View file @
58d6b9be
from
typing
import
Optional
from
langchain
import
LLMChain
from
langchain.agents
import
ZeroShotAgent
,
AgentExecutor
,
ConversationalAgent
from
langchain.callbacks.manager
import
CallbackManager
from
langchain.memory.chat_memory
import
BaseChatMemory
from
core.callback_handler.agent_loop_gather_callback_handler
import
AgentLoopGatherCallbackHandler
from
core.callback_handler.dataset_tool_callback_handler
import
DatasetToolCallbackHandler
from
core.callback_handler.std_out_callback_handler
import
DifyStdOutCallbackHandler
from
core.llm.llm_builder
import
LLMBuilder
class AgentBuilder:
    """Assembles a langchain AgentExecutor wired with Dify callback handlers."""

    @classmethod
    def to_agent_chain(cls, tenant_id: str, tools,
                       memory: Optional[BaseChatMemory],
                       dataset_tool_callback_handler: DatasetToolCallbackHandler,
                       agent_loop_gather_callback_handler: AgentLoopGatherCallbackHandler):
        """Build the agent executor: LLM, instrumented tools, prompt, agent.

        The loop-gather handler is attached to the LLM, to every tool and to
        the executor itself, so each agent iteration is captured end to end.
        """
        model = LLMBuilder.to_llm(
            tenant_id=tenant_id,
            # Model name comes from the loop-gather handler's configuration.
            model_name=agent_loop_gather_callback_handler.model_name,
            temperature=0,
            max_tokens=1024,
            callbacks=[agent_loop_gather_callback_handler, DifyStdOutCallbackHandler()]
        )

        for current_tool in tools:
            # Built inside the loop on purpose: each tool gets its own list
            # (and its own fresh stdout handler instance).
            current_tool.callbacks = [
                agent_loop_gather_callback_handler,
                dataset_tool_callback_handler,
                DifyStdOutCallbackHandler()
            ]

        chain = LLMChain(
            llm=model,
            prompt=cls.build_agent_prompt_template(tools=tools, memory=memory),
        )

        built_agent = cls.build_agent(agent_llm_chain=chain, memory=memory)

        executor_callbacks = CallbackManager(
            [agent_loop_gather_callback_handler, DifyStdOutCallbackHandler()]
        )

        # `generate` will continue to complete the last inference after
        # reaching the iteration limit or request time limit.
        return AgentExecutor.from_agent_and_tools(
            tools=tools,
            agent=built_agent,
            memory=memory,
            callbacks=executor_callbacks,
            max_iterations=6,
            early_stopping_method="generate",
        )

    @classmethod
    def build_agent_prompt_template(cls, tools, memory: Optional[BaseChatMemory]):
        """Return the agent prompt: conversational when memory is supplied,
        zero-shot otherwise."""
        prompt_source = ConversationalAgent if memory else ZeroShotAgent
        return prompt_source.create_prompt(tools=tools)

    @classmethod
    def build_agent(cls, agent_llm_chain: LLMChain,
                    memory: Optional[BaseChatMemory]):
        """Instantiate the agent type that matches the memory setting —
        the same memory/no-memory split used for the prompt template."""
        agent_type = ConversationalAgent if memory else ZeroShotAgent
        return agent_type(llm_chain=agent_llm_chain)
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment