ai-tech / dify / Commits / 4de6e955

Commit 4de6e955, authored Jun 19, 2023 by John Wang

feat: completed callback

Parent: 0578c1b6

Showing 5 changed files with 38 additions and 198 deletions (+38 / -198):
api/core/callback_handler/agent_loop_gather_callback_handler.py    +0 / -29
api/core/callback_handler/dataset_tool_callback_handler.py         +0 / -49
api/core/callback_handler/llm_callback_handler.py                  +25 / -56
api/core/callback_handler/main_chain_gather_callback_handler.py    +2 / -63
api/core/callback_handler/std_out_callback_handler.py              +11 / -1
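
Most of the 198 deleted lines below are empty `pass` overrides of LangChain callback hooks; dropping them relies on the base handler's no-op defaults, so each handler only overrides the events it actually records. A minimal sketch of that pattern (the class name and the token-counting logic are illustrative, not part of this commit):

```python
from typing import Any

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult


class TokenCountingHandler(BaseCallbackHandler):
    """Hypothetical handler that overrides only the hook it needs."""

    def __init__(self) -> None:
        self.completion_tokens = 0

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        # llm_output is provider-specific, so guard against missing keys.
        usage = (response.llm_output or {}).get('token_usage', {})
        self.completion_tokens += usage.get('completion_tokens', 0)
```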
api/core/callback_handler/agent_loop_gather_callback_handler.py
@@ -64,10 +64,6 @@ class AgentLoopGatherCallbackHandler(BaseCallbackHandler):
        self._current_loop.completion = response.generations[0][0].text
        self._current_loop.completion_tokens = response.llm_output['token_usage']['completion_tokens']

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        ...

@@ -75,21 +71,6 @@ class AgentLoopGatherCallbackHandler(BaseCallbackHandler):
        self._agent_loops = []
        self._current_loop = None

    def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
        """Print out that we are entering a chain."""
        pass

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Print out that we finished a chain."""
        pass

    def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        logging.error(error)

    def on_tool_start(self, serialized: Dict[str, Any], ...

@@ -151,16 +132,6 @@ class AgentLoopGatherCallbackHandler(BaseCallbackHandler):
        self._agent_loops = []
        self._current_loop = None

    def on_text(self, text: str, color: Optional[str] = None, end: str = "",
                **kwargs: Optional[str]) -> None:
        """Run on additional input from chains and agents."""
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run on agent end."""
        # Final Answer
        ...
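
The retained lines at the top of this file read the completion text and the completion-token count out of a LangChain `LLMResult`. A self-contained illustration of that access pattern (the literal values are made up):

```python
from langchain.schema import Generation, LLMResult

# Example values only; in the handler the LLMResult comes from the model wrapper.
response = LLMResult(
    generations=[[Generation(text="Final answer: 42")]],
    llm_output={'token_usage': {'completion_tokens': 7}},
)

completion = response.generations[0][0].text
completion_tokens = response.llm_output['token_usage']['completion_tokens']
print(completion, completion_tokens)  # Final answer: 42 7
```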
api/core/callback_handler/dataset_tool_callback_handler.py
@@ -66,52 +66,3 @@ class DatasetToolCallbackHandler(BaseCallbackHandler):
    ) -> None:
        """Do nothing."""
        logging.error(error)

    def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
        pass

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        pass

    def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        pass

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        pass

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        logging.error(error)

    def on_agent_action(self, action: AgentAction, color: Optional[str] = None, **kwargs: Any) -> Any:
        pass

    def on_text(self, text: str, color: Optional[str] = None, end: str = "",
                **kwargs: Optional[str]) -> None:
        """Run on additional input from chains and agents."""
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run on agent end."""
        pass
api/core/callback_handler/llm_callback_handler.py
@@ -3,7 +3,7 @@ import time
from typing import Any, Dict, List, Union, Optional

from langchain.callbacks.base import BaseCallbackHandler
-from langchain.schema import AgentAction, AgentFinish, LLMResult, HumanMessage, AIMessage, SystemMessage
+from langchain.schema import AgentAction, AgentFinish, LLMResult, HumanMessage, AIMessage, SystemMessage, BaseMessage

from core.callback_handler.entity.llm_message import LLMMessage
from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException

@@ -25,11 +25,35 @@ class LLMCallbackHandler(BaseCallbackHandler):
        """Whether to call verbose callbacks even if verbose is False."""
        return True

    def on_chat_model_start(
            self,
            serialized: Dict[str, Any],
            messages: List[List[BaseMessage]],
            **kwargs: Any
    ) -> Any:
        real_prompts = []
        for message in messages[0]:
            if message.type == 'human':
                role = 'user'
            elif message.type == 'ai':
                role = 'assistant'
            else:
                role = 'system'

            real_prompts.append({
                "role": role,
                "text": message.content
            })

        self.llm_message.prompt = real_prompts
        self.llm_message.prompt_tokens = self.llm.get_messages_tokens(messages[0])

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        self.start_at = time.perf_counter()

        # todo chat serialized maybe deprecated in future
        if 'Chat' in serialized['name']:
            real_prompts = []
            messages = []
            ...

@@ -95,58 +119,3 @@ class LLMCallbackHandler(BaseCallbackHandler):
            self.conversation_message_task.save_message(llm_message=self.llm_message, by_stopped=True)
        else:
            logging.error(error)

    def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
        pass

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        pass

    def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        pass

    def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> None:
        pass

    def on_agent_action(self, action: AgentAction, color: Optional[str] = None, **kwargs: Any) -> Any:
        pass

    def on_tool_end(
            self,
            output: str,
            color: Optional[str] = None,
            observation_prefix: Optional[str] = None,
            llm_prefix: Optional[str] = None,
            **kwargs: Any,
    ) -> None:
        pass

    def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        pass

    def on_text(self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Optional[str]) -> None:
        pass

    def on_agent_finish(self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any) -> None:
        pass
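
The new `on_chat_model_start` above maps LangChain message types onto OpenAI-style roles ('human' becomes 'user', 'ai' becomes 'assistant', anything else 'system') before storing the prompt on `llm_message`. A standalone sketch of that mapping, with hypothetical message contents:

```python
from langchain.schema import AIMessage, HumanMessage, SystemMessage

messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hello"),
    AIMessage(content="Hi! How can I help?"),
]

real_prompts = []
for message in messages:
    if message.type == 'human':
        role = 'user'
    elif message.type == 'ai':
        role = 'assistant'
    else:
        role = 'system'
    real_prompts.append({"role": role, "text": message.content})

# [{'role': 'system', ...}, {'role': 'user', ...}, {'role': 'assistant', ...}]
print(real_prompts)
```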
api/core/callback_handler/main_chain_gather_callback_handler.py
import logging
import time

-from typing import Any, Dict, List, Union, Optional
+from typing import Any, Dict, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult

from core.callback_handler.agent_loop_gather_callback_handler import AgentLoopGatherCallbackHandler
from core.callback_handler.entity.chain_result import ChainResult

@@ -74,64 +73,4 @@ class MainChainGatherCallbackHandler(BaseCallbackHandler):
            self,
            error: Union[Exception, KeyboardInterrupt],
            **kwargs: Any
    ) -> None:
        logging.error(error)
        self.clear_chain_results()

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        pass

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        logging.error(error)

    def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> None:
        pass

    def on_agent_action(self, action: AgentAction, color: Optional[str] = None, **kwargs: Any) -> Any:
        pass

    def on_tool_end(
            self,
            output: str,
            color: Optional[str] = None,
            observation_prefix: Optional[str] = None,
            llm_prefix: Optional[str] = None,
            **kwargs: Any,
    ) -> None:
        pass

    def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        """Do nothing."""
        logging.error(error)

    def on_text(self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Optional[str]) -> None:
        """Run on additional input from chains and agents."""
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run on agent end."""
        pass
        self.clear_chain_results()
\ No newline at end of file
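
What survives in this handler is the gather-and-reset pattern: chain results accumulate during a run and are dropped when a chain errors out. A simplified stand-in showing that shape (everything except the error logging and `clear_chain_results` is illustrative, not from this file):

```python
import logging
from typing import Any, Dict, List, Union

from langchain.callbacks.base import BaseCallbackHandler


class ChainResultGatherer(BaseCallbackHandler):
    """Illustrative, simplified version of the gather-and-reset pattern."""

    def __init__(self) -> None:
        self.chain_results: List[Dict[str, Any]] = []

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        self.chain_results.append(outputs)

    def clear_chain_results(self) -> None:
        self.chain_results = []

    def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        # On failure, log and discard any partially gathered results.
        logging.error(error)
        self.clear_chain_results()
```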
api/core/callback_handler/std_out_callback_handler.py
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.input import print_text
-from langchain.schema import AgentAction, AgentFinish, LLMResult
+from langchain.schema import AgentAction, AgentFinish, LLMResult, BaseMessage


class DifyStdOutCallbackHandler(BaseCallbackHandler):
    ...

@@ -13,6 +13,16 @@ class DifyStdOutCallbackHandler(BaseCallbackHandler):
        """Initialize callback handler."""
        self.color = color

    def on_chat_model_start(
            self,
            serialized: Dict[str, Any],
            messages: List[List[BaseMessage]],
            **kwargs: Any
    ) -> Any:
        for sub_messages in messages:
            for sub_message in sub_messages:
                print_text(str(sub_message) + "\n", color='blue')

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        ...
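
The added `on_chat_model_start` prints every incoming chat message in blue. Calling the handler directly shows the effect (LangChain normally invokes it through its callback machinery; the empty `serialized` dict works here only because this method ignores it, and the `color=None` argument assumes the constructor signature implied by the hunk above):

```python
from langchain.schema import HumanMessage, SystemMessage

from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler

handler = DifyStdOutCallbackHandler(color=None)
handler.on_chat_model_start(
    serialized={},  # unused by this handler
    messages=[[
        SystemMessage(content="You are helpful."),
        HumanMessage(content="Hi"),
    ]],
)
# Each message is printed on its own line in blue via print_text.
```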