Commit 2de7f5d2 authored by John Wang

fix: Agent run failed error

parent 70c5d1aa
 import enum
+import logging
 from typing import Union, Optional
 from langchain.agents import BaseSingleActionAgent, BaseMultiActionAgent
@@ -107,7 +108,11 @@ class AgentExecutor:
             callbacks=self.configuration.callbacks
         )
-        output = agent_executor.run(query)
+        try:
+            output = agent_executor.run(query)
+        except Exception:
+            logging.exception("agent_executor run failed")
+            output = None
         return AgentExecuteResult(
             output=output,
...
@@ -126,7 +126,7 @@ class Completion:
                 memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory], streaming: bool):
         # When no extra pre prompt is specified,
         # the output of the agent can be used directly as the main output content without calling LLM again
-        if not app_model_config.pre_prompt and agent_execute_result \
+        if not app_model_config.pre_prompt and agent_execute_result and agent_execute_result.output \
                 and agent_execute_result.strategy != PlanningStrategy.ROUTER:
             final_llm = FakeLLM(response=agent_execute_result.output,
                                 origin_llm=agent_execute_result.configuration.llm,
...
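
Taken together, the two hunks make the agent failure path explicit: a crashed agent run now yields output=None instead of raising out of the executor, and the FakeLLM shortcut in Completion is skipped when there is no output, so the regular LLM call answers the request. The following standalone sketch illustrates that control flow; PlanningStrategy, AgentExecuteResult, run_agent and pick_final_response here are simplified stand-ins for this illustration, not the real Dify or LangChain classes.

import logging
from dataclasses import dataclass
from enum import Enum
from typing import Callable, Optional


class PlanningStrategy(Enum):
    ROUTER = "router"
    REACT = "react"


@dataclass
class AgentExecuteResult:
    strategy: PlanningStrategy
    output: Optional[str]


def run_agent(agent_run: Callable[[str], str], query: str,
              strategy: PlanningStrategy) -> AgentExecuteResult:
    # Mirrors the first hunk: a failed agent run is logged and surfaces as
    # output=None rather than propagating the exception to the caller.
    try:
        output = agent_run(query)
    except Exception:
        logging.exception("agent_executor run failed")
        output = None
    return AgentExecuteResult(strategy=strategy, output=output)


def pick_final_response(pre_prompt: Optional[str],
                        result: Optional[AgentExecuteResult],
                        call_llm: Callable[[], str]) -> str:
    # Mirrors the second hunk: reuse the agent output directly (the FakeLLM
    # path in the real code) only when it actually exists; otherwise fall
    # back to the normal LLM call.
    if not pre_prompt and result and result.output \
            and result.strategy != PlanningStrategy.ROUTER:
        return result.output
    return call_llm()


def failing_agent(query: str) -> str:
    raise RuntimeError("tool error")


result = run_agent(failing_agent, "What is 2 + 2?", PlanningStrategy.REACT)
# The failed run produced output=None, so the guarded condition falls through
# and the regular LLM path answers instead of FakeLLM echoing None.
print(pick_final_response(None, result, lambda: "LLM answer"))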