Commit 8af0e837 authored by takatost's avatar takatost

fix typo

parent 38b2e383
......@@ -132,7 +132,7 @@ class CotAgentRunner(BaseAgentRunner):
input=query
)
# recale llm max tokens
# recalc llm max tokens
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
......
......@@ -107,7 +107,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
messages_ids=message_file_ids
)
# recale llm max tokens
# recalc llm max tokens
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment