Unverified Commit 34387ec0 authored by cola's avatar cola Committed by GitHub

fix typo recale to recalc (#2670)

parent 83a6b0c6
...@@ -130,7 +130,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner): ...@@ -130,7 +130,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
input=query input=query
) )
# recale llm max tokens # recalc llm max tokens
self.recalc_llm_max_tokens(self.model_config, prompt_messages) self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model # invoke model
chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm( chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
......
...@@ -105,7 +105,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner): ...@@ -105,7 +105,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
messages_ids=message_file_ids messages_ids=message_file_ids
) )
# recale llm max tokens # recalc llm max tokens
self.recalc_llm_max_tokens(self.model_config, prompt_messages) self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model # invoke model
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm( chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment