diff --git a/api/core/features/assistant_cot_runner.py b/api/core/features/assistant_cot_runner.py
index 09ab27109b..8fcbff983d 100644
--- a/api/core/features/assistant_cot_runner.py
+++ b/api/core/features/assistant_cot_runner.py
@@ -130,7 +130,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                 input=query
             )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
diff --git a/api/core/features/assistant_fc_runner.py b/api/core/features/assistant_fc_runner.py
index afb312341d..391e040c53 100644
--- a/api/core/features/assistant_fc_runner.py
+++ b/api/core/features/assistant_fc_runner.py
@@ -105,7 +105,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 messages_ids=message_file_ids
             )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(