From 34387ec0f112eb54e5b906cae77f9062b9487fbb Mon Sep 17 00:00:00 2001
From: cola <45722758+xiangpingjiang@users.noreply.github.com>
Date: Mon, 4 Mar 2024 14:15:53 +0800
Subject: [PATCH] fix typo recale to recalc (#2670)

---
 api/core/features/assistant_cot_runner.py | 2 +-
 api/core/features/assistant_fc_runner.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/api/core/features/assistant_cot_runner.py b/api/core/features/assistant_cot_runner.py
index 09ab27109b..8fcbff983d 100644
--- a/api/core/features/assistant_cot_runner.py
+++ b/api/core/features/assistant_cot_runner.py
@@ -130,7 +130,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                     input=query
                 )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
diff --git a/api/core/features/assistant_fc_runner.py b/api/core/features/assistant_fc_runner.py
index afb312341d..391e040c53 100644
--- a/api/core/features/assistant_fc_runner.py
+++ b/api/core/features/assistant_fc_runner.py
@@ -105,7 +105,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                     messages_ids=message_file_ids
                 )
 
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
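
The comment fixed above sits next to self.recalc_llm_max_tokens(self.model_config,
prompt_messages), which runs just before each model invocation. As a minimal
sketch only, not Dify's actual implementation, such a "recalc llm max tokens"
step typically clamps the completion budget so that prompt tokens plus
completion tokens fit inside the model's context window. The function name,
parameters, and numbers below are illustrative assumptions.

    # Hypothetical sketch of a max-token recalculation step; all names
    # and values here are assumptions, not Dify's real code.
    def recalc_llm_max_tokens(context_window: int,
                              prompt_tokens: int,
                              requested_max_tokens: int,
                              floor: int = 16) -> int:
        """Clamp max_tokens so prompt + completion fits the context window."""
        available = context_window - prompt_tokens
        # Never request more completion tokens than the window can hold,
        # but keep a small floor so the call remains useful even when the
        # prompt nearly (or fully) fills the window.
        return max(floor, min(requested_max_tokens, available))

    # Example: a 4096-token window with a 3900-token prompt leaves room
    # for only 196 completion tokens, even if 512 were requested.
    print(recalc_llm_max_tokens(4096, 3900, 512))  # -> 196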