From 3087913b741922672189c62276c5a3186c7baf6a Mon Sep 17 00:00:00 2001
From: Ding Jiatong
Date: Tue, 19 Nov 2024 21:19:13 +0800
Subject: [PATCH] Fix the situation where output_tokens/input_tokens may be
 None in response.usage (#10728)

---
 .../model_providers/anthropic/llm/llm.py      | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/api/core/model_runtime/model_providers/anthropic/llm/llm.py b/api/core/model_runtime/model_providers/anthropic/llm/llm.py
index 3a5a42ba05..4e7faab891 100644
--- a/api/core/model_runtime/model_providers/anthropic/llm/llm.py
+++ b/api/core/model_runtime/model_providers/anthropic/llm/llm.py
@@ -325,14 +325,13 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
                 assistant_prompt_message.tool_calls.append(tool_call)
 
         # calculate num tokens
-        if response.usage:
-            # transform usage
-            prompt_tokens = response.usage.input_tokens
-            completion_tokens = response.usage.output_tokens
-        else:
-            # calculate num tokens
-            prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-            completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
+        prompt_tokens = (response.usage and response.usage.input_tokens) or self.get_num_tokens(
+            model, credentials, prompt_messages
+        )
+
+        completion_tokens = (response.usage and response.usage.output_tokens) or self.get_num_tokens(
+            model, credentials, [assistant_prompt_message]
+        )
 
         # transform usage
         usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
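
Note on the pattern this patch introduces: below is a minimal standalone
sketch (not part of the patch) of the `(usage and usage.field) or fallback`
idiom the new code relies on. The `Usage` class and `estimate_tokens()` are
hypothetical stand-ins for `response.usage` and `self.get_num_tokens()`.

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Usage:
        # Mirrors the fields that may come back as None on response.usage.
        input_tokens: Optional[int] = None
        output_tokens: Optional[int] = None

    def estimate_tokens() -> int:
        # Hypothetical stand-in for the local get_num_tokens() fallback.
        return 42

    def resolve_prompt_tokens(usage: Optional[Usage]) -> int:
        # `usage and usage.input_tokens` is falsy when usage is None or
        # when input_tokens is None, so `or` selects the local estimate.
        return (usage and usage.input_tokens) or estimate_tokens()

    assert resolve_prompt_tokens(None) == 42                  # no usage block
    assert resolve_prompt_tokens(Usage()) == 42               # field is None
    assert resolve_prompt_tokens(Usage(input_tokens=7)) == 7  # reported value

One consequence of using `or` rather than an explicit `is None` check: a
reported count of 0 is also falsy, so it too would fall back to the local
estimate.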