Fix the situation where output_tokens/input_tokens may be None in response.usage (#10728)

This commit is contained in:
Ding Jiatong 2024-11-19 21:19:13 +08:00 committed by GitHub
parent 904ea05bf6
commit 3087913b74
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -325,14 +325,13 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
     assistant_prompt_message.tool_calls.append(tool_call)

 # calculate num tokens
-if response.usage:
-    # transform usage
-    prompt_tokens = response.usage.input_tokens
-    completion_tokens = response.usage.output_tokens
-else:
-    # calculate num tokens
-    prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-    completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
+prompt_tokens = (response.usage and response.usage.input_tokens) or self.get_num_tokens(
+    model, credentials, prompt_messages
+)
+completion_tokens = (response.usage and response.usage.output_tokens) or self.get_num_tokens(
+    model, credentials, [assistant_prompt_message]
+)

 # transform usage
 usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)