From 6c31ee36cdd02869a914c4f338cef64569e6be39 Mon Sep 17 00:00:00 2001
From: heyszt <36215648+hieheihei@users.noreply.github.com>
Date: Mon, 27 Jan 2025 11:35:23 +0800
Subject: [PATCH] fix qwen-vl blocking mode (#13052)

---
 api/core/model_runtime/model_providers/tongyi/llm/llm.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
index 8214667427..75c62a9080 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py
+++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
@@ -219,8 +219,12 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
         if response.status_code not in {200, HTTPStatus.OK}:
             raise ServiceUnavailableError(response.message)
         # transform assistant message to prompt message
+        resp_content = response.output.choices[0].message.content
+        # special for qwen-vl
+        if isinstance(resp_content, list):
+            resp_content = resp_content[0]["text"]
         assistant_prompt_message = AssistantPromptMessage(
-            content=response.output.choices[0].message.content,
+            content=resp_content,
         )
 
         # transform usage
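The patch addresses a shape mismatch in blocking (non-streaming) responses: for text-only qwen models `message.content` is a plain string, but for qwen-vl it arrives as a list of segment dicts such as `[{"text": "..."}]`, which previously leaked into `AssistantPromptMessage.content` unconverted. Below is a minimal standalone sketch of the normalization the patch performs; the list-of-segments shape is inferred from the patch itself rather than from DashScope SDK docs, and `normalize_content` is a hypothetical helper for illustration, not a function in the Dify codebase.

```python
def normalize_content(content):
    """Return plain text whether content is a string (qwen)
    or a list of segment dicts like [{"text": "..."}] (qwen-vl)."""
    if isinstance(content, list):
        # qwen-vl blocking mode: take the text of the first segment,
        # mirroring the patch's resp_content[0]["text"] access
        return content[0]["text"]
    return content


assert normalize_content("hello") == "hello"
assert normalize_content([{"text": "hello"}]) == "hello"
```

Note that the fix reads only the first segment's text; if qwen-vl ever returned multiple segments in blocking mode, the later ones would be dropped.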