From 8a0aa91ed7d080d6f82d1a3ea092f5c6fe1b11ca Mon Sep 17 00:00:00 2001
From: Vasu Negi
Date: Fri, 14 Feb 2025 03:02:04 -0600
Subject: [PATCH] Non-Streaming Models Do Not Return Results Properly in _handle_invoke_result (#13571)

Co-authored-by: crazywoola <427733928@qq.com>
---
 api/core/workflow/nodes/llm/node.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py
index 7e28aa7a3f..9f30048fe6 100644
--- a/api/core/workflow/nodes/llm/node.py
+++ b/api/core/workflow/nodes/llm/node.py
@@ -247,6 +247,24 @@ class LLMNode(BaseNode[LLMNodeData]):
 
     def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
         if isinstance(invoke_result, LLMResult):
+            content = invoke_result.message.content
+            if content is None:
+                message_text = ""
+            elif isinstance(content, str):
+                message_text = content
+            elif isinstance(content, list):
+                # Assuming the list contains PromptMessageContent objects with a "data" attribute
+                message_text = "".join(
+                    item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item) for item in content
+                )
+            else:
+                message_text = str(content)
+
+            yield ModelInvokeCompletedEvent(
+                text=message_text,
+                usage=invoke_result.usage,
+                finish_reason=None,
+            )
             return
 
         model = None
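
Notes (not part of the patch): before this change, `_handle_invoke_result` returned immediately for a non-streaming LLMResult without yielding any event, so blocking-mode invocations produced no output. The added branch normalizes message.content (None, a plain string, or a list of content items) into text and yields a ModelInvokeCompletedEvent before returning. The following is a minimal, standalone sketch of that same normalization logic, runnable outside Dify; TextItem and flatten_message_content are hypothetical stand-ins for illustration, not names from the patch.

    # Standalone sketch of the content-flattening behavior the patch adds.
    # TextItem is a stand-in modeled loosely on a PromptMessageContent-like
    # object carrying a string "data" attribute; it is not the real import.
    from dataclasses import dataclass
    from typing import Any


    @dataclass
    class TextItem:
        data: str


    def flatten_message_content(content: Any) -> str:
        """Mirror the patch's normalization of message.content into plain text."""
        if content is None:
            return ""
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            # Prefer each item's string "data" attribute; fall back to str(item),
            # exactly as the added branch in the diff does.
            return "".join(
                item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item)
                for item in content
            )
        return str(content)


    if __name__ == "__main__":
        assert flatten_message_content(None) == ""
        assert flatten_message_content("hello") == "hello"
        assert flatten_message_content([TextItem("foo"), TextItem("bar")]) == "foobar"
        print("all content shapes flatten as expected")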