Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git
Non-Streaming Models Do Not Return Results Properly in _handle_invoke_result (#13571)
Co-authored-by: crazywoola <427733928@qq.com>
This commit is contained in:
parent 62079991b7
commit 8a0aa91ed7
@@ -247,6 +247,24 @@ class LLMNode(BaseNode[LLMNodeData]):
     def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
         if isinstance(invoke_result, LLMResult):
+            content = invoke_result.message.content
+            if content is None:
+                message_text = ""
+            elif isinstance(content, str):
+                message_text = content
+            elif isinstance(content, list):
+                # Assuming the list contains PromptMessageContent objects with a "data" attribute
+                message_text = "".join(
+                    item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item) for item in content
+                )
+            else:
+                message_text = str(content)
+
+            yield ModelInvokeCompletedEvent(
+                text=message_text,
+                usage=invoke_result.usage,
+                finish_reason=None,
+            )
             return
 
         model = None
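For context, here is a minimal runnable sketch of what the added branch does with a non-streaming result. _TextPart, _Message, and _LLMResult are hypothetical stand-ins for Dify's PromptMessageContent / LLMResult types, assumed only for illustration; the normalization logic mirrors the patch above.

from dataclasses import dataclass, field
from typing import Any


@dataclass
class _TextPart:
    # Stand-in for a content part that carries its text in a `data` attribute.
    data: str


@dataclass
class _Message:
    content: Any  # None, a plain string, or a list of content parts


@dataclass
class _LLMResult:
    message: _Message
    usage: dict = field(default_factory=dict)


def normalize_content(content: Any) -> str:
    """Flatten message.content into plain text, as the patched branch does."""
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        return "".join(
            item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item)
            for item in content
        )
    return str(content)


# Before the fix, a non-streaming LLMResult hit the early `return` without
# yielding anything; the patch now surfaces its text and usage through a
# ModelInvokeCompletedEvent built from this normalized text.
result = _LLMResult(message=_Message(content=[_TextPart("Hello, "), _TextPart("world")]))
print(normalize_content(result.message.content))  # -> Hello, world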