Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git, synced 2025-08-12 14:18:58 +08:00
fix(workflow): enhance prompt handling with vision support (#9790)
Commit d018b32d0b (parent e54b7cda3d)
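Summary: _fetch_prompt_messages on LLMNode gains a vision_enabled flag (defaulting to False) alongside the existing vision_detail argument, and the prompt-assembly loop now drops image content items whenever vision is disabled. Both LLMNode and QuestionClassifierNode forward their node-level vision.enabled setting when calling the helper.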
@@ -127,9 +127,10 @@ class LLMNode(BaseNode[LLMNodeData]):
             context=context,
             memory=memory,
             model_config=model_config,
-            vision_detail=self.node_data.vision.configs.detail,
             prompt_template=self.node_data.prompt_template,
             memory_config=self.node_data.memory,
+            vision_enabled=self.node_data.vision.enabled,
+            vision_detail=self.node_data.vision.configs.detail,
         )

         process_data = {
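This call site previously forwarded only vision_detail; it now passes vision_enabled as well, both read from the node's vision settings. A minimal sketch of the settings shape those attribute paths assume — stand-in dataclasses, not dify's actual node-data models:

from dataclasses import dataclass, field

@dataclass
class VisionConfigs:
    # dify annotates this as ImagePromptMessageContent.DETAIL;
    # a plain "low"/"high" string stands in for it here.
    detail: str = "high"

@dataclass
class VisionConfig:
    enabled: bool = False
    configs: VisionConfigs = field(default_factory=VisionConfigs)

# Mirrors the attribute paths used in the hunk above:
# self.node_data.vision.enabled / self.node_data.vision.configs.detail
vision = VisionConfig(enabled=True, configs=VisionConfigs(detail="low"))
assert vision.enabled and vision.configs.detail == "low"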
@@ -518,6 +519,7 @@ class LLMNode(BaseNode[LLMNodeData]):
         model_config: ModelConfigWithCredentialsEntity,
         prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
         memory_config: MemoryConfig | None = None,
+        vision_enabled: bool = False,
         vision_detail: ImagePromptMessageContent.DETAIL,
     ) -> tuple[list[PromptMessage], Optional[list[str]]]:
         inputs = inputs or {}
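Since vision_enabled defaults to False, image filtering is opt-in per caller. Note also that the undefaulted vision_detail follows a defaulted parameter, which Python only permits for keyword-only arguments. A toy sketch of the same signature evolution (names and body are illustrative, not dify's):

def fetch_prompt_messages(
    *,  # keyword-only: lets the defaulted flag precede the undefaulted vision_detail
    prompt_template: str,
    memory_config: dict | None = None,
    vision_enabled: bool = False,  # new opt-in flag
    vision_detail: str,
) -> list[str]:
    # Toy body: record the effective vision mode next to the template.
    mode = "on" if vision_enabled else "off"
    return [f"{prompt_template} [vision={mode}/{vision_detail}]"]

# An existing caller that never heard of the flag keeps working
# and now implicitly runs with vision off:
print(fetch_prompt_messages(prompt_template="classify this", vision_detail="high"))
# A caller that opts in:
print(fetch_prompt_messages(prompt_template="classify this", vision_enabled=True, vision_detail="low"))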
@@ -542,6 +544,10 @@ class LLMNode(BaseNode[LLMNodeData]):
         if not isinstance(prompt_message.content, str):
             prompt_message_content = []
             for content_item in prompt_message.content or []:
+                # Skip image if vision is disabled
+                if not vision_enabled and content_item.type == PromptMessageContentType.IMAGE:
+                    continue
+
                 if isinstance(content_item, ImagePromptMessageContent):
                     # Override vision config if LLM node has vision config,
                     # cuz vision detail is related to the configuration from FileUpload feature.
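A standalone sketch of the new guard, with stand-in types for dify's prompt-message entities, showing that image parts are dropped only when vision is off:

from enum import Enum

class PromptMessageContentType(Enum):
    TEXT = "text"
    IMAGE = "image"

class ContentItem:
    def __init__(self, type: PromptMessageContentType, data: str):
        self.type = type
        self.data = data

def filter_items(items: list[ContentItem], vision_enabled: bool) -> list[ContentItem]:
    kept = []
    for content_item in items:
        # Skip image if vision is disabled -- same guard as the diff above.
        if not vision_enabled and content_item.type == PromptMessageContentType.IMAGE:
            continue
        kept.append(content_item)
    return kept

items = [
    ContentItem(PromptMessageContentType.TEXT, "describe the photo"),
    ContentItem(PromptMessageContentType.IMAGE, "<base64 bytes>"),
]
assert [i.type for i in filter_items(items, vision_enabled=False)] == [PromptMessageContentType.TEXT]
assert len(filter_items(items, vision_enabled=True)) == 2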
@@ -88,6 +88,7 @@ class QuestionClassifierNode(LLMNode):
             memory=memory,
             model_config=model_config,
             files=files,
+            vision_enabled=node_data.vision.enabled,
             vision_detail=node_data.vision.configs.detail,
         )

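QuestionClassifierNode subclasses LLMNode and reuses the inherited _fetch_prompt_messages, so without this one-line addition it would fall back to the new vision_enabled=False default and silently drop image content even when its own vision setting is enabled.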