fix: respect resolution settings for vision for basic chatbot, text generator, and parameter extractor node (#16041)
parent e91531fc23
commit 86d3fff666
@@ -17,7 +17,11 @@ from core.external_data_tool.external_data_fetch import ExternalDataFetch
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
-from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage
+from core.model_runtime.entities.message_entities import (
+    AssistantPromptMessage,
+    ImagePromptMessageContent,
+    PromptMessage,
+)
 from core.model_runtime.entities.model_entities import ModelPropertyKey
 from core.model_runtime.errors.invoke import InvokeBadRequestError
 from core.moderation.input_moderation import InputModeration
@@ -141,6 +145,7 @@ class AppRunner:
         query: Optional[str] = None,
         context: Optional[str] = None,
         memory: Optional[TokenBufferMemory] = None,
+        image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> tuple[list[PromptMessage], Optional[list[str]]]:
         """
         Organize prompt messages
@@ -167,6 +172,7 @@ class AppRunner:
                 context=context,
                 memory=memory,
                 model_config=model_config,
+                image_detail_config=image_detail_config,
             )
         else:
             memory_config = MemoryConfig(window=MemoryConfig.WindowConfig(enabled=False))
@@ -201,6 +207,7 @@ class AppRunner:
                 memory_config=memory_config,
                 memory=memory,
                 model_config=model_config,
+                image_detail_config=image_detail_config,
             )
         stop = model_config.stop

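The hunks above add an optional image_detail_config parameter to AppRunner.organize_prompt_messages and forward it unchanged into the prompt transform calls. Because the parameter defaults to None, existing callers are unaffected. A minimal sketch of this pass-through pattern, using a stand-in Detail enum rather than Dify's actual ImagePromptMessageContent.DETAIL:

# Minimal sketch of the pass-through, not Dify's actual API; Detail stands in for
# ImagePromptMessageContent.DETAIL.
from enum import Enum
from typing import Optional


class Detail(str, Enum):
    LOW = "low"
    HIGH = "high"


def get_prompt(query: str, image_detail_config: Optional[Detail] = None) -> dict:
    # downstream prompt transform: the only layer that actually consumes the setting
    return {"query": query, "image_detail": image_detail_config}


def organize_prompt_messages(query: str, image_detail_config: Optional[Detail] = None) -> dict:
    # the app runner does not interpret the setting, it only forwards it
    return get_prompt(query, image_detail_config=image_detail_config)


print(organize_prompt_messages("describe this image"))               # image_detail stays None
print(organize_prompt_messages("describe this image", Detail.HIGH))  # explicit high detail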
@@ -11,6 +11,7 @@ from core.app.entities.queue_entities import QueueAnnotationReplyEvent
 from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
+from core.model_runtime.entities.message_entities import ImagePromptMessageContent
 from core.moderation.base import ModerationError
 from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
 from extensions.ext_database import db
@@ -50,6 +51,16 @@ class ChatAppRunner(AppRunner):
         query = application_generate_entity.query
         files = application_generate_entity.files
+
+        image_detail_config = (
+            application_generate_entity.file_upload_config.image_config.detail
+            if (
+                application_generate_entity.file_upload_config
+                and application_generate_entity.file_upload_config.image_config
+            )
+            else None
+        )
+        image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW

         # Pre-calculate the number of tokens of the prompt messages,
         # and return the rest number of tokens by model context token size limit and max token size limit.
         # If the rest number of tokens is not enough, raise exception.
@@ -85,6 +96,7 @@ class ChatAppRunner(AppRunner):
             files=files,
             query=query,
             memory=memory,
+            image_detail_config=image_detail_config,
         )

         # moderation
@@ -182,6 +194,7 @@ class ChatAppRunner(AppRunner):
             query=query,
             context=context,
             memory=memory,
+            image_detail_config=image_detail_config,
         )

         # check hosting moderation
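ChatAppRunner above and CompletionAppRunner below derive the detail level from the app's file-upload settings, falling back to LOW whenever the upload config or its image config is missing. The same resolution logic as a small standalone function; the dataclasses here are simplified stand-ins, not Dify's real entities:

# Simplified stand-ins for illustration; not Dify's real entity classes.
from dataclasses import dataclass
from enum import Enum
from typing import Optional


class Detail(str, Enum):
    LOW = "low"
    HIGH = "high"


@dataclass
class ImageConfig:
    detail: Optional[Detail] = None


@dataclass
class FileUploadConfig:
    image_config: Optional[ImageConfig] = None


def resolve_image_detail(file_upload_config: Optional[FileUploadConfig]) -> Detail:
    # use the configured detail only when both the upload config and its image config exist
    configured = (
        file_upload_config.image_config.detail
        if file_upload_config and file_upload_config.image_config
        else None
    )
    # otherwise fall back to LOW, matching the runner's default
    return configured or Detail.LOW


print(resolve_image_detail(None))                                        # Detail.LOW
print(resolve_image_detail(FileUploadConfig(ImageConfig(Detail.HIGH))))  # Detail.HIGH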
@@ -9,6 +9,7 @@ from core.app.entities.app_invoke_entities import (
 )
 from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
 from core.model_manager import ModelInstance
+from core.model_runtime.entities.message_entities import ImagePromptMessageContent
 from core.moderation.base import ModerationError
 from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
 from extensions.ext_database import db
@@ -43,6 +44,16 @@ class CompletionAppRunner(AppRunner):
         query = application_generate_entity.query
         files = application_generate_entity.files
+
+        image_detail_config = (
+            application_generate_entity.file_upload_config.image_config.detail
+            if (
+                application_generate_entity.file_upload_config
+                and application_generate_entity.file_upload_config.image_config
+            )
+            else None
+        )
+        image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW

         # Pre-calculate the number of tokens of the prompt messages,
         # and return the rest number of tokens by model context token size limit and max token size limit.
         # If the rest number of tokens is not enough, raise exception.
@@ -66,6 +77,7 @@ class CompletionAppRunner(AppRunner):
             inputs=inputs,
             files=files,
             query=query,
+            image_detail_config=image_detail_config,
         )

         # moderation
@@ -140,6 +152,7 @@ class CompletionAppRunner(AppRunner):
             files=files,
             query=query,
             context=context,
+            image_detail_config=image_detail_config,
         )

         # check hosting moderation
@@ -46,6 +46,7 @@ class AdvancedPromptTransform(PromptTransform):
         memory_config: Optional[MemoryConfig],
         memory: Optional[TokenBufferMemory],
         model_config: ModelConfigWithCredentialsEntity,
+        image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         prompt_messages = []

@@ -59,6 +60,7 @@ class AdvancedPromptTransform(PromptTransform):
                 memory_config=memory_config,
                 memory=memory,
                 model_config=model_config,
+                image_detail_config=image_detail_config,
             )
         elif isinstance(prompt_template, list) and all(isinstance(item, ChatModelMessage) for item in prompt_template):
             prompt_messages = self._get_chat_model_prompt_messages(
@@ -70,6 +72,7 @@ class AdvancedPromptTransform(PromptTransform):
                 memory_config=memory_config,
                 memory=memory,
                 model_config=model_config,
+                image_detail_config=image_detail_config,
             )

         return prompt_messages
@@ -84,6 +87,7 @@ class AdvancedPromptTransform(PromptTransform):
         memory_config: Optional[MemoryConfig],
         memory: Optional[TokenBufferMemory],
         model_config: ModelConfigWithCredentialsEntity,
+        image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Get completion model prompt messages.
@@ -124,7 +128,9 @@ class AdvancedPromptTransform(PromptTransform):
             prompt_message_contents: list[PromptMessageContent] = []
             prompt_message_contents.append(TextPromptMessageContent(data=prompt))
             for file in files:
-                prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+                prompt_message_contents.append(
+                    file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+                )

             prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
         else:
@@ -142,6 +148,7 @@ class AdvancedPromptTransform(PromptTransform):
         memory_config: Optional[MemoryConfig],
         memory: Optional[TokenBufferMemory],
         model_config: ModelConfigWithCredentialsEntity,
+        image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Get chat model prompt messages.
@@ -197,7 +204,9 @@ class AdvancedPromptTransform(PromptTransform):
                 prompt_message_contents: list[PromptMessageContent] = []
                 prompt_message_contents.append(TextPromptMessageContent(data=query))
                 for file in files:
-                    prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+                    prompt_message_contents.append(
+                        file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+                    )
                 prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
             else:
                 prompt_messages.append(UserPromptMessage(content=query))
@@ -209,19 +218,25 @@ class AdvancedPromptTransform(PromptTransform):
                         # get last user message content and add files
                         prompt_message_contents = [TextPromptMessageContent(data=cast(str, last_message.content))]
                         for file in files:
-                            prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+                            prompt_message_contents.append(
+                                file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+                            )

                         last_message.content = prompt_message_contents
                     else:
                         prompt_message_contents = [TextPromptMessageContent(data="")]  # not for query
                         for file in files:
-                            prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+                            prompt_message_contents.append(
+                                file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+                            )

                         prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
                 else:
                     prompt_message_contents = [TextPromptMessageContent(data=query)]
                     for file in files:
-                        prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+                        prompt_message_contents.append(
+                            file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+                        )

                     prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
             elif query:
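Every place that previously built image content with file_manager.to_prompt_message_content(file) now forwards image_detail_config as well. Presumably the helper stamps that detail level onto the image content it returns; the sketch below illustrates that idea with made-up types rather than the real file_manager API:

# Illustrative guess at what attaching a detail level to image content looks like;
# ImageContent and to_prompt_message_content here are not the real file_manager API.
from dataclasses import dataclass
from enum import Enum
from typing import Optional


class Detail(str, Enum):
    LOW = "low"
    HIGH = "high"


@dataclass
class ImageContent:
    url: str
    detail: Detail


def to_prompt_message_content(url: str, image_detail_config: Optional[Detail] = None) -> ImageContent:
    # the configured resolution wins when provided; LOW is used as the fallback
    return ImageContent(url=url, detail=image_detail_config or Detail.LOW)


print(to_prompt_message_content("https://example.com/cat.png", Detail.HIGH))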
@@ -9,6 +9,7 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
 from core.file import file_manager
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_runtime.entities.message_entities import (
+    ImagePromptMessageContent,
     PromptMessage,
     PromptMessageContent,
     SystemPromptMessage,
@@ -60,6 +61,7 @@ class SimplePromptTransform(PromptTransform):
         context: Optional[str],
         memory: Optional[TokenBufferMemory],
         model_config: ModelConfigWithCredentialsEntity,
+        image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> tuple[list[PromptMessage], Optional[list[str]]]:
         inputs = {key: str(value) for key, value in inputs.items()}

@@ -74,6 +76,7 @@ class SimplePromptTransform(PromptTransform):
                 context=context,
                 memory=memory,
                 model_config=model_config,
+                image_detail_config=image_detail_config,
             )
         else:
             prompt_messages, stops = self._get_completion_model_prompt_messages(
@@ -85,6 +88,7 @@ class SimplePromptTransform(PromptTransform):
                 context=context,
                 memory=memory,
                 model_config=model_config,
+                image_detail_config=image_detail_config,
             )

         return prompt_messages, stops
@@ -175,6 +179,7 @@ class SimplePromptTransform(PromptTransform):
         files: Sequence["File"],
         memory: Optional[TokenBufferMemory],
         model_config: ModelConfigWithCredentialsEntity,
+        image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> tuple[list[PromptMessage], Optional[list[str]]]:
         prompt_messages: list[PromptMessage] = []

@@ -204,9 +209,9 @@ class SimplePromptTransform(PromptTransform):
         )

         if query:
-            prompt_messages.append(self.get_last_user_message(query, files))
+            prompt_messages.append(self.get_last_user_message(query, files, image_detail_config))
         else:
-            prompt_messages.append(self.get_last_user_message(prompt, files))
+            prompt_messages.append(self.get_last_user_message(prompt, files, image_detail_config))

         return prompt_messages, None

@@ -220,6 +225,7 @@ class SimplePromptTransform(PromptTransform):
         files: Sequence["File"],
         memory: Optional[TokenBufferMemory],
         model_config: ModelConfigWithCredentialsEntity,
+        image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> tuple[list[PromptMessage], Optional[list[str]]]:
         # get prompt
         prompt, prompt_rules = self.get_prompt_str_and_rules(
@@ -262,14 +268,21 @@ class SimplePromptTransform(PromptTransform):
         if stops is not None and len(stops) == 0:
             stops = None

-        return [self.get_last_user_message(prompt, files)], stops
+        return [self.get_last_user_message(prompt, files, image_detail_config)], stops

-    def get_last_user_message(self, prompt: str, files: Sequence["File"]) -> UserPromptMessage:
+    def get_last_user_message(
+        self,
+        prompt: str,
+        files: Sequence["File"],
+        image_detail_config: Optional[ImagePromptMessageContent.DETAIL] = None,
+    ) -> UserPromptMessage:
         if files:
             prompt_message_contents: list[PromptMessageContent] = []
             prompt_message_contents.append(TextPromptMessageContent(data=prompt))
             for file in files:
-                prompt_message_contents.append(file_manager.to_prompt_message_content(file))
+                prompt_message_contents.append(
+                    file_manager.to_prompt_message_content(file, image_detail_config=image_detail_config)
+                )

             prompt_message = UserPromptMessage(content=prompt_message_contents)
         else:
@@ -7,6 +7,7 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
 from core.file import File
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
+from core.model_runtime.entities import ImagePromptMessageContent
 from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
@@ -129,6 +130,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=node_data.vision.configs.detail,
             )
         else:
             # use prompt engineering
@@ -139,6 +141,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=node_data.vision.configs.detail,
             )

         prompt_message_tools = []
@@ -267,6 +270,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> tuple[list[PromptMessage], list[PromptMessageTool]]:
         """
         Generate function call prompt.
@@ -289,6 +293,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=None,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )

         # find last user message
@@ -347,6 +352,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate prompt engineering prompt.
@@ -361,6 +367,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=vision_detail,
             )
         elif model_mode == ModelMode.CHAT:
             return self._generate_prompt_engineering_chat_prompt(
@@ -370,6 +377,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=vision_detail,
             )
         else:
             raise InvalidModelModeError(f"Invalid model mode: {model_mode}")
@@ -382,6 +390,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate completion prompt.
@@ -402,6 +411,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=memory,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )

         return prompt_messages
@@ -414,6 +424,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate chat prompt.
@@ -441,6 +452,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=None,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )

         # find last user message
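In ParameterExtractorNode the detail level comes from the node's own vision settings (node_data.vision.configs.detail), travels as vision_detail through the prompt-building helpers, and is finally handed to the prompt transform as image_detail_config. A rough sketch of that mapping with stand-in types:

# Stand-in types for illustration; not the real ParameterExtractorNode entities.
from dataclasses import dataclass
from enum import Enum
from typing import Optional


class Detail(str, Enum):
    LOW = "low"
    HIGH = "high"


@dataclass
class VisionConfigs:
    detail: Detail = Detail.LOW


@dataclass
class VisionConfig:
    configs: VisionConfigs


@dataclass
class NodeData:
    vision: VisionConfig


def generate_prompt(node_data: NodeData) -> dict:
    # the node-level vision detail is forwarded as the transform's image_detail_config
    vision_detail: Optional[Detail] = node_data.vision.configs.detail
    return {"image_detail_config": vision_detail}


print(generate_prompt(NodeData(vision=VisionConfig(configs=VisionConfigs(Detail.HIGH)))))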