fix: gemini system prompt with variable raise error (#11946)
commit 366857cd26
parent 9578246bbb
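In short: Gemini system prompts are now passed through the SDK's native system_instruction parameter instead of being relabeled as user turns, and list-valued system prompt content (e.g. a prompt assembled from template variables) is flattened to plain text before it reaches the API.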
@@ -21,6 +21,7 @@ from core.model_runtime.entities.message_entities import (
     PromptMessageContentType,
     PromptMessageTool,
     SystemPromptMessage,
+    TextPromptMessageContent,
     ToolPromptMessage,
     UserPromptMessage,
 )
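The newly imported TextPromptMessageContent is used in the last hunk below to pick the text parts out of a list-valued system prompt.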
@@ -143,7 +144,7 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
         """
 
         try:
-            ping_message = SystemPromptMessage(content="ping")
+            ping_message = UserPromptMessage(content="ping")
            self._generate(model, credentials, [ping_message], {"max_output_tokens": 5})
 
         except Exception as ex:
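With the new routing below, a SystemPromptMessage no longer yields a turn in contents (it becomes system_instruction), so a system-only ping would hit the new "if not history" guard; the credential check therefore pings with a UserPromptMessage. A minimal sketch of the raw google-generativeai call this boils down to (the model name and API key are placeholders, not values from this commit):

# Minimal sketch: validating a Gemini key with a user-role ping.
# Model name and API key below are assumptions for illustration.
import google.generativeai as genai

genai.configure(api_key="YOUR_GOOGLE_API_KEY")
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
response = model.generate_content(
    contents=[{"role": "user", "parts": ["ping"]}],
    generation_config=genai.types.GenerationConfig(max_output_tokens=5),
)
print(response.text)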
@@ -187,17 +188,23 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
             config_kwargs["stop_sequences"] = stop
 
         genai.configure(api_key=credentials["google_api_key"])
-        google_model = genai.GenerativeModel(model_name=model)
 
         history = []
+        system_instruction = None
 
         for msg in prompt_messages:  # makes message roles strictly alternating
             content = self._format_message_to_glm_content(msg)
             if history and history[-1]["role"] == content["role"]:
                 history[-1]["parts"].extend(content["parts"])
+            elif content["role"] == "system":
+                system_instruction = content["parts"][0]
             else:
                 history.append(content)
 
+        if not history:
+            raise InvokeError("The user prompt message is required. You only add a system prompt message.")
+
+        google_model = genai.GenerativeModel(model_name=model, system_instruction=system_instruction)
         response = google_model.generate_content(
             contents=history,
             generation_config=genai.types.GenerationConfig(**config_kwargs),
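The new routing can be read in isolation. A minimal sketch with a hypothetical helper name (split_system_instruction does not exist in the codebase) and a plain ValueError standing in for dify's InvokeError:

# Sketch of the new message routing: a "system" part is pulled out as
# system_instruction, consecutive same-role turns are merged so roles
# stay strictly alternating, and everything else lands in history.
def split_system_instruction(glm_messages: list[dict]) -> tuple[list[dict], str | None]:
    history: list[dict] = []
    system_instruction = None
    for content in glm_messages:
        if history and history[-1]["role"] == content["role"]:
            history[-1]["parts"].extend(content["parts"])
        elif content["role"] == "system":
            system_instruction = content["parts"][0]
        else:
            history.append(content)
    if not history:
        raise ValueError("The user prompt message is required.")
    return history, system_instruction

history, system_instruction = split_system_instruction([
    {"role": "system", "parts": ["You are a helpful assistant."]},
    {"role": "user", "parts": ["Hello"]},
])
# history == [{"role": "user", "parts": ["Hello"]}]
# system_instruction == "You are a helpful assistant."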
@@ -404,7 +411,10 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
             )
             return glm_content
         elif isinstance(message, SystemPromptMessage):
-            return {"role": "user", "parts": [to_part(message.content)]}
+            if isinstance(message.content, list):
+                text_contents = filter(lambda c: isinstance(c, TextPromptMessageContent), message.content)
+                message.content = "".join(c.data for c in text_contents)
+            return {"role": "system", "parts": [to_part(message.content)]}
         elif isinstance(message, ToolPromptMessage):
             return {
                 "role": "function",
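Why the list branch exists: per the commit title, a system prompt that contains a template variable can reach this point as a list of text content parts rather than a plain string, which the old to_part(message.content) call could not serialize; the commit also corrects the role from "user" to "system" so the part is routed into system_instruction upstream. A minimal sketch of the flattening with a stand-in class (TextPart is hypothetical; the real entity is TextPromptMessageContent with a data field):

# Minimal sketch of the list-flattening the new branch performs.
# TextPart is a stand-in for dify's TextPromptMessageContent.
from dataclasses import dataclass

@dataclass
class TextPart:
    data: str

# A system prompt split around a template variable arrives as a list.
content = [TextPart("You are "), TextPart("{{role_name}}")]

# Old behavior: to_part(content) received the list and raised.
# New behavior: the text parts are joined into one string first.
if isinstance(content, list):
    text_contents = filter(lambda c: isinstance(c, TextPart), content)
    content = "".join(c.data for c in text_contents)

assert content == "You are {{role_name}}"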