Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git
Synced 2025-08-14 13:55:58 +08:00
Fix: Tongyi code wrapper is not stable (#7871)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
This commit is contained in:
parent 954580a4af
commit a15791e788
@@ -17,7 +17,6 @@ from dashscope.common.error import (
     UnsupportedModel,
 )
 
-from core.model_runtime.callbacks.base_callback import Callback
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
@@ -64,88 +63,8 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
         :param user: unique user id
         :return: full response or stream response chunk generator result
         """
-        # invoke model
+        # invoke model without code wrapper
         return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
 
-    def _code_block_mode_wrapper(self, model: str, credentials: dict,
-                                 prompt_messages: list[PromptMessage], model_parameters: dict,
-                                 tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None,
-                                 stream: bool = True, user: str | None = None, callbacks: list[Callback] = None) \
-            -> LLMResult | Generator:
-        """
-        Wrapper for code block mode
-        """
-        block_prompts = """You should always follow the instructions and output a valid {{block}} object.
-The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure
-if you are not sure about the structure.
-
-<instructions>
-{{instructions}}
-</instructions>
-You should also complete the text started with ``` but not tell ``` directly.
-"""
-
-        code_block = model_parameters.get("response_format", "")
-        if not code_block:
-            return self._invoke(
-                model=model,
-                credentials=credentials,
-                prompt_messages=prompt_messages,
-                model_parameters=model_parameters,
-                tools=tools,
-                stop=stop,
-                stream=stream,
-                user=user
-            )
-
-        model_parameters.pop("response_format")
-        stop = stop or []
-        stop.extend(["\n```", "```\n"])
-        block_prompts = block_prompts.replace("{{block}}", code_block)
-
-        # check if there is a system message
-        if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage):
-            # override the system message
-            prompt_messages[0] = SystemPromptMessage(
-                content=block_prompts
-                .replace("{{instructions}}", prompt_messages[0].content)
-            )
-        else:
-            # insert the system message
-            prompt_messages.insert(0, SystemPromptMessage(
-                content=block_prompts
-                .replace("{{instructions}}", f"Please output a valid {code_block} with markdown codeblocks.")
-            ))
-
-        if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage):
-            # add ```JSON\n to the last message
-            prompt_messages[-1].content += f"\n```{code_block}\n"
-        else:
-            # append a user message
-            prompt_messages.append(UserPromptMessage(
-                content=f"```{code_block}\n"
-            ))
-
-        response = self._invoke(
-            model=model,
-            credentials=credentials,
-            prompt_messages=prompt_messages,
-            model_parameters=model_parameters,
-            tools=tools,
-            stop=stop,
-            stream=stream,
-            user=user
-        )
-
-        if isinstance(response, Generator):
-            return self._code_block_mode_stream_processor_with_backtick(
-                model=model,
-                prompt_messages=prompt_messages,
-                input_generator=response
-            )
-
-        return response
-
     def get_num_tokens(self, model: str, credentials: dict, prompt_messages: list[PromptMessage],
                        tools: Optional[list[PromptMessageTool]] = None) -> int:
         """
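For context, the wrapper this commit deletes forced "code block mode" by rewriting the prompt: it folded block instructions into the system message, opened a ``` fence at the end of the last user message, and registered closing-fence stop words so generation would halt at the end of the block. Below is a minimal, self-contained sketch of that technique; the `Message` dataclass and `wrap_for_code_block` helper are hypothetical stand-ins for illustration, not Dify's actual `PromptMessage` API, and the template is a simplified version of the removed `block_prompts` string.

```python
# Minimal sketch of the prompt-rewriting technique the removed
# _code_block_mode_wrapper implemented. Message and wrap_for_code_block are
# hypothetical stand-ins, not Dify's real classes.
from dataclasses import dataclass

BLOCK_PROMPT = """You should always follow the instructions and output a valid {block} object.
<instructions>
{instructions}
</instructions>
"""


@dataclass
class Message:
    role: str  # "system" or "user"
    content: str


def wrap_for_code_block(messages: list[Message], block: str) -> tuple[list[Message], list[str]]:
    """Rewrite the prompt so the model answers inside a ```<block> fence.

    Returns the rewritten messages plus the stop words that cut generation
    at the closing fence.
    """
    if messages and messages[0].role == "system":
        # Fold the original system prompt into the block instructions.
        messages[0] = Message(
            "system", BLOCK_PROMPT.format(block=block, instructions=messages[0].content)
        )
    else:
        messages.insert(0, Message(
            "system", BLOCK_PROMPT.format(
                block=block,
                instructions=f"Please output a valid {block} with markdown codeblocks.",
            )
        ))

    if messages and messages[-1].role == "user":
        # Open the fence so the model only has to complete its body.
        messages[-1].content += f"\n```{block}\n"
    else:
        messages.append(Message("user", f"```{block}\n"))

    # Generation stops as soon as the model emits a closing fence.
    return messages, ["\n```", "```\n"]


if __name__ == "__main__":
    msgs, stops = wrap_for_code_block([Message("user", "Give me a city as JSON.")], "JSON")
    for m in msgs:
        print(f"[{m.role}] {m.content}")
    print("stop words:", stops)
```

After this commit, `_invoke` delegates straight to `_generate`, so Tongyi models no longer get this fence/stop-word rewriting that the PR title reports as unstable; producing structured output is left to the prompt itself.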
|