import json
import logging
from collections.abc import Generator
from copy import deepcopy
from typing import Any, Optional, Union

from core.agent.base_agent_runner import BaseAgentRunner
from core.app.apps.base_app_queue_manager import PublishFrom
from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent
from core.file import file_manager
from core.model_runtime.entities import (
    AssistantPromptMessage,
    LLMResult,
    LLMResultChunk,
    LLMResultChunkDelta,
    LLMUsage,
    PromptMessage,
    PromptMessageContent,
    PromptMessageContentType,
    SystemPromptMessage,
    TextPromptMessageContent,
    ToolPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform
from core.tools.entities.tool_entities import ToolInvokeMeta
from core.tools.tool_engine import ToolEngine
from models.model import Message

logger = logging.getLogger(__name__)


class FunctionCallAgentRunner(BaseAgentRunner):
    def run(self, message: Message, query: str, **kwargs: Any) -> Generator[LLMResultChunk, None, None]:
        """
        Run the function-calling agent application.
        """
        self.query = query
        app_generate_entity = self.application_generate_entity

        app_config = self.app_config
        assert app_config is not None, "app_config is required"
        assert app_config.agent is not None, "app_config.agent is required"

        # convert tools into ModelRuntime Tool format
        tool_instances, prompt_messages_tools = self._init_prompt_tools()

        iteration_step = 1
        max_iteration_steps = min(app_config.agent.max_iteration, 5) + 1
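        # note: the agent runs at most min(max_iteration, 5) + 1 rounds; the
        # extra round is reserved for a final, tool-free completion (tools are
        # removed below once the last round is reached)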

        # continue to run until there are no more tool calls
        function_call_state = True
        llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None}
        final_answer = ""

        # get tracing instance
        trace_manager = app_generate_entity.trace_manager

        def increase_usage(final_llm_usage_dict: dict[str, Optional[LLMUsage]], usage: LLMUsage):
            # accumulate this round's token and price usage into the running total
            if not final_llm_usage_dict["usage"]:
                final_llm_usage_dict["usage"] = usage
            else:
                llm_usage = final_llm_usage_dict["usage"]
                llm_usage.prompt_tokens += usage.prompt_tokens
                llm_usage.completion_tokens += usage.completion_tokens
                llm_usage.prompt_price += usage.prompt_price
                llm_usage.completion_price += usage.completion_price
                llm_usage.total_price += usage.total_price

        model_instance = self.model_instance
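
        # Each loop below is one reasoning round: call the LLM (streaming or
        # blocking), collect any tool calls it emits, execute them, and feed
        # the tool outputs back in via self._current_thoughts. The loop exits
        # when the model stops requesting tools or the iteration budget runs out.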

        while function_call_state and iteration_step <= max_iteration_steps:
            function_call_state = False

            if iteration_step == max_iteration_steps:
                # on the last iteration, remove all tools so the model must answer directly
                prompt_messages_tools = []

            message_file_ids: list[str] = []
            agent_thought = self.create_agent_thought(
                message_id=message.id, message="", tool_name="", tool_input="", messages_ids=message_file_ids
            )

            # recalculate llm max tokens
            prompt_messages = self._organize_prompt_messages()
            self.recalc_llm_max_tokens(self.model_config, prompt_messages)

            # invoke model
            chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
                prompt_messages=prompt_messages,
                model_parameters=app_generate_entity.model_conf.parameters,
                tools=prompt_messages_tools,
                stop=app_generate_entity.model_conf.stop,
                stream=self.stream_tool_call,
                user=self.user_id,
                callbacks=[],
            )

            tool_calls: list[tuple[str, str, dict[str, Any]]] = []
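            # each entry is a (tool_call_id, tool_name, arguments) tuple, as
            # produced by extract_tool_calls / extract_blocking_tool_calls below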

            # save full response
            response = ""

            # save tool call names and inputs
            tool_call_names = ""
            tool_call_inputs = ""

            current_llm_usage = None

            if isinstance(chunks, Generator):
                is_first_chunk = True
                for chunk in chunks:
                    if is_first_chunk:
                        self.queue_manager.publish(
                            QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER
                        )
                        is_first_chunk = False
                    # check if there is any tool call
                    if self.check_tool_calls(chunk):
                        function_call_state = True
                        tool_calls.extend(self.extract_tool_calls(chunk) or [])
                        tool_call_names = ";".join([tool_call[1] for tool_call in tool_calls])
                        try:
                            tool_call_inputs = json.dumps(
                                {tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False
                            )
                        except TypeError:
                            # json.dumps raises TypeError for non-serializable values
                            # (JSONDecodeError is only raised by json.loads);
                            # fall back to a best-effort dump
                            tool_call_inputs = json.dumps(
                                {tool_call[1]: tool_call[2] for tool_call in tool_calls}, default=str
                            )

                    if chunk.delta.message and chunk.delta.message.content:
                        if isinstance(chunk.delta.message.content, list):
                            for content in chunk.delta.message.content:
                                response += content.data
                        else:
                            response += str(chunk.delta.message.content)

                    if chunk.delta.usage:
                        increase_usage(llm_usage, chunk.delta.usage)
                        current_llm_usage = chunk.delta.usage

                    yield chunk
            else:
                result = chunks
                # check if there is any tool call
                if self.check_blocking_tool_calls(result):
                    function_call_state = True
                    tool_calls.extend(self.extract_blocking_tool_calls(result) or [])
                    tool_call_names = ";".join([tool_call[1] for tool_call in tool_calls])
                    try:
                        tool_call_inputs = json.dumps(
                            {tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False
                        )
                    except TypeError:
                        # json.dumps raises TypeError for non-serializable values
                        # (JSONDecodeError is only raised by json.loads);
                        # fall back to a best-effort dump
                        tool_call_inputs = json.dumps(
                            {tool_call[1]: tool_call[2] for tool_call in tool_calls}, default=str
                        )

                if result.usage:
                    increase_usage(llm_usage, result.usage)
                    current_llm_usage = result.usage

                if result.message and result.message.content:
                    if isinstance(result.message.content, list):
                        for content in result.message.content:
                            response += content.data
                    else:
                        response += str(result.message.content)

                if not result.message.content:
                    result.message.content = ""

                self.queue_manager.publish(
                    QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER
                )

                yield LLMResultChunk(
                    model=model_instance.model,
                    prompt_messages=result.prompt_messages,
                    system_fingerprint=result.system_fingerprint,
                    delta=LLMResultChunkDelta(
                        index=0,
                        message=result.message,
                        usage=result.usage,
                    ),
                )
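
            # record this round's assistant output (either tool calls or plain
            # text) so later rounds can see it in the conversation history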
            assistant_message = AssistantPromptMessage(content="", tool_calls=[])
            if tool_calls:
                assistant_message.tool_calls = [
                    AssistantPromptMessage.ToolCall(
                        id=tool_call[0],
                        type="function",
                        function=AssistantPromptMessage.ToolCall.ToolCallFunction(
                            name=tool_call[1], arguments=json.dumps(tool_call[2], ensure_ascii=False)
                        ),
                    )
                    for tool_call in tool_calls
                ]
            else:
                assistant_message.content = response

            self._current_thoughts.append(assistant_message)

            # save thought
            self.save_agent_thought(
                agent_thought=agent_thought,
                tool_name=tool_call_names,
                tool_input=tool_call_inputs,
                thought=response,
                tool_invoke_meta=None,
                observation=None,
                answer=response,
                messages_ids=[],
                llm_usage=current_llm_usage,
            )
            self.queue_manager.publish(
                QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER
            )

            final_answer += response + "\n"
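
            # any tool calls requested this round are executed below; their
            # outputs are appended to self._current_thoughts so the next
            # round's prompt can include them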

            # call tools
            tool_responses = []
            for tool_call_id, tool_call_name, tool_call_args in tool_calls:
                tool_instance = tool_instances.get(tool_call_name)
                if not tool_instance:
                    tool_response = {
                        "tool_call_id": tool_call_id,
                        "tool_call_name": tool_call_name,
                        "tool_response": f"there is no tool named {tool_call_name}",
                        "meta": ToolInvokeMeta.error_instance(f"there is no tool named {tool_call_name}").to_dict(),
                    }
                else:
                    # invoke tool
                    tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke(
                        tool=tool_instance,
                        tool_parameters=tool_call_args,
                        user_id=self.user_id,
                        tenant_id=self.tenant_id,
                        message=self.message,
                        invoke_from=self.application_generate_entity.invoke_from,
                        agent_tool_callback=self.agent_callback,
                        trace_manager=trace_manager,
                        app_id=self.application_generate_entity.app_config.app_id,
                        message_id=self.message.id,
                        conversation_id=self.conversation.id,
                    )
                    # publish files
                    for message_file_id in message_files:
                        # publish message file
                        self.queue_manager.publish(
                            QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER
                        )
                        # add message file ids
                        message_file_ids.append(message_file_id)

                    tool_response = {
                        "tool_call_id": tool_call_id,
                        "tool_call_name": tool_call_name,
                        "tool_response": tool_invoke_response,
                        "meta": tool_invoke_meta.to_dict(),
                    }

                tool_responses.append(tool_response)
                if tool_response["tool_response"] is not None:
                    self._current_thoughts.append(
                        ToolPromptMessage(
                            content=str(tool_response["tool_response"]),
                            tool_call_id=tool_call_id,
                            name=tool_call_name,
                        )
                    )

            if len(tool_responses) > 0:
                # save agent thought
                self.save_agent_thought(
                    agent_thought=agent_thought,
                    tool_name="",
                    tool_input="",
                    thought="",
                    tool_invoke_meta={
                        tool_response["tool_call_name"]: tool_response["meta"] for tool_response in tool_responses
                    },
                    observation={
                        tool_response["tool_call_name"]: tool_response["tool_response"]
                        for tool_response in tool_responses
                    },
                    answer="",
                    messages_ids=message_file_ids,
                )
                self.queue_manager.publish(
                    QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER
                )

            # update prompt tool
            for prompt_tool in prompt_messages_tools:
                self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool)

            iteration_step += 1

        # publish end event
        self.queue_manager.publish(
            QueueMessageEndEvent(
                llm_result=LLMResult(
                    model=model_instance.model,
                    prompt_messages=prompt_messages,
                    message=AssistantPromptMessage(content=final_answer),
                    usage=llm_usage["usage"] or LLMUsage.empty_usage(),
                    system_fingerprint="",
                )
            ),
            PublishFrom.APPLICATION_MANAGER,
        )
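
    # A minimal usage sketch (illustrative only; in practice this runner is
    # constructed and driven by the agent-chat app runner, and the constructor
    # arguments shown here are elided):
    #
    #   runner = FunctionCallAgentRunner(...)  # hypothetical setup
    #   for chunk in runner.run(message, query="What's the weather in Paris?"):
    #       print(chunk.delta.message.content or "", end="")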

    def check_tool_calls(self, llm_result_chunk: LLMResultChunk) -> bool:
        """
        Check whether the LLM result chunk contains any tool call.
        """
        return bool(llm_result_chunk.delta.message.tool_calls)

    def check_blocking_tool_calls(self, llm_result: LLMResult) -> bool:
        """
        Check whether the blocking (non-streaming) LLM result contains any tool call.
        """
        return bool(llm_result.message.tool_calls)

    def extract_tool_calls(self, llm_result_chunk: LLMResultChunk) -> list[tuple[str, str, dict[str, Any]]]:
        """
        Extract tool calls from the LLM result chunk.

        Returns:
            List[Tuple[str, str, Dict[str, Any]]]: [(tool_call_id, tool_call_name, tool_call_args)]
        """
        tool_calls = []
        for prompt_message in llm_result_chunk.delta.message.tool_calls:
            args = {}
            if prompt_message.function.arguments != "":
                args = json.loads(prompt_message.function.arguments)

            tool_calls.append(
                (
                    prompt_message.id,
                    prompt_message.function.name,
                    args,
                )
            )

        return tool_calls
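
    # For example, a streamed tool call whose function is named "search" with
    # arguments '{"q": "dify"}' (illustrative values) is extracted as
    # ("<tool_call_id>", "search", {"q": "dify"}).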

    def extract_blocking_tool_calls(self, llm_result: LLMResult) -> list[tuple[str, str, dict[str, Any]]]:
        """
        Extract tool calls from a blocking (non-streaming) LLM result.

        Returns:
            List[Tuple[str, str, Dict[str, Any]]]: [(tool_call_id, tool_call_name, tool_call_args)]
        """
        tool_calls = []
        for prompt_message in llm_result.message.tool_calls:
            args = {}
            if prompt_message.function.arguments != "":
                args = json.loads(prompt_message.function.arguments)

            tool_calls.append(
                (
                    prompt_message.id,
                    prompt_message.function.name,
                    args,
                )
            )

        return tool_calls
    def _init_system_message(self, prompt_template: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
        """
        Initialize the system message from the app's prompt template.
        """
        if not prompt_messages and prompt_template:
            return [
                SystemPromptMessage(content=prompt_template),
            ]

        if prompt_messages and not isinstance(prompt_messages[0], SystemPromptMessage) and prompt_template:
            prompt_messages.insert(0, SystemPromptMessage(content=prompt_template))

        return prompt_messages or []

    def _organize_user_query(self, query: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
        """
        Organize the user query, attaching uploaded files as multimodal content when present.
        """
        if self.files:
            prompt_message_contents: list[PromptMessageContent] = []
            prompt_message_contents.append(TextPromptMessageContent(data=query))

            # get image detail config
            image_detail_config = (
                self.application_generate_entity.file_upload_config.image_config.detail
                if (
                    self.application_generate_entity.file_upload_config
                    and self.application_generate_entity.file_upload_config.image_config
                )
                else None
            )
            image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
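            # LOW detail is the default when the app's file-upload config does
            # not specify one; each uploaded file converted below inherits it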
            for file in self.files:
                prompt_message_contents.append(
                    file_manager.to_prompt_message_content(
                        file,
                        image_detail_config=image_detail_config,
                    )
                )

            prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
        else:
            prompt_messages.append(UserPromptMessage(content=query))

        return prompt_messages

    def _clear_user_prompt_image_messages(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
        """
        For now, GPT models support both function calling and vision only on the first iteration,
        so image messages are replaced with text placeholders on subsequent iterations.
        """
        prompt_messages = deepcopy(prompt_messages)
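        # work on a deep copy so the mutations below don't touch the caller's
        # original multimodal message content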

        for prompt_message in prompt_messages:
            if isinstance(prompt_message, UserPromptMessage):
                if isinstance(prompt_message.content, list):
                    prompt_message.content = "\n".join(
                        [
                            content.data
                            if content.type == PromptMessageContentType.TEXT
                            else "[image]"
                            if content.type == PromptMessageContentType.IMAGE
                            else "[file]"
                            for content in prompt_message.content
                        ]
                    )

        return prompt_messages

    def _organize_prompt_messages(self):
        prompt_template = self.app_config.prompt_template.simple_prompt_template or ""
        self.history_prompt_messages = self._init_system_message(prompt_template, self.history_prompt_messages)
        query_prompt_messages = self._organize_user_query(self.query or "", [])

        self.history_prompt_messages = AgentHistoryPromptTransform(
            model_config=self.model_config,
            prompt_messages=[*query_prompt_messages, *self._current_thoughts],
            history_messages=self.history_prompt_messages,
            memory=self.memory,
        ).get_prompt()
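        # the transform is expected to trim older history so that history +
        # current query + intermediate thoughts still fit within the model's
        # context budget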

        prompt_messages = [*self.history_prompt_messages, *query_prompt_messages, *self._current_thoughts]
        if len(self._current_thoughts) != 0:
            # strip image content from user messages after the first iteration
            prompt_messages = self._clear_user_prompt_image_messages(prompt_messages)
        return prompt_messages