Mirror of https://git.mirrors.martin98.com/https://github.com/infiniflow/ragflow.git (synced 2025-08-14 18:45:57 +08:00)
Fix multiple generate (#1722)

### What problem does this PR solve?

#1625

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
parent 61096596bc
commit 013856b604
```diff
@@ -59,8 +59,10 @@ class Answer(ComponentBase, ABC):
         stream = self.get_stream_input()
         if isinstance(stream, pd.DataFrame):
             res = stream
+            answer = ""
             for ii, row in stream.iterrows():
-                yield row.to_dict()
+                answer += row.to_dict()["content"]
+            yield {"content": answer}
         else:
             for st in stream():
                 res = st
```
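With this change, a DataFrame arriving at the Answer component is folded into a single message instead of being emitted row by row. A minimal, self-contained sketch of the before/after behavior (plain pandas; the chunk texts are made up for illustration):

```python
import pandas as pd

# Stand-in for the stream input Answer receives when an upstream Generate
# returns a multi-row DataFrame (e.g. an answer split around citations).
stream = pd.DataFrame([
    {"content": "RAGFlow is a RAG engine ##0$$"},
    {"content": " built on deep document understanding."},
])

def stream_output_old(stream):
    # Old behavior: one partial dict per row.
    for _, row in stream.iterrows():
        yield row.to_dict()

def stream_output_new(stream):
    # New behavior: concatenate the rows, then emit one complete answer.
    answer = ""
    for _, row in stream.iterrows():
        answer += row.to_dict()["content"]
    yield {"content": answer}

print(list(stream_output_old(stream)))  # two fragments
print(list(stream_output_new(stream)))  # one combined {"content": ...}
```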
```diff
@@ -67,85 +67,11 @@ class Generate(ComponentBase):
         cpnts = [para["component_id"] for para in self._param.parameters]
         return cpnts
 
-    def _run(self, history, **kwargs):
-        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
-        prompt = self._param.prompt
-
-        retrieval_res = self.get_input()
-        input = (" - " + "\n - ".join(retrieval_res["content"])) if "content" in retrieval_res else ""
-        for para in self._param.parameters:
-            cpn = self._canvas.get_component(para["component_id"])["obj"]
-            _, out = cpn.output(allow_partial=False)
-            if "content" not in out.columns:
-                kwargs[para["key"]] = "Nothing"
-            else:
-                kwargs[para["key"]] = " - " + "\n - ".join(out["content"])
-
-        kwargs["input"] = input
-        for n, v in kwargs.items():
-            # prompt = re.sub(r"\{%s\}"%n, re.escape(str(v)), prompt)
-            prompt = re.sub(r"\{%s\}" % n, str(v), prompt)
-
-        downstreams = self._canvas.get_component(self._id)["downstream"]
-        if kwargs.get("stream") \
-                and len(downstreams) == 1 \
-                and self._canvas.get_component(downstreams[0])["obj"].component_name.lower() == "answer":
-            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)
-
-        if "empty_response" in retrieval_res.columns:
-            return Generate.be_output(input)
-
-        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size),
-                            self._param.gen_conf())
-
-        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
-            ans, idx = retrievaler.insert_citations(ans,
-                                                    [ck["content_ltks"]
-                                                     for _, ck in retrieval_res.iterrows()],
-                                                    [ck["vector"]
-                                                     for _, ck in retrieval_res.iterrows()],
-                                                    LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
-                                                              self._canvas.get_embedding_model()),
-                                                    tkweight=0.7,
-                                                    vtweight=0.3)
-            del retrieval_res["vector"]
-            retrieval_res = retrieval_res.to_dict("records")
-            df = []
-            for i in idx:
-                df.append(retrieval_res[int(i)])
-                r = re.search(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), ans)
-                assert r, f"{i} => {ans}"
-                df[-1]["content"] = r.group(1)
-                ans = re.sub(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), "", ans)
-            if ans: df.append({"content": ans})
-            return pd.DataFrame(df)
-
-        return Generate.be_output(ans)
-
-    def stream_output(self, chat_mdl, prompt, retrieval_res):
-        res = None
-        if "empty_response" in retrieval_res.columns and "\n- ".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["content"]), "reference": []}
-            yield res
-            self.set_output(res)
-            return
-
-        answer = ""
-        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size),
-                                          self._param.gen_conf()):
-            res = {"content": ans, "reference": []}
-            answer = ans
-            yield res
-
-        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
-            answer, idx = retrievaler.insert_citations(answer,
-                                                       [ck["content_ltks"]
-                                                        for _, ck in retrieval_res.iterrows()],
-                                                       [ck["vector"]
-                                                        for _, ck in retrieval_res.iterrows()],
-                                                       LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
-                                                                 self._canvas.get_embedding_model()),
-                                                       tkweight=0.7,
-                                                       vtweight=0.3)
+    def set_cite(self, retrieval_res, answer):
+        answer, idx = retrievaler.insert_citations(answer, [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
+                                                   [ck["vector"] for _, ck in retrieval_res.iterrows()],
+                                                   LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
+                                                             self._canvas.get_embedding_model()), tkweight=0.7,
+                                                   vtweight=0.3)
         doc_ids = set([])
         recall_docs = []
```
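The point of this hunk is deduplication: `_run` and `stream_output` previously carried near-identical calls to `retrievaler.insert_citations`, and both now delegate to the new `set_cite`. The sketch below captures that shape with a stubbed `insert_citations`; in RAGFlow the real helper needs an embedding model via `LLMBundle`, and the real `set_cite` goes on to build `doc_ids`/`recall_docs` and the reference payload, abbreviated here:

```python
import pandas as pd

def insert_citations(answer, chunk_tokens, chunk_vectors, tkweight=0.7, vtweight=0.3):
    # Stub for retrievaler.insert_citations: append a citation marker and
    # report which chunk indices got cited. The real helper matches answer
    # spans to chunks by token overlap (tkweight) and vector similarity (vtweight).
    return answer + " ##0$$", [0]

class GenerateSketch:
    """Hypothetical, slimmed-down Generate holding only the shared citation path."""

    def set_cite(self, retrieval_res: pd.DataFrame, answer: str) -> dict:
        answer, idx = insert_citations(
            answer,
            [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
            [ck["vector"] for _, ck in retrieval_res.iterrows()],
            tkweight=0.7,
            vtweight=0.3,
        )
        # Abbreviated: the real method also dedupes the cited documents into
        # doc_ids / recall_docs before shaping the reference payload.
        reference = [retrieval_res.iloc[int(i)].to_dict() for i in idx]
        return {"content": answer, "reference": reference}

chunks = pd.DataFrame([{"content_ltks": "ragflow rag engine", "vector": [0.1, 0.2]}])
print(GenerateSketch().set_cite(chunks, "RAGFlow is a RAG engine."))
```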
```diff
@@ -166,6 +92,61 @@ class Generate(ComponentBase):
         if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
             answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
         res = {"content": answer, "reference": reference}
+
+        return res
+
+    def _run(self, history, **kwargs):
+        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
+        prompt = self._param.prompt
+
+        retrieval_res = self.get_input()
+        input = (" - " + "\n - ".join(retrieval_res["content"])) if "content" in retrieval_res else ""
+        for para in self._param.parameters:
+            cpn = self._canvas.get_component(para["component_id"])["obj"]
+            _, out = cpn.output(allow_partial=False)
+            if "content" not in out.columns:
+                kwargs[para["key"]] = "Nothing"
+            else:
+                kwargs[para["key"]] = " - " + "\n - ".join(out["content"])
+
+        kwargs["input"] = input
+        for n, v in kwargs.items():
+            # prompt = re.sub(r"\{%s\}"%n, re.escape(str(v)), prompt)
+            prompt = re.sub(r"\{%s\}" % n, str(v), prompt)
+
+        downstreams = self._canvas.get_component(self._id)["downstream"]
+        if kwargs.get("stream") and len(downstreams) == 1 and self._canvas.get_component(downstreams[0])[
+            "obj"].component_name.lower() == "answer":
+            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)
+
+        if "empty_response" in retrieval_res.columns:
+            return Generate.be_output(input)
+
+        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size),
+                            self._param.gen_conf())
+        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
+            df = self.set_cite(retrieval_res, ans)
+            return pd.DataFrame(df)
+
+        return Generate.be_output(ans)
+
+    def stream_output(self, chat_mdl, prompt, retrieval_res):
+        res = None
+        if "empty_response" in retrieval_res.columns and "\n- ".join(retrieval_res["content"]):
+            res = {"content": "\n- ".join(retrieval_res["content"]), "reference": []}
+            yield res
+            self.set_output(res)
+            return
+
+        answer = ""
+        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size),
+                                          self._param.gen_conf()):
+            res = {"content": ans, "reference": []}
+            answer = ans
+            yield res
+
+        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
+            res = self.set_cite(retrieval_res, answer)
             yield res
 
         self.set_output(res)
```
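One detail worth noting in the relocated `_run`: prompt placeholders are filled with `re.sub(r"\{%s\}" % n, str(v), prompt)`, and the commented-out line shows an earlier `re.escape` variant (`re.escape` escapes regex *pattern* metacharacters, not replacement text, so it would leak literal backslashes into the prompt). A runnable sketch of the substitution loop, with illustrative keys (`profile` is a hypothetical parameter name):

```python
import re

# Illustrative template and values, mirroring how _run fills {key} placeholders.
prompt = "Context:\n{input}\nUser profile: {profile}"
kwargs = {
    "input": " - RAGFlow supports agentic workflows.",
    "profile": "Nothing",  # the fallback when a component exposes no "content" column
}

for n, v in kwargs.items():
    # As in the diff. Caveat: str(v) is a regex *replacement* string, so "\1"
    # or "\\" inside a value would be interpreted by re.sub.
    prompt = re.sub(r"\{%s\}" % n, str(v), prompt)

# A safer variant for literal values: a callable replacement is inserted verbatim.
safe = re.sub(r"\{input\}", lambda _m: "C:\\docs\\1.txt", "Path: {input}")

print(prompt)
print(safe)  # Path: C:\docs\1.txt
```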