Fix: rerank_model and pdf_parser bugs | Update: session API (#2601)

### What problem does this PR solve?

Fix: rerank_model and pdf_parser bugs | Update: session API
#2575
#2559
### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
- [x] Refactoring

---------

Co-authored-by: liuhua <10215101452@stu.ecun.edu.cn>
This commit is contained in:
liuhua 2024-09-26 16:05:25 +08:00 committed by GitHub
parent f6bfe4d970
commit b68d349bd6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 68 additions and 41 deletions

View File

@ -87,9 +87,9 @@ def completion(tenant_id):
# req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [ # req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
# {"role": "user", "content": "上海有吗?"} # {"role": "user", "content": "上海有吗?"}
# ]} # ]}
if "id" not in req: if "session_id" not in req:
return get_data_error_result(retmsg="id is required") return get_data_error_result(retmsg="session_id is required")
conv = ConversationService.query(id=req["id"]) conv = ConversationService.query(id=req["session_id"])
if not conv: if not conv:
return get_data_error_result(retmsg="Session does not exist") return get_data_error_result(retmsg="Session does not exist")
conv = conv[0] conv = conv[0]
@ -108,7 +108,7 @@ def completion(tenant_id):
msg.append(m) msg.append(m)
message_id = msg[-1].get("id") message_id = msg[-1].get("id")
e, dia = DialogService.get_by_id(conv.dialog_id) e, dia = DialogService.get_by_id(conv.dialog_id)
del req["id"] del req["session_id"]
if not conv.reference: if not conv.reference:
conv.reference = [] conv.reference = []
@ -168,6 +168,9 @@ def get(tenant_id):
return get_data_error_result(retmsg="Session does not exist") return get_data_error_result(retmsg="Session does not exist")
if not DialogService.query(id=conv[0].dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value): if not DialogService.query(id=conv[0].dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
return get_data_error_result(retmsg="You do not own the session") return get_data_error_result(retmsg="You do not own the session")
if "assistant_id" in req:
if req["assistant_id"] != conv[0].dialog_id:
return get_data_error_result(retmsg="The session doesn't belong to the assistant")
conv = conv[0].to_dict() conv = conv[0].to_dict()
conv['messages'] = conv.pop("message") conv['messages'] = conv.pop("message")
conv["assistant_id"] = conv.pop("dialog_id") conv["assistant_id"] = conv.pop("dialog_id")
@ -207,7 +210,7 @@ def list(tenant_id):
assistant_id = request.args["assistant_id"] assistant_id = request.args["assistant_id"]
if not DialogService.query(tenant_id=tenant_id, id=assistant_id, status=StatusEnum.VALID.value): if not DialogService.query(tenant_id=tenant_id, id=assistant_id, status=StatusEnum.VALID.value):
return get_json_result( return get_json_result(
data=False, retmsg=f'Only owner of the assistant is authorized for this operation.', data=False, retmsg=f"You don't own the assistant.",
retcode=RetCode.OPERATING_ERROR) retcode=RetCode.OPERATING_ERROR)
convs = ConversationService.query( convs = ConversationService.query(
dialog_id=assistant_id, dialog_id=assistant_id,

View File

@ -488,7 +488,7 @@ class RAGFlowPdfParser:
i += 1 i += 1
continue continue
if not down["text"].strip(): if not down["text"].strip() or not up["text"].strip():
i += 1 i += 1
continue continue

View File

@ -26,9 +26,11 @@ from api.utils.file_utils import get_home_cache_dir
from rag.utils import num_tokens_from_string, truncate from rag.utils import num_tokens_from_string, truncate
import json import json
def sigmoid(x): def sigmoid(x):
return 1 / (1 + np.exp(-x)) return 1 / (1 + np.exp(-x))
class Base(ABC): class Base(ABC):
def __init__(self, key, model_name): def __init__(self, key, model_name):
pass pass
@ -59,10 +61,13 @@ class DefaultRerank(Base):
with DefaultRerank._model_lock: with DefaultRerank._model_lock:
if not DefaultRerank._model: if not DefaultRerank._model:
try: try:
DefaultRerank._model = FlagReranker(os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)), use_fp16=torch.cuda.is_available()) DefaultRerank._model = FlagReranker(
os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)),
use_fp16=torch.cuda.is_available())
except Exception as e: except Exception as e:
model_dir = snapshot_download(repo_id=model_name, model_dir = snapshot_download(repo_id=model_name,
local_dir=os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)), local_dir=os.path.join(get_home_cache_dir(),
re.sub(r"^[a-zA-Z]+/", "", model_name)),
local_dir_use_symlinks=False) local_dir_use_symlinks=False)
DefaultRerank._model = FlagReranker(model_dir, use_fp16=torch.cuda.is_available()) DefaultRerank._model = FlagReranker(model_dir, use_fp16=torch.cuda.is_available())
self._model = DefaultRerank._model self._model = DefaultRerank._model
@ -77,8 +82,10 @@ class DefaultRerank(Base):
for i in range(0, len(pairs), batch_size): for i in range(0, len(pairs), batch_size):
scores = self._model.compute_score(pairs[i:i + batch_size], max_length=2048) scores = self._model.compute_score(pairs[i:i + batch_size], max_length=2048)
scores = sigmoid(np.array(scores)).tolist() scores = sigmoid(np.array(scores)).tolist()
if isinstance(scores, float): res.append(scores) if isinstance(scores, float):
else: res.extend(scores) res.append(scores)
else:
res.extend(scores)
return np.array(res), token_count return np.array(res), token_count
@ -101,7 +108,10 @@ class JinaRerank(Base):
"top_n": len(texts) "top_n": len(texts)
} }
res = requests.post(self.base_url, headers=self.headers, json=data).json() res = requests.post(self.base_url, headers=self.headers, json=data).json()
return np.array([d["relevance_score"] for d in res["results"]]), res["usage"]["total_tokens"] rank = np.zeros(len(texts), dtype=float)
for d in res["results"]:
rank[d["index"]] = d["relevance_score"]
return rank, res["usage"]["total_tokens"]
class YoudaoRerank(DefaultRerank): class YoudaoRerank(DefaultRerank):
@ -135,8 +145,10 @@ class YoudaoRerank(DefaultRerank):
for i in range(0, len(pairs), batch_size): for i in range(0, len(pairs), batch_size):
scores = self._model.compute_score(pairs[i:i + batch_size], max_length=self._model.max_length) scores = self._model.compute_score(pairs[i:i + batch_size], max_length=self._model.max_length)
scores = sigmoid(np.array(scores)).tolist() scores = sigmoid(np.array(scores)).tolist()
if isinstance(scores, float): res.append(scores) if isinstance(scores, float):
else: res.extend(scores) res.append(scores)
else:
res.extend(scores)
return np.array(res), token_count return np.array(res), token_count
@ -162,7 +174,10 @@ class XInferenceRerank(Base):
"documents": texts "documents": texts
} }
res = requests.post(self.base_url, headers=self.headers, json=data).json() res = requests.post(self.base_url, headers=self.headers, json=data).json()
return np.array([d["relevance_score"] for d in res["results"]]), res["meta"]["tokens"]["input_tokens"]+res["meta"]["tokens"]["output_tokens"] rank = np.zeros(len(texts), dtype=float)
for d in res["results"]:
rank[d["index"]] = d["relevance_score"]
return rank, res["meta"]["tokens"]["input_tokens"] + res["meta"]["tokens"]["output_tokens"]
class LocalAIRerank(Base): class LocalAIRerank(Base):
@ -208,9 +223,10 @@ class NvidiaRerank(Base):
"top_n": len(texts), "top_n": len(texts),
} }
res = requests.post(self.base_url, headers=self.headers, json=data).json() res = requests.post(self.base_url, headers=self.headers, json=data).json()
rank = np.array([d["logit"] for d in res["rankings"]]) rank = np.zeros(len(texts), dtype=float)
indexs = [d["index"] for d in res["rankings"]] for d in res["rankings"]:
return rank[indexs], token_count rank[d["index"]] = d["logit"]
return rank, token_count
class LmStudioRerank(Base): class LmStudioRerank(Base):
@ -247,9 +263,10 @@ class CoHereRerank(Base):
top_n=len(texts), top_n=len(texts),
return_documents=False, return_documents=False,
) )
rank = np.array([d.relevance_score for d in res.results]) rank = np.zeros(len(texts), dtype=float)
indexs = [d.index for d in res.results] for d in res.results:
return rank[indexs], token_count rank[d.index] = d.relevance_score
return rank, token_count
class TogetherAIRerank(Base): class TogetherAIRerank(Base):
@ -287,10 +304,11 @@ class SILICONFLOWRerank(Base):
response = requests.post( response = requests.post(
self.base_url, json=payload, headers=self.headers self.base_url, json=payload, headers=self.headers
).json() ).json()
rank = np.array([d["relevance_score"] for d in response["results"]]) rank = np.zeros(len(texts), dtype=float)
indexs = [d["index"] for d in response["results"]] for d in response["results"]:
rank[d["index"]] = d["relevance_score"]
return ( return (
rank[indexs], rank,
response["meta"]["tokens"]["input_tokens"] + response["meta"]["tokens"]["output_tokens"], response["meta"]["tokens"]["input_tokens"] + response["meta"]["tokens"]["output_tokens"],
) )
@ -312,9 +330,10 @@ class BaiduYiyanRerank(Base):
documents=texts, documents=texts,
top_n=len(texts), top_n=len(texts),
).body ).body
rank = np.array([d["relevance_score"] for d in res["results"]]) rank = np.zeros(len(texts), dtype=float)
indexs = [d["index"] for d in res["results"]] for d in res["results"]:
return rank[indexs], res["usage"]["total_tokens"] rank[d["index"]] = d["relevance_score"]
return rank, res["usage"]["total_tokens"]
class VoyageRerank(Base): class VoyageRerank(Base):
@ -328,6 +347,7 @@ class VoyageRerank(Base):
res = self.client.rerank( res = self.client.rerank(
query=query, documents=texts, model=self.model_name, top_k=len(texts) query=query, documents=texts, model=self.model_name, top_k=len(texts)
) )
rank = np.array([r.relevance_score for r in res.results]) rank = np.zeros(len(texts), dtype=float)
indexs = [r.index for r in res.results] for r in res.results:
return rank[indexs], res.total_tokens rank[r.index] = r.relevance_score
return rank, res.total_tokens

View File

@ -76,7 +76,7 @@ class Assistant(Base):
raise Exception(res["retmsg"]) raise Exception(res["retmsg"])
def get_session(self, id) -> Session: def get_session(self, id) -> Session:
res = self.get("/session/get", {"id": id}) res = self.get("/session/get", {"id": id,"assistant_id":self.id})
res = res.json() res = res.json()
if res.get("retmsg") == "success": if res.get("retmsg") == "success":
return Session(self.rag, res["data"]) return Session(self.rag, res["data"])

View File

@ -16,9 +16,12 @@ class Session(Base):
if "reference" in message: if "reference" in message:
message.pop("reference") message.pop("reference")
res = self.post("/session/completion", res = self.post("/session/completion",
{"id": self.id, "question": question, "stream": stream}, stream=True) {"session_id": self.id, "question": question, "stream": True}, stream=stream)
for line in res.iter_lines(): for line in res.iter_lines():
line = line.decode("utf-8") line = line.decode("utf-8")
if line.startswith("{"):
json_data = json.loads(line)
raise Exception(json_data["retmsg"])
if line.startswith("data:"): if line.startswith("data:"):
json_data = json.loads(line[5:]) json_data = json.loads(line[5:])
if json_data["data"] != True: if json_data["data"] != True:
@ -69,6 +72,7 @@ class Message(Base):
self.reference = None self.reference = None
self.role = "assistant" self.role = "assistant"
self.prompt = None self.prompt = None
self.id = None
super().__init__(rag, res_dict) super().__init__(rag, res_dict)
@ -76,10 +80,10 @@ class Chunk(Base):
def __init__(self, rag, res_dict): def __init__(self, rag, res_dict):
self.id = None self.id = None
self.content = None self.content = None
self.document_id = None self.document_id = ""
self.document_name = None self.document_name = ""
self.knowledgebase_id = None self.knowledgebase_id = ""
self.image_id = None self.image_id = ""
self.similarity = None self.similarity = None
self.vector_similarity = None self.vector_similarity = None
self.term_similarity = None self.term_similarity = None

View File

@ -19,7 +19,7 @@ class TestSession:
question = "What is AI" question = "What is AI"
for ans in session.chat(question, stream=True): for ans in session.chat(question, stream=True):
pass pass
assert ans.content!="\n**ERROR**", "Please check this error." assert not ans.content.startswith("**ERROR**"), "Please check this error."
def test_delete_session_with_success(self): def test_delete_session_with_success(self):
rag = RAGFlow(API_KEY, HOST_ADDRESS) rag = RAGFlow(API_KEY, HOST_ADDRESS)