# upgrade laws parser of docx (#1332)

### What problem does this PR solve?

Replaces the `DocxNode` tree in the laws docx parser with a flat, level-based sectioning pass, and adds an optional LLM keyword-extraction step to retrieval and chat.

### Type of change

- [x] Refactoring
Commit 92e9320657 (parent 5eb21b9c7c)
```diff
@@ -20,7 +20,7 @@ from flask_login import login_required, current_user
 from elasticsearch_dsl import Q
 from rag.app.qa import rmPrefix, beAdoc
-from rag.nlp import search, rag_tokenizer
+from rag.nlp import search, rag_tokenizer, keyword_extraction
 from rag.utils.es_conn import ELASTICSEARCH
 from rag.utils import rmSpace
 from api.db import LLMType, ParserType
```
```diff
@@ -268,6 +268,10 @@ def retrieval_test():
             rerank_mdl = TenantLLMService.model_instance(
                 kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
 
+        if req.get("keyword", False):
+            chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
+            question += keyword_extraction(chat_mdl, question)
+
         ranks = retrievaler.retrieval(question, embd_mdl, kb.tenant_id, [kb_id], page, size,
                                       similarity_threshold, vector_similarity_weight, top,
                                       doc_ids, rerank_mdl=rerank_mdl)
```
```diff
@@ -23,7 +23,7 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
 from api.settings import chat_logger, retrievaler
 from rag.app.resume import forbidden_select_fields4resume
-from rag.nlp.rag_tokenizer import is_chinese
+from rag.nlp import keyword_extraction
 from rag.nlp.search import index_name
 from rag.utils import rmSpace, num_tokens_from_string, encoder
 
```
```diff
@@ -121,6 +121,8 @@ def chat(dialog, messages, stream=True, **kwargs):
     if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
         kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
     else:
+        if prompt_config.get("keyword", False):
+            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
         kbinfos = retrievaler.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                         dialog.similarity_threshold,
                                         dialog.vector_similarity_weight,
```
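Both call sites gate the new behavior the same way: keyword expansion is opt-in (`req.get("keyword", False)` in `retrieval_test`, `prompt_config.get("keyword", False)` in `chat`) and purely additive, appending extracted terms to the query string before retrieval. A minimal sketch of that pattern, with a lambda standing in for `keyword_extraction` (the sample question and keyword string are invented for illustration):

```python
# The shared gating pattern: keyword expansion only runs when the caller
# opts in, and it never replaces the query, only extends it.
def expand_query(question, opts, extract):
    if opts.get("keyword", False):
        question += extract(question)
    return question


print(expand_query("What is the penalty for late delivery?",
                   {"keyword": True},
                   lambda q: " - keyword: late delivery penalty"))
# -> "What is the penalty for late delivery? - keyword: late delivery penalty"
```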
```diff
@@ -54,30 +54,14 @@ class Docx(DocxParser):
         self.doc = Document(
             filename) if not binary else Document(BytesIO(binary))
         pn = 0
-        last_question, last_answer, last_level = "", "", -1
         lines = []
-        root = DocxNode()
-        point = root
         bull = bullets_category([p.text for p in self.doc.paragraphs])
         for p in self.doc.paragraphs:
             if pn > to_page:
                 break
-            question_level, p_text = 0, ''
-            if from_page <= pn < to_page and p.text.strip():
-                question_level, p_text = docx_question_level(p, bull)
-            if not question_level or question_level > 6: # not a question
-                last_answer = f'{last_answer}\n{p_text}'
-            else:   # is a question
-                if last_question:
-                    while last_level <= point.level:
-                        point = point.parent
-                    new_node = DocxNode(last_question, last_answer, last_level, [], point)
-                    point.childs.append(new_node)
-                    point = new_node
-                    last_question, last_answer, last_level = '', '', -1
-                last_level = question_level
-                last_answer = ''
-                last_question = p_text
+            question_level, p_text = docx_question_level(p, bull)
+            if not p_text.strip("\n"): continue
+            lines.append((question_level, p_text))
 
             for run in p.runs:
                 if 'lastRenderedPageBreak' in run._element.xml:
```
```diff
@@ -85,31 +69,29 @@ class Docx(DocxParser):
                     continue
                 if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                     pn += 1
-        if last_question:
-            while last_level <= point.level:
-                point = point.parent
-            new_node = DocxNode(last_question, last_answer, last_level, [], point)
-            point.childs.append(new_node)
-            point = new_node
-            last_question, last_answer, last_level = '', '', -1
-        traversal_queue = [root]
-        while traversal_queue:
-            current_node: DocxNode = traversal_queue.pop()
-            sum_text = f'{self.__clean(current_node.question)}\n{self.__clean(current_node.answer)}'
-            if not current_node.childs and not current_node.answer.strip():
-                continue
-            for child in current_node.childs:
-                sum_text = f'{sum_text}\n{self.__clean(child.question)}'
-                traversal_queue.insert(0, child)
-            lines.append(self.__clean(sum_text))
-        return [l for l in lines if l]
-class DocxNode:
-    def __init__(self, question: str = '', answer: str = '', level: int = 0, childs: list = [], parent = None) -> None:
-        self.question = question
-        self.answer = answer
-        self.level = level
-        self.childs = childs
-        self.parent = parent
+        visit = [False for _ in range(len(lines))]
+        sections = []
+        for s in range(len(lines)):
+            e = s + 1
+            while e < len(lines):
+                if lines[e][0] <= lines[s][0]:
+                    break
+                e += 1
+            if e - s == 1 and visit[s]: continue
+            sec = []
+            next_level = lines[s][0] + 1
+            while not sec and next_level < 22:
+                for i in range(s+1, e):
+                    if lines[i][0] != next_level: continue
+                    sec.append(lines[i][1])
+                    visit[i] = True
+                next_level += 1
+            sec.insert(0, lines[s][1])
+            sections.append("\n".join(sec))
+        return [l for l in sections if l]
     def __str__(self) -> str:
         return f'''
         question:{self.question},
```
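The rewritten `__call__` drops the `DocxNode` tree in favor of a flat pass over `(level, text)` pairs: for each line it extends a window until the next line at the same or a shallower level, collects the shallowest populated sub-level beneath it (levels are capped at 22), and marks collected lines as visited so bare leaves are not emitted twice. A self-contained rerun of that loop on toy data (the sample headings are invented, and level 6 stands in for the body-text level that `docx_question_level` assigns):

```python
# Standalone rerun of the sectioning loop above on toy (level, text) pairs.
# Lower level = higher-ranked heading; 6 plays the role of plain body text.
lines = [
    (1, "Chapter I General Provisions"),
    (2, "Article 1"),
    (6, "Body text under Article 1."),
    (2, "Article 2"),
    (6, "Body text under Article 2."),
    (1, "Chapter II Liability"),
    (2, "Article 3"),
    (6, "Body text under Article 3."),
]

visit = [False for _ in range(len(lines))]
sections = []
for s in range(len(lines)):
    # Window [s+1, e): everything nested under line s.
    e = s + 1
    while e < len(lines):
        if lines[e][0] <= lines[s][0]:
            break
        e += 1
    # A childless line already swept into its parent's section is skipped.
    if e - s == 1 and visit[s]:
        continue
    sec = []
    next_level = lines[s][0] + 1
    # Keep only the shallowest sub-level that actually has content.
    while not sec and next_level < 22:
        for i in range(s + 1, e):
            if lines[i][0] != next_level:
                continue
            sec.append(lines[i][1])
            visit[i] = True
        next_level += 1
    sec.insert(0, lines[s][1])
    sections.append("\n".join(sec))

for sec in [l for l in sections if l]:
    print(sec, end="\n---\n")
# Chapters come out with their article headings, and each article comes out
# with its own body text, without duplicating leaf paragraphs.
```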
```diff
@@ -514,16 +514,19 @@ def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
 
     return cks
 
 
 def docx_question_level(p, bull = -1):
+    txt = re.sub(r"\u3000", " ", p.text).strip()
     if p.style.name.startswith('Heading'):
-        return int(p.style.name.split(' ')[-1]), re.sub(r"\u3000", " ", p.text).strip()
+        return int(p.style.name.split(' ')[-1]), txt
     else:
         if bull < 0:
-            return 0, re.sub(r"\u3000", " ", p.text).strip()
+            return 0, txt
         for j, title in enumerate(BULLET_PATTERN[bull]):
-            if re.match(title, re.sub(r"\u3000", " ", p.text).strip()):
+            if re.match(title, txt):
-                return j+1, re.sub(r"\u3000", " ", p.text).strip()
+                return j+1, txt
-        return 0, re.sub(r"\u3000", " ", p.text).strip()
+        return len(BULLET_PATTERN[bull]), txt
 
 
 def concat_img(img1, img2):
     if img1 and not img2:
```
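`docx_question_level` now computes the cleaned text once and, crucially, returns the deepest bullet level (`len(BULLET_PATTERN[bull])`) instead of 0 for unmatched body text, so plain paragraphs nest under the preceding heading in the sectioning pass above. A sketch with a stub paragraph object and a miniature two-pattern bullet table (both are illustrative stand-ins, not ragflow's real `BULLET_PATTERN` or python-docx objects):

```python
import re

# Miniature stand-in for one BULLET_PATTERN entry: chapter and article
# markers of Chinese statutes ("Chapter N" / "Article N").
BULLET_PATTERN = [[r"第[零一二三四五六七八九十百]+章", r"第[零一二三四五六七八九十百]+条"]]


class StubStyle:
    def __init__(self, name):
        self.name = name


class StubParagraph:
    # Exposes just the attributes docx_question_level reads.
    def __init__(self, text, style_name="Normal"):
        self.text = text
        self.style = StubStyle(style_name)


def docx_question_level(p, bull=-1):
    # Mirrors the refactored helper: clean once, reuse everywhere.
    txt = re.sub(r"\u3000", " ", p.text).strip()
    if p.style.name.startswith('Heading'):
        return int(p.style.name.split(' ')[-1]), txt
    if bull < 0:
        return 0, txt
    for j, title in enumerate(BULLET_PATTERN[bull]):
        if re.match(title, txt):
            return j + 1, txt
    # Unmatched body text now gets the deepest level instead of 0.
    return len(BULLET_PATTERN[bull]), txt


print(docx_question_level(StubParagraph("Overview", "Heading 1")))  # (1, 'Overview')
print(docx_question_level(StubParagraph("第一章 总则"), 0))           # chapter -> (1, ...)
print(docx_question_level(StubParagraph("本法自公布之日起施行。"), 0))  # body text -> (2, ...)
```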
```diff
@@ -544,6 +547,7 @@ def concat_img(img1, img2):
 
     return new_image
 
+
 def naive_merge_docx(sections, chunk_token_num=128, delimiter="\n。;!?"):
     if not sections:
         return []
```
```diff
@@ -574,3 +578,14 @@ def naive_merge_docx(sections, chunk_token_num=128, delimiter="\n。;!?"):
             add_chunk(sec, image, '')
 
     return cks, images
+
+
+def keyword_extraction(chat_mdl, content):
+    prompt = """
+You're a question analyzer.
+1. Please give me the most important keyword/phrase of this question.
+Answer format: (in language of user's question)
+ - keyword:
+"""
+    kwd, _ = chat_mdl.chat(prompt, [{"role": "user", "content": content}], {"temperature": 0.2})
+    return kwd
```
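As wired up at both call sites above, the helper's raw completion is appended verbatim to the user question. A minimal usage sketch with a stubbed chat model whose `chat` signature mirrors the call here (the stub and its returned keyword line are invented):

```python
class StubChatModel:
    # Stand-in for the tenant chat model used by keyword_extraction;
    # a real model returns (answer_text, token_count).
    def chat(self, system, history, gen_conf):
        return " - keyword: force majeure", 0


def keyword_extraction(chat_mdl, content):
    # Same body as the helper added in the hunk above.
    prompt = """
You're a question analyzer.
1. Please give me the most important keyword/phrase of this question.
Answer format: (in language of user's question)
 - keyword:
"""
    kwd, _ = chat_mdl.chat(prompt, [{"role": "user", "content": content}],
                           {"temperature": 0.2})
    return kwd


question = "What counts as force majeure under contract law?"
question += keyword_extraction(StubChatModel(), question)
print(question)  # original question with the model's keyword line appended
```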