Optimize docx handle method in laws parser (#1302)

### What problem does this PR solve?

Optimize the docx handling method in the laws parser.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

commit fc7cc1d36c (parent 751447bd4f)
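At a glance, the new `Docx.__call__` walks the document's paragraphs, classifies each one as a heading ("question") or body text ("answer") via `docx_question_level`, links the headings into a `DocxNode` tree by level, and flattens every node plus its direct child headings into one chunk. A minimal sketch of that tree-and-flatten idea, decoupled from python-docx (the `Node` class, `build_tree`/`flatten` helpers, and the sample paragraphs are all hypothetical stand-ins, not the parser itself):

```python
# Sketch of the chunking strategy in this PR: paragraphs arrive pre-classified
# as (level, text), where level 0 means body text and 1..6 are heading depths.

class Node:
    def __init__(self, question="", answer="", level=-1, parent=None):
        self.question, self.answer, self.level = question, answer, level
        self.parent = parent
        self.childs = []  # fresh list per instance

def build_tree(paragraphs):
    root = Node(level=-2)            # sentinel: every real heading nests below it
    point = root                     # the heading we are currently "inside"
    for level, text in paragraphs:
        if level == 0:               # body text: append to the current answer
            point.answer = f"{point.answer}\n{text}".strip()
            continue
        while level <= point.level:  # climb until the parent outranks this heading
            point = point.parent
        node = Node(question=text, level=level, parent=point)
        point.childs.append(node)
        point = node
    return root

def flatten(root):
    # Breadth-first walk, as in the diff: each chunk is a node's heading,
    # its body text, and the headings of its direct children.
    chunks, queue = [], [root]
    while queue:
        node = queue.pop()
        text = f"{node.question}\n{node.answer}".strip()
        for child in node.childs:
            text = f"{text}\n{child.question}"
            queue.insert(0, child)
        if text.strip():
            chunks.append(text.strip())
    return chunks

paragraphs = [
    (1, "Chapter I General Provisions"),
    (2, "Article 1"), (0, "This Law is enacted in order to..."),
    (2, "Article 2"), (0, "This Law applies to..."),
]
print(flatten(build_tree(paragraphs)))
```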
@@ -18,7 +18,7 @@ from docx import Document
 from api.db import ParserType
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
-    make_colon_as_title, add_positions, tokenize_chunks, find_codec
+    make_colon_as_title, add_positions, tokenize_chunks, find_codec, docx_question_level
 from rag.nlp import rag_tokenizer
 from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
 from rag.settings import cron_logger
@@ -32,7 +32,7 @@ class Docx(DocxParser):
         line = re.sub(r"\u3000", " ", line).strip()
         return line
 
-    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
+    def old_call(self, filename, binary=None, from_page=0, to_page=100000):
         self.doc = Document(
             filename) if not binary else Document(BytesIO(binary))
         pn = 0
@@ -50,6 +50,74 @@ class Docx(DocxParser):
                     pn += 1
         return [l for l in lines if l]
 
+    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
+        self.doc = Document(
+            filename) if not binary else Document(BytesIO(binary))
+        pn = 0
+        last_question, last_answer, last_level = "", "", -1
+        lines = []
+        root = DocxNode()
+        point = root
+        bull = bullets_category([p.text for p in self.doc.paragraphs])
+        for p in self.doc.paragraphs:
+            if pn > to_page:
+                break
+            question_level, p_text = 0, ''
+            if from_page <= pn < to_page and p.text.strip():
+                question_level, p_text = docx_question_level(p, bull)
+            if not question_level or question_level > 6:  # not a question
+                last_answer = f'{last_answer}\n{p_text}'
+            else:  # is a question
+                if last_question:
+                    while last_level <= point.level:
+                        point = point.parent
+                    new_node = DocxNode(last_question, last_answer, last_level, [], point)
+                    point.childs.append(new_node)
+                    point = new_node
+                    last_question, last_answer, last_level = '', '', -1
+                last_level = question_level
+                last_answer = ''
+                last_question = p_text
+
+            for run in p.runs:
+                if 'lastRenderedPageBreak' in run._element.xml:
+                    pn += 1
+                    continue
+                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
+                    pn += 1
+        if last_question:
+            while last_level <= point.level:
+                point = point.parent
+            new_node = DocxNode(last_question, last_answer, last_level, [], point)
+            point.childs.append(new_node)
+            point = new_node
+            last_question, last_answer, last_level = '', '', -1
+        traversal_queue = [root]
+        while traversal_queue:
+            current_node: DocxNode = traversal_queue.pop()
+            sum_text = f'{self.__clean(current_node.question)}\n{self.__clean(current_node.answer)}'
+            if not current_node.childs and not current_node.answer.strip():
+                continue
+            for child in current_node.childs:
+                sum_text = f'{sum_text}\n{self.__clean(child.question)}'
+                traversal_queue.insert(0, child)
+            lines.append(self.__clean(sum_text))
+        return [l for l in lines if l]
+
+class DocxNode:
+    def __init__(self, question: str = '', answer: str = '', level: int = 0, childs: list = [], parent=None) -> None:
+        self.question = question
+        self.answer = answer
+        self.level = level
+        self.childs = childs
+        self.parent = parent
+    def __str__(self) -> str:
+        return f'''
+            question:{self.question},
+            answer:{self.answer},
+            level:{self.level},
+            childs:{self.childs}
+        '''
+
+
 class Pdf(PdfParser):
     def __init__(self):
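One review note on `DocxNode`: `childs: list = []` is a mutable default argument, and `root = DocxNode()` relies on it, so every `__call__` in the same process shares one root child list, and chunks from an earlier document would leak into later ones. A minimal reproduction of the pitfall and the conventional fix (the class names here are illustrative):

```python
class BadNode:
    def __init__(self, childs: list = []):  # one shared list object, created once
        self.childs = childs

r1, r2 = BadNode(), BadNode()                # e.g. roots for two parsed documents
r1.childs.append("chapter from document 1")
print(r2.childs)  # ['chapter from document 1'] -- document 2 inherits it

class GoodNode:
    def __init__(self, childs: list = None):
        self.childs = childs if childs is not None else []  # fresh list each time

r1, r2 = GoodNode(), GoodNode()
r1.childs.append("chapter from document 1")
print(r2.childs)  # []
```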
@@ -94,11 +162,16 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
     doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
     pdf_parser = None
     sections = []
+    # is it English
+    eng = lang.lower() == "english"  # is_english(sections)
+
     if re.search(r"\.docx$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         for txt in Docx()(filename, binary):
             sections.append(txt)
         callback(0.8, "Finish parsing.")
+        chunks = sections
+        return tokenize_chunks(chunks, doc, eng, pdf_parser)
+
     elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf() if kwargs.get(
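Note that the `.docx` branch now returns early, handing the tree-flattened sections straight to `tokenize_chunks` and skipping the contents-table removal and hierarchical merge still applied to the other formats. A hedged usage sketch of the resulting entry point (the `lang` and `callback` keywords come from the diff itself; the module path and the `content_with_weight` field are assumptions, so verify them against the repo before relying on this):

```python
# Assumed invocation of the laws chunker on a .docx file.
from rag.app import laws

def progress(prog=None, msg=""):
    # Mirrors the callback(0.1, "Start to parse.") calls in the diff above.
    print(f"[{prog}] {msg}")

with open("civil_code.docx", "rb") as f:
    chunks = laws.chunk("civil_code.docx", binary=f.read(),
                        lang="English", callback=progress)

for ck in chunks[:3]:
    print(ck.get("content_with_weight", "")[:80])  # assumed field name
```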
@@ -143,8 +216,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
         raise NotImplementedError(
             "file type not supported yet(doc, docx, pdf, txt supported)")
 
-    # is it English
-    eng = lang.lower() == "english"  # is_english(sections)
+
     # Remove 'Contents' part
     remove_contents_table(sections, eng)
 
@@ -514,10 +514,15 @@ def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
 
     return cks
 
-def docx_question_level(p):
+def docx_question_level(p, bull=-1):
     if p.style.name.startswith('Heading'):
         return int(p.style.name.split(' ')[-1]), re.sub(r"\u3000", " ", p.text).strip()
     else:
+        if bull < 0:
+            return 0, re.sub(r"\u3000", " ", p.text).strip()
+        for j, title in enumerate(BULLET_PATTERN[bull]):
+            if re.match(title, re.sub(r"\u3000", " ", p.text).strip()):
+                return j + 1, re.sub(r"\u3000", " ", p.text).strip()
         return 0, re.sub(r"\u3000", " ", p.text).strip()
 
 def concat_img(img1, img2):
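This final hunk lands in the shared `rag.nlp` helpers that `laws.py` imports `docx_question_level` from. The new `bull` parameter ties non-`Heading`-styled paragraphs to `bullets_category`: the caller picks the `BULLET_PATTERN` family that best fits the document, and a paragraph matching the family's j-th regex is reported as a level j+1 heading. A standalone sketch of that pattern-rank idea (the two regexes below are illustrative stand-ins for the real `BULLET_PATTERN` tables):

```python
import re

# Illustrative stand-in for one BULLET_PATTERN family: coarser headings come
# first, so a pattern's index doubles as its heading depth.
PATTERNS = [
    r"^第[零一二三四五六七八九十百0-9]+章",  # "Chapter N" -> level 1
    r"^第[零一二三四五六七八九十百0-9]+条",  # "Article N" -> level 2
]

def question_level(text, patterns=PATTERNS):
    text = re.sub(r"\u3000", " ", text).strip()  # normalize full-width spaces
    for j, pattern in enumerate(patterns):
        if re.match(pattern, text):
            return j + 1, text  # heading: depth = pattern index + 1
    return 0, text              # 0 = plain body text

print(question_level("第一章 总则"))    # (1, '第一章 总则')
print(question_level("第二条 ……"))      # (2, '第二条 ……')
print(question_level("正文内容。"))      # (0, '正文内容。')
```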