### What problem does this PR solve?

#917 #915

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
commit 7eee193956 (parent 9ffd7ae321)
@@ -392,7 +392,7 @@ class RAGFlowPdfParser:
                 b["text"].strip()[-1] in ",;:'\",、‘“;:-",
                 len(b["text"].strip()) > 1 and b["text"].strip(
                 )[-2] in ",;:'\",‘“、;:",
-                b["text"].strip()[0] in "。;?!?”)),,、:",
+                b_["text"].strip()[0] in "。;?!?”)),,、:",
             ]
             # features for not concating
             feats = [
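Context for the hunk above: judging from the variable names in the diff, `b` is the current text box and `b_` the box that follows it, and the list collects features that vote for concatenating the two. The old code asked whether `b` itself *starts* with closing punctuation, which is a property of the wrong box; the fix inspects `b_` instead. A minimal sketch of the corrected check, using hypothetical box dicts rather than the parser's real data structures:

```python
# Minimal sketch of the concat-feature idea (hypothetical data, not the
# actual RAGFlowPdfParser internals).
def concatting_feats(b, b_):
    """b is the current text box, b_ the next one; truthy features vote to merge."""
    return [
        # the current box ends with a connective punctuation mark
        b["text"].strip()[-1] in ",;:'\",、‘“;:-",
        # the next box starts with closing punctuation (the pre-fix code
        # mistakenly tested b here instead of b_)
        b_["text"].strip()[0] in "。;?!?”)),,、:",
    ]

print(concatting_feats({"text": "so the sentence continues,"},
                       {"text": ",and ends here."}))
# [True, True]
```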
@@ -19,6 +19,8 @@ from deepdoc.parser.pdf_parser import PlainParser
 from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
 from deepdoc.parser import PdfParser, ExcelParser, DocxParser
 from rag.settings import cron_logger
+from rag.utils import num_tokens_from_string
+

 class Docx(DocxParser):
     def __init__(self):
@@ -149,8 +151,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
                     if not l:
                         break
                     txt += l
-        sections = txt.split("\n")
-        sections = [(l, "") for l in sections if l]
+        sections = []
+        for sec in txt.split("\n"):
+            if num_tokens_from_string(sec) > 10 * parser_config.get("chunk_token_num", 128):
+                sections.append((sec[:int(len(sec)/2)], ""))
+                sections.append((sec[int(len(sec)/2):], ""))
+            else:
+                sections.append((sec, ""))
+
         callback(0.8, "Finish parsing.")

     elif re.search(r"\.doc$", filename, re.IGNORECASE):
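This rewritten block is the substantive fix for plain-text input: previously every newline-separated line became one section regardless of length, so a .txt file with very long lines (or no newlines at all) flowed into the merger as one enormous section. Now any section whose token count exceeds ten times `chunk_token_num` (default 128) is split in half first. A self-contained sketch of that logic, with a hypothetical whitespace counter standing in for `rag.utils.num_tokens_from_string`:

```python
# Standalone sketch of the new splitting logic. The whitespace-based token
# counter below is a stand-in for rag.utils.num_tokens_from_string.
def num_tokens_from_string(s: str) -> int:
    return len(s.split())

def split_sections(txt: str, chunk_token_num: int = 128):
    sections = []
    for sec in txt.split("\n"):
        if num_tokens_from_string(sec) > 10 * chunk_token_num:
            # halve oversized lines so no single section dwarfs the token budget
            sections.append((sec[:len(sec) // 2], ""))
            sections.append((sec[len(sec) // 2:], ""))
        else:
            sections.append((sec, ""))
    return sections

print(len(split_sections("word " * 2000)))  # 2: one huge line, split in half
print(len(split_sections("short line")))    # 1: left intact
```

The halves are then handed to `naive_merge`, which recombines sections up to the configured token budget, so a single halving is presumably enough to unblock the merger.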
@@ -163,7 +171,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,

     else:
         raise NotImplementedError(
-            "file type not supported yet(doc, docx, pdf, txt supported)")
+            "file type not supported yet(pdf, xlsx, doc, docx, txt supported)")

     st = timer()
     chunks = naive_merge(
@@ -24,7 +24,7 @@ class RagTokenizer:
     def loadDict_(self, fnm):
         print("[HUQIE]:Build trie", fnm, file=sys.stderr)
         try:
-            of = open(fnm, "r")
+            of = open(fnm, "r", encoding='utf-8')
             while True:
                 line = of.readline()
                 if not line:
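The `encoding='utf-8'` addition matters because the bare `open(fnm, "r")` decodes with the platform's locale encoding, so loading the tokenizer's UTF-8 dictionary could raise `UnicodeDecodeError` on machines whose default is cp936, cp1252, or similar (typically Windows). A small illustration, written against a hypothetical sample file rather than the real dictionary:

```python
import locale

# What the bare open(fnm, "r") would have used as the decoder.
print(locale.getpreferredencoding(False))  # e.g. "cp936" on Chinese-locale Windows

# Hypothetical sample with a non-ASCII entry.
with open("huqie_sample.txt", "w", encoding="utf-8") as f:
    f.write("人工智能 100 n\n")

# Explicit encoding decodes the same bytes correctly on every platform.
with open("huqie_sample.txt", "r", encoding="utf-8") as f:
    print(f.readline().strip())  # 人工智能 100 n
```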
@@ -136,3 +136,4 @@ BCEmbedding
 loguru==0.7.2
 umap-learn
 fasttext==0.9.2
+volcengine
@@ -124,3 +124,4 @@ ollama==0.1.8
 redis==5.0.4
 fasttext==0.9.2
 umap-learn
+volcengine