mirror of
https://git.mirrors.martin98.com/https://github.com/infiniflow/ragflow.git
synced 2025-08-14 19:35:52 +08:00
Add support for HTML file (#973)
### What problem does this PR solve?

Add support for HTML files.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
This commit is contained in:
parent
dded365b8d
commit
8dd45459be
@ -156,7 +156,7 @@ def filename_type(filename):
|
|||||||
return FileType.PDF.value
|
return FileType.PDF.value
|
||||||
|
|
||||||
if re.match(
|
if re.match(
|
||||||
r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt)$", filename):
|
r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html)$", filename):
|
||||||
return FileType.DOC.value
|
return FileType.DOC.value
|
||||||
|
|
||||||
if re.match(
|
if re.match(
|
||||||
|
@ -4,3 +4,4 @@ from .pdf_parser import RAGFlowPdfParser as PdfParser, PlainParser
|
|||||||
from .docx_parser import RAGFlowDocxParser as DocxParser
|
from .docx_parser import RAGFlowDocxParser as DocxParser
|
||||||
from .excel_parser import RAGFlowExcelParser as ExcelParser
|
from .excel_parser import RAGFlowExcelParser as ExcelParser
|
||||||
from .ppt_parser import RAGFlowPptParser as PptParser
|
from .ppt_parser import RAGFlowPptParser as PptParser
|
||||||
|
from .html_parser import RAGFlowHtmlParser as HtmlParser
|
||||||
|
27
deepdoc/parser/html_parser.py
Normal file
27
deepdoc/parser/html_parser.py
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
from rag.nlp import find_codec
|
||||||
|
import readability
|
||||||
|
import html_text
|
||||||
|
import chardet
|
||||||
|
|
||||||
|
def get_encoding(file):
    """Detect the character encoding of *file* with chardet.

    :param file: path of the file to inspect.
    :return: the encoding name reported by chardet; may be ``None``
             when chardet cannot identify one.
    """
    with open(file, "rb") as fp:
        detection = chardet.detect(fp.read())
    return detection["encoding"]
|
||||||
|
|
||||||
|
class RAGFlowHtmlParser:
    """Extract the readable text of an HTML document as a list of lines."""

    def __call__(self, fnm, binary=None):
        """Parse HTML from raw bytes or from a file on disk.

        :param fnm: path to the HTML file; only read when *binary* is falsy.
        :param binary: optional raw bytes of the HTML document.
        :return: list of text lines — the document title first, followed by
                 the extracted main content (may contain empty lines; callers
                 are expected to filter).
        """
        if binary:
            # find_codec guesses the charset from the raw bytes; undecodable
            # bytes are dropped rather than raising.
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            # errors="ignore" mirrors the binary branch above, so a stray
            # undecodable byte cannot crash only the file-path code path.
            # get_encoding may return None, in which case open() falls back
            # to the locale-preferred encoding.
            with open(fnm, "r", encoding=get_encoding(fnm), errors="ignore") as f:
                txt = f.read()

        # readability strips boilerplate and isolates the main article;
        # html_text flattens the remaining markup to plain text.
        html_doc = readability.Document(txt)
        title = html_doc.title()
        content = html_text.extract_text(html_doc.summary(html_partial=True))

        return f"{title}\n{content}".split("\n")
|
@ -19,7 +19,7 @@ from rag.nlp import bullets_category, is_english, tokenize, remove_contents_tabl
|
|||||||
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \
|
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \
|
||||||
tokenize_chunks, find_codec
|
tokenize_chunks, find_codec
|
||||||
from rag.nlp import rag_tokenizer
|
from rag.nlp import rag_tokenizer
|
||||||
from deepdoc.parser import PdfParser, DocxParser, PlainParser
|
from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
|
||||||
|
|
||||||
|
|
||||||
class Pdf(PdfParser):
|
class Pdf(PdfParser):
|
||||||
@ -105,6 +105,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|||||||
random_choices([t for t, _ in sections], k=200)))
|
random_choices([t for t, _ in sections], k=200)))
|
||||||
callback(0.8, "Finish parsing.")
|
callback(0.8, "Finish parsing.")
|
||||||
|
|
||||||
|
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
||||||
|
callback(0.1, "Start to parse.")
|
||||||
|
sections = HtmlParser()(filename, binary)
|
||||||
|
sections = [(l, "") for l in sections if l]
|
||||||
|
remove_contents_table(sections, eng=is_english(
|
||||||
|
random_choices([t for t, _ in sections], k=200)))
|
||||||
|
callback(0.8, "Finish parsing.")
|
||||||
|
|
||||||
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
||||||
callback(0.1, "Start to parse.")
|
callback(0.1, "Start to parse.")
|
||||||
binary = BytesIO(binary)
|
binary = BytesIO(binary)
|
||||||
|
@ -20,7 +20,7 @@ from api.db import ParserType
|
|||||||
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
|
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
|
||||||
make_colon_as_title, add_positions, tokenize_chunks, find_codec
|
make_colon_as_title, add_positions, tokenize_chunks, find_codec
|
||||||
from rag.nlp import rag_tokenizer
|
from rag.nlp import rag_tokenizer
|
||||||
from deepdoc.parser import PdfParser, DocxParser, PlainParser
|
from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
|
||||||
from rag.settings import cron_logger
|
from rag.settings import cron_logger
|
||||||
|
|
||||||
|
|
||||||
@ -125,6 +125,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|||||||
sections = [l for l in sections if l]
|
sections = [l for l in sections if l]
|
||||||
callback(0.8, "Finish parsing.")
|
callback(0.8, "Finish parsing.")
|
||||||
|
|
||||||
|
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
||||||
|
callback(0.1, "Start to parse.")
|
||||||
|
sections = HtmlParser()(filename, binary)
|
||||||
|
sections = [l for l in sections if l]
|
||||||
|
callback(0.8, "Finish parsing.")
|
||||||
|
|
||||||
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
||||||
callback(0.1, "Start to parse.")
|
callback(0.1, "Start to parse.")
|
||||||
binary = BytesIO(binary)
|
binary = BytesIO(binary)
|
||||||
|
@ -17,7 +17,7 @@ from timeit import default_timer as timer
|
|||||||
import re
|
import re
|
||||||
from deepdoc.parser.pdf_parser import PlainParser
|
from deepdoc.parser.pdf_parser import PlainParser
|
||||||
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
|
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
|
||||||
from deepdoc.parser import PdfParser, ExcelParser, DocxParser
|
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser
|
||||||
from rag.settings import cron_logger
|
from rag.settings import cron_logger
|
||||||
from rag.utils import num_tokens_from_string
|
from rag.utils import num_tokens_from_string
|
||||||
|
|
||||||
@ -161,6 +161,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|||||||
|
|
||||||
callback(0.8, "Finish parsing.")
|
callback(0.8, "Finish parsing.")
|
||||||
|
|
||||||
|
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
||||||
|
callback(0.1, "Start to parse.")
|
||||||
|
sections = HtmlParser()(filename, binary)
|
||||||
|
sections = [(l, "") for l in sections if l]
|
||||||
|
callback(0.8, "Finish parsing.")
|
||||||
|
|
||||||
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
||||||
callback(0.1, "Start to parse.")
|
callback(0.1, "Start to parse.")
|
||||||
binary = BytesIO(binary)
|
binary = BytesIO(binary)
|
||||||
|
@ -15,7 +15,7 @@ from io import BytesIO
|
|||||||
import re
|
import re
|
||||||
from rag.app import laws
|
from rag.app import laws
|
||||||
from rag.nlp import rag_tokenizer, tokenize, find_codec
|
from rag.nlp import rag_tokenizer, tokenize, find_codec
|
||||||
from deepdoc.parser import PdfParser, ExcelParser, PlainParser
|
from deepdoc.parser import PdfParser, ExcelParser, PlainParser, HtmlParser
|
||||||
|
|
||||||
|
|
||||||
class Pdf(PdfParser):
|
class Pdf(PdfParser):
|
||||||
@ -97,6 +97,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|||||||
sections = [s for s in sections if s]
|
sections = [s for s in sections if s]
|
||||||
callback(0.8, "Finish parsing.")
|
callback(0.8, "Finish parsing.")
|
||||||
|
|
||||||
|
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
||||||
|
callback(0.1, "Start to parse.")
|
||||||
|
sections = HtmlParser()(filename, binary)
|
||||||
|
sections = [s for s in sections if s]
|
||||||
|
callback(0.8, "Finish parsing.")
|
||||||
|
|
||||||
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
||||||
callback(0.1, "Start to parse.")
|
callback(0.1, "Start to parse.")
|
||||||
binary = BytesIO(binary)
|
binary = BytesIO(binary)
|
||||||
|
@ -137,3 +137,5 @@ loguru==0.7.2
|
|||||||
umap-learn
|
umap-learn
|
||||||
fasttext==0.9.2
|
fasttext==0.9.2
|
||||||
volcengine
|
volcengine
|
||||||
|
readability-lxml==0.8.1
|
||||||
|
html_text==0.6.2
|
@ -137,4 +137,6 @@ loguru==0.7.2
|
|||||||
umap-learn
|
umap-learn
|
||||||
fasttext==0.9.2
|
fasttext==0.9.2
|
||||||
volcengine
|
volcengine
|
||||||
opencv-python-headless==4.9.0.80
|
opencv-python-headless==4.9.0.80
|
||||||
|
readability-lxml==0.8.1
|
||||||
|
html_text==0.6.2
|
@ -125,3 +125,5 @@ redis==5.0.4
|
|||||||
fasttext==0.9.2
|
fasttext==0.9.2
|
||||||
umap-learn
|
umap-learn
|
||||||
volcengine
|
volcengine
|
||||||
|
readability-lxml==0.8.1
|
||||||
|
html_text==0.6.2
|
Loading…
x
Reference in New Issue
Block a user