fix(batch_create_segment_to_index_task): count max_position in memory. (#12929)

Author: -LAN-
Date: 2025-01-22 13:39:02 +08:00 (committed by GitHub)
parent c62b7cc679
commit f91f5c7401
2 changed files with 78 additions and 62 deletions

api/models/dataset.py

@@ -13,6 +13,7 @@ from typing import Any, cast

 from sqlalchemy import func
 from sqlalchemy.dialects.postgresql import JSONB
+from sqlalchemy.orm import Mapped

 from configs import dify_config
 from core.rag.retrieval.retrieval_methods import RetrievalMethod

@@ -515,7 +516,7 @@ class DocumentSegment(db.Model):  # type: ignore[name-defined]
     tenant_id = db.Column(StringUUID, nullable=False)
     dataset_id = db.Column(StringUUID, nullable=False)
     document_id = db.Column(StringUUID, nullable=False)
-    position = db.Column(db.Integer, nullable=False)
+    position: Mapped[int]
     content = db.Column(db.Text, nullable=False)
     answer = db.Column(db.Text, nullable=True)
     word_count = db.Column(db.Integer, nullable=False)

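For context on the model change: under SQLAlchemy 2.0 typed declarative mapping (which recent Flask-SQLAlchemy versions wire db.Model into), an annotation-only attribute such as position: Mapped[int] is treated as an empty mapped_column(), and the non-Optional int annotation still produces a NOT NULL integer column, so the dropped nullable=False constraint is preserved. A minimal standalone sketch, assuming a plain SQLAlchemy declarative base and an in-memory SQLite engine (Segment is a hypothetical stand-in for DocumentSegment, not Dify code):

from sqlalchemy import create_engine
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class Segment(Base):  # hypothetical stand-in for DocumentSegment
    __tablename__ = "segments"

    id: Mapped[int] = mapped_column(primary_key=True)
    # Annotation only: declarative applies an empty mapped_column(), and the
    # non-Optional int type maps to "position INTEGER NOT NULL".
    position: Mapped[int]


# Echoing the emitted DDL confirms the NOT NULL constraint survives.
Base.metadata.create_all(create_engine("sqlite://", echo=True))
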
api/tasks/batch_create_segment_to_index_task.py

@@ -5,7 +5,8 @@ import uuid

 import click
 from celery import shared_task  # type: ignore
-from sqlalchemy import func
+from sqlalchemy import func, select
+from sqlalchemy.orm import Session

 from core.model_manager import ModelManager
 from core.model_runtime.entities.model_entities import ModelType

@@ -18,7 +19,12 @@ from services.vector_service import VectorService

 @shared_task(queue="dataset")
 def batch_create_segment_to_index_task(
-    job_id: str, content: list, dataset_id: str, document_id: str, tenant_id: str, user_id: str
+    job_id: str,
+    content: list,
+    dataset_id: str,
+    document_id: str,
+    tenant_id: str,
+    user_id: str,
 ):
     """
     Async batch create segment to index
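The reformatted signature does not change the call contract; enqueueing still follows the standard Celery pattern. A hypothetical invocation sketch (the dataset, document, tenant, and user identifiers are placeholders, not taken from this commit; each item also needs an "answer" key when the document's doc_form is "qa_model"):

batch_create_segment_to_index_task.delay(
    job_id=str(uuid.uuid4()),
    content=[
        {"content": "First segment text."},
        {"content": "Second segment text."},
    ],
    dataset_id="<dataset uuid>",
    document_id="<document uuid>",
    tenant_id="<tenant uuid>",
    user_id="<user uuid>",
)

The caller can then poll the segment_batch_import_{job_id} key in Redis, which the task sets to "completed" on success, as the next hunk shows.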
@@ -37,71 +43,80 @@ def batch_create_segment_to_index_task(
     indexing_cache_key = "segment_batch_import_{}".format(job_id)
     try:
-        dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
-        if not dataset:
-            raise ValueError("Dataset not exist.")
+        with Session(db.engine) as session:
+            dataset = session.get(Dataset, dataset_id)
+            if not dataset:
+                raise ValueError("Dataset not exist.")
 
-        dataset_document = db.session.query(Document).filter(Document.id == document_id).first()
-        if not dataset_document:
-            raise ValueError("Document not exist.")
-        if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != "completed":
-            raise ValueError("Document is not available.")
-        document_segments = []
-        embedding_model = None
-        if dataset.indexing_technique == "high_quality":
-            model_manager = ModelManager()
-            embedding_model = model_manager.get_model_instance(
-                tenant_id=dataset.tenant_id,
-                provider=dataset.embedding_model_provider,
-                model_type=ModelType.TEXT_EMBEDDING,
-                model=dataset.embedding_model,
-            )
-        word_count_change = 0
-        segments_to_insert: list[str] = []  # Explicitly type hint the list as List[str]
-        for segment in content:
-            content_str = segment["content"]
-            doc_id = str(uuid.uuid4())
-            segment_hash = helper.generate_text_hash(content_str)
-            # calc embedding use tokens
-            tokens = embedding_model.get_text_embedding_num_tokens(texts=[content_str]) if embedding_model else 0
-            max_position = (
-                db.session.query(func.max(DocumentSegment.position))
-                .filter(DocumentSegment.document_id == dataset_document.id)
-                .scalar()
-            )
-            segment_document = DocumentSegment(
-                tenant_id=tenant_id,
-                dataset_id=dataset_id,
-                document_id=document_id,
-                index_node_id=doc_id,
-                index_node_hash=segment_hash,
-                position=max_position + 1 if max_position else 1,
-                content=content_str,
-                word_count=len(content_str),
-                tokens=tokens,
-                created_by=user_id,
-                indexing_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
-                status="completed",
-                completed_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
-            )
-            if dataset_document.doc_form == "qa_model":
-                segment_document.answer = segment["answer"]
-                segment_document.word_count += len(segment["answer"])
-            word_count_change += segment_document.word_count
-            db.session.add(segment_document)
-            document_segments.append(segment_document)
-            segments_to_insert.append(str(segment))  # Cast to string if needed
-        # update document word count
-        dataset_document.word_count += word_count_change
-        db.session.add(dataset_document)
-        # add index to db
-        VectorService.create_segments_vector(None, document_segments, dataset, dataset_document.doc_form)
-        db.session.commit()
+            dataset_document = session.get(Document, document_id)
+            if not dataset_document:
+                raise ValueError("Document not exist.")
+            if (
+                not dataset_document.enabled
+                or dataset_document.archived
+                or dataset_document.indexing_status != "completed"
+            ):
+                raise ValueError("Document is not available.")
+            document_segments = []
+            embedding_model = None
+            if dataset.indexing_technique == "high_quality":
+                model_manager = ModelManager()
+                embedding_model = model_manager.get_model_instance(
+                    tenant_id=dataset.tenant_id,
+                    provider=dataset.embedding_model_provider,
+                    model_type=ModelType.TEXT_EMBEDDING,
+                    model=dataset.embedding_model,
+                )
+            word_count_change = 0
+            segments_to_insert: list[str] = []
+            max_position_stmt = select(func.max(DocumentSegment.position)).where(
+                DocumentSegment.document_id == dataset_document.id
+            )
+            max_position = session.scalar(max_position_stmt) or 1
+            for segment in content:
+                content_str = segment["content"]
+                doc_id = str(uuid.uuid4())
+                segment_hash = helper.generate_text_hash(content_str)
+                # calc embedding use tokens
+                tokens = embedding_model.get_text_embedding_num_tokens(texts=[content_str]) if embedding_model else 0
+                segment_document = DocumentSegment(
+                    tenant_id=tenant_id,
+                    dataset_id=dataset_id,
+                    document_id=document_id,
+                    index_node_id=doc_id,
+                    index_node_hash=segment_hash,
+                    position=max_position,
+                    content=content_str,
+                    word_count=len(content_str),
+                    tokens=tokens,
+                    created_by=user_id,
+                    indexing_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+                    status="completed",
+                    completed_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
+                )
+                max_position += 1
+                if dataset_document.doc_form == "qa_model":
+                    segment_document.answer = segment["answer"]
+                    segment_document.word_count += len(segment["answer"])
+                word_count_change += segment_document.word_count
+                session.add(segment_document)
+                document_segments.append(segment_document)
+                segments_to_insert.append(str(segment))  # Cast to string if needed
+            # update document word count
+            dataset_document.word_count += word_count_change
+            session.add(dataset_document)
+            # add index to db
+            VectorService.create_segments_vector(None, document_segments, dataset, dataset_document.doc_form)
+            session.commit()
         redis_client.setex(indexing_cache_key, 600, "completed")
         end_at = time.perf_counter()
         logging.info(
-            click.style("Segment batch created job: {} latency: {}".format(job_id, end_at - start_at), fg="green")
+            click.style(
+                "Segment batch created job: {} latency: {}".format(job_id, end_at - start_at),
+                fg="green",
+            )
         )
     except Exception as e:
         logging.exception("Segments batch created index failed")
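
Isolated from the diff noise, the fix is this: the old loop body re-ran the MAX(position) aggregate for every incoming segment, so importing N segments cost N extra queries, while the new code reads the maximum once before the loop and then counts upward in memory. A minimal, self-contained sketch of that pattern, assuming a hypothetical Segment model and an in-memory SQLite engine (the real task applies this to DocumentSegment inside the worker's session):

from sqlalchemy import create_engine, func, select
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class Segment(Base):  # hypothetical stand-in for DocumentSegment
    __tablename__ = "segments"

    id: Mapped[int] = mapped_column(primary_key=True)
    document_id: Mapped[str]
    position: Mapped[int]
    content: Mapped[str]


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    # One aggregate round trip for the whole batch, mirroring the commit's
    # select(func.max(...)) + session.scalar(...) with its "or 1" default
    # for a document that has no segments yet.
    max_position = session.scalar(
        select(func.max(Segment.position)).where(Segment.document_id == "doc-1")
    ) or 1
    for text in ["first", "second", "third"]:
        # Positions come from the local counter; no per-row query.
        session.add(Segment(document_id="doc-1", position=max_position, content=text))
        max_position += 1
    session.commit()

Everything else in the hunk is mechanical: the body moves under an explicit with Session(db.engine) block, the db.session.query(...).filter(...).first() lookups become session.get(Model, pk), and all adds and the commit go through that session.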