Mirror of https://git.mirrors.martin98.com/https://github.com/infiniflow/ragflow.git, synced 2025-06-04 11:24:00 +08:00
Fix: task limiter issue. (#7873)
### What problem does this PR solve?

#7869

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
commit 959793e83c
parent aaefc3f44c
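Background on the change: `do_handle_task` previously rebound the module-level `task_limiter` to a two-token `trio.CapacityLimiter` for graphrag tasks and rebound it back afterwards. This commit replaces that pattern with a dedicated, module-level `kg_limiter` acquired via `async with`. As a minimal sketch of `trio.CapacityLimiter` semantics (the worker names below are illustrative, not from the codebase):

```python
import trio

# At most `total_tokens` holders at once; `async with` acquires a token on
# entry and releases it on exit, even if the body raises.
limiter = trio.CapacityLimiter(2)

async def worker(n: int) -> None:
    async with limiter:  # blocks while two workers already hold tokens
        print(f"worker {n} running, borrowed={limiter.borrowed_tokens}")
        await trio.sleep(0.1)

async def main() -> None:
    async with trio.open_nursery() as nursery:
        for n in range(5):
            nursery.start_soon(worker, n)

trio.run(main)
```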
```diff
@@ -103,6 +103,7 @@ MAX_CONCURRENT_MINIO = int(os.environ.get('MAX_CONCURRENT_MINIO', '10'))
 task_limiter = trio.CapacityLimiter(MAX_CONCURRENT_TASKS)
 chunk_limiter = trio.CapacityLimiter(MAX_CONCURRENT_CHUNK_BUILDERS)
 minio_limiter = trio.CapacityLimiter(MAX_CONCURRENT_MINIO)
+kg_limiter = trio.CapacityLimiter(2)
 WORKER_HEARTBEAT_TIMEOUT = int(os.environ.get('WORKER_HEARTBEAT_TIMEOUT', '120'))
 stop_event = threading.Event()
```
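The new `kg_limiter` caps concurrent knowledge-graph builds at two per executor process, alongside the existing task/chunk/minio limiters. Unlike its neighbours it is hard-coded rather than environment-driven; if it ever needed to follow the same convention, a sketch would look like this (`MAX_CONCURRENT_KG` is hypothetical, not part of this commit):

```python
import os
import trio

# Hypothetical env override in the style of the neighbouring limiters;
# MAX_CONCURRENT_KG does not exist in the diff above.
MAX_CONCURRENT_KG = int(os.environ.get('MAX_CONCURRENT_KG', '2'))
kg_limiter = trio.CapacityLimiter(MAX_CONCURRENT_KG)
```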
```diff
@@ -539,8 +540,6 @@ async def do_handle_task(task):
         chunks, token_count = await run_raptor(task, chat_model, embedding_model, vector_size, progress_callback)
     # Either using graphrag or Standard chunking methods
     elif task.get("task_type", "") == "graphrag":
-        global task_limiter
-        task_limiter = trio.CapacityLimiter(2)
         if not task_parser_config.get("graphrag", {}).get("use_graphrag", False):
             return
         graphrag_conf = task["kb_parser_config"].get("graphrag", {})
```
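The deleted `global task_limiter` / `task_limiter = trio.CapacityLimiter(2)` pair is the core of the bug: rebinding replaces the *name*, not the limiter object, so coroutines already holding or awaiting the old limiter are unaffected, and concurrent graphrag tasks race on the rebind. A minimal sketch of the pitfall (illustrative names, not the executor code):

```python
import trio

limiter = trio.CapacityLimiter(1)

async def rebinder() -> None:
    global limiter
    # Replaces the name only: coroutines blocked inside `async with limiter`
    # still reference the OLD CapacityLimiter object.
    limiter = trio.CapacityLimiter(2)

async def holder(tag: str) -> None:
    async with limiter:  # binds to whatever object `limiter` names right now
        await trio.sleep(0.2)

async def main() -> None:
    async with trio.open_nursery() as nursery:
        nursery.start_soon(holder, "a")
        nursery.start_soon(holder, "b")  # may still wait on the 1-token limiter
        nursery.start_soon(rebinder)

trio.run(main)
```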
```diff
@@ -548,9 +547,9 @@ async def do_handle_task(task):
         chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=task_llm_id, lang=task_language)
         with_resolution = graphrag_conf.get("resolution", False)
         with_community = graphrag_conf.get("community", False)
-        await run_graphrag(task, task_language, with_resolution, with_community, chat_model, embedding_model, progress_callback)
+        async with kg_limiter:
+            await run_graphrag(task, task_language, with_resolution, with_community, chat_model, embedding_model, progress_callback)
         progress_callback(prog=1.0, msg="Knowledge Graph done ({:.2f}s)".format(timer() - start_ts))
-        task_limiter = trio.CapacityLimiter(MAX_CONCURRENT_TASKS)
         return
     else:
         # Standard chunking methods
```
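Note also that the old code only restored `task_limiter` after `run_graphrag` completed, so an exception left the global stuck at two tokens. `async with kg_limiter:` releases the token on any exit path. A sketch of that guarantee (the stub is a hypothetical stand-in for `run_graphrag`):

```python
import trio

kg_limiter = trio.CapacityLimiter(2)

async def run_graphrag_stub() -> None:  # hypothetical stand-in
    raise RuntimeError("boom")

async def handle() -> None:
    try:
        async with kg_limiter:      # token acquired here...
            await run_graphrag_stub()
    except RuntimeError:
        pass                        # ...and already released by now

async def main() -> None:
    await handle()
    assert kg_limiter.borrowed_tokens == 0  # nothing leaked

trio.run(main)
```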