Mirror of https://git.mirrors.martin98.com/https://github.com/infiniflow/ragflow.git (synced 2025-08-14 10:06:11 +08:00)
reduce rerank batch size (#2801)
### What problem does this PR solve?

### Type of change

- [x] Performance Improvement
commit 5e7c1fb23a
parent bae30e5cc4
@@ -132,7 +132,7 @@ def init_llm_factory():
     TenantService.filter_update([1 == 1], {
         "parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email"})
     ## insert openai two embedding models to the current openai user.
-    print("Start to insert 2 OpenAI embedding models...")
+    # print("Start to insert 2 OpenAI embedding models...")
     tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
     for tid in tenant_ids:
         for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
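A note on this hunk for readers skimming the diff: `[1 == 1]` evaluates to `[True]`, a filter that matches every row, so `filter_update` rewrites `parser_ids` for all tenants before the per-tenant OpenAI loop runs; the only change is that the progress `print` is commented out. A minimal sketch of that update-all-then-iterate shape, with hypothetical in-memory stand-ins for the service calls (`TENANTS` and `OPENAI_ROWS` are illustrative, not RAGFlow names):

```python
# Hypothetical in-memory stand-ins for TenantService / TenantLLMService rows.
TENANTS = [{"id": "t1", "parser_ids": ""}, {"id": "t2", "parser_ids": ""}]
OPENAI_ROWS = [{"tenant_id": "t1", "llm_name": "gpt-4"},
               {"tenant_id": "t1", "llm_name": "gpt-3.5"},
               {"tenant_id": "t2", "llm_name": "gpt-4"}]

# Unconditional update: a [1 == 1] filter is truthy for every row.
for t in TENANTS:
    t["parser_ids"] = "naive:General,qa:Q&A"  # abbreviated value

# Deduplicate tenants that already have OpenAI models, then walk their rows.
tenant_ids = {row["tenant_id"] for row in OPENAI_ROWS}
for tid in tenant_ids:
    for row in (r for r in OPENAI_ROWS if r["tenant_id"] == tid):
        print(tid, row["llm_name"])  # embedding-model insertion would go here
```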
@@ -142,7 +142,7 @@ class YoudaoRerank(DefaultRerank):
         token_count = 0
         for _, t in pairs:
             token_count += num_tokens_from_string(t)
-        batch_size = 32
+        batch_size = 8
         res = []
         for i in range(0, len(pairs), batch_size):
             scores = self._model.compute_score(pairs[i:i + batch_size], max_length=self._model.max_length)
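The substantive change of the PR is in this hunk: dropping `batch_size` from 32 to 8 means each `compute_score` call sees at most 8 (query, text) pairs, lowering peak memory on the rerank model at the cost of more calls. A minimal sketch of the batching pattern, assuming a hypothetical `score_fn` standing in for `self._model.compute_score`:

```python
from typing import Callable, List, Sequence, Tuple

def rerank_in_batches(
    pairs: Sequence[Tuple[str, str]],
    score_fn: Callable[[Sequence[Tuple[str, str]]], List[float]],
    batch_size: int = 8,  # the value this PR settles on
) -> List[float]:
    """Score (query, text) pairs a batch at a time to bound peak memory."""
    res: List[float] = []
    for i in range(0, len(pairs), batch_size):
        # Each slice is at most batch_size pairs; smaller batches mean
        # smaller forward passes through the rerank model.
        res.extend(score_fn(pairs[i:i + batch_size]))
    return res

# Usage with a toy scorer: longer texts get higher scores.
if __name__ == "__main__":
    toy_pairs = [("q", "t" * n) for n in range(1, 20)]
    scores = rerank_in_batches(toy_pairs, lambda b: [float(len(t)) for _, t in b])
    assert len(scores) == len(toy_pairs)
```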