set ollama keep_alive (#985)
### What problem does this PR solve?

#980

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
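Context, for readers unfamiliar with the parameter: by default, Ollama unloads a model from memory roughly five minutes after the last request. Passing a negative `keep_alive` value such as `-1` asks the server to keep the model loaded indefinitely, so back-to-back chat calls do not pay the model-reload latency each time.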
parent c62834f870
commit f077b57f8b
```diff
@@ -532,8 +532,8 @@ def init_llm_factory():
     {
         "fid": factory_infos[12]["name"],
         "llm_name": "BAAI/bge-reranker-v2-m3",
-        "tags": "LLM,CHAT,",
-        "max_tokens": 16385,
+        "tags": "RE-RANK,2k",
+        "max_tokens": 2048,
         "model_type": LLMType.RERANK.value
     },
 ]
```
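Besides the `keep_alive` change, this hunk corrects the registry entry for `BAAI/bge-reranker-v2-m3`: it was tagged as a chat model with a 16,385-token limit and is fixed to re-rank tags (`RE-RANK,2k`) with a 2,048-token window.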
```diff
@@ -303,7 +303,8 @@ class OllamaChat(Base):
         response = self.client.chat(
             model=self.model_name,
             messages=history,
-            options=options
+            options=options,
+            keep_alive=-1
         )
         ans = response["message"]["content"].strip()
         return ans, response["eval_count"] + response.get("prompt_eval_count", 0)
```
```diff
@@ -325,7 +326,8 @@ class OllamaChat(Base):
             model=self.model_name,
             messages=history,
             stream=True,
-            options=options
+            options=options,
+            keep_alive=-1
         )
         for resp in response:
             if resp["done"]:
```
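For illustration, here is a minimal sketch of the same call pattern against a standalone Ollama server, using the `ollama` Python client; the host URL and model name below are placeholders, not values from this commit:

```python
# Minimal sketch of the keep_alive usage introduced above, assuming the
# standalone `ollama` Python client; host and model are placeholders.
import ollama

client = ollama.Client(host="http://localhost:11434")  # assumed local Ollama server

response = client.chat(
    model="llama3",  # hypothetical model name
    messages=[{"role": "user", "content": "Say hello."}],
    options={"temperature": 0.7},
    keep_alive=-1,   # keep the model resident in memory between requests
)
print(response["message"]["content"].strip())
```

With `keep_alive=-1` the model is not unloaded after the request completes, so subsequent chat calls skip the model-load step. The streaming variant is identical except for `stream=True`, which yields incremental responses as in the second hunk.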