Fix: the error of Ollama embeddings interface returning "500 Internal Server Error" (#6350)
### What problem does this PR solve?

Fix the error where the Ollama embeddings interface returns a "500 Internal Server Error" when using models such as xiaobu-embedding-v2 for embedding.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
This commit is contained in:
parent f537b6ca00
commit 85480f6292
@@ -260,14 +260,16 @@ class OllamaEmbed(Base):
         tks_num = 0
         for txt in texts:
             res = self.client.embeddings(prompt=txt,
-                                          model=self.model_name)
+                                          model=self.model_name,
+                                          options={"use_mmap": True})
             arr.append(res["embedding"])
             tks_num += 128
         return np.array(arr), tks_num
 
     def encode_queries(self, text):
         res = self.client.embeddings(prompt=text,
-                                      model=self.model_name)
+                                      model=self.model_name,
+                                      options={"use_mmap": True})
         return np.array(res["embedding"]), 128
@@ -834,4 +836,4 @@ class GPUStackEmbed(OpenAIEmbed):
 
         print(key,base_url)
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
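For context, here is a minimal sketch of the patched call path, using the `ollama` Python client directly rather than RAGFlow's `OllamaEmbed` wrapper. The host URL is an assumed local endpoint, and the model name `xiaobu-embedding-v2` is taken from the PR description as a placeholder; substitute whichever embedding model was returning the 500 error. The flat 128-token count mirrors the accounting in the original code.

```python
# Minimal sketch (not RAGFlow code): calling Ollama embeddings with the
# use_mmap option that this commit adds to OllamaEmbed.
import numpy as np
from ollama import Client

client = Client(host="http://localhost:11434")  # assumed local Ollama endpoint

def embed_texts(texts, model_name="xiaobu-embedding-v2"):
    arr, tks_num = [], 0
    for txt in texts:
        # options={"use_mmap": True} is the fix: without it, some embedding
        # models answered with "500 Internal Server Error".
        res = client.embeddings(prompt=txt,
                                model=model_name,
                                options={"use_mmap": True})
        arr.append(res["embedding"])
        tks_num += 128  # same flat per-text token count as OllamaEmbed
    return np.array(arr), tks_num
```

The same `options` dict is forwarded in `encode_queries`, so single-query embedding takes the identical code path.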