Mirror of https://git.mirrors.martin98.com/https://github.com/infiniflow/ragflow.git (synced 2025-08-12 23:19:03 +08:00)
solve knowledgegraph issue when calling gemini model (#2738)

### What problem does this PR solve?

#2720

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
Commit 16472eb3ea (parent d92acdcf1d)
All hunks below touch RAGFlow's chat-model wrappers (the `QWenChat`, `BedrockChat`, `GeminiChat`, `BaiduYiyanChat`, and `GoogleChat` classes in `rag/llm/chat_model.py`).

```diff
@@ -252,7 +252,8 @@ class QWenChat(Base):
                             [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                         yield ans
                     else:
-                        yield ans + "\n**ERROR**: " + resp.message if str(resp.message).find("Access")<0 else "Out of credit. Please set the API key in **settings > Model providers.**"
+                        yield ans + "\n**ERROR**: " + resp.message if str(resp.message).find(
+                            "Access") < 0 else "Out of credit. Please set the API key in **settings > Model providers.**"
             except Exception as e:
                 yield ans + "\n**ERROR**: " + str(e)
 
```
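This hunk only re-wraps an over-long line; behavior is unchanged. (The Chinese fallback string reads "······\nThe answer was truncated for length, continue?".) One subtlety the wrapped line preserves: `+` binds tighter than Python's conditional expression, so the `yield` produces either the accumulated answer plus the raw error message, or only the out-of-credit notice. A minimal sketch, with `ans` and `resp_message` as stand-in values:

```python
# `x + y if cond else z` parses as `(x + y) if cond else z`, so splitting the
# condition across two lines inside the parentheses changes nothing semantically.
ans = "partial answer"
resp_message = "Access denied: insufficient quota"  # stand-in error message

out = ans + "\n**ERROR**: " + resp_message if str(resp_message).find(
    "Access") < 0 else "Out of credit. Please set the API key in **settings > Model providers.**"

print(out)  # -> the out-of-credit notice, since "Access" occurs in the message
```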
```diff
@@ -623,7 +624,6 @@ class BedrockChat(Base):
             if not isinstance(item["content"], list) and not isinstance(item["content"], tuple):
                 item["content"] = [{"text": item["content"]}]
-
 
         try:
             # Send the message to the model, using a basic inference configuration.
             response = self.client.converse(
```
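For context, `BedrockChat` normalizes bare string contents into the content-block lists that Bedrock's Converse API requires; the hunk itself only drops a stray blank line. A minimal sketch of the request shape, assuming boto3 credentials are configured; the region, model ID, and prompt are placeholders, not values from this commit:

```python
import boto3

# Hypothetical client and history for illustration.
client = boto3.client("bedrock-runtime", region_name="us-east-1")
history = [{"role": "user", "content": "Why is the sky blue?"}]

for item in history:
    # Converse rejects bare strings: each message's content must be a list of
    # content blocks such as [{"text": "..."}], which is what the code enforces.
    if not isinstance(item["content"], (list, tuple)):
        item["content"] = [{"text": item["content"]}]

response = client.converse(
    modelId="anthropic.claude-3-sonnet-20240229-v1:0",  # placeholder model ID
    messages=history,
    inferenceConfig={"maxTokens": 512, "temperature": 0.5},
)
print(response["output"]["message"]["content"][0]["text"])
```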
```diff
@@ -702,7 +702,6 @@ class GeminiChat(Base):
         self.model = GenerativeModel(model_name=self.model_name)
         self.model._client = _client
-
 
     def chat(self, system, history, gen_conf):
         from google.generativeai.types import content_types
 
```
```diff
@@ -717,6 +716,8 @@ class GeminiChat(Base):
         for item in history:
             if 'role' in item and item['role'] == 'assistant':
                 item['role'] = 'model'
+            if 'role' in item and item['role'] == 'system':
+                item['role'] = 'user'
             if 'content' in item:
                 item['parts'] = item.pop('content')
 
```
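This is the substantive fix behind the PR title: Gemini's chat API accepts only the roles `user` and `model`, and expects message text under `parts` rather than `content`, so the system-role turns that the knowledge-graph prompts place into `history` previously made the request fail. A standalone sketch of the mapping, using a hypothetical history:

```python
# Hypothetical OpenAI-style history as the knowledge-graph pipeline builds it.
history = [
    {"role": "system", "content": "You are a graph-extraction assistant."},
    {"role": "user", "content": "Extract entities from the passage below: ..."},
    {"role": "assistant", "content": '{"entities": []}'},
]

for item in history:
    # Gemini only knows the roles "user" and "model" ...
    if item.get("role") == "assistant":
        item["role"] = "model"
    # ... so system turns are downgraded to user turns instead of erroring out.
    if item.get("role") == "system":
        item["role"] = "user"
    # Gemini names the text field "parts", not "content".
    if "content" in item:
        item["parts"] = item.pop("content")

# `history` is now in the shape GenerativeModel.generate_content() accepts.
print(history)
```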
```diff
@@ -1151,7 +1152,8 @@ class BaiduYiyanChat(Base):
         if system:
             self.system = system
         gen_conf["penalty_score"] = (
-            (gen_conf.get("presence_penalty", 0) + gen_conf.get("frequency_penalty", 0)) / 2
+            (gen_conf.get("presence_penalty", 0) + gen_conf.get("frequency_penalty",
+                                                                0)) / 2
         ) + 1
         if "max_tokens" in gen_conf:
             gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
```
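For context, `BaiduYiyanChat` folds OpenAI-style `presence_penalty` and `frequency_penalty` into ERNIE's single `penalty_score` parameter (a 1.0-2.0 range in Baidu's API) by averaging the two and adding 1; the hunk itself only re-wraps the expression. A sketch of the mapping:

```python
def to_penalty_score(gen_conf: dict) -> float:
    """Average the two OpenAI-style penalties and shift into ERNIE's range."""
    return (gen_conf.get("presence_penalty", 0)
            + gen_conf.get("frequency_penalty", 0)) / 2 + 1

print(to_penalty_score({}))                          # 1.0 -- no penalty requested
print(to_penalty_score({"presence_penalty": 0.6,
                        "frequency_penalty": 0.4}))  # 1.5
```

The same expression is re-wrapped identically in `chat` and in the streaming variant below.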
```diff
@@ -1174,7 +1176,8 @@ class BaiduYiyanChat(Base):
         if system:
             self.system = system
         gen_conf["penalty_score"] = (
-            (gen_conf.get("presence_penalty", 0) + gen_conf.get("frequency_penalty", 0)) / 2
+            (gen_conf.get("presence_penalty", 0) + gen_conf.get("frequency_penalty",
+                                                                0)) / 2
         ) + 1
         if "max_tokens" in gen_conf:
             gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
```
```diff
@@ -1415,4 +1418,3 @@ class GoogleChat(Base):
             yield ans + "\n**ERROR**: " + str(e)
-
 
         yield response._chunks[-1].usage_metadata.total_token_count
```
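Like the other streaming wrappers, `GoogleChat.chat_streamly` yields the accumulated answer as `str` chunks and finishes by yielding the total token count as an `int`, read here from the last chunk's `usage_metadata`. A sketch of consuming such a generator; `fake_chat_streamly` is a stand-in for illustration:

```python
def fake_chat_streamly():
    """Stand-in for a RAGFlow chat_streamly generator: str chunks, then an int."""
    yield "The sky is"
    yield "The sky is blue because of Rayleigh scattering."
    yield 42  # total token count, as in the hunk above

answer, total_tokens = "", 0
for delta in fake_chat_streamly():
    if isinstance(delta, int):   # final item: token usage, not answer text
        total_tokens = delta
        break
    answer = delta               # each str yield is the accumulated answer so far

print(f"{answer}\n[used {total_tokens} tokens]")
```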