Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: Chenhe Gu <guchenhe@gmail.com>

parent 26fef2d481
commit 99e80a8ed0
@@ -8,9 +8,9 @@ model_properties:
 parameter_rules:
   - name: temperature
     use_template: temperature
-  - name: topP
+  - name: top_p
     use_template: top_p
-  - name: topK
+  - name: top_k
     label:
       zh_Hans: 取样数量
       en_US: Top K
@@ -8,9 +8,9 @@ model_properties:
 parameter_rules:
   - name: temperature
     use_template: temperature
-  - name: topP
+  - name: top_p
     use_template: top_p
-  - name: topK
+  - name: top_k
     label:
       zh_Hans: 取样数量
       en_US: Top K
@@ -8,9 +8,9 @@ model_properties:
 parameter_rules:
   - name: temperature
     use_template: temperature
-  - name: topP
+  - name: top_p
     use_template: top_p
-  - name: topK
+  - name: top_k
     label:
       zh_Hans: 取样数量
       en_US: Top K
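The three hunks above apply the same rename in three model configuration files: the rule name moves from camelCase (topP, topK) to the snake_case form (top_p, top_k) used by the use_template keys and by the provider payload. A minimal sketch, not dify's actual code, of why the name matters: if parameter values are collected under the rule names and merged into the request body, only snake_case keys line up with the parameters Anthropic-on-Bedrock accepts.

# Hypothetical illustration: model_parameters is keyed by the rule names
# from the YAML above and merged straight into the provider payload, so
# the keys must already be the snake_case names the model expects.
model_parameters = {"temperature": 0.7, "top_p": 0.9, "top_k": 250}

payload = {
    "prompt": "\n\nHuman: Hello\n\nAssistant:",
    "max_tokens_to_sample": 256,
    **model_parameters,  # "topP"/"topK" here would not map to valid top_p/top_k values
}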
@@ -250,9 +250,12 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
         invoke = runtime_client.invoke_model
 
         try:
+            body_jsonstr = json.dumps(payload)
             response = invoke(
-                body=json.dumps(payload),
                 modelId=model,
+                contentType="application/json",
+                accept="*/*",
+                body=body_jsonstr
             )
         except ClientError as ex:
             error_code = ex.response['Error']['Code']
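For context, a self-contained sketch of the call this hunk changes, using the boto3 bedrock-runtime client; the region, model id, and payload are illustrative stand-ins, not values from this commit:

import json

import boto3

# Assumes standard AWS credentials are configured in the environment.
runtime_client = boto3.client("bedrock-runtime", region_name="us-east-1")

payload = {
    "prompt": "\n\nHuman: Hello\n\nAssistant:",
    "max_tokens_to_sample": 256,
}

response = runtime_client.invoke_model(
    modelId="anthropic.claude-v2",
    contentType="application/json",  # declare the request body as JSON
    accept="*/*",                    # accept any response content type
    body=json.dumps(payload),        # invoke_model takes the body as a JSON string or bytes
)
result = json.loads(response["body"].read())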
@@ -385,7 +388,6 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
             if not chunk:
                 exception_name = next(iter(event))
                 full_ex_msg = f"{exception_name}: {event[exception_name]['message']}"
-
                 raise self._map_client_to_invoke_error(exception_name, full_ex_msg)
 
             payload = json.loads(chunk.get('bytes').decode())
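The loop around this hunk consumes a Bedrock response stream, where each event either carries a chunk of JSON bytes or is keyed by an exception name. A rough sketch of that event handling, under the same assumptions as the snippet above:

stream = runtime_client.invoke_model_with_response_stream(
    modelId="anthropic.claude-v2",
    contentType="application/json",
    accept="*/*",
    body=json.dumps(payload),
)

for event in stream["body"]:
    chunk = event.get("chunk")
    if not chunk:
        # error events look like {"internalServerException": {"message": "..."}}
        exception_name = next(iter(event))
        raise RuntimeError(f"{exception_name}: {event[exception_name]['message']}")
    payload_part = json.loads(chunk["bytes"].decode())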
@@ -396,7 +398,7 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
                 finish_reason = payload.get("completion_reason")
 
             elif model_prefix == "anthropic":
-                content_delta = payload
+                content_delta = payload.get("completion")
                 finish_reason = payload.get("stop_reason")
 
             elif model_prefix == "cohere":
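The actual fix in this hunk: an Anthropic stream chunk is a JSON object whose text delta sits under the completion key, so the old code was assigning the whole dict to content_delta. An assumed chunk shape, matching the keys the code reads:

# Assumed Anthropic-on-Bedrock stream chunk (shape inferred from the keys used above).
anthropic_chunk = {"completion": " world", "stop_reason": None}

content_delta = anthropic_chunk.get("completion")   # " world" (the text only, not the dict)
finish_reason = anthropic_chunk.get("stop_reason")  # None until the final chunk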
@@ -410,11 +412,11 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
             else:
                 raise ValueError(f"Got unknown model prefix {model_prefix} when handling stream response")
 
-            index += 1
-
+            # transform assistant message to prompt message
             assistant_prompt_message = AssistantPromptMessage(
                 content=content_delta if content_delta else '',
             )
+            index += 1
 
             if not finish_reason:
                 yield LLMResultChunk(
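This hunk also moves index += 1 below the message construction, so the chunk assembled in each iteration is built before the counter advances. A stand-in generator (AssistantPromptMessage and LLMResultChunk are dify classes, simplified here to a plain dict) that mirrors the intended ordering:

def stream_chunks(deltas):
    # Build each chunk with the current index, then advance the counter,
    # so the first emitted chunk carries index 0 rather than 1.
    index = 0
    for content_delta in deltas:
        yield {"index": index, "content": content_delta if content_delta else ""}
        index += 1

assert [c["index"] for c in stream_chunks(["Hel", "lo"])] == [0, 1]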