feat: add hunyuan-vision (#8529)

Authored by MuYu on 2024-09-19 18:08:01 +08:00, committed by GitHub
parent 7411bcf167
commit a03919c3b3
3 changed files with 63 additions and 0 deletions


@@ -3,3 +3,4 @@
 - hunyuan-standard-256k
 - hunyuan-pro
 - hunyuan-turbo
+- hunyuan-vision


@@ -0,0 +1,39 @@
+model: hunyuan-vision
+label:
+  zh_Hans: hunyuan-vision
+  en_US: hunyuan-vision
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+  - vision
+model_properties:
+  mode: chat
+  context_size: 8000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 8000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.018'
+  output: '0.018'
+  unit: '0.001'
+  currency: RMB
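For reference, the pricing block reads as a per-token rate scaled by unit: with input '0.018', output '0.018', and unit '0.001', usage works out to 0.018 RMB per 1,000 tokens in each direction. A minimal sketch of that arithmetic, assuming a tokens × price × unit convention; the helper estimate_cost is illustrative only and not part of this commit:

```python
from decimal import Decimal

def estimate_cost(prompt_tokens: int, completion_tokens: int) -> Decimal:
    """Rough cost estimate for hunyuan-vision, assuming cost = tokens * price * unit."""
    price_unit = Decimal("0.001")   # 'unit' from the YAML: prices are quoted per 1/0.001 = 1,000 tokens
    input_price = Decimal("0.018")  # RMB per 1,000 input tokens
    output_price = Decimal("0.018") # RMB per 1,000 output tokens
    return (Decimal(prompt_tokens) * input_price
            + Decimal(completion_tokens) * output_price) * price_unit

# Example: 2,000 prompt tokens + 500 completion tokens -> 0.045 RMB
print(estimate_cost(2000, 500))
```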


@@ -1,6 +1,7 @@
 import json
 import logging
 from collections.abc import Generator
+from typing import cast
 
 from tencentcloud.common import credential
 from tencentcloud.common.exception import TencentCloudSDKException
@@ -11,9 +12,12 @@ from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
+    ImagePromptMessageContent,
     PromptMessage,
+    PromptMessageContentType,
     PromptMessageTool,
     SystemPromptMessage,
+    TextPromptMessageContent,
     ToolPromptMessage,
     UserPromptMessage,
 )
@@ -143,6 +147,25 @@ class HunyuanLargeLanguageModel(LargeLanguageModel):
                 tool_execute_result = {"result": message.content}
                 content = json.dumps(tool_execute_result, ensure_ascii=False)
                 dict_list.append({"Role": message.role.value, "Content": content, "ToolCallId": message.tool_call_id})
+            elif isinstance(message, UserPromptMessage):
+                message = cast(UserPromptMessage, message)
+                if isinstance(message.content, str):
+                    dict_list.append({"Role": message.role.value, "Content": message.content})
+                else:
+                    sub_messages = []
+                    for message_content in message.content:
+                        if message_content.type == PromptMessageContentType.TEXT:
+                            message_content = cast(TextPromptMessageContent, message_content)
+                            sub_message_dict = {"Type": "text", "Text": message_content.data}
+                            sub_messages.append(sub_message_dict)
+                        elif message_content.type == PromptMessageContentType.IMAGE:
+                            message_content = cast(ImagePromptMessageContent, message_content)
+                            sub_message_dict = {
+                                "Type": "image_url",
+                                "ImageUrl": {"Url": message_content.data},
+                            }
+                            sub_messages.append(sub_message_dict)
+                    dict_list.append({"Role": message.role.value, "Contents": sub_messages})
             else:
                 dict_list.append({"Role": message.role.value, "Content": message.content})
         return dict_list
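The new elif branch is what enables vision input: a plain-string user message still maps to a single Content field, while a multimodal content list is flattened into a Contents array of typed sub-messages, using the capitalized field names (Role, Contents, Type, Text, ImageUrl) that the Hunyuan API expects. The standalone sketch below mirrors that mapping with simplified stand-in classes (TextContent, ImageContent, and UserMessage are placeholders for Dify's real entities in core.model_runtime.entities.message_entities), so it can be run without the Dify codebase:

```python
from dataclasses import dataclass
from typing import Union

# Simplified stand-ins for Dify's prompt message entities, just to illustrate the mapping.
@dataclass
class TextContent:
    data: str
    type: str = "text"

@dataclass
class ImageContent:
    data: str  # image URL or data URI
    type: str = "image"

@dataclass
class UserMessage:
    content: Union[str, list]
    role: str = "user"

def to_hunyuan_message(message: UserMessage) -> dict:
    """Mirror the new elif branch: plain strings become 'Content',
    multimodal lists become a 'Contents' array of typed sub-messages."""
    if isinstance(message.content, str):
        return {"Role": message.role, "Content": message.content}
    sub_messages = []
    for part in message.content:
        if part.type == "text":
            sub_messages.append({"Type": "text", "Text": part.data})
        elif part.type == "image":
            sub_messages.append({"Type": "image_url", "ImageUrl": {"Url": part.data}})
    return {"Role": message.role, "Contents": sub_messages}

# Example: a text + image user turn, shaped the way hunyuan-vision expects it.
msg = UserMessage(content=[TextContent("What is in this picture?"),
                           ImageContent("https://example.com/cat.png")])
print(to_hunyuan_message(msg))
# {'Role': 'user', 'Contents': [{'Type': 'text', 'Text': '...'},
#                               {'Type': 'image_url', 'ImageUrl': {'Url': '...'}}]}
```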