diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml index 1ec9aee641..2da670d4df 100644 --- a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml @@ -17,6 +17,13 @@ - deepseek-ai/DeepSeek-V2.5 - deepseek-ai/DeepSeek-V3 - deepseek-ai/DeepSeek-Coder-V2-Instruct +- deepseek-ai/DeepSeek-R1-Distill-Llama-8B +- deepseek-ai/DeepSeek-R1-Distill-Llama-70B +- deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B +- deepseek-ai/DeepSeek-R1-Distill-Qwen-7B +- deepseek-ai/DeepSeek-R1-Distill-Qwen-14B +- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B +- deepseek-ai/Janus-Pro-7B - THUDM/glm-4-9b-chat - 01-ai/Yi-1.5-34B-Chat-16K - 01-ai/Yi-1.5-9B-Chat-16K diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-llama-70B.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-llama-70B.yaml new file mode 100644 index 0000000000..59e0b4d68e --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-llama-70B.yaml @@ -0,0 +1,21 @@ +model: deepseek-ai/DeepSeek-R1-Distill-Llama-70B +label: + zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Llama-70B + en_US: deepseek-ai/DeepSeek-R1-Distill-Llama-70B +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + min: 1 + max: 8192 + default: 4096 +pricing: + input: "4.13" + output: "4.13" + unit: "0.000001" + currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-llama-8B.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-llama-8B.yaml new file mode 100644 index 0000000000..f3256aa5a0 --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-llama-8B.yaml @@ 
-0,0 +1,21 @@ +model: deepseek-ai/DeepSeek-R1-Distill-Llama-8B +label: + zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Llama-8B + en_US: deepseek-ai/DeepSeek-R1-Distill-Llama-8B +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + min: 1 + max: 8192 + default: 4096 +pricing: + input: "0.00" + output: "0.00" + unit: "0.000001" + currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-1.5B.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-1.5B.yaml new file mode 100644 index 0000000000..7297278654 --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-1.5B.yaml @@ -0,0 +1,21 @@ +model: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B +label: + zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B + en_US: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + min: 1 + max: 8192 + default: 4096 +pricing: + input: "0.00" + output: "0.00" + unit: "0.000001" + currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-14B.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-14B.yaml new file mode 100644 index 0000000000..24b5c89ebf --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-14B.yaml @@ -0,0 +1,21 @@ +model: deepseek-ai/DeepSeek-R1-Distill-Qwen-14B +label: + zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Qwen-14B + en_US: deepseek-ai/DeepSeek-R1-Distill-Qwen-14B +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + min: 1 + max: 8192 + default: 4096 
+pricing: + input: "0.70" + output: "0.70" + unit: "0.000001" + currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-32B.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-32B.yaml new file mode 100644 index 0000000000..2a8cce1f96 --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-32B.yaml @@ -0,0 +1,21 @@ +model: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B +label: + zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B + en_US: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + min: 1 + max: 8192 + default: 4096 +pricing: + input: "1.26" + output: "1.26" + unit: "0.000001" + currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-7B.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-7B.yaml new file mode 100644 index 0000000000..cde1c14aae --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-r1-distill-qwen-7B.yaml @@ -0,0 +1,21 @@ +model: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B +label: + zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B + en_US: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + min: 1 + max: 8192 + default: 4096 +pricing: + input: "0.00" + output: "0.00" + unit: "0.000001" + currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/janus-pro-7B.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/janus-pro-7B.yaml new file mode 100644 index 0000000000..dabbd745e5 --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/llm/janus-pro-7B.yaml @@ 
-0,0 +1,22 @@ +model: deepseek-ai/Janus-Pro-7B +label: + zh_Hans: deepseek-ai/Janus-Pro-7B + en_US: deepseek-ai/Janus-Pro-7B +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 32000 +parameter_rules: + - name: max_tokens + use_template: max_tokens + min: 1 + max: 8192 + default: 4096 +pricing: + input: "0.00" + output: "0.00" + unit: "0.000001" + currency: RMB