Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git
feat: add parameter "reasoning_effort" and OpenAI o3-mini (#13243)
Parent: 5a685f7156
Commit: 7203991032
@@ -619,9 +619,9 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
         # clear illegal prompt messages
         prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)
 
-        # o1 compatibility
+        # o1, o3 compatibility
         block_as_stream = False
-        if model.startswith("o1"):
+        if model.startswith(("o1", "o3")):
             if "max_tokens" in model_parameters:
                 model_parameters["max_completion_tokens"] = model_parameters["max_tokens"]
                 del model_parameters["max_tokens"]
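OpenAI's o-series reasoning models reject the legacy max_tokens argument and expect max_completion_tokens instead, so the compatibility branch above now also matches o3-family model names. A minimal sketch of the same remapping outside Dify's codebase (the helper name and plain-dict parameters are illustrative, not Dify's API):

    # Sketch: remap max_tokens -> max_completion_tokens for OpenAI reasoning models.
    def normalize_reasoning_params(model: str, model_parameters: dict) -> dict:
        params = dict(model_parameters)  # work on a copy instead of mutating the caller's dict
        if model.startswith(("o1", "o3")) and "max_tokens" in params:
            params["max_completion_tokens"] = params.pop("max_tokens")
        return params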
@@ -941,7 +941,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
             ]
         )
 
-        if model.startswith("o1"):
+        if model.startswith(("o1", "o3")):
             system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)])
             if system_message_count > 0:
                 new_prompt_messages = []
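The same startswith check now guards the system-message handling. The hunk is truncated here, but the surrounding code rewrites prompts for models that do not accept the system role. A hedged sketch of that general pattern using plain role/content dicts rather than Dify's SystemPromptMessage classes:

    # Sketch: re-send system messages as user messages for models without system-role support.
    def downgrade_system_messages(messages: list[dict]) -> list[dict]:
        return [
            {"role": "user", "content": m["content"]} if m["role"] == "system" else m
            for m in messages
        ]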
@@ -1053,7 +1053,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
             model = model.split(":")[1]
 
         # Currently, we can use gpt4o to calculate chatgpt-4o-latest's token.
-        if model == "chatgpt-4o-latest" or model.startswith("o1"):
+        if model == "chatgpt-4o-latest" or model.startswith(("o1", "o3")):
             model = "gpt-4o"
 
         try:
@@ -1068,7 +1068,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
             tokens_per_message = 4
             # if there's a name, the role is omitted
             tokens_per_name = -1
-        elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4") or model.startswith("o1"):
+        elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4") or model.startswith(("o1", "o3")):
            tokens_per_message = 3
            tokens_per_name = 1
        else:
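Both token-counting hunks treat o1/o3 like gpt-4o: the tokenizer is resolved for gpt-4o and the per-message overhead uses the gpt-3.5-turbo/gpt-4 bookkeeping. A hedged, self-contained sketch of that counting scheme with tiktoken (constants follow OpenAI's cookbook; this is not Dify's exact implementation):

    import tiktoken

    def estimate_chat_tokens(model: str, messages: list[dict]) -> int:
        # o-series and chatgpt-4o-latest are counted with gpt-4o's tokenizer
        if model == "chatgpt-4o-latest" or model.startswith(("o1", "o3")):
            model = "gpt-4o"
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            encoding = tiktoken.get_encoding("cl100k_base")
        tokens_per_message, tokens_per_name = 3, 1  # gpt-3.5-turbo / gpt-4 / o-series bookkeeping
        total = 0
        for message in messages:
            total += tokens_per_message
            for key, value in message.items():
                total += len(encoding.encode(str(value)))
                if key == "name":
                    total += tokens_per_name
        return total + 3  # every reply is primed with <|start|>assistant<|message|>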
@@ -16,6 +16,19 @@ parameter_rules:
     default: 50000
     min: 1
     max: 50000
+  - name: reasoning_effort
+    label:
+      zh_Hans: 推理工作
+      en_US: reasoning_effort
+    type: string
+    help:
+      zh_Hans: 限制推理模型的推理工作
+      en_US: constrains effort on reasoning for reasoning models
+    required: false
+    options:
+      - low
+      - medium
+      - high
   - name: response_format
     label:
       zh_Hans: 回复格式
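Dify exposes reasoning_effort as an optional string parameter with low/medium/high options; the same block recurs in the three hunks that follow, apparently the other o-series model configurations updated by this commit. A minimal sketch of how such a value ultimately reaches the OpenAI SDK (openai>=1.61), assuming an API key in the environment; the prompt text is illustrative:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    response = client.chat.completions.create(
        model="o3-mini",
        messages=[{"role": "user", "content": "Summarize the CAP theorem in two sentences."}],
        reasoning_effort="medium",      # one of the declared options: low | medium | high
        max_completion_tokens=1024,     # o-series models take max_completion_tokens, not max_tokens
    )
    print(response.choices[0].message.content)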
@@ -17,6 +17,19 @@ parameter_rules:
     default: 50000
     min: 1
     max: 50000
+  - name: reasoning_effort
+    label:
+      zh_Hans: 推理工作
+      en_US: reasoning_effort
+    type: string
+    help:
+      zh_Hans: 限制推理模型的推理工作
+      en_US: constrains effort on reasoning for reasoning models
+    required: false
+    options:
+      - low
+      - medium
+      - high
   - name: response_format
     label:
       zh_Hans: 回复格式
@@ -14,6 +14,19 @@ parameter_rules:
     default: 100000
     min: 1
     max: 100000
+  - name: reasoning_effort
+    label:
+      zh_Hans: 推理工作
+      en_US: reasoning_effort
+    type: string
+    help:
+      zh_Hans: 限制推理模型的推理工作
+      en_US: constrains effort on reasoning for reasoning models
+    required: false
+    options:
+      - low
+      - medium
+      - high
   - name: response_format
     label:
       zh_Hans: 回复格式
@@ -14,6 +14,19 @@ parameter_rules:
     default: 100000
     min: 1
     max: 100000
+  - name: reasoning_effort
+    label:
+      zh_Hans: 推理工作
+      en_US: reasoning_effort
+    type: string
+    help:
+      zh_Hans: 限制推理模型的推理工作
+      en_US: constrains effort on reasoning for reasoning models
+    required: false
+    options:
+      - low
+      - medium
+      - high
   - name: response_format
     label:
       zh_Hans: 回复格式
api/poetry.lock (generated, 11 changed lines)
@@ -6562,15 +6562,15 @@ sympy = "*"
 
 [[package]]
 name = "openai"
-version = "1.52.2"
+version = "1.61.0"
 description = "The official Python library for the openai API"
 optional = false
-python-versions = ">=3.7.1"
+python-versions = ">=3.8"
 groups = ["main"]
 markers = "python_version == \"3.11\" or python_version >= \"3.12\""
 files = [
-    {file = "openai-1.52.2-py3-none-any.whl", hash = "sha256:57e9e37bc407f39bb6ec3a27d7e8fb9728b2779936daa1fcf95df17d3edfaccc"},
-    {file = "openai-1.52.2.tar.gz", hash = "sha256:87b7d0f69d85f5641678d414b7ee3082363647a5c66a462ed7f3ccb59582da0d"},
+    {file = "openai-1.61.0-py3-none-any.whl", hash = "sha256:e8c512c0743accbdbe77f3429a1490d862f8352045de8dc81969301eb4a4f666"},
+    {file = "openai-1.61.0.tar.gz", hash = "sha256:216f325a24ed8578e929b0f1b3fb2052165f3b04b0461818adaa51aa29c71f8a"},
 ]
 
 [package.dependencies]
@@ -6585,6 +6585,7 @@ typing-extensions = ">=4.11,<5"
 
 [package.extras]
 datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
+realtime = ["websockets (>=13,<15)"]
 
 [[package]]
 name = "opencensus"
@@ -12429,4 +12430,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.11,<3.13"
-content-hash = "14896fb8a4495238d70515986341128d0096ba130c3acce127a29a903a59a811"
+content-hash = "786f04736264fce986f53ea41aa0f5b94e18df807a9aad0aa621cc0ac7078065"
@@ -58,7 +58,7 @@ nomic = "~3.1.2"
 novita-client = "~0.5.7"
 numpy = "~1.26.4"
 oci = "~2.135.1"
-openai = "~1.52.0"
+openai = "~1.61.0"
 openpyxl = "~3.1.5"
 opik = "~1.3.4"
 pandas = { version = "~2.2.2", extras = ["performance", "excel"] }
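The openai pin moves from ~1.52.0 to ~1.61.0, matching the lock-file update above, presumably so the typed client knows about the new reasoning_effort argument. A hedged guard one could run after updating an environment (the threshold is taken from this pin; the packaging library is a separate install if it is not already present):

    # Sketch: fail fast if the installed openai SDK predates the new pin.
    from packaging.version import Version
    import openai

    if Version(openai.__version__) < Version("1.61.0"):
        raise RuntimeError("this code path expects openai>=1.61.0; run `poetry update openai`")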