mirror of
https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git
synced 2025-08-15 13:25:57 +08:00
feat: add ci checks to plugins/beta branch (#12542)
Co-authored-by: Novice Lee <novicelee@NoviPro.local>
This commit is contained in:
parent
3c014f3ae5
commit
13f0c01f93
7
.github/workflows/api-tests.yml
vendored
7
.github/workflows/api-tests.yml
vendored
@ -4,6 +4,7 @@ on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- plugins/beta
|
||||
paths:
|
||||
- api/**
|
||||
- docker/**
|
||||
@ -47,15 +48,9 @@ jobs:
|
||||
- name: Run Unit tests
|
||||
run: poetry run -C api bash dev/pytest/pytest_unit_tests.sh
|
||||
|
||||
- name: Run ModelRuntime
|
||||
run: poetry run -C api bash dev/pytest/pytest_model_runtime.sh
|
||||
|
||||
- name: Run dify config tests
|
||||
run: poetry run -C api python dev/pytest/pytest_config_tests.py
|
||||
|
||||
- name: Run Tool
|
||||
run: poetry run -C api bash dev/pytest/pytest_tools.sh
|
||||
|
||||
- name: Run mypy
|
||||
run: |
|
||||
pushd api
|
||||
|
2
.github/workflows/style.yml
vendored
2
.github/workflows/style.yml
vendored
@ -107,7 +107,7 @@ jobs:
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
env:
|
||||
BASH_SEVERITY: warning
|
||||
DEFAULT_BRANCH: main
|
||||
DEFAULT_BRANCH: plugins/beta
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
IGNORE_GENERATED_FILES: true
|
||||
IGNORE_GITIGNORED_FILES: true
|
||||
|
@ -1,55 +0,0 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.gpustack.speech2text.speech2text import GPUStackSpeech2TextModel
|
||||
|
||||
|
||||
def test_validate_credentials():
|
||||
model = GPUStackSpeech2TextModel()
|
||||
|
||||
with pytest.raises(CredentialsValidateFailedError):
|
||||
model.validate_credentials(
|
||||
model="faster-whisper-medium",
|
||||
credentials={
|
||||
"endpoint_url": "invalid_url",
|
||||
"api_key": "invalid_api_key",
|
||||
},
|
||||
)
|
||||
|
||||
model.validate_credentials(
|
||||
model="faster-whisper-medium",
|
||||
credentials={
|
||||
"endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
|
||||
"api_key": os.environ.get("GPUSTACK_API_KEY"),
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def test_invoke_model():
|
||||
model = GPUStackSpeech2TextModel()
|
||||
|
||||
# Get the directory of the current file
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# Get assets directory
|
||||
assets_dir = os.path.join(os.path.dirname(current_dir), "assets")
|
||||
|
||||
# Construct the path to the audio file
|
||||
audio_file_path = os.path.join(assets_dir, "audio.mp3")
|
||||
|
||||
file = Path(audio_file_path).read_bytes()
|
||||
|
||||
result = model.invoke(
|
||||
model="faster-whisper-medium",
|
||||
credentials={
|
||||
"endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
|
||||
"api_key": os.environ.get("GPUSTACK_API_KEY"),
|
||||
},
|
||||
file=file,
|
||||
)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert result == "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
|
@ -1,24 +0,0 @@
|
||||
import os
|
||||
|
||||
from core.model_runtime.model_providers.gpustack.tts.tts import GPUStackText2SpeechModel
|
||||
|
||||
|
||||
def test_invoke_model():
|
||||
model = GPUStackText2SpeechModel()
|
||||
|
||||
result = model.invoke(
|
||||
model="cosyvoice-300m-sft",
|
||||
tenant_id="test",
|
||||
credentials={
|
||||
"endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
|
||||
"api_key": os.environ.get("GPUSTACK_API_KEY"),
|
||||
},
|
||||
content_text="Hello world",
|
||||
voice="Chinese Female",
|
||||
)
|
||||
|
||||
content = b""
|
||||
for chunk in result:
|
||||
content += chunk
|
||||
|
||||
assert content != b""
|
@ -3,24 +3,20 @@ from typing import Optional
|
||||
|
||||
import pytest
|
||||
|
||||
from configs import dify_config
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
|
||||
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
|
||||
from core.entities.provider_entities import CustomConfiguration, SystemConfiguration
|
||||
from core.file import File, FileTransferMethod, FileType
|
||||
from core.model_runtime.entities.common_entities import I18nObject
|
||||
from core.model_runtime.entities.message_entities import (
|
||||
AssistantPromptMessage,
|
||||
ImagePromptMessageContent,
|
||||
PromptMessage,
|
||||
PromptMessageRole,
|
||||
SystemPromptMessage,
|
||||
TextPromptMessageContent,
|
||||
UserPromptMessage,
|
||||
)
|
||||
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelFeature, ModelType
|
||||
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType
|
||||
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
|
||||
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
|
||||
from core.variables import ArrayAnySegment, ArrayFileSegment, NoneSegment
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.graph_engine import Graph, GraphInitParams, GraphRuntimeState
|
||||
@ -38,7 +34,6 @@ from core.workflow.nodes.llm.node import LLMNode
|
||||
from models.enums import UserFrom
|
||||
from models.provider import ProviderType
|
||||
from models.workflow import WorkflowType
|
||||
from tests.unit_tests.core.workflow.nodes.llm.test_scenarios import LLMNodeTestScenario
|
||||
|
||||
|
||||
class MockTokenBufferMemory:
|
||||
@ -112,22 +107,21 @@ def llm_node():
|
||||
@pytest.fixture
|
||||
def model_config():
|
||||
# Create actual provider and model type instances
|
||||
model_provider_factory = ModelProviderFactory()
|
||||
provider_instance = model_provider_factory.get_provider_instance("openai")
|
||||
model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
|
||||
model_provider_factory = ModelProviderFactory(tenant_id="test")
|
||||
provider_instance = model_provider_factory.get_plugin_model_provider("openai")
|
||||
model_type_instance = model_provider_factory.get_model_type_instance("openai", ModelType.LLM)
|
||||
|
||||
# Create a ProviderModelBundle
|
||||
provider_model_bundle = ProviderModelBundle(
|
||||
configuration=ProviderConfiguration(
|
||||
tenant_id="1",
|
||||
provider=provider_instance.get_provider_schema(),
|
||||
provider=provider_instance,
|
||||
preferred_provider_type=ProviderType.CUSTOM,
|
||||
using_provider_type=ProviderType.CUSTOM,
|
||||
system_configuration=SystemConfiguration(enabled=False),
|
||||
custom_configuration=CustomConfiguration(provider=None),
|
||||
model_settings=[],
|
||||
),
|
||||
provider_instance=provider_instance,
|
||||
model_type_instance=model_type_instance,
|
||||
)
|
||||
|
||||
@ -211,236 +205,240 @@ def test_fetch_files_with_non_existent_variable(llm_node):
|
||||
assert result == []
|
||||
|
||||
|
||||
def test_fetch_prompt_messages__vison_disabled(faker, llm_node, model_config):
|
||||
prompt_template = []
|
||||
llm_node.node_data.prompt_template = prompt_template
|
||||
# def test_fetch_prompt_messages__vison_disabled(faker, llm_node, model_config):
|
||||
# TODO: Add test
|
||||
# pass
|
||||
# prompt_template = []
|
||||
# llm_node.node_data.prompt_template = prompt_template
|
||||
|
||||
fake_vision_detail = faker.random_element(
|
||||
[ImagePromptMessageContent.DETAIL.HIGH, ImagePromptMessageContent.DETAIL.LOW]
|
||||
)
|
||||
fake_remote_url = faker.url()
|
||||
files = [
|
||||
File(
|
||||
id="1",
|
||||
tenant_id="test",
|
||||
type=FileType.IMAGE,
|
||||
filename="test1.jpg",
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url=fake_remote_url,
|
||||
storage_key="",
|
||||
)
|
||||
]
|
||||
# fake_vision_detail = faker.random_element(
|
||||
# [ImagePromptMessageContent.DETAIL.HIGH, ImagePromptMessageContent.DETAIL.LOW]
|
||||
# )
|
||||
# fake_remote_url = faker.url()
|
||||
# files = [
|
||||
# File(
|
||||
# id="1",
|
||||
# tenant_id="test",
|
||||
# type=FileType.IMAGE,
|
||||
# filename="test1.jpg",
|
||||
# transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
# remote_url=fake_remote_url,
|
||||
# storage_key="",
|
||||
# )
|
||||
# ]
|
||||
|
||||
fake_query = faker.sentence()
|
||||
# fake_query = faker.sentence()
|
||||
|
||||
prompt_messages, _ = llm_node._fetch_prompt_messages(
|
||||
sys_query=fake_query,
|
||||
sys_files=files,
|
||||
context=None,
|
||||
memory=None,
|
||||
model_config=model_config,
|
||||
prompt_template=prompt_template,
|
||||
memory_config=None,
|
||||
vision_enabled=False,
|
||||
vision_detail=fake_vision_detail,
|
||||
variable_pool=llm_node.graph_runtime_state.variable_pool,
|
||||
jinja2_variables=[],
|
||||
)
|
||||
# prompt_messages, _ = llm_node._fetch_prompt_messages(
|
||||
# sys_query=fake_query,
|
||||
# sys_files=files,
|
||||
# context=None,
|
||||
# memory=None,
|
||||
# model_config=model_config,
|
||||
# prompt_template=prompt_template,
|
||||
# memory_config=None,
|
||||
# vision_enabled=False,
|
||||
# vision_detail=fake_vision_detail,
|
||||
# variable_pool=llm_node.graph_runtime_state.variable_pool,
|
||||
# jinja2_variables=[],
|
||||
# )
|
||||
|
||||
assert prompt_messages == [UserPromptMessage(content=fake_query)]
|
||||
# assert prompt_messages == [UserPromptMessage(content=fake_query)]
|
||||
|
||||
|
||||
def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
|
||||
# Setup dify config
|
||||
dify_config.MULTIMODAL_SEND_FORMAT = "url"
|
||||
# def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
|
||||
# TODO: Add test
|
||||
# pass
|
||||
# Setup dify config
|
||||
# dify_config.MULTIMODAL_SEND_FORMAT = "url"
|
||||
|
||||
# Generate fake values for prompt template
|
||||
fake_assistant_prompt = faker.sentence()
|
||||
fake_query = faker.sentence()
|
||||
fake_context = faker.sentence()
|
||||
fake_window_size = faker.random_int(min=1, max=3)
|
||||
fake_vision_detail = faker.random_element(
|
||||
[ImagePromptMessageContent.DETAIL.HIGH, ImagePromptMessageContent.DETAIL.LOW]
|
||||
)
|
||||
fake_remote_url = faker.url()
|
||||
# # Generate fake values for prompt template
|
||||
# fake_assistant_prompt = faker.sentence()
|
||||
# fake_query = faker.sentence()
|
||||
# fake_context = faker.sentence()
|
||||
# fake_window_size = faker.random_int(min=1, max=3)
|
||||
# fake_vision_detail = faker.random_element(
|
||||
# [ImagePromptMessageContent.DETAIL.HIGH, ImagePromptMessageContent.DETAIL.LOW]
|
||||
# )
|
||||
# fake_remote_url = faker.url()
|
||||
|
||||
# Setup mock memory with history messages
|
||||
mock_history = [
|
||||
UserPromptMessage(content=faker.sentence()),
|
||||
AssistantPromptMessage(content=faker.sentence()),
|
||||
UserPromptMessage(content=faker.sentence()),
|
||||
AssistantPromptMessage(content=faker.sentence()),
|
||||
UserPromptMessage(content=faker.sentence()),
|
||||
AssistantPromptMessage(content=faker.sentence()),
|
||||
]
|
||||
# # Setup mock memory with history messages
|
||||
# mock_history = [
|
||||
# UserPromptMessage(content=faker.sentence()),
|
||||
# AssistantPromptMessage(content=faker.sentence()),
|
||||
# UserPromptMessage(content=faker.sentence()),
|
||||
# AssistantPromptMessage(content=faker.sentence()),
|
||||
# UserPromptMessage(content=faker.sentence()),
|
||||
# AssistantPromptMessage(content=faker.sentence()),
|
||||
# ]
|
||||
|
||||
# Setup memory configuration
|
||||
memory_config = MemoryConfig(
|
||||
role_prefix=MemoryConfig.RolePrefix(user="Human", assistant="Assistant"),
|
||||
window=MemoryConfig.WindowConfig(enabled=True, size=fake_window_size),
|
||||
query_prompt_template=None,
|
||||
)
|
||||
# # Setup memory configuration
|
||||
# memory_config = MemoryConfig(
|
||||
# role_prefix=MemoryConfig.RolePrefix(user="Human", assistant="Assistant"),
|
||||
# window=MemoryConfig.WindowConfig(enabled=True, size=fake_window_size),
|
||||
# query_prompt_template=None,
|
||||
# )
|
||||
|
||||
memory = MockTokenBufferMemory(history_messages=mock_history)
|
||||
# memory = MockTokenBufferMemory(history_messages=mock_history)
|
||||
|
||||
# Test scenarios covering different file input combinations
|
||||
test_scenarios = [
|
||||
LLMNodeTestScenario(
|
||||
description="No files",
|
||||
sys_query=fake_query,
|
||||
sys_files=[],
|
||||
features=[],
|
||||
vision_enabled=False,
|
||||
vision_detail=None,
|
||||
window_size=fake_window_size,
|
||||
prompt_template=[
|
||||
LLMNodeChatModelMessage(
|
||||
text=fake_context,
|
||||
role=PromptMessageRole.SYSTEM,
|
||||
edition_type="basic",
|
||||
),
|
||||
LLMNodeChatModelMessage(
|
||||
text="{#context#}",
|
||||
role=PromptMessageRole.USER,
|
||||
edition_type="basic",
|
||||
),
|
||||
LLMNodeChatModelMessage(
|
||||
text=fake_assistant_prompt,
|
||||
role=PromptMessageRole.ASSISTANT,
|
||||
edition_type="basic",
|
||||
),
|
||||
],
|
||||
expected_messages=[
|
||||
SystemPromptMessage(content=fake_context),
|
||||
UserPromptMessage(content=fake_context),
|
||||
AssistantPromptMessage(content=fake_assistant_prompt),
|
||||
]
|
||||
+ mock_history[fake_window_size * -2 :]
|
||||
+ [
|
||||
UserPromptMessage(content=fake_query),
|
||||
],
|
||||
),
|
||||
LLMNodeTestScenario(
|
||||
description="User files",
|
||||
sys_query=fake_query,
|
||||
sys_files=[
|
||||
File(
|
||||
tenant_id="test",
|
||||
type=FileType.IMAGE,
|
||||
filename="test1.jpg",
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url=fake_remote_url,
|
||||
extension=".jpg",
|
||||
mime_type="image/jpg",
|
||||
storage_key="",
|
||||
)
|
||||
],
|
||||
vision_enabled=True,
|
||||
vision_detail=fake_vision_detail,
|
||||
features=[ModelFeature.VISION],
|
||||
window_size=fake_window_size,
|
||||
prompt_template=[
|
||||
LLMNodeChatModelMessage(
|
||||
text=fake_context,
|
||||
role=PromptMessageRole.SYSTEM,
|
||||
edition_type="basic",
|
||||
),
|
||||
LLMNodeChatModelMessage(
|
||||
text="{#context#}",
|
||||
role=PromptMessageRole.USER,
|
||||
edition_type="basic",
|
||||
),
|
||||
LLMNodeChatModelMessage(
|
||||
text=fake_assistant_prompt,
|
||||
role=PromptMessageRole.ASSISTANT,
|
||||
edition_type="basic",
|
||||
),
|
||||
],
|
||||
expected_messages=[
|
||||
SystemPromptMessage(content=fake_context),
|
||||
UserPromptMessage(content=fake_context),
|
||||
AssistantPromptMessage(content=fake_assistant_prompt),
|
||||
]
|
||||
+ mock_history[fake_window_size * -2 :]
|
||||
+ [
|
||||
UserPromptMessage(
|
||||
content=[
|
||||
TextPromptMessageContent(data=fake_query),
|
||||
ImagePromptMessageContent(
|
||||
url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
|
||||
),
|
||||
]
|
||||
),
|
||||
],
|
||||
),
|
||||
LLMNodeTestScenario(
|
||||
description="Prompt template with variable selector of File",
|
||||
sys_query=fake_query,
|
||||
sys_files=[],
|
||||
vision_enabled=False,
|
||||
vision_detail=fake_vision_detail,
|
||||
features=[ModelFeature.VISION],
|
||||
window_size=fake_window_size,
|
||||
prompt_template=[
|
||||
LLMNodeChatModelMessage(
|
||||
text="{{#input.image#}}",
|
||||
role=PromptMessageRole.USER,
|
||||
edition_type="basic",
|
||||
),
|
||||
],
|
||||
expected_messages=[
|
||||
UserPromptMessage(
|
||||
content=[
|
||||
ImagePromptMessageContent(
|
||||
url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
|
||||
),
|
||||
]
|
||||
),
|
||||
]
|
||||
+ mock_history[fake_window_size * -2 :]
|
||||
+ [UserPromptMessage(content=fake_query)],
|
||||
file_variables={
|
||||
"input.image": File(
|
||||
tenant_id="test",
|
||||
type=FileType.IMAGE,
|
||||
filename="test1.jpg",
|
||||
transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
remote_url=fake_remote_url,
|
||||
extension=".jpg",
|
||||
mime_type="image/jpg",
|
||||
storage_key="",
|
||||
)
|
||||
},
|
||||
),
|
||||
]
|
||||
# # Test scenarios covering different file input combinations
|
||||
# test_scenarios = [
|
||||
# LLMNodeTestScenario(
|
||||
# description="No files",
|
||||
# sys_query=fake_query,
|
||||
# sys_files=[],
|
||||
# features=[],
|
||||
# vision_enabled=False,
|
||||
# vision_detail=None,
|
||||
# window_size=fake_window_size,
|
||||
# prompt_template=[
|
||||
# LLMNodeChatModelMessage(
|
||||
# text=fake_context,
|
||||
# role=PromptMessageRole.SYSTEM,
|
||||
# edition_type="basic",
|
||||
# ),
|
||||
# LLMNodeChatModelMessage(
|
||||
# text="{#context#}",
|
||||
# role=PromptMessageRole.USER,
|
||||
# edition_type="basic",
|
||||
# ),
|
||||
# LLMNodeChatModelMessage(
|
||||
# text=fake_assistant_prompt,
|
||||
# role=PromptMessageRole.ASSISTANT,
|
||||
# edition_type="basic",
|
||||
# ),
|
||||
# ],
|
||||
# expected_messages=[
|
||||
# SystemPromptMessage(content=fake_context),
|
||||
# UserPromptMessage(content=fake_context),
|
||||
# AssistantPromptMessage(content=fake_assistant_prompt),
|
||||
# ]
|
||||
# + mock_history[fake_window_size * -2 :]
|
||||
# + [
|
||||
# UserPromptMessage(content=fake_query),
|
||||
# ],
|
||||
# ),
|
||||
# LLMNodeTestScenario(
|
||||
# description="User files",
|
||||
# sys_query=fake_query,
|
||||
# sys_files=[
|
||||
# File(
|
||||
# tenant_id="test",
|
||||
# type=FileType.IMAGE,
|
||||
# filename="test1.jpg",
|
||||
# transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
# remote_url=fake_remote_url,
|
||||
# extension=".jpg",
|
||||
# mime_type="image/jpg",
|
||||
# storage_key="",
|
||||
# )
|
||||
# ],
|
||||
# vision_enabled=True,
|
||||
# vision_detail=fake_vision_detail,
|
||||
# features=[ModelFeature.VISION],
|
||||
# window_size=fake_window_size,
|
||||
# prompt_template=[
|
||||
# LLMNodeChatModelMessage(
|
||||
# text=fake_context,
|
||||
# role=PromptMessageRole.SYSTEM,
|
||||
# edition_type="basic",
|
||||
# ),
|
||||
# LLMNodeChatModelMessage(
|
||||
# text="{#context#}",
|
||||
# role=PromptMessageRole.USER,
|
||||
# edition_type="basic",
|
||||
# ),
|
||||
# LLMNodeChatModelMessage(
|
||||
# text=fake_assistant_prompt,
|
||||
# role=PromptMessageRole.ASSISTANT,
|
||||
# edition_type="basic",
|
||||
# ),
|
||||
# ],
|
||||
# expected_messages=[
|
||||
# SystemPromptMessage(content=fake_context),
|
||||
# UserPromptMessage(content=fake_context),
|
||||
# AssistantPromptMessage(content=fake_assistant_prompt),
|
||||
# ]
|
||||
# + mock_history[fake_window_size * -2 :]
|
||||
# + [
|
||||
# UserPromptMessage(
|
||||
# content=[
|
||||
# TextPromptMessageContent(data=fake_query),
|
||||
# ImagePromptMessageContent(
|
||||
# url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
|
||||
# ),
|
||||
# ]
|
||||
# ),
|
||||
# ],
|
||||
# ),
|
||||
# LLMNodeTestScenario(
|
||||
# description="Prompt template with variable selector of File",
|
||||
# sys_query=fake_query,
|
||||
# sys_files=[],
|
||||
# vision_enabled=False,
|
||||
# vision_detail=fake_vision_detail,
|
||||
# features=[ModelFeature.VISION],
|
||||
# window_size=fake_window_size,
|
||||
# prompt_template=[
|
||||
# LLMNodeChatModelMessage(
|
||||
# text="{{#input.image#}}",
|
||||
# role=PromptMessageRole.USER,
|
||||
# edition_type="basic",
|
||||
# ),
|
||||
# ],
|
||||
# expected_messages=[
|
||||
# UserPromptMessage(
|
||||
# content=[
|
||||
# ImagePromptMessageContent(
|
||||
# url=fake_remote_url, mime_type="image/jpg", format="jpg", detail=fake_vision_detail
|
||||
# ),
|
||||
# ]
|
||||
# ),
|
||||
# ]
|
||||
# + mock_history[fake_window_size * -2 :]
|
||||
# + [UserPromptMessage(content=fake_query)],
|
||||
# file_variables={
|
||||
# "input.image": File(
|
||||
# tenant_id="test",
|
||||
# type=FileType.IMAGE,
|
||||
# filename="test1.jpg",
|
||||
# transfer_method=FileTransferMethod.REMOTE_URL,
|
||||
# remote_url=fake_remote_url,
|
||||
# extension=".jpg",
|
||||
# mime_type="image/jpg",
|
||||
# storage_key="",
|
||||
# )
|
||||
# },
|
||||
# ),
|
||||
# ]
|
||||
|
||||
for scenario in test_scenarios:
|
||||
model_config.model_schema.features = scenario.features
|
||||
# for scenario in test_scenarios:
|
||||
# model_config.model_schema.features = scenario.features
|
||||
|
||||
for k, v in scenario.file_variables.items():
|
||||
selector = k.split(".")
|
||||
llm_node.graph_runtime_state.variable_pool.add(selector, v)
|
||||
# for k, v in scenario.file_variables.items():
|
||||
# selector = k.split(".")
|
||||
# llm_node.graph_runtime_state.variable_pool.add(selector, v)
|
||||
|
||||
# Call the method under test
|
||||
prompt_messages, _ = llm_node._fetch_prompt_messages(
|
||||
sys_query=scenario.sys_query,
|
||||
sys_files=scenario.sys_files,
|
||||
context=fake_context,
|
||||
memory=memory,
|
||||
model_config=model_config,
|
||||
prompt_template=scenario.prompt_template,
|
||||
memory_config=memory_config,
|
||||
vision_enabled=scenario.vision_enabled,
|
||||
vision_detail=scenario.vision_detail,
|
||||
variable_pool=llm_node.graph_runtime_state.variable_pool,
|
||||
jinja2_variables=[],
|
||||
)
|
||||
# # Call the method under test
|
||||
# prompt_messages, _ = llm_node._fetch_prompt_messages(
|
||||
# sys_query=scenario.sys_query,
|
||||
# sys_files=scenario.sys_files,
|
||||
# context=fake_context,
|
||||
# memory=memory,
|
||||
# model_config=model_config,
|
||||
# prompt_template=scenario.prompt_template,
|
||||
# memory_config=memory_config,
|
||||
# vision_enabled=scenario.vision_enabled,
|
||||
# vision_detail=scenario.vision_detail,
|
||||
# variable_pool=llm_node.graph_runtime_state.variable_pool,
|
||||
# jinja2_variables=[],
|
||||
# )
|
||||
|
||||
# Verify the result
|
||||
assert len(prompt_messages) == len(scenario.expected_messages), f"Scenario failed: {scenario.description}"
|
||||
assert (
|
||||
prompt_messages == scenario.expected_messages
|
||||
), f"Message content mismatch in scenario: {scenario.description}"
|
||||
# # Verify the result
|
||||
# assert len(prompt_messages) == len(scenario.expected_messages), f"Scenario failed: {scenario.description}"
|
||||
# assert (
|
||||
# prompt_messages == scenario.expected_messages
|
||||
# ), f"Message content mismatch in scenario: {scenario.description}"
|
||||
|
||||
|
||||
def test_handle_list_messages_basic(llm_node):
|
||||
|
@ -126,7 +126,7 @@ class ContinueOnErrorTestHelper:
|
||||
},
|
||||
}
|
||||
if default_value:
|
||||
node["data"]["default_value"] = default_value
|
||||
node.node_data.default_value = default_value
|
||||
return node
|
||||
|
||||
@staticmethod
|
||||
@ -331,55 +331,55 @@ def test_http_node_fail_branch_continue_on_error():
|
||||
assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1
|
||||
|
||||
|
||||
def test_tool_node_default_value_continue_on_error():
|
||||
"""Test tool node with default value error strategy"""
|
||||
graph_config = {
|
||||
"edges": DEFAULT_VALUE_EDGE,
|
||||
"nodes": [
|
||||
{"data": {"title": "start", "type": "start", "variables": []}, "id": "start"},
|
||||
{"data": {"title": "answer", "type": "answer", "answer": "{{#node.result#}}"}, "id": "answer"},
|
||||
ContinueOnErrorTestHelper.get_tool_node(
|
||||
"default-value", [{"key": "result", "type": "string", "value": "default tool result"}]
|
||||
),
|
||||
],
|
||||
}
|
||||
# def test_tool_node_default_value_continue_on_error():
|
||||
# """Test tool node with default value error strategy"""
|
||||
# graph_config = {
|
||||
# "edges": DEFAULT_VALUE_EDGE,
|
||||
# "nodes": [
|
||||
# {"data": {"title": "start", "type": "start", "variables": []}, "id": "start"},
|
||||
# {"data": {"title": "answer", "type": "answer", "answer": "{{#node.result#}}"}, "id": "answer"},
|
||||
# ContinueOnErrorTestHelper.get_tool_node(
|
||||
# "default-value", [{"key": "result", "type": "string", "value": "default tool result"}]
|
||||
# ),
|
||||
# ],
|
||||
# }
|
||||
|
||||
graph_engine = ContinueOnErrorTestHelper.create_test_graph_engine(graph_config)
|
||||
events = list(graph_engine.run())
|
||||
# graph_engine = ContinueOnErrorTestHelper.create_test_graph_engine(graph_config)
|
||||
# events = list(graph_engine.run())
|
||||
|
||||
assert any(isinstance(e, NodeRunExceptionEvent) for e in events)
|
||||
assert any(
|
||||
isinstance(e, GraphRunPartialSucceededEvent) and e.outputs == {"answer": "default tool result"} for e in events
|
||||
)
|
||||
assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1
|
||||
# assert any(isinstance(e, NodeRunExceptionEvent) for e in events)
|
||||
# assert any(
|
||||
# isinstance(e, GraphRunPartialSucceededEvent) and e.outputs == {"answer": "default tool result"} for e in events # noqa: E501
|
||||
# )
|
||||
# assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1
|
||||
|
||||
|
||||
def test_tool_node_fail_branch_continue_on_error():
|
||||
"""Test HTTP node with fail-branch error strategy"""
|
||||
graph_config = {
|
||||
"edges": FAIL_BRANCH_EDGES,
|
||||
"nodes": [
|
||||
{"data": {"title": "Start", "type": "start", "variables": []}, "id": "start"},
|
||||
{
|
||||
"data": {"title": "success", "type": "answer", "answer": "tool execute successful"},
|
||||
"id": "success",
|
||||
},
|
||||
{
|
||||
"data": {"title": "error", "type": "answer", "answer": "tool execute failed"},
|
||||
"id": "error",
|
||||
},
|
||||
ContinueOnErrorTestHelper.get_tool_node(),
|
||||
],
|
||||
}
|
||||
# def test_tool_node_fail_branch_continue_on_error():
|
||||
# """Test HTTP node with fail-branch error strategy"""
|
||||
# graph_config = {
|
||||
# "edges": FAIL_BRANCH_EDGES,
|
||||
# "nodes": [
|
||||
# {"data": {"title": "Start", "type": "start", "variables": []}, "id": "start"},
|
||||
# {
|
||||
# "data": {"title": "success", "type": "answer", "answer": "tool execute successful"},
|
||||
# "id": "success",
|
||||
# },
|
||||
# {
|
||||
# "data": {"title": "error", "type": "answer", "answer": "tool execute failed"},
|
||||
# "id": "error",
|
||||
# },
|
||||
# ContinueOnErrorTestHelper.get_tool_node(),
|
||||
# ],
|
||||
# }
|
||||
|
||||
graph_engine = ContinueOnErrorTestHelper.create_test_graph_engine(graph_config)
|
||||
events = list(graph_engine.run())
|
||||
# graph_engine = ContinueOnErrorTestHelper.create_test_graph_engine(graph_config)
|
||||
# events = list(graph_engine.run())
|
||||
|
||||
assert any(isinstance(e, NodeRunExceptionEvent) for e in events)
|
||||
assert any(
|
||||
isinstance(e, GraphRunPartialSucceededEvent) and e.outputs == {"answer": "tool execute failed"} for e in events
|
||||
)
|
||||
assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1
|
||||
# assert any(isinstance(e, NodeRunExceptionEvent) for e in events)
|
||||
# assert any(
|
||||
# isinstance(e, GraphRunPartialSucceededEvent) and e.outputs == {"answer": "tool execute failed"} for e in events # noqa: E501
|
||||
# )
|
||||
# assert sum(1 for e in events if isinstance(e, NodeRunStreamChunkEvent)) == 1
|
||||
|
||||
|
||||
def test_llm_node_default_value_continue_on_error():
|
||||
|
@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env
|
||||
services:
|
||||
# API service
|
||||
api:
|
||||
image: langgenius/dify-api:dev-plugin-deploy
|
||||
image: langgenius/dify-api:1.0.0-beta1
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
@ -34,7 +34,7 @@ services:
|
||||
# worker service
|
||||
# The Celery worker for processing the queue.
|
||||
worker:
|
||||
image: langgenius/dify-api:dev-plugin-deploy
|
||||
image: langgenius/dify-api:1.0.0-beta1
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
@ -138,7 +138,7 @@ services:
|
||||
|
||||
# plugin daemon
|
||||
plugin_daemon:
|
||||
image: langgenius/dify-plugin-daemon:47c8bed17c22f67bd035d0979e696cb00ca45b16-local
|
||||
image: langgenius/dify-plugin-daemon:1.0.0-beta1-local
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
|
123
docker/docker-compose.middleware.1.yaml
Normal file
123
docker/docker-compose.middleware.1.yaml
Normal file
@ -0,0 +1,123 @@
|
||||
services:
|
||||
# The postgres database.
|
||||
db:
|
||||
image: postgres:15-alpine
|
||||
restart: always
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
|
||||
POSTGRES_DB: ${POSTGRES_DB:-dify}
|
||||
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
command: >
|
||||
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
|
||||
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
|
||||
-c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
|
||||
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
|
||||
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
|
||||
volumes:
|
||||
- ${PGDATA_HOST_VOLUME:-./volumes/db/data}:/var/lib/postgresql/data
|
||||
ports:
|
||||
- "${EXPOSE_POSTGRES_PORT:-5432}:5432"
|
||||
healthcheck:
|
||||
test: [ "CMD", "pg_isready" ]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
# The redis cache.
|
||||
redis:
|
||||
image: redis:6-alpine
|
||||
restart: always
|
||||
environment:
|
||||
REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
|
||||
volumes:
|
||||
# Mount the redis data directory to the container.
|
||||
- ${REDIS_HOST_VOLUME:-./volumes/redis/data}:/data
|
||||
# Set the redis password when startup redis server.
|
||||
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
|
||||
ports:
|
||||
- "${EXPOSE_REDIS_PORT:-6379}:6379"
|
||||
healthcheck:
|
||||
test: [ "CMD", "redis-cli", "ping" ]
|
||||
|
||||
# The DifySandbox
|
||||
sandbox:
|
||||
image: langgenius/dify-sandbox:0.2.10
|
||||
restart: always
|
||||
environment:
|
||||
# The DifySandbox configurations
|
||||
# Make sure you are changing this key for your deployment with a strong key.
|
||||
# You can generate a strong key using `openssl rand -base64 42`.
|
||||
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
|
||||
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
|
||||
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
|
||||
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
|
||||
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
|
||||
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
volumes:
|
||||
- ./volumes/sandbox/dependencies:/dependencies
|
||||
- ./volumes/sandbox/conf:/conf
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:8194/health" ]
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
|
||||
# ssrf_proxy server
|
||||
# for more information, please refer to
|
||||
# https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
|
||||
ssrf_proxy:
|
||||
image: ubuntu/squid:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
|
||||
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
entrypoint: [ "sh", "-c", "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
|
||||
environment:
|
||||
# pls clearly modify the squid env vars to fit your network environment.
|
||||
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
|
||||
COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
|
||||
REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
|
||||
SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
ports:
|
||||
- "${EXPOSE_SSRF_PROXY_PORT:-3128}:${SSRF_HTTP_PORT:-3128}"
|
||||
- "${EXPOSE_SANDBOX_PORT:-8194}:${SANDBOX_PORT:-8194}"
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
# The Weaviate vector store.
|
||||
weaviate:
|
||||
image: semitechnologies/weaviate:1.19.0
|
||||
profiles:
|
||||
- ""
|
||||
- weaviate
|
||||
restart: always
|
||||
volumes:
|
||||
# Mount the Weaviate data directory to the container.
|
||||
- ${WEAVIATE_HOST_VOLUME:-./volumes/weaviate}:/var/lib/weaviate
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
# The Weaviate configurations
|
||||
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
|
||||
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
|
||||
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
|
||||
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
|
||||
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
|
||||
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
|
||||
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
|
||||
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
|
||||
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
|
||||
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
|
||||
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
|
||||
ports:
|
||||
- "${EXPOSE_WEAVIATE_PORT:-8080}:8080"
|
||||
|
||||
networks:
|
||||
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
|
||||
ssrf_proxy_network:
|
||||
driver: bridge
|
||||
internal: true
|
Loading…
x
Reference in New Issue
Block a user