chore: apply flake8-pytest-style linter rules (#8307)
parent 40fb4d16ef, commit 8815511ccb
@@ -18,6 +18,7 @@ select = [
     "FURB", # refurb rules
     "I", # isort rules
     "N", # pep8-naming
+    "PT", # flake8-pytest-style rules
     "RUF019", # unnecessary-key-check
     "RUF100", # unused-noqa
     "RUF101", # redirected-noqa
@@ -50,6 +51,7 @@ ignore = [
     "B905", # zip-without-explicit-strict
     "N806", # non-lowercase-variable-in-function
     "N815", # mixed-case-variable-in-class-scope
+    "PT011", # pytest-raises-too-broad
     "SIM102", # collapsible-if
     "SIM103", # needless-bool
     "SIM105", # suppressible-exception
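Enabling "PT" switches on the whole flake8-pytest-style rule group, while ignoring "PT011" (pytest-raises-too-broad) keeps `pytest.raises` calls with a broad exception and no `match=` acceptable, such as the `pytest.raises(Exception)` in the ssrf_proxy test further down. A minimal sketch of what PT011 polices, with a hypothetical function and test:

    import pytest

    def parse_port(value: str) -> int:
        port = int(value)
        if not 0 < port < 65536:
            raise ValueError(f"{value} is not a valid port.")
        return port

    def test_parse_port_rejects_out_of_range():
        # PT011 would flag `pytest.raises(ValueError)` with no `match=`
        # as too broad; pinning the message narrows the assertion.
        with pytest.raises(ValueError, match="is not a valid port."):
            parse_port("70000")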
@@ -20,7 +20,7 @@ from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
 from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock


-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
 def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()

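The parametrize rewrites in this file follow two flake8-pytest-style rules: PT006 (argument names as a tuple of strings rather than one comma-separated string) and PT007 (each parameter set as a tuple rather than a list). A standalone sketch with hypothetical parameter names:

    import pytest

    # PT006: names as a tuple, not the single string "mock_mode, backend_mode".
    # PT007: each row of values as a tuple, not a list like ["chat", "none"].
    @pytest.mark.parametrize(("mock_mode", "backend_mode"), [("chat", "none"), ("completion", "none")])
    def test_modes_are_plain_strings(mock_mode, backend_mode):
        assert isinstance(mock_mode, str)
        assert isinstance(backend_mode, str)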
@@ -45,7 +45,7 @@ def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference_mock):
     )


-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
 def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()

@@ -75,7 +75,7 @@ def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock):
     assert response.usage.total_tokens > 0


-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["chat", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
 def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()

@@ -236,7 +236,7 @@ def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock):
     # assert response.message.tool_calls[0].function.name == 'get_current_weather'


-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
 def test_validate_credentials_for_generation_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()

@@ -261,7 +261,7 @@ def test_validate_credentials_for_generation_model(setup_openai_mock, setup_xinference_mock):
     )


-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
 def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()

@@ -286,7 +286,7 @@ def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock):
     assert response.usage.total_tokens > 0


-@pytest.mark.parametrize("setup_openai_mock, setup_xinference_mock", [["completion", "none"]], indirect=True)
+@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
 def test_invoke_stream_generation_model(setup_openai_mock, setup_xinference_mock):
     model = XinferenceAILargeLanguageModel()

@@ -9,7 +9,8 @@ def test_loading_subclass_from_source():
     module = load_single_subclass_from_source(
         module_name="ChildClass", script_path=os.path.join(current_path, "child_class.py"), parent_type=ParentClass
     )
-    assert module and module.__name__ == "ChildClass"
+    assert module
+    assert module.__name__ == "ChildClass"


 def test_load_import_module_from_source():
@@ -17,7 +18,8 @@ def test_load_import_module_from_source():
     module = import_module_from_source(
         module_name="ChildClass", py_file_path=os.path.join(current_path, "child_class.py")
     )
-    assert module and module.__name__ == "ChildClass"
+    assert module
+    assert module.__name__ == "ChildClass"


 def test_lazy_loading_subclass_from_source():
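Both hunks above split `assert module and module.__name__ == "ChildClass"` in two, which is what PT018 (composite assertion) asks for: when `assert a and b` fails, the traceback does not say which operand was falsy, while separate asserts fail precisely. A minimal sketch with a hypothetical loader standing in for load_single_subclass_from_source:

    def load(name: str):
        # Hypothetical loader: returns a class named `name`, or None.
        return type(name, (), {}) if name else None

    def test_loader_returns_named_class():
        module = load("ChildClass")
        # PT018: separate asserts show whether the result was missing
        # entirely or merely misnamed.
        assert module
        assert module.__name__ == "ChildClass"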
@@ -34,7 +34,7 @@ class TestOpenSearchVector:
         self.vector._client = MagicMock()

     @pytest.mark.parametrize(
-        "search_response, expected_length, expected_doc_id",
+        ("search_response", "expected_length", "expected_doc_id"),
         [
             (
                 {
@@ -13,7 +13,7 @@ CACHED_APP = Flask(__name__)
 CACHED_APP.config.update({"TESTING": True})


-@pytest.fixture()
+@pytest.fixture
 def app() -> Flask:
     return CACHED_APP

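Dropping the empty parentheses satisfies PT001 (fixture decorator parentheses style): with the configuration applied here, `@pytest.fixture` is written bare unless it actually takes arguments such as `scope=`. A standalone sketch:

    import pytest

    @pytest.fixture  # PT001: no "()" when the fixture takes no arguments
    def answer() -> int:
        return 42

    @pytest.fixture(scope="module")  # parentheses stay when arguments are passed
    def shared_answer() -> int:
        return 42

    def test_answer(answer):
        assert answer == 42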
@@ -1,6 +1,8 @@
 import random
 from unittest.mock import MagicMock, patch

+import pytest
+
 from core.helper.ssrf_proxy import SSRF_DEFAULT_MAX_RETRIES, STATUS_FORCELIST, make_request


@@ -22,11 +24,9 @@ def test_retry_exceed_max_retries(mock_request):
     side_effects = [mock_response] * SSRF_DEFAULT_MAX_RETRIES
     mock_request.side_effect = side_effects

-    try:
+    with pytest.raises(Exception) as e:
         make_request("GET", "http://example.com", max_retries=SSRF_DEFAULT_MAX_RETRIES - 1)
-        raise AssertionError("Expected Exception not raised")
-    except Exception as e:
-        assert str(e) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com"
+    assert str(e.value) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com"


 @patch("httpx.request")
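The hunk above replaces the try / raise AssertionError / except pattern with `pytest.raises`, the fix flake8-pytest-style suggests for PT017 (asserting on an exception inside an `except` block). The context manager yields an ExceptionInfo wrapper, so the raised exception is read from `e.value` rather than `e`. A self-contained sketch with a hypothetical stand-in for make_request:

    import pytest

    def fetch(url: str, max_retries: int) -> None:
        # Hypothetical stand-in for make_request's retry-exhaustion path.
        raise Exception(f"Reached maximum retries ({max_retries}) for URL {url}")

    def test_fetch_exhausts_retries():
        with pytest.raises(Exception) as e:
            fetch("http://example.com", max_retries=2)
        # `e` is an ExceptionInfo; the exception instance lives on `e.value`.
        assert str(e.value) == "Reached maximum retries (2) for URL http://example.com"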
@@ -1,3 +1,5 @@
+import pytest
+
 from libs.helper import email


@@ -9,17 +11,11 @@ def test_email_with_valid_email():


 def test_email_with_invalid_email():
-    try:
+    with pytest.raises(ValueError, match="invalid_email is not a valid email."):
         email("invalid_email")
-    except ValueError as e:
-        assert str(e) == "invalid_email is not a valid email."

-    try:
+    with pytest.raises(ValueError, match="@example.com is not a valid email."):
         email("@example.com")
-    except ValueError as e:
-        assert str(e) == "@example.com is not a valid email."

-    try:
+    with pytest.raises(ValueError, match="()@example.com is not a valid email."):
         email("()@example.com")
-    except ValueError as e:
-        assert str(e) == "()@example.com is not a valid email."
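One caveat with folding expected messages into `match=`: pytest treats the argument as a regular expression checked with `re.search`, not as a literal string. `"()@example.com is not a valid email."` still passes only because `()` is an empty group and `.` matches any character; a literal-safe variant wraps the message in `re.escape`. A brief sketch with a hypothetical validator:

    import re

    import pytest

    def reject(address: str) -> None:
        # Hypothetical validator that always rejects.
        raise ValueError(f"{address} is not a valid email.")

    def test_reject_keeps_metacharacters_literal():
        # re.escape makes "(", ")" and "." match literally in the pattern.
        with pytest.raises(ValueError, match=re.escape("()@example.com is not a valid email.")):
            reject("()@example.com")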