From 14a19a3da9867ddbab292bfbf1f65633d2fc6f7f Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Thu, 8 Feb 2024 14:11:10 +0800 Subject: [PATCH] chore: apply ruff's pyflakes linter rules (#2420) --- api/controllers/console/app/app.py | 4 ++-- api/controllers/console/datasets/datasets.py | 8 ++++---- .../console/datasets/datasets_document.py | 16 ++++++++-------- .../console/datasets/datasets_segments.py | 12 ++++++------ api/controllers/console/datasets/hit_testing.py | 4 ++-- api/controllers/console/explore/parameter.py | 2 +- api/controllers/files/image_preview.py | 2 +- api/controllers/files/tool_files.py | 2 +- api/controllers/service_api/app/app.py | 2 +- api/controllers/service_api/dataset/segment.py | 12 ++++++------ api/controllers/web/app.py | 2 +- api/core/app_runner/assistant_app_runner.py | 2 +- api/core/app_runner/basic_app_runner.py | 2 +- api/core/features/assistant_base_runner.py | 2 +- api/core/features/assistant_cot_runner.py | 4 ++-- api/core/features/assistant_fc_runner.py | 2 +- api/core/index/vector_index/vector_index.py | 2 +- api/core/model_manager.py | 12 ++++++------ .../model_runtime/callbacks/logging_callback.py | 8 ++++---- .../model_providers/baichuan/llm/llm.py | 2 +- .../openai_api_compatible/llm/llm.py | 6 +++--- .../text_embedding/text_embedding.py | 4 ++-- .../model_providers/wenxin/llm/ernie_bot.py | 16 ++++++++-------- .../zhipuai/zhipuai_sdk/_client.py | 2 +- .../output_parser/rule_config_generator.py | 4 ++-- api/core/tools/model/tool_model_manager.py | 8 ++++---- .../provider/builtin/yahoo/tools/analytics.py | 2 +- .../tools/provider/builtin/yahoo/tools/news.py | 2 +- .../tools/provider/builtin/yahoo/tools/ticker.py | 2 +- api/core/tools/tool/tool.py | 2 +- api/libs/helper.py | 2 +- api/pyproject.toml | 9 +++++++-- api/services/dataset_service.py | 8 ++++---- api/services/tools_manage_service.py | 8 ++++---- 34 files changed, 91 insertions(+), 86 deletions(-) diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index 5036d2074d..c06193f91a 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -133,8 +133,8 @@ class AppListApi(Resource): if not model_instance: raise ProviderNotInitializeError( - f"No Default System Reasoning Model available. Please configure " - f"in the Settings -> Model Provider.") + "No Default System Reasoning Model available. Please configure " + "in the Settings -> Model Provider.") else: model_config_dict["model"]["provider"] = model_instance.provider model_config_dict["model"]["name"] = model_instance.model diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index a6d869593b..5a71ccd6e6 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -288,8 +288,8 @@ class DatasetIndexingEstimateApi(Resource): args['indexing_technique']) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) elif args['info_list']['data_source_type'] == 'notion_import': @@ -304,8 +304,8 @@ class DatasetIndexingEstimateApi(Resource): args['indexing_technique']) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. 
Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) else: diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index 88bbd25645..612838a316 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -296,8 +296,8 @@ class DatasetInitApi(Resource): ) except InvokeAuthorizationError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) @@ -372,8 +372,8 @@ class DocumentIndexingEstimateApi(DocumentResource): 'English', dataset_id) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) @@ -442,8 +442,8 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): 'English', dataset_id) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) elif dataset.data_source_type == 'notion_import': @@ -456,8 +456,8 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): None, 'English', dataset_id) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) else: diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py index 9cfc5ad796..319b78b6d1 100644 --- a/api/controllers/console/datasets/datasets_segments.py +++ b/api/controllers/console/datasets/datasets_segments.py @@ -143,8 +143,8 @@ class DatasetDocumentSegmentApi(Resource): ) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) @@ -234,8 +234,8 @@ class DatasetDocumentSegmentAddApi(Resource): ) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. 
Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) try: @@ -286,8 +286,8 @@ class DatasetDocumentSegmentUpdateApi(Resource): ) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) # check segment diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py index 4738566241..faadc9a145 100644 --- a/api/controllers/console/datasets/hit_testing.py +++ b/api/controllers/console/datasets/hit_testing.py @@ -76,8 +76,8 @@ class HitTestingApi(Resource): raise ProviderModelCurrentlyNotSupportError() except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model or Reranking Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model or Reranking Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except InvokeError as e: raise CompletionRequestError(e.description) except ValueError as e: diff --git a/api/controllers/console/explore/parameter.py b/api/controllers/console/explore/parameter.py index f37bf3e1e5..4b18be6dc6 100644 --- a/api/controllers/console/explore/parameter.py +++ b/api/controllers/console/explore/parameter.py @@ -78,7 +78,7 @@ class ExploreAppMetaApi(InstalledAppResource): # get all tools tools = agent_config.get('tools', []) url_prefix = (current_app.config.get("CONSOLE_API_URL") - + f"/console/api/workspaces/current/tool-provider/builtin/") + + "/console/api/workspaces/current/tool-provider/builtin/") for tool in tools: keys = list(tool.keys()) if len(keys) >= 4: diff --git a/api/controllers/files/image_preview.py b/api/controllers/files/image_preview.py index 66b9eee0de..247b5d45e1 100644 --- a/api/controllers/files/image_preview.py +++ b/api/controllers/files/image_preview.py @@ -41,7 +41,7 @@ class WorkspaceWebappLogoApi(Resource): webapp_logo_file_id = custom_config.get('replace_webapp_logo') if custom_config is not None else None if not webapp_logo_file_id: - raise NotFound(f'webapp logo is not found') + raise NotFound('webapp logo is not found') try: generator, mimetype = FileService.get_public_image_preview( diff --git a/api/controllers/files/tool_files.py b/api/controllers/files/tool_files.py index ecafd7b231..0a254c1699 100644 --- a/api/controllers/files/tool_files.py +++ b/api/controllers/files/tool_files.py @@ -32,7 +32,7 @@ class ToolFilePreviewApi(Resource): ) if not result: - raise NotFound(f'file is not found') + raise NotFound('file is not found') generator, mimetype = result except Exception: diff --git a/api/controllers/service_api/app/app.py b/api/controllers/service_api/app/app.py index 8e1ecebce7..89d99d66f3 100644 --- a/api/controllers/service_api/app/app.py +++ b/api/controllers/service_api/app/app.py @@ -78,7 +78,7 @@ class AppMetaApi(AppApiResource): # get all tools tools = agent_config.get('tools', []) url_prefix = (current_app.config.get("CONSOLE_API_URL") - + f"/console/api/workspaces/current/tool-provider/builtin/") + + "/console/api/workspaces/current/tool-provider/builtin/") for tool in tools: keys = list(tool.keys()) if len(keys) >= 4: diff --git 
a/api/controllers/service_api/dataset/segment.py b/api/controllers/service_api/dataset/segment.py index 495d2f56a5..d4a6b6aa4f 100644 --- a/api/controllers/service_api/dataset/segment.py +++ b/api/controllers/service_api/dataset/segment.py @@ -46,8 +46,8 @@ class SegmentApi(DatasetApiResource): ) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) # validate args @@ -90,8 +90,8 @@ class SegmentApi(DatasetApiResource): ) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) @@ -182,8 +182,8 @@ class DatasetSegmentApi(DatasetApiResource): ) except LLMBadRequestError: raise ProviderNotInitializeError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) # check segment diff --git a/api/controllers/web/app.py b/api/controllers/web/app.py index 4dc15b9bee..6e62c042d4 100644 --- a/api/controllers/web/app.py +++ b/api/controllers/web/app.py @@ -77,7 +77,7 @@ class AppMeta(WebApiResource): # get all tools tools = agent_config.get('tools', []) url_prefix = (current_app.config.get("CONSOLE_API_URL") - + f"/console/api/workspaces/current/tool-provider/builtin/") + + "/console/api/workspaces/current/tool-provider/builtin/") for tool in tools: keys = list(tool.keys()) if len(keys) >= 4: diff --git a/api/core/app_runner/assistant_app_runner.py b/api/core/app_runner/assistant_app_runner.py index 7e5581008f..a4845d0ff1 100644 --- a/api/core/app_runner/assistant_app_runner.py +++ b/api/core/app_runner/assistant_app_runner.py @@ -38,7 +38,7 @@ class AssistantApplicationRunner(AppRunner): """ app_record = db.session.query(App).filter(App.id == application_generate_entity.app_id).first() if not app_record: - raise ValueError(f"App not found") + raise ValueError("App not found") app_orchestration_config = application_generate_entity.app_orchestration_config_entity diff --git a/api/core/app_runner/basic_app_runner.py b/api/core/app_runner/basic_app_runner.py index ae2b712187..e1972efb51 100644 --- a/api/core/app_runner/basic_app_runner.py +++ b/api/core/app_runner/basic_app_runner.py @@ -35,7 +35,7 @@ class BasicApplicationRunner(AppRunner): """ app_record = db.session.query(App).filter(App.id == application_generate_entity.app_id).first() if not app_record: - raise ValueError(f"App not found") + raise ValueError("App not found") app_orchestration_config = application_generate_entity.app_orchestration_config_entity diff --git a/api/core/features/assistant_base_runner.py b/api/core/features/assistant_base_runner.py index adc8f3b663..4c0bde989a 100644 --- a/api/core/features/assistant_base_runner.py +++ b/api/core/features/assistant_base_runner.py @@ -134,7 +134,7 @@ class BaseAssistantApplicationRunner(AppRunner): result += f"result link: 
{response.message}. please tell user to check it." elif response.type == ToolInvokeMessage.MessageType.IMAGE_LINK or \ response.type == ToolInvokeMessage.MessageType.IMAGE: - result += f"image has been created and sent to user already, you should tell user to check it now." + result += "image has been created and sent to user already, you should tell user to check it now." else: result += f"tool response: {response.message}." diff --git a/api/core/features/assistant_cot_runner.py b/api/core/features/assistant_cot_runner.py index 9d35832316..5464069838 100644 --- a/api/core/features/assistant_cot_runner.py +++ b/api/core/features/assistant_cot_runner.py @@ -238,7 +238,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner): message_file_ids = [message_file.id for message_file, _ in message_files] except ToolProviderCredentialValidationError as e: - error_response = f"Please check your tool provider credentials" + error_response = "Please check your tool provider credentials" except ( ToolNotFoundError, ToolNotSupportedError, ToolProviderNotFoundError ) as e: @@ -473,7 +473,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner): next_iteration = agent_prompt_message.next_iteration if not isinstance(first_prompt, str) or not isinstance(next_iteration, str): - raise ValueError(f"first_prompt or next_iteration is required in CoT agent mode") + raise ValueError("first_prompt or next_iteration is required in CoT agent mode") # check instruction, tools, and tool_names slots if not first_prompt.find("{{instruction}}") >= 0: diff --git a/api/core/features/assistant_fc_runner.py b/api/core/features/assistant_fc_runner.py index f0a55aa80b..b0e3d3a7af 100644 --- a/api/core/features/assistant_fc_runner.py +++ b/api/core/features/assistant_fc_runner.py @@ -277,7 +277,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner): message_file_ids.append(message_file.id) except ToolProviderCredentialValidationError as e: - error_response = f"Please check your tool provider credentials" + error_response = "Please check your tool provider credentials" except ( ToolNotFoundError, ToolNotSupportedError, ToolProviderNotFoundError ) as e: diff --git a/api/core/index/vector_index/vector_index.py b/api/core/index/vector_index/vector_index.py index 74b5f0adc1..ed6e2699d6 100644 --- a/api/core/index/vector_index/vector_index.py +++ b/api/core/index/vector_index/vector_index.py @@ -26,7 +26,7 @@ class VectorIndex: vector_type = self._dataset.index_struct_dict['type'] if not vector_type: - raise ValueError(f"Vector store must be specified.") + raise ValueError("Vector store must be specified.") if vector_type == "weaviate": from core.index.vector_index.weaviate_vector_index import WeaviateConfig, WeaviateVectorIndex diff --git a/api/core/model_manager.py b/api/core/model_manager.py index 8a622e4f5b..68df0ac31a 100644 --- a/api/core/model_manager.py +++ b/api/core/model_manager.py @@ -63,7 +63,7 @@ class ModelInstance: :return: full response or stream response chunk generator result """ if not isinstance(self.model_type_instance, LargeLanguageModel): - raise Exception(f"Model type instance is not LargeLanguageModel") + raise Exception("Model type instance is not LargeLanguageModel") self.model_type_instance = cast(LargeLanguageModel, self.model_type_instance) return self.model_type_instance.invoke( @@ -88,7 +88,7 @@ class ModelInstance: :return: embeddings result """ if not isinstance(self.model_type_instance, TextEmbeddingModel): - raise Exception(f"Model type 
instance is not TextEmbeddingModel") + raise Exception("Model type instance is not TextEmbeddingModel") self.model_type_instance = cast(TextEmbeddingModel, self.model_type_instance) return self.model_type_instance.invoke( @@ -112,7 +112,7 @@ class ModelInstance: :return: rerank result """ if not isinstance(self.model_type_instance, RerankModel): - raise Exception(f"Model type instance is not RerankModel") + raise Exception("Model type instance is not RerankModel") self.model_type_instance = cast(RerankModel, self.model_type_instance) return self.model_type_instance.invoke( @@ -135,7 +135,7 @@ class ModelInstance: :return: false if text is safe, true otherwise """ if not isinstance(self.model_type_instance, ModerationModel): - raise Exception(f"Model type instance is not ModerationModel") + raise Exception("Model type instance is not ModerationModel") self.model_type_instance = cast(ModerationModel, self.model_type_instance) return self.model_type_instance.invoke( @@ -155,7 +155,7 @@ class ModelInstance: :return: text for given audio file """ if not isinstance(self.model_type_instance, Speech2TextModel): - raise Exception(f"Model type instance is not Speech2TextModel") + raise Exception("Model type instance is not Speech2TextModel") self.model_type_instance = cast(Speech2TextModel, self.model_type_instance) return self.model_type_instance.invoke( @@ -176,7 +176,7 @@ class ModelInstance: :return: text for given audio file """ if not isinstance(self.model_type_instance, TTSModel): - raise Exception(f"Model type instance is not TTSModel") + raise Exception("Model type instance is not TTSModel") self.model_type_instance = cast(TTSModel, self.model_type_instance) return self.model_type_instance.invoke( diff --git a/api/core/model_runtime/callbacks/logging_callback.py b/api/core/model_runtime/callbacks/logging_callback.py index e6268a7b09..4864858445 100644 --- a/api/core/model_runtime/callbacks/logging_callback.py +++ b/api/core/model_runtime/callbacks/logging_callback.py @@ -30,7 +30,7 @@ class LoggingCallback(Callback): """ self.print_text("\n[on_llm_before_invoke]\n", color='blue') self.print_text(f"Model: {model}\n", color='blue') - self.print_text(f"Parameters:\n", color='blue') + self.print_text("Parameters:\n", color='blue') for key, value in model_parameters.items(): self.print_text(f"\t{key}: {value}\n", color='blue') @@ -38,7 +38,7 @@ class LoggingCallback(Callback): self.print_text(f"\tstop: {stop}\n", color='blue') if tools: - self.print_text(f"\tTools:\n", color='blue') + self.print_text("\tTools:\n", color='blue') for tool in tools: self.print_text(f"\t\t{tool.name}\n", color='blue') @@ -47,7 +47,7 @@ class LoggingCallback(Callback): if user: self.print_text(f"User: {user}\n", color='blue') - self.print_text(f"Prompt messages:\n", color='blue') + self.print_text("Prompt messages:\n", color='blue') for prompt_message in prompt_messages: if prompt_message.name: self.print_text(f"\tname: {prompt_message.name}\n", color='blue') @@ -101,7 +101,7 @@ class LoggingCallback(Callback): self.print_text(f"Content: {result.message.content}\n", color='yellow') if result.message.tool_calls: - self.print_text(f"Tool calls:\n", color='yellow') + self.print_text("Tool calls:\n", color='yellow') for tool_call in result.message.tool_calls: self.print_text(f"\t{tool_call.id}\n", color='yellow') self.print_text(f"\t{tool_call.function.name}\n", color='yellow') diff --git a/api/core/model_runtime/model_providers/baichuan/llm/llm.py b/api/core/model_runtime/model_providers/baichuan/llm/llm.py index 
d9a73477f6..a7c6119d10 100644 --- a/api/core/model_runtime/model_providers/baichuan/llm/llm.py +++ b/api/core/model_runtime/model_providers/baichuan/llm/llm.py @@ -110,7 +110,7 @@ class BaichuanLarguageModel(LargeLanguageModel): stop: List[str] | None = None, stream: bool = True, user: str | None = None) \ -> LLMResult | Generator: if tools is not None and len(tools) > 0: - raise InvokeBadRequestError(f"Baichuan model doesn't support tools") + raise InvokeBadRequestError("Baichuan model doesn't support tools") instance = BaichuanModel( api_key=credentials['api_key'], diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py index 2430ff2b2d..9a26f3dc08 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py @@ -146,16 +146,16 @@ class OAIAPICompatLargeLanguageModel(_CommonOAI_API_Compat, LargeLanguageModel): try: json_result = response.json() except json.JSONDecodeError as e: - raise CredentialsValidateFailedError(f'Credentials validation failed: JSON decode error') + raise CredentialsValidateFailedError('Credentials validation failed: JSON decode error') if (completion_type is LLMMode.CHAT and ('object' not in json_result or json_result['object'] != 'chat.completion')): raise CredentialsValidateFailedError( - f'Credentials validation failed: invalid response object, must be \'chat.completion\'') + 'Credentials validation failed: invalid response object, must be \'chat.completion\'') elif (completion_type is LLMMode.COMPLETION and ('object' not in json_result or json_result['object'] != 'text_completion')): raise CredentialsValidateFailedError( - f'Credentials validation failed: invalid response object, must be \'text_completion\'') + 'Credentials validation failed: invalid response object, must be \'text_completion\'') except CredentialsValidateFailedError: raise except Exception as ex: diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py index 4c75682de2..3467cd6dfd 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py @@ -179,11 +179,11 @@ class OAICompatEmbeddingModel(_CommonOAI_API_Compat, TextEmbeddingModel): try: json_result = response.json() except json.JSONDecodeError as e: - raise CredentialsValidateFailedError(f'Credentials validation failed: JSON decode error') + raise CredentialsValidateFailedError('Credentials validation failed: JSON decode error') if 'model' not in json_result: raise CredentialsValidateFailedError( - f'Credentials validation failed: invalid response') + 'Credentials validation failed: invalid response') except CredentialsValidateFailedError: raise except Exception as ex: diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py b/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py index f13fd27b91..af04eca59b 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py +++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py @@ -231,15 +231,15 @@ class ErnieBotModel(object): # so, we just disable function calling for now. 
if tools is not None and len(tools) > 0: - raise BadRequestError(f'function calling is not supported yet.') + raise BadRequestError('function calling is not supported yet.') if stop is not None: if len(stop) > 4: - raise BadRequestError(f'stop list should not exceed 4 items.') + raise BadRequestError('stop list should not exceed 4 items.') for s in stop: if len(s) > 20: - raise BadRequestError(f'stop item should not exceed 20 characters.') + raise BadRequestError('stop item should not exceed 20 characters.') def _build_request_body(self, model: str, messages: List[ErnieMessage], stream: bool, parameters: Dict[str, Any], tools: List[PromptMessageTool], stop: List[str], user: str) -> Dict[str, Any]: @@ -252,9 +252,9 @@ class ErnieBotModel(object): stop: List[str], user: str) \ -> Dict[str, Any]: if len(messages) % 2 == 0: - raise BadRequestError(f'The number of messages should be odd.') + raise BadRequestError('The number of messages should be odd.') if messages[0].role == 'function': - raise BadRequestError(f'The first message should be user message.') + raise BadRequestError('The first message should be user message.') """ TODO: implement function calling @@ -264,7 +264,7 @@ class ErnieBotModel(object): parameters: Dict[str, Any], stop: List[str], user: str) \ -> Dict[str, Any]: if len(messages) == 0: - raise BadRequestError(f'The number of messages should not be zero.') + raise BadRequestError('The number of messages should not be zero.') # check if the first element is system, shift it system_message = '' @@ -273,9 +273,9 @@ class ErnieBotModel(object): system_message = message.content if len(messages) % 2 == 0: - raise BadRequestError(f'The number of messages should be odd.') + raise BadRequestError('The number of messages should be odd.') if messages[0].role != 'user': - raise BadRequestError(f'The first message should be user message.') + raise BadRequestError('The first message should be user message.') body = { 'messages': [message.to_dict() for message in messages], 'stream': stream, diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py index 573f0715c4..23fd968f30 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py @@ -37,7 +37,7 @@ class ZhipuAI(HttpClient): if base_url is None: base_url = os.environ.get("ZHIPUAI_BASE_URL") if base_url is None: - base_url = f"https://open.bigmodel.cn/api/paas/v4" + base_url = "https://open.bigmodel.cn/api/paas/v4" from .__version__ import __version__ super().__init__( version=__version__, diff --git a/api/core/prompt/output_parser/rule_config_generator.py b/api/core/prompt/output_parser/rule_config_generator.py index 2755910c28..619555ce2e 100644 --- a/api/core/prompt/output_parser/rule_config_generator.py +++ b/api/core/prompt/output_parser/rule_config_generator.py @@ -19,11 +19,11 @@ class RuleConfigGeneratorOutputParser(BaseOutputParser): raise ValueError("Expected 'prompt' to be a string.") if not isinstance(parsed["variables"], list): raise ValueError( - f"Expected 'variables' to be a list." + "Expected 'variables' to be a list." ) if not isinstance(parsed["opening_statement"], str): raise ValueError( - f"Expected 'opening_statement' to be a str." + "Expected 'opening_statement' to be a str." 
) return parsed except Exception as e: diff --git a/api/core/tools/model/tool_model_manager.py b/api/core/tools/model/tool_model_manager.py index ec24786046..176d5dc637 100644 --- a/api/core/tools/model/tool_model_manager.py +++ b/api/core/tools/model/tool_model_manager.py @@ -39,13 +39,13 @@ class ToolModelManager: ) if not model_instance: - raise InvokeModelError(f'Model not found') + raise InvokeModelError('Model not found') llm_model = cast(LargeLanguageModel, model_instance.model_type_instance) schema = llm_model.get_model_schema(model_instance.model, model_instance.credentials) if not schema: - raise InvokeModelError(f'No model schema found') + raise InvokeModelError('No model schema found') max_tokens = schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE, None) if max_tokens is None: @@ -69,7 +69,7 @@ class ToolModelManager: ) if not model_instance: - raise InvokeModelError(f'Model not found') + raise InvokeModelError('Model not found') llm_model = cast(LargeLanguageModel, model_instance.model_type_instance) @@ -156,7 +156,7 @@ class ToolModelManager: except InvokeConnectionError as e: raise InvokeModelError(f'Invoke connection error: {e}') except InvokeAuthorizationError as e: - raise InvokeModelError(f'Invoke authorization error') + raise InvokeModelError('Invoke authorization error') except InvokeServerUnavailableError as e: raise InvokeModelError(f'Invoke server unavailable error: {e}') except Exception as e: diff --git a/api/core/tools/provider/builtin/yahoo/tools/analytics.py b/api/core/tools/provider/builtin/yahoo/tools/analytics.py index 74504b25a2..e28a051dff 100644 --- a/api/core/tools/provider/builtin/yahoo/tools/analytics.py +++ b/api/core/tools/provider/builtin/yahoo/tools/analytics.py @@ -66,5 +66,5 @@ class YahooFinanceAnalyticsTool(BuiltinTool): try: return self.create_text_message(str(summary_df.to_dict())) except (HTTPError, ReadTimeout): - return self.create_text_message(f'There is a internet connection problem. Please try again later.') + return self.create_text_message('There is a internet connection problem. Please try again later.') \ No newline at end of file diff --git a/api/core/tools/provider/builtin/yahoo/tools/news.py b/api/core/tools/provider/builtin/yahoo/tools/news.py index f1e4070974..b6c455215a 100644 --- a/api/core/tools/provider/builtin/yahoo/tools/news.py +++ b/api/core/tools/provider/builtin/yahoo/tools/news.py @@ -21,7 +21,7 @@ class YahooFinanceSearchTickerTool(BuiltinTool): try: return self.run(ticker=query, user_id=user_id) except (HTTPError, ReadTimeout): - return self.create_text_message(f'There is a internet connection problem. Please try again later.') + return self.create_text_message('There is a internet connection problem. Please try again later.') def run(self, ticker: str, user_id: str) -> ToolInvokeMessage: company = yfinance.Ticker(ticker) diff --git a/api/core/tools/provider/builtin/yahoo/tools/ticker.py b/api/core/tools/provider/builtin/yahoo/tools/ticker.py index 8064ae49b4..50a3e7b3e9 100644 --- a/api/core/tools/provider/builtin/yahoo/tools/ticker.py +++ b/api/core/tools/provider/builtin/yahoo/tools/ticker.py @@ -20,7 +20,7 @@ class YahooFinanceSearchTickerTool(BuiltinTool): try: return self.create_text_message(self.run(ticker=query)) except (HTTPError, ReadTimeout): - return self.create_text_message(f'There is a internet connection problem. Please try again later.') + return self.create_text_message('There is a internet connection problem. 
Please try again later.') def run(self, ticker: str) -> str: return str(Ticker(ticker).info) \ No newline at end of file diff --git a/api/core/tools/tool/tool.py b/api/core/tools/tool/tool.py index 3c96ef2fe9..fd98a0b320 100644 --- a/api/core/tools/tool/tool.py +++ b/api/core/tools/tool/tool.py @@ -221,7 +221,7 @@ class Tool(BaseModel, ABC): result += f"result link: {response.message}. please tell user to check it." elif response.type == ToolInvokeMessage.MessageType.IMAGE_LINK or \ response.type == ToolInvokeMessage.MessageType.IMAGE: - result += f"image has been created and sent to user already, you should tell user to check it now." + result += "image has been created and sent to user already, you should tell user to check it now." elif response.type == ToolInvokeMessage.MessageType.BLOB: if len(response.message) > 114: result += str(response.message[:114]) + '...' diff --git a/api/libs/helper.py b/api/libs/helper.py index eb2dd94047..95781b0ece 100644 --- a/api/libs/helper.py +++ b/api/libs/helper.py @@ -101,7 +101,7 @@ class datetime_string(object): datetime.strptime(value, self.format) except ValueError: error = ('Invalid {arg}: {val}. {arg} must be conform to the format {format}' - .format(arg=self.argument, val=value, lo=self.format)) + .format(arg=self.argument, val=value, format=self.format)) raise ValueError(error) return value diff --git a/api/pyproject.toml b/api/pyproject.toml index 9c12f58527..d8a4eda5ad 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -11,8 +11,13 @@ line-length = 120 [tool.ruff.lint] ignore-init-module-imports = true select = [ - "F401", # unused-import + "F", # pyflakes rules "I001", # unsorted-imports "I002", # missing-required-import - "F811", # redefined-while-unused +] +ignore = [ + "F403", # undefined-local-with-import-star + "F405", # undefined-local-with-import-star-usage + "F821", # undefined-name + "F841", # unused-variable ] diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 8e587f07a7..0b1fcc2423 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -139,8 +139,8 @@ class DatasetService: ) except LLMBadRequestError: raise ValueError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ValueError(f"The dataset in unavailable, due to: " f"{ex.description}") @@ -176,8 +176,8 @@ class DatasetService: filtered_data['collection_binding_id'] = dataset_collection_binding.id except LLMBadRequestError: raise ValueError( - f"No Embedding Model available. Please configure a valid provider " - f"in the Settings -> Model Provider.") + "No Embedding Model available. 
Please configure a valid provider " + "in the Settings -> Model Provider.") except ProviderTokenNotInitError as ex: raise ValueError(ex.description) diff --git a/api/services/tools_manage_service.py b/api/services/tools_manage_service.py index e5f800f53e..68d87e84e6 100644 --- a/api/services/tools_manage_service.py +++ b/api/services/tools_manage_service.py @@ -50,7 +50,7 @@ class ToolManageService: :param provider: the provider dict """ url_prefix = (current_app.config.get("CONSOLE_API_URL") - + f"/console/api/workspaces/current/tool-provider/builtin/") + + "/console/api/workspaces/current/tool-provider/builtin/") if 'icon' in provider: if provider['type'] == UserToolProvider.ProviderType.BUILTIN.value: @@ -211,7 +211,7 @@ class ToolManageService: tool_bundles, schema_type = ToolManageService.convert_schema_to_tool_bundles(schema, extra_info) if len(tool_bundles) > 10: - raise ValueError(f'the number of apis should be less than 10') + raise ValueError('the number of apis should be less than 10') # create db provider db_provider = ApiToolProvider( @@ -269,7 +269,7 @@ class ToolManageService: # try to parse schema, avoid SSRF attack ToolManageService.parser_api_schema(schema) except Exception as e: - raise ValueError(f'invalid schema, please check the url you provided') + raise ValueError('invalid schema, please check the url you provided') return { 'schema': schema @@ -490,7 +490,7 @@ class ToolManageService: try: tool_bundles, _ = ApiBasedToolSchemaParser.auto_parse_to_tool_bundle(schema) except Exception as e: - raise ValueError(f'invalid schema') + raise ValueError('invalid schema') # get tool bundle tool_bundle = next(filter(lambda tb: tb.operation_id == tool_name, tool_bundles), None)
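
For illustration, the pyproject.toml hunk above drives everything else in this patch: it broadens ruff's lint selection from two individual pyflakes codes (F401 unused-import, F811 redefined-while-unused) to the whole "F" family, while ignoring F403, F405, F821 and F841. The remaining hunks are the resulting cleanup, almost entirely dropping redundant f prefixes from string literals that contain no placeholders (ruff's F541), plus one real keyword mismatch in a str.format() call in api/libs/helper.py. Below is a minimal, hypothetical sketch of those two patterns; the function names and values are illustrative only and are not taken from the Dify codebase.

# Hypothetical sketch of the two pyflakes findings addressed by this patch;
# names and values are illustrative, not from the Dify codebase.

def missing_app_error() -> ValueError:
    # Before: ValueError(f"App not found"). The literal has no placeholders,
    # so the f prefix is redundant and pyflakes (F541) flags it.
    return ValueError("App not found")


def format_error_message(argument: str, value: str, fmt: str) -> str:
    # Before: .format(arg=..., val=..., lo=fmt) never supplied the {format}
    # placeholder, so this error path would raise KeyError at runtime;
    # pyflakes reports both the missing and the unused keyword argument.
    return ('Invalid {arg}: {val}. {arg} must conform to the format {format}'
            .format(arg=argument, val=value, format=fmt))


if __name__ == '__main__':
    print(missing_app_error())
    print(format_error_message('start_date', '2024-13-01', '%Y-%m-%d'))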