fix: some typos using `typos` (#11374)

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
yihong 2024-12-05 13:24:06 +08:00 committed by GitHub
parent 6180762160
commit 5669cac16d
7 changed files with 16 additions and 16 deletions

View File

@@ -259,7 +259,7 @@ def migrate_knowledge_vector_database():
skipped_count = 0
total_count = 0
vector_type = dify_config.VECTOR_STORE
-upper_colletion_vector_types = {
+upper_collection_vector_types = {
VectorType.MILVUS,
VectorType.PGVECTOR,
VectorType.RELYT,
@@ -267,7 +267,7 @@ def migrate_knowledge_vector_database():
VectorType.ORACLE,
VectorType.ELASTICSEARCH,
}
-lower_colletion_vector_types = {
+lower_collection_vector_types = {
VectorType.ANALYTICDB,
VectorType.CHROMA,
VectorType.MYSCALE,
@@ -307,7 +307,7 @@ def migrate_knowledge_vector_database():
continue
collection_name = ""
dataset_id = dataset.id
-if vector_type in upper_colletion_vector_types:
+if vector_type in upper_collection_vector_types:
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
elif vector_type == VectorType.QDRANT:
if dataset.collection_binding_id:
@@ -323,7 +323,7 @@ def migrate_knowledge_vector_database():
else:
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
-elif vector_type in lower_colletion_vector_types:
+elif vector_type in lower_collection_vector_types:
collection_name = Dataset.gen_collection_name_by_id(dataset_id).lower()
else:
raise ValueError(f"Vector store {vector_type} is not supported.")

View File

@@ -2,7 +2,7 @@
Due to the presence of tasks in App Runner that require long execution times, such as LLM generation and external requests, Flask-Sqlalchemy's strategy for database connection pooling is to allocate one connection (transaction) per request. This approach keeps a connection occupied even during non-DB tasks, leading to the inability to acquire new connections during high concurrency requests due to multiple long-running tasks.
-Therefore, the database operations in App Runner and Task Pipeline must ensure connections are closed immediately after use, and it's better to pass IDs rather than Model objects to avoid deattach errors.
+Therefore, the database operations in App Runner and Task Pipeline must ensure connections are closed immediately after use, and it's better to pass IDs rather than Model objects to avoid detach errors.
Examples:
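
The file's own examples are not included in this hunk. The snippet below is a minimal, hypothetical sketch (not part of this commit) of the rule described above: it assumes a Flask-SQLAlchemy `db` handle from `extensions.ext_database` and a `Dataset` model, and `run_long_llm_generation` is only a placeholder for the slow, non-DB work.

```python
from extensions.ext_database import db  # assumed Flask-SQLAlchemy handle
from models.dataset import Dataset      # assumed model, used purely for illustration


def handle_dataset_task(dataset_id: str) -> None:
    # Pass the ID across the task boundary, then re-fetch the row in a
    # short-lived session instead of carrying a detached Model object around.
    dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
    dataset_name = dataset.name if dataset else None

    # Return the connection to the pool *before* long-running work
    # such as LLM generation or external HTTP requests.
    db.session.close()

    run_long_llm_generation(dataset_name)  # placeholder for the non-DB task
```

If further writes are needed after the slow step, re-query by ID in a fresh session rather than reusing the earlier object.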

View File

@@ -91,7 +91,7 @@ class XinferenceProvider(Provider):
"""
```
-也可以直接抛出对应Erros并做如下定义这样在之后的调用中可以直接抛出`InvokeConnectionError`等异常。
+也可以直接抛出对应 Errors并做如下定义这样在之后的调用中可以直接抛出`InvokeConnectionError`等异常。
```python
@property

View File

@@ -61,7 +61,7 @@ class WolframAlphaTool(BuiltinTool):
params["input"] = query
else:
finished = True
if "souces" in response_data["queryresult"]:
if "sources" in response_data["queryresult"]:
return self.create_link_message(response_data["queryresult"]["sources"]["url"])
elif "pods" in response_data["queryresult"]:
result = response_data["queryresult"]["pods"][0]["subpods"][0]["plaintext"]

View File

@@ -24,7 +24,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
"""
node_inputs: dict[str, list] = {"conditions": []}
-process_datas: dict[str, list] = {"condition_results": []}
+process_data: dict[str, list] = {"condition_results": []}
input_conditions = []
final_result = False
@@ -40,7 +40,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
operator=case.logical_operator,
)
process_datas["condition_results"].append(
process_data["condition_results"].append(
{
"group": case.model_dump(),
"results": group_result,
@@ -65,7 +65,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
selected_case_id = "true" if final_result else "false"
process_datas["condition_results"].append(
process_data["condition_results"].append(
{"group": "default", "results": group_result, "final_result": final_result}
)
@@ -73,7 +73,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
except Exception as e:
return NodeRunResult(
-status=WorkflowNodeExecutionStatus.FAILED, inputs=node_inputs, process_data=process_datas, error=str(e)
+status=WorkflowNodeExecutionStatus.FAILED, inputs=node_inputs, process_data=process_data, error=str(e)
)
outputs = {"result": final_result, "selected_case_id": selected_case_id}
@@ -81,7 +81,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
data = NodeRunResult(
status=WorkflowNodeExecutionStatus.SUCCEEDED,
inputs=node_inputs,
-process_data=process_datas,
+process_data=process_data,
edge_source_handle=selected_case_id or "false", # Use case ID or 'default'
outputs=outputs,
)

View File

@@ -7,8 +7,8 @@ from .enums import InputType, Operation
class OperationNotSupportedError(VariableOperatorNodeError):
-def __init__(self, *, operation: Operation, varialbe_type: str):
-    super().__init__(f"Operation {operation} is not supported for type {varialbe_type}")
+def __init__(self, *, operation: Operation, variable_type: str):
+    super().__init__(f"Operation {operation} is not supported for type {variable_type}")
class InputTypeNotSupportedError(VariableOperatorNodeError):

View File

@@ -45,7 +45,7 @@ class VariableAssignerNode(BaseNode[VariableAssignerNodeData]):
# Check if operation is supported
if not helpers.is_operation_supported(variable_type=variable.value_type, operation=item.operation):
-raise OperationNotSupportedError(operation=item.operation, varialbe_type=variable.value_type)
+raise OperationNotSupportedError(operation=item.operation, variable_type=variable.value_type)
# Check if variable input is supported
if item.input_type == InputType.VARIABLE and not helpers.is_variable_input_supported(
@@ -156,4 +156,4 @@ class VariableAssignerNode(BaseNode[VariableAssignerNodeData]):
case Operation.DIVIDE:
return variable.value / value
case _:
-raise OperationNotSupportedError(operation=operation, varialbe_type=variable.value_type)
+raise OperationNotSupportedError(operation=operation, variable_type=variable.value_type)