diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml
index 4ba91c467f..8173bee58e 100644
--- a/.github/workflows/api-tests.yml
+++ b/.github/workflows/api-tests.yml
@@ -21,6 +21,7 @@ jobs:
python-version:
- "3.10"
- "3.11"
+ - "3.12"
steps:
- name: Checkout code
diff --git a/api/Dockerfile b/api/Dockerfile
index 55776f80e1..ac8381faf5 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -41,8 +41,12 @@ ENV TZ=UTC
WORKDIR /app/api
RUN apt-get update \
- && apt-get install -y --no-install-recommends curl wget vim nodejs ffmpeg libgmp-dev libmpfr-dev libmpc-dev \
- && apt-get autoremove \
+ && apt-get install -y --no-install-recommends curl nodejs libgmp-dev libmpfr-dev libmpc-dev \
+ && echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \
+ && apt-get update \
+    # For security: install patched versions of the libraries below from Debian testing
+ && apt-get install -y --no-install-recommends zlib1g=1:1.3.dfsg+really1.3.1-1 expat=2.6.2-1 libldap-2.5-0=2.5.18+dfsg-2 perl=5.38.2-5 libsqlite3-0=3.46.0-1 \
+ && apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/*
# Copy Python environment and packages
diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py
index 13c55ca425..1104e298b1 100644
--- a/api/configs/packaging/__init__.py
+++ b/api/configs/packaging/__init__.py
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description='Dify version',
- default='0.6.15',
+ default='0.6.16',
)
COMMIT_SHA: str = Field(
diff --git a/api/controllers/console/app/annotation.py b/api/controllers/console/app/annotation.py
index 1ac8e60dcd..bc15919a99 100644
--- a/api/controllers/console/app/annotation.py
+++ b/api/controllers/console/app/annotation.py
@@ -23,8 +23,7 @@ class AnnotationReplyActionApi(Resource):
@account_initialization_required
@cloud_edition_billing_resource_check('annotation')
def post(self, app_id, action):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
app_id = str(app_id)
@@ -47,8 +46,7 @@ class AppAnnotationSettingDetailApi(Resource):
@login_required
@account_initialization_required
def get(self, app_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
app_id = str(app_id)
@@ -61,8 +59,7 @@ class AppAnnotationSettingUpdateApi(Resource):
@login_required
@account_initialization_required
def post(self, app_id, annotation_setting_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
app_id = str(app_id)
@@ -82,8 +79,7 @@ class AnnotationReplyActionStatusApi(Resource):
@account_initialization_required
@cloud_edition_billing_resource_check('annotation')
def get(self, app_id, job_id, action):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
job_id = str(job_id)
@@ -110,8 +106,7 @@ class AnnotationListApi(Resource):
@login_required
@account_initialization_required
def get(self, app_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
page = request.args.get('page', default=1, type=int)
@@ -135,8 +130,7 @@ class AnnotationExportApi(Resource):
@login_required
@account_initialization_required
def get(self, app_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
app_id = str(app_id)
@@ -154,8 +148,7 @@ class AnnotationCreateApi(Resource):
@cloud_edition_billing_resource_check('annotation')
@marshal_with(annotation_fields)
def post(self, app_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
app_id = str(app_id)
@@ -174,8 +167,7 @@ class AnnotationUpdateDeleteApi(Resource):
@cloud_edition_billing_resource_check('annotation')
@marshal_with(annotation_fields)
def post(self, app_id, annotation_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
app_id = str(app_id)
@@ -191,8 +183,7 @@ class AnnotationUpdateDeleteApi(Resource):
@login_required
@account_initialization_required
def delete(self, app_id, annotation_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
app_id = str(app_id)
@@ -207,8 +198,7 @@ class AnnotationBatchImportApi(Resource):
@account_initialization_required
@cloud_edition_billing_resource_check('annotation')
def post(self, app_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
app_id = str(app_id)
@@ -232,8 +222,7 @@ class AnnotationBatchImportStatusApi(Resource):
@account_initialization_required
@cloud_edition_billing_resource_check('annotation')
def get(self, app_id, job_id):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
job_id = str(job_id)
@@ -259,8 +248,7 @@ class AnnotationHitHistoryListApi(Resource):
@login_required
@account_initialization_required
def get(self, app_id, annotation_id):
- # The role of the current user in the table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
page = request.args.get('page', default=1, type=int)
diff --git a/api/controllers/console/app/conversation.py b/api/controllers/console/app/conversation.py
index 96cd9a6ea1..844788a9e3 100644
--- a/api/controllers/console/app/conversation.py
+++ b/api/controllers/console/app/conversation.py
@@ -143,7 +143,7 @@ class ChatConversationApi(Resource):
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@marshal_with(conversation_with_summary_pagination_fields)
def get(self, app_model):
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('keyword', type=str, location='args')
@@ -245,7 +245,7 @@ class ChatConversationDetailApi(Resource):
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@marshal_with(conversation_detail_fields)
def get(self, app_model, conversation_id):
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
conversation_id = str(conversation_id)
diff --git a/api/controllers/console/app/message.py b/api/controllers/console/app/message.py
index 636c071795..056415f19a 100644
--- a/api/controllers/console/app/message.py
+++ b/api/controllers/console/app/message.py
@@ -149,8 +149,7 @@ class MessageAnnotationApi(Resource):
@get_app_model
@marshal_with(annotation_fields)
def post(self, app_model):
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py
index c446f523b6..3e98843280 100644
--- a/api/controllers/console/datasets/datasets.py
+++ b/api/controllers/console/datasets/datasets.py
@@ -189,8 +189,6 @@ class DatasetApi(Resource):
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
- # check user's model setting
- DatasetService.check_dataset_model_setting(dataset)
parser = reqparse.RequestParser()
parser.add_argument('name', nullable=False,
@@ -215,6 +213,13 @@ class DatasetApi(Resource):
args = parser.parse_args()
data = request.get_json()
+ # check embedding model setting
+ if data.get('indexing_technique') == 'high_quality':
+ DatasetService.check_embedding_model_setting(dataset.tenant_id,
+ data.get('embedding_model_provider'),
+ data.get('embedding_model')
+ )
+
# The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
DatasetPermissionService.check_permission(
current_user, dataset, data.get('permission'), data.get('partial_member_list')
@@ -233,7 +238,8 @@ class DatasetApi(Resource):
DatasetPermissionService.update_partial_member_list(
tenant_id, dataset_id_str, data.get('partial_member_list')
)
- else:
+ # clear partial member list when permission is only_me or all_team_members
+        elif data.get('permission') in ('only_me', 'all_team_members'):
DatasetPermissionService.clear_partial_member_list(dataset_id_str)
partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py
index 3dcade6152..a4210d5a0c 100644
--- a/api/controllers/console/datasets/datasets_segments.py
+++ b/api/controllers/console/datasets/datasets_segments.py
@@ -223,8 +223,7 @@ class DatasetDocumentSegmentAddApi(Resource):
document = DocumentService.get_document(dataset_id, document_id)
if not document:
raise NotFound('Document not found.')
- # The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
# check embedding model setting
if dataset.indexing_technique == 'high_quality':
@@ -347,7 +346,7 @@ class DatasetDocumentSegmentUpdateApi(Resource):
if not segment:
raise NotFound('Segment not found.')
# The role of the current user in the ta table must be admin or owner
- if not current_user.is_admin_or_owner:
+ if not current_user.is_editor:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)
diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py
index 9bd8f37d85..06492bb12f 100644
--- a/api/core/agent/cot_agent_runner.py
+++ b/api/core/agent/cot_agent_runner.py
@@ -79,6 +79,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
llm_usage.completion_tokens += usage.completion_tokens
llm_usage.prompt_price += usage.prompt_price
llm_usage.completion_price += usage.completion_price
+ llm_usage.total_price += usage.total_price
model_instance = self.model_instance
diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py
index 7019b5e39f..3ee6e47742 100644
--- a/api/core/agent/fc_agent_runner.py
+++ b/api/core/agent/fc_agent_runner.py
@@ -62,6 +62,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
llm_usage.completion_tokens += usage.completion_tokens
llm_usage.prompt_price += usage.prompt_price
llm_usage.completion_price += usage.completion_price
+ llm_usage.total_price += usage.total_price
model_instance = self.model_instance
diff --git a/api/core/app/apps/advanced_chat/app_generator_tts_publisher.py b/api/core/app/apps/advanced_chat/app_generator_tts_publisher.py
index 8325994608..0caff4a2e3 100644
--- a/api/core/app/apps/advanced_chat/app_generator_tts_publisher.py
+++ b/api/core/app/apps/advanced_chat/app_generator_tts_publisher.py
@@ -5,7 +5,12 @@ import queue
import re
import threading
-from core.app.entities.queue_entities import QueueAgentMessageEvent, QueueLLMChunkEvent, QueueTextChunkEvent
+from core.app.entities.queue_entities import (
+ QueueAgentMessageEvent,
+ QueueLLMChunkEvent,
+ QueueNodeSucceededEvent,
+ QueueTextChunkEvent,
+)
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
@@ -88,6 +93,8 @@ class AppGeneratorTTSPublisher:
self.msg_text += message.event.chunk.delta.message.content
elif isinstance(message.event, QueueTextChunkEvent):
self.msg_text += message.event.text
+ elif isinstance(message.event, QueueNodeSucceededEvent):
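+                # also voice the 'output' text emitted by succeeded workflow nodes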
+ self.msg_text += message.event.outputs.get('output', '')
self.last_message = message
sentence_arr, text_tmp = self._extract_sentence(self.msg_text)
if len(sentence_arr) >= min(self.MAX_SENTENCE, 7):
diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
index be72d89c1e..a042d30e00 100644
--- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py
+++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
@@ -244,7 +244,12 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
:return:
"""
for message in self._queue_manager.listen():
- if publisher:
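+            # forward only messages produced for the answer's previous node:
+            # chunk events expose `metadata`, node-succeeded events expose `execution_metadata`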
+            if (hasattr(message.event, 'metadata')
+                    and message.event.metadata.get('is_answer_previous_node', False)
+                    and publisher):
+                publisher.publish(message=message)
+            elif (hasattr(message.event, 'execution_metadata')
+                    and message.event.execution_metadata
+                    and message.event.execution_metadata.get('is_answer_previous_node', False)
+                    and publisher):
+                publisher.publish(message=message)
event = message.event
diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py
index df6a35918b..53780bdfb0 100644
--- a/api/core/app/apps/agent_chat/app_generator.py
+++ b/api/core/app/apps/agent_chat/app_generator.py
@@ -110,7 +110,8 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
)
# get tracing instance
- trace_manager = TraceQueueManager(app_model.id)
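+        # use the account id when invoked by a console account, otherwise the end user's session id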
+ user_id = user.id if isinstance(user, Account) else user.session_id
+ trace_manager = TraceQueueManager(app_model.id, user_id)
# init application generate entity
application_generate_entity = AgentChatAppGenerateEntity(
diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py
index b1986dbcee..df40aec154 100644
--- a/api/core/app/apps/workflow/app_generator.py
+++ b/api/core/app/apps/workflow/app_generator.py
@@ -74,7 +74,8 @@ class WorkflowAppGenerator(BaseAppGenerator):
)
# get tracing instance
- trace_manager = TraceQueueManager(app_model.id)
+ user_id = user.id if isinstance(user, Account) else user.session_id
+ trace_manager = TraceQueueManager(app_model.id, user_id)
# init application generate entity
application_generate_entity = WorkflowAppGenerateEntity(
diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py
index 513fc692ff..b4859edbd9 100644
--- a/api/core/app/task_pipeline/workflow_cycle_manage.py
+++ b/api/core/app/task_pipeline/workflow_cycle_manage.py
@@ -131,6 +131,7 @@ class WorkflowCycleManage(WorkflowIterationCycleManage):
TraceTaskName.WORKFLOW_TRACE,
workflow_run=workflow_run,
conversation_id=conversation_id,
+ user_id=trace_manager.user_id,
)
)
@@ -173,6 +174,7 @@ class WorkflowCycleManage(WorkflowIterationCycleManage):
TraceTaskName.WORKFLOW_TRACE,
workflow_run=workflow_run,
conversation_id=conversation_id,
+ user_id=trace_manager.user_id,
)
)
diff --git a/api/core/model_runtime/model_providers/__base/tts_model.py b/api/core/model_runtime/model_providers/__base/tts_model.py
index 086a189246..64e85d2c11 100644
--- a/api/core/model_runtime/model_providers/__base/tts_model.py
+++ b/api/core/model_runtime/model_providers/__base/tts_model.py
@@ -1,18 +1,16 @@
-import hashlib
import logging
import re
-import subprocess
-import uuid
from abc import abstractmethod
from typing import Optional
from pydantic import ConfigDict
from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType
-from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.model_providers.__base.ai_model import AIModel
logger = logging.getLogger(__name__)
+
+
class TTSModel(AIModel):
"""
Model class for ttstext model.
@@ -37,8 +35,6 @@ class TTSModel(AIModel):
:return: translated audio file
"""
try:
- logger.info(f"Invoke TTS model: {model} , invoke content : {content_text}")
- self._is_ffmpeg_installed()
return self._invoke(model=model, credentials=credentials, user=user,
content_text=content_text, voice=voice, tenant_id=tenant_id)
except Exception as e:
@@ -75,7 +71,8 @@ class TTSModel(AIModel):
if model_schema and ModelPropertyKey.VOICES in model_schema.model_properties:
voices = model_schema.model_properties[ModelPropertyKey.VOICES]
if language:
- return [{'name': d['name'], 'value': d['mode']} for d in voices if language and language in d.get('language')]
+ return [{'name': d['name'], 'value': d['mode']} for d in voices if
+ language and language in d.get('language')]
else:
return [{'name': d['name'], 'value': d['mode']} for d in voices]
@@ -146,28 +143,3 @@ class TTSModel(AIModel):
if one_sentence != '':
result.append(one_sentence)
return result
-
- @staticmethod
- def _is_ffmpeg_installed():
- try:
- output = subprocess.check_output("ffmpeg -version", shell=True)
- if "ffmpeg version" in output.decode("utf-8"):
- return True
- else:
- raise InvokeBadRequestError("ffmpeg is not installed, "
- "details: https://docs.dify.ai/getting-started/install-self-hosted"
- "/install-faq#id-14.-what-to-do-if-this-error-occurs-in-text-to-speech")
- except Exception:
- raise InvokeBadRequestError("ffmpeg is not installed, "
- "details: https://docs.dify.ai/getting-started/install-self-hosted"
- "/install-faq#id-14.-what-to-do-if-this-error-occurs-in-text-to-speech")
-
- # Todo: To improve the streaming function
- @staticmethod
- def _get_file_name(file_content: str) -> str:
- hash_object = hashlib.sha256(file_content.encode())
- hex_digest = hash_object.hexdigest()
-
- namespace_uuid = uuid.UUID('a5da6ef9-b303-596f-8e88-bf8fa40f4b31')
- unique_uuid = uuid.uuid5(namespace_uuid, hex_digest)
- return str(unique_uuid)
diff --git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml
index c2fa0e5a6e..b4e024a81e 100644
--- a/api/core/model_runtime/model_providers/_position.yaml
+++ b/api/core/model_runtime/model_providers/_position.yaml
@@ -6,6 +6,7 @@
- nvidia
- nvidia_nim
- cohere
+- upstage
- bedrock
- togetherai
- openrouter
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama3-70b-8192.yaml b/api/core/model_runtime/model_providers/groq/llm/llama3-70b-8192.yaml
index 98655a4c9f..91d0e30765 100644
--- a/api/core/model_runtime/model_providers/groq/llm/llama3-70b-8192.yaml
+++ b/api/core/model_runtime/model_providers/groq/llm/llama3-70b-8192.yaml
@@ -19,7 +19,7 @@ parameter_rules:
min: 1
max: 8192
pricing:
- input: '0.05'
- output: '0.1'
+ input: '0.59'
+ output: '0.79'
unit: '0.000001'
currency: USD
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama3-8b-8192.yaml b/api/core/model_runtime/model_providers/groq/llm/llama3-8b-8192.yaml
index d85bb7709b..b6154f761f 100644
--- a/api/core/model_runtime/model_providers/groq/llm/llama3-8b-8192.yaml
+++ b/api/core/model_runtime/model_providers/groq/llm/llama3-8b-8192.yaml
@@ -19,7 +19,7 @@ parameter_rules:
min: 1
max: 8192
pricing:
- input: '0.59'
- output: '0.79'
+ input: '0.05'
+ output: '0.08'
unit: '0.000001'
currency: USD
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml
index d6338c3d19..6eb15e6c0d 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml
@@ -37,7 +37,7 @@ parameter_rules:
- text
- json_object
pricing:
- input: '0.001'
- output: '0.002'
+ input: '0.0005'
+ output: '0.0015'
unit: '0.001'
currency: USD
diff --git a/api/core/model_runtime/model_providers/upstage/__init__.py b/api/core/model_runtime/model_providers/upstage/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/core/model_runtime/model_providers/upstage/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/upstage/_assets/icon_l_en.svg
new file mode 100644
index 0000000000..0761f85ba6
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/_assets/icon_l_en.svg
@@ -0,0 +1,14 @@
+
diff --git a/api/core/model_runtime/model_providers/upstage/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/upstage/_assets/icon_s_en.svg
new file mode 100644
index 0000000000..44ef12b730
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/_assets/icon_s_en.svg
@@ -0,0 +1,3 @@
+
diff --git a/api/core/model_runtime/model_providers/upstage/_common.py b/api/core/model_runtime/model_providers/upstage/_common.py
new file mode 100644
index 0000000000..13b73181e9
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/_common.py
@@ -0,0 +1,57 @@
+
+from collections.abc import Mapping
+
+import openai
+from httpx import Timeout
+
+from core.model_runtime.errors.invoke import (
+ InvokeAuthorizationError,
+ InvokeBadRequestError,
+ InvokeConnectionError,
+ InvokeError,
+ InvokeRateLimitError,
+ InvokeServerUnavailableError,
+)
+
+
+class _CommonUpstage:
+ def _to_credential_kwargs(self, credentials: Mapping) -> dict:
+ """
+ Transform credentials to kwargs for model instance
+
+ :param credentials:
+ :return:
+ """
+ credentials_kwargs = {
+ "api_key": credentials['upstage_api_key'],
+ "base_url": "https://api.upstage.ai/v1/solar",
+ "timeout": Timeout(315.0, read=300.0, write=20.0, connect=10.0),
+ "max_retries": 1
+ }
+
+ return credentials_kwargs
+
+ @property
+ def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
+ """
+ Map model invoke error to unified error
+ The key is the error type thrown to the caller
+ The value is the error type thrown by the model,
+ which needs to be converted into a unified error type for the caller.
+
+ :return: Invoke error mapping
+ """
+ return {
+ InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError],
+ InvokeServerUnavailableError: [openai.InternalServerError],
+ InvokeRateLimitError: [openai.RateLimitError],
+ InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError],
+ InvokeBadRequestError: [
+ openai.BadRequestError,
+ openai.NotFoundError,
+ openai.UnprocessableEntityError,
+ openai.APIError,
+ ],
+ }
+
+
diff --git a/api/core/model_runtime/model_providers/upstage/llm/__init__.py b/api/core/model_runtime/model_providers/upstage/llm/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/core/model_runtime/model_providers/upstage/llm/_position.yaml b/api/core/model_runtime/model_providers/upstage/llm/_position.yaml
new file mode 100644
index 0000000000..d4f03e1988
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/llm/_position.yaml
@@ -0,0 +1 @@
+- solar-1-mini-chat
diff --git a/api/core/model_runtime/model_providers/upstage/llm/llm.py b/api/core/model_runtime/model_providers/upstage/llm/llm.py
new file mode 100644
index 0000000000..d1ed4619d6
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/llm/llm.py
@@ -0,0 +1,575 @@
+import logging
+from collections.abc import Generator
+from typing import Optional, Union, cast
+
+from openai import OpenAI, Stream
+from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_chunk import ChoiceDeltaFunctionCall, ChoiceDeltaToolCall
+from openai.types.chat.chat_completion_message import FunctionCall
+from tokenizers import Tokenizer
+
+from core.model_runtime.callbacks.base_callback import Callback
+from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
+from core.model_runtime.entities.message_entities import (
+ AssistantPromptMessage,
+ ImagePromptMessageContent,
+ PromptMessage,
+ PromptMessageContentType,
+ PromptMessageTool,
+ SystemPromptMessage,
+ TextPromptMessageContent,
+ ToolPromptMessage,
+ UserPromptMessage,
+)
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
+from core.model_runtime.model_providers.upstage._common import _CommonUpstage
+
+logger = logging.getLogger(__name__)
+
+UPSTAGE_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object.
+The structure of the {{block}} object can be found in the instructions; use {"answer": "$your_answer"} as the default structure
+if you are not sure about the structure.
+
+
+{{instructions}}
+
+"""
+
+class UpstageLargeLanguageModel(_CommonUpstage, LargeLanguageModel):
+ """
+ Model class for Upstage large language model.
+ """
+
+ def _invoke(self, model: str, credentials: dict,
+ prompt_messages: list[PromptMessage], model_parameters: dict,
+ tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
+ stream: bool = True, user: Optional[str] = None) -> Union[LLMResult, Generator]:
+ """
+ Invoke large language model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param prompt_messages: prompt messages
+ :param model_parameters: model parameters
+ :param tools: tools for tool calling
+ :param stop: stop words
+ :param stream: is stream response
+ :param user: unique user id
+ :return: full response or stream response chunk generator result
+ """
+
+ return self._chat_generate(
+ model=model,
+ credentials=credentials,
+ prompt_messages=prompt_messages,
+ model_parameters=model_parameters,
+ tools=tools,
+ stop=stop,
+ stream=stream,
+ user=user
+ )
+
+    def _code_block_mode_wrapper(self, model: str, credentials: dict,
+                                 prompt_messages: list[PromptMessage], model_parameters: dict,
+                                 tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
+                                 stream: bool = True, user: Optional[str] = None,
+                                 callbacks: Optional[list[Callback]] = None) -> Union[LLMResult, Generator]:
+ """
+ Code block mode wrapper for invoking large language model
+ """
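+        # rewrite the prompts so the model emits a fenced JSON/XML block, then drop the flag before invoking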
+ if 'response_format' in model_parameters and model_parameters['response_format'] in ['JSON', 'XML']:
+ stop = stop or []
+ self._transform_chat_json_prompts(
+ model=model,
+ credentials=credentials,
+ prompt_messages=prompt_messages,
+ model_parameters=model_parameters,
+ tools=tools,
+ stop=stop,
+ stream=stream,
+ user=user,
+ response_format=model_parameters['response_format']
+ )
+ model_parameters.pop('response_format')
+
+ return self._invoke(
+ model=model,
+ credentials=credentials,
+ prompt_messages=prompt_messages,
+ model_parameters=model_parameters,
+ tools=tools,
+ stop=stop,
+ stream=stream,
+ user=user
+ )
+
+ def _transform_chat_json_prompts(self, model: str, credentials: dict,
+ prompt_messages: list[PromptMessage], model_parameters: dict,
+ tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None,
+ stream: bool = True, user: str | None = None, response_format: str = 'JSON') -> None:
+ """
+ Transform json prompts
+ """
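+        # stop on closing code fences so generation ends with the fenced block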
+ if stop is None:
+ stop = []
+ if "```\n" not in stop:
+ stop.append("```\n")
+ if "\n```" not in stop:
+ stop.append("\n```")
+
+ if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage):
+ prompt_messages[0] = SystemPromptMessage(
+ content=UPSTAGE_BLOCK_MODE_PROMPT
+ .replace("{{instructions}}", prompt_messages[0].content)
+ .replace("{{block}}", response_format)
+ )
+ prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}\n"))
+ else:
+ prompt_messages.insert(0, SystemPromptMessage(
+ content=UPSTAGE_BLOCK_MODE_PROMPT
+ .replace("{{instructions}}", f"Please output a valid {response_format} object.")
+ .replace("{{block}}", response_format)
+ ))
+ prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}"))
+
+ def get_num_tokens(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None) -> int:
+ """
+ Get number of tokens for given prompt messages
+
+ :param model: model name
+ :param credentials: model credentials
+ :param prompt_messages: prompt messages
+ :param tools: tools for tool calling
+ :return:
+ """
+ return self._num_tokens_from_messages(model, prompt_messages, tools)
+
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ """
+ Validate model credentials
+
+ :param model: model name
+ :param credentials: model credentials
+ :return:
+ """
+ try:
+ credentials_kwargs = self._to_credential_kwargs(credentials)
+ client = OpenAI(**credentials_kwargs)
+
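+            # minimal non-streaming ping request to verify the API key and model access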
+ client.chat.completions.create(
+ messages=[{"role": "user", "content": "ping"}],
+ model=model,
+ temperature=0,
+ max_tokens=10,
+ stream=False
+ )
+ except Exception as e:
+ raise CredentialsValidateFailedError(str(e))
+
+ def _chat_generate(self, model: str, credentials: dict,
+ prompt_messages: list[PromptMessage], model_parameters: dict,
+ tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
+ stream: bool = True, user: Optional[str] = None) -> Union[LLMResult, Generator]:
+ credentials_kwargs = self._to_credential_kwargs(credentials)
+ client = OpenAI(**credentials_kwargs)
+
+ extra_model_kwargs = {}
+
+ if tools:
+ extra_model_kwargs["functions"] = [{
+ "name": tool.name,
+ "description": tool.description,
+ "parameters": tool.parameters
+ } for tool in tools]
+
+ if stop:
+ extra_model_kwargs["stop"] = stop
+
+ if user:
+ extra_model_kwargs["user"] = user
+
+ # chat model
+ response = client.chat.completions.create(
+ messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages],
+ model=model,
+ stream=stream,
+ **model_parameters,
+ **extra_model_kwargs,
+ )
+
+ if stream:
+ return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools)
+ return self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools)
+
+ def _handle_chat_generate_response(self, model: str, credentials: dict, response: ChatCompletion,
+ prompt_messages: list[PromptMessage],
+ tools: Optional[list[PromptMessageTool]] = None) -> LLMResult:
+ """
+ Handle llm chat response
+
+ :param model: model name
+ :param credentials: credentials
+ :param response: response
+ :param prompt_messages: prompt messages
+ :param tools: tools for tool calling
+ :return: llm response
+ """
+ assistant_message = response.choices[0].message
+ # assistant_message_tool_calls = assistant_message.tool_calls
+ assistant_message_function_call = assistant_message.function_call
+
+ # extract tool calls from response
+ # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls)
+ function_call = self._extract_response_function_call(assistant_message_function_call)
+ tool_calls = [function_call] if function_call else []
+
+ # transform assistant message to prompt message
+ assistant_prompt_message = AssistantPromptMessage(
+ content=assistant_message.content,
+ tool_calls=tool_calls
+ )
+
+ # calculate num tokens
+ if response.usage:
+ # transform usage
+ prompt_tokens = response.usage.prompt_tokens
+ completion_tokens = response.usage.completion_tokens
+ else:
+ # calculate num tokens
+ prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools)
+ completion_tokens = self._num_tokens_from_messages(model, [assistant_prompt_message])
+
+ # transform usage
+ usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
+
+ # transform response
+ response = LLMResult(
+ model=response.model,
+ prompt_messages=prompt_messages,
+ message=assistant_prompt_message,
+ usage=usage,
+ system_fingerprint=response.system_fingerprint,
+ )
+
+ return response
+
+ def _handle_chat_generate_stream_response(self, model: str, credentials: dict, response: Stream[ChatCompletionChunk],
+ prompt_messages: list[PromptMessage],
+ tools: Optional[list[PromptMessageTool]] = None) -> Generator:
+ """
+ Handle llm chat stream response
+
+ :param model: model name
+ :param response: response
+ :param prompt_messages: prompt messages
+ :param tools: tools for tool calling
+ :return: llm response chunk generator
+ """
+ full_assistant_content = ''
+ delta_assistant_message_function_call_storage: Optional[ChoiceDeltaFunctionCall] = None
+ prompt_tokens = 0
+ completion_tokens = 0
+ final_tool_calls = []
+ final_chunk = LLMResultChunk(
+ model=model,
+ prompt_messages=prompt_messages,
+ delta=LLMResultChunkDelta(
+ index=0,
+ message=AssistantPromptMessage(content=''),
+ )
+ )
+
+ for chunk in response:
+ if len(chunk.choices) == 0:
+ if chunk.usage:
+ # calculate num tokens
+ prompt_tokens = chunk.usage.prompt_tokens
+ completion_tokens = chunk.usage.completion_tokens
+ continue
+
+ delta = chunk.choices[0]
+ has_finish_reason = delta.finish_reason is not None
+
+ if not has_finish_reason and (delta.delta.content is None or delta.delta.content == '') and \
+ delta.delta.function_call is None:
+ continue
+
+ # assistant_message_tool_calls = delta.delta.tool_calls
+ assistant_message_function_call = delta.delta.function_call
+
+ # extract tool calls from response
+ if delta_assistant_message_function_call_storage is not None:
+ # handle process of stream function call
+ if assistant_message_function_call:
+ # message has not ended ever
+ delta_assistant_message_function_call_storage.arguments += assistant_message_function_call.arguments
+ continue
+ else:
+ # message has ended
+ assistant_message_function_call = delta_assistant_message_function_call_storage
+ delta_assistant_message_function_call_storage = None
+ else:
+ if assistant_message_function_call:
+ # start of stream function call
+ delta_assistant_message_function_call_storage = assistant_message_function_call
+ if delta_assistant_message_function_call_storage.arguments is None:
+ delta_assistant_message_function_call_storage.arguments = ''
+ if not has_finish_reason:
+ continue
+
+ # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls)
+ function_call = self._extract_response_function_call(assistant_message_function_call)
+ tool_calls = [function_call] if function_call else []
+ if tool_calls:
+ final_tool_calls.extend(tool_calls)
+
+ # transform assistant message to prompt message
+ assistant_prompt_message = AssistantPromptMessage(
+ content=delta.delta.content if delta.delta.content else '',
+ tool_calls=tool_calls
+ )
+
+ full_assistant_content += delta.delta.content if delta.delta.content else ''
+
+ if has_finish_reason:
+ final_chunk = LLMResultChunk(
+ model=chunk.model,
+ prompt_messages=prompt_messages,
+ system_fingerprint=chunk.system_fingerprint,
+ delta=LLMResultChunkDelta(
+ index=delta.index,
+ message=assistant_prompt_message,
+ finish_reason=delta.finish_reason,
+ )
+ )
+ else:
+ yield LLMResultChunk(
+ model=chunk.model,
+ prompt_messages=prompt_messages,
+ system_fingerprint=chunk.system_fingerprint,
+ delta=LLMResultChunkDelta(
+ index=delta.index,
+ message=assistant_prompt_message,
+ )
+ )
+
+ if not prompt_tokens:
+ prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools)
+
+ if not completion_tokens:
+ full_assistant_prompt_message = AssistantPromptMessage(
+ content=full_assistant_content,
+ tool_calls=final_tool_calls
+ )
+ completion_tokens = self._num_tokens_from_messages(model, [full_assistant_prompt_message])
+
+ # transform usage
+ usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
+ final_chunk.delta.usage = usage
+
+ yield final_chunk
+
+ def _extract_response_tool_calls(self,
+ response_tool_calls: list[ChatCompletionMessageToolCall | ChoiceDeltaToolCall]) \
+ -> list[AssistantPromptMessage.ToolCall]:
+ """
+ Extract tool calls from response
+
+ :param response_tool_calls: response tool calls
+ :return: list of tool calls
+ """
+ tool_calls = []
+ if response_tool_calls:
+ for response_tool_call in response_tool_calls:
+ function = AssistantPromptMessage.ToolCall.ToolCallFunction(
+ name=response_tool_call.function.name,
+ arguments=response_tool_call.function.arguments
+ )
+
+ tool_call = AssistantPromptMessage.ToolCall(
+ id=response_tool_call.id,
+ type=response_tool_call.type,
+ function=function
+ )
+ tool_calls.append(tool_call)
+
+ return tool_calls
+
+ def _extract_response_function_call(self, response_function_call: FunctionCall | ChoiceDeltaFunctionCall) \
+ -> AssistantPromptMessage.ToolCall:
+ """
+ Extract function call from response
+
+ :param response_function_call: response function call
+ :return: tool call
+ """
+ tool_call = None
+ if response_function_call:
+ function = AssistantPromptMessage.ToolCall.ToolCallFunction(
+ name=response_function_call.name,
+ arguments=response_function_call.arguments
+ )
+
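+            # legacy function_call payloads carry no id, so the function name doubles as the tool call id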
+ tool_call = AssistantPromptMessage.ToolCall(
+ id=response_function_call.name,
+ type="function",
+ function=function
+ )
+
+ return tool_call
+
+ def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict:
+ """
+ Convert PromptMessage to dict for Upstage API
+ """
+ if isinstance(message, UserPromptMessage):
+ message = cast(UserPromptMessage, message)
+ if isinstance(message.content, str):
+ message_dict = {"role": "user", "content": message.content}
+ else:
+ sub_messages = []
+ for message_content in message.content:
+ if message_content.type == PromptMessageContentType.TEXT:
+ message_content = cast(TextPromptMessageContent, message_content)
+ sub_message_dict = {
+ "type": "text",
+ "text": message_content.data
+ }
+ sub_messages.append(sub_message_dict)
+ elif message_content.type == PromptMessageContentType.IMAGE:
+ message_content = cast(ImagePromptMessageContent, message_content)
+ sub_message_dict = {
+ "type": "image_url",
+ "image_url": {
+ "url": message_content.data,
+ "detail": message_content.detail.value
+ }
+ }
+ sub_messages.append(sub_message_dict)
+
+ message_dict = {"role": "user", "content": sub_messages}
+ elif isinstance(message, AssistantPromptMessage):
+ message = cast(AssistantPromptMessage, message)
+ message_dict = {"role": "assistant", "content": message.content}
+ if message.tool_calls:
+ # message_dict["tool_calls"] = [tool_call.dict() for tool_call in
+ # message.tool_calls]
+ function_call = message.tool_calls[0]
+ message_dict["function_call"] = {
+ "name": function_call.function.name,
+ "arguments": function_call.function.arguments,
+ }
+ elif isinstance(message, SystemPromptMessage):
+ message = cast(SystemPromptMessage, message)
+ message_dict = {"role": "system", "content": message.content}
+ elif isinstance(message, ToolPromptMessage):
+ message = cast(ToolPromptMessage, message)
+ # message_dict = {
+ # "role": "tool",
+ # "content": message.content,
+ # "tool_call_id": message.tool_call_id
+ # }
+ message_dict = {
+ "role": "function",
+ "content": message.content,
+ "name": message.tool_call_id
+ }
+ else:
+ raise ValueError(f"Got unknown type {message}")
+
+ if message.name:
+ message_dict["name"] = message.name
+
+ return message_dict
+
+ def _get_tokenizer(self) -> Tokenizer:
+ return Tokenizer.from_pretrained("upstage/solar-1-mini-tokenizer")
+
+ def _num_tokens_from_messages(self, model: str, messages: list[PromptMessage],
+ tools: Optional[list[PromptMessageTool]] = None) -> int:
+ """
+        Calculate the number of tokens for Solar using the Hugging Face Solar tokenizer.
+        The tokenizer is publicly available at https://huggingface.co/upstage/solar-1-mini-tokenizer
+ """
+ tokenizer = self._get_tokenizer()
+ tokens_per_message = 5 # <|im_start|>{role}\n{message}<|im_end|>
+ tokens_prefix = 1 # <|startoftext|>
+ tokens_suffix = 3 # <|im_start|>assistant\n
+
+ num_tokens = 0
+ num_tokens += tokens_prefix
+
+ messages_dict = [self._convert_prompt_message_to_dict(message) for message in messages]
+ for message in messages_dict:
+ num_tokens += tokens_per_message
+ for key, value in message.items():
+ if isinstance(value, list):
+ text = ''
+ for item in value:
+ if isinstance(item, dict) and item['type'] == 'text':
+ text += item['text']
+ value = text
+
+ if key == "tool_calls":
+ for tool_call in value:
+ for t_key, t_value in tool_call.items():
+ num_tokens += len(tokenizer.encode(t_key, add_special_tokens=False))
+ if t_key == "function":
+ for f_key, f_value in t_value.items():
+ num_tokens += len(tokenizer.encode(f_key, add_special_tokens=False))
+ num_tokens += len(tokenizer.encode(f_value, add_special_tokens=False))
+ else:
+ num_tokens += len(tokenizer.encode(t_key, add_special_tokens=False))
+ num_tokens += len(tokenizer.encode(t_value, add_special_tokens=False))
+ else:
+ num_tokens += len(tokenizer.encode(str(value), add_special_tokens=False))
+ num_tokens += tokens_suffix
+
+ if tools:
+ num_tokens += self._num_tokens_for_tools(tokenizer, tools)
+
+ return num_tokens
+
+ def _num_tokens_for_tools(self, tokenizer: Tokenizer, tools: list[PromptMessageTool]) -> int:
+ """
+        Calculate the number of tokens for tool calling with the Upstage tokenizer.
+
+ :param tokenizer: huggingface tokenizer
+ :param tools: tools for tool calling
+ :return: number of tokens
+ """
+ num_tokens = 0
+ for tool in tools:
+ num_tokens += len(tokenizer.encode('type'))
+ num_tokens += len(tokenizer.encode('function'))
+
+ # calculate num tokens for function object
+ num_tokens += len(tokenizer.encode('name'))
+ num_tokens += len(tokenizer.encode(tool.name))
+ num_tokens += len(tokenizer.encode('description'))
+ num_tokens += len(tokenizer.encode(tool.description))
+ parameters = tool.parameters
+ num_tokens += len(tokenizer.encode('parameters'))
+ if 'title' in parameters:
+ num_tokens += len(tokenizer.encode('title'))
+ num_tokens += len(tokenizer.encode(parameters.get("title")))
+ num_tokens += len(tokenizer.encode('type'))
+ num_tokens += len(tokenizer.encode(parameters.get("type")))
+ if 'properties' in parameters:
+ num_tokens += len(tokenizer.encode('properties'))
+ for key, value in parameters.get('properties').items():
+ num_tokens += len(tokenizer.encode(key))
+ for field_key, field_value in value.items():
+ num_tokens += len(tokenizer.encode(field_key))
+ if field_key == 'enum':
+ for enum_field in field_value:
+ num_tokens += 3
+ num_tokens += len(tokenizer.encode(enum_field))
+ else:
+ num_tokens += len(tokenizer.encode(field_key))
+ num_tokens += len(tokenizer.encode(str(field_value)))
+ if 'required' in parameters:
+ num_tokens += len(tokenizer.encode('required'))
+ for required_field in parameters['required']:
+ num_tokens += 3
+ num_tokens += len(tokenizer.encode(required_field))
+
+ return num_tokens
diff --git a/api/core/model_runtime/model_providers/upstage/llm/solar-1-mini-chat.yaml b/api/core/model_runtime/model_providers/upstage/llm/solar-1-mini-chat.yaml
new file mode 100644
index 0000000000..787ac83f8a
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/llm/solar-1-mini-chat.yaml
@@ -0,0 +1,43 @@
+model: solar-1-mini-chat
+label:
+ zh_Hans: solar-1-mini-chat
+ en_US: solar-1-mini-chat
+ ko_KR: solar-1-mini-chat
+model_type: llm
+features:
+ - multi-tool-call
+ - agent-thought
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 32768
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: max_tokens
+ use_template: max_tokens
+ default: 512
+ min: 1
+ max: 32768
+ - name: seed
+ label:
+ zh_Hans: 种子
+ en_US: Seed
+ type: int
+ help:
+ zh_Hans:
+ 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
+ 响应参数来监视变化。
+ en_US:
+ If specified, model will make a best effort to sample deterministically,
+ such that repeated requests with the same seed and parameters should return
+ the same result. Determinism is not guaranteed, and you should refer to the
+ system_fingerprint response parameter to monitor changes in the backend.
+ required: false
+pricing:
+ input: "0.5"
+ output: "0.5"
+ unit: "0.000001"
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/__init__.py b/api/core/model_runtime/model_providers/upstage/text_embedding/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-passage.yaml b/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-passage.yaml
new file mode 100644
index 0000000000..d838a5bbb1
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-passage.yaml
@@ -0,0 +1,9 @@
+model: solar-embedding-1-large-passage
+model_type: text-embedding
+model_properties:
+ context_size: 4000
+ max_chunks: 32
+pricing:
+ input: '0.1'
+ unit: '0.000001'
+ currency: 'USD'
diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-query.yaml b/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-query.yaml
new file mode 100644
index 0000000000..c77645cffd
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-query.yaml
@@ -0,0 +1,9 @@
+model: solar-embedding-1-large-query
+model_type: text-embedding
+model_properties:
+ context_size: 4000
+ max_chunks: 32
+pricing:
+ input: '0.1'
+ unit: '0.000001'
+ currency: 'USD'
diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py
new file mode 100644
index 0000000000..05ae8665d6
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py
@@ -0,0 +1,195 @@
+import base64
+import time
+from collections.abc import Mapping
+from typing import Union
+
+import numpy as np
+from openai import OpenAI
+from tokenizers import Tokenizer
+
+from core.model_runtime.entities.model_entities import PriceType
+from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
+from core.model_runtime.model_providers.upstage._common import _CommonUpstage
+
+
+class UpstageTextEmbeddingModel(_CommonUpstage, TextEmbeddingModel):
+ """
+ Model class for Upstage text embedding model.
+ """
+ def _get_tokenizer(self) -> Tokenizer:
+ return Tokenizer.from_pretrained("upstage/solar-1-mini-tokenizer")
+
+ def _invoke(self, model: str, credentials: dict, texts: list[str], user: str | None = None) -> TextEmbeddingResult:
+ """
+ Invoke text embedding model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :param user: unique user id
+ :return: embeddings result
+ """
+
+ credentials_kwargs = self._to_credential_kwargs(credentials)
+ client = OpenAI(**credentials_kwargs)
+
+ extra_model_kwargs = {}
+ if user:
+ extra_model_kwargs["user"] = user
+ extra_model_kwargs["encoding_format"] = "base64"
+
+ context_size = self._get_context_size(model, credentials)
+ max_chunks = self._get_max_chunks(model, credentials)
+
+ embeddings: list[list[float]] = [[] for _ in range(len(texts))]
+ tokens = []
+ indices = []
+ used_tokens = 0
+
+ tokenizer = self._get_tokenizer()
+
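+        # split each text into context-size token windows, recording which input each window came from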
+ for i, text in enumerate(texts):
+ token = tokenizer.encode(text, add_special_tokens=False).tokens
+ for j in range(0, len(token), context_size):
+ tokens += [token[j:j+context_size]]
+ indices += [i]
+
+ batched_embeddings = []
+ _iter = range(0, len(tokens), max_chunks)
+
+ for i in _iter:
+ embeddings_batch, embedding_used_tokens = self._embedding_invoke(
+ model=model,
+ client=client,
+ texts=tokens[i:i+max_chunks],
+ extra_model_kwargs=extra_model_kwargs,
+ )
+
+ used_tokens += embedding_used_tokens
+ batched_embeddings += embeddings_batch
+
+ results: list[list[list[float]]] = [[] for _ in range(len(texts))]
+ num_tokens_in_batch: list[list[int]] = [[] for _ in range(len(texts))]
+
+ for i in range(len(indices)):
+ results[indices[i]].append(batched_embeddings[i])
+ num_tokens_in_batch[indices[i]].append(len(tokens[i]))
+
+ for i in range(len(texts)):
+ _result = results[i]
+ if len(_result) == 0:
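+                # no window was produced (empty text), embed the raw string directly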
+ embeddings_batch, embedding_used_tokens = self._embedding_invoke(
+ model=model,
+ client=client,
+ texts=[texts[i]],
+ extra_model_kwargs=extra_model_kwargs,
+ )
+ used_tokens += embedding_used_tokens
+ average = embeddings_batch[0]
+ else:
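+                # token-count-weighted average of this text's window embeddings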
+ average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
+ embeddings[i] = (average / np.linalg.norm(average)).tolist()
+
+ usage = self._calc_response_usage(
+ model=model,
+ credentials=credentials,
+ tokens=used_tokens
+ )
+
+ return TextEmbeddingResult(embeddings=embeddings, usage=usage, model=model)
+
+ def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
+        """
+        Get number of tokens for the given texts
+
+        :param model: model name
+        :param credentials: model credentials
+        :param texts: texts to embed
+        :return:
+        """
+        if len(texts) == 0:
+            return 0
+
+        tokenizer = self._get_tokenizer()
+
+ total_num_tokens = 0
+ for text in texts:
+ # calculate the number of tokens in the encoded text
+ tokenized_text = tokenizer.encode(text)
+ total_num_tokens += len(tokenized_text)
+
+ return total_num_tokens
+
+ def validate_credentials(self, model: str, credentials: Mapping) -> None:
+ """
+ Validate model credentials
+
+ :param model: model name
+ :param credentials: model credentials
+ :return:
+ """
+ try:
+ # transform credentials to kwargs for model instance
+ credentials_kwargs = self._to_credential_kwargs(credentials)
+ client = OpenAI(**credentials_kwargs)
+
+ # call embedding model
+ self._embedding_invoke(
+ model=model,
+ client=client,
+ texts=['ping'],
+ extra_model_kwargs={}
+ )
+ except Exception as ex:
+ raise CredentialsValidateFailedError(str(ex))
+
+    def _embedding_invoke(self, model: str, client: OpenAI, texts: Union[list[str], str],
+                          extra_model_kwargs: dict) -> tuple[list[list[float]], int]:
+ """
+ Invoke embedding model
+ :param model: model name
+ :param client: model client
+ :param texts: texts to embed
+ :param extra_model_kwargs: extra model kwargs
+ :return: embeddings and used tokens
+ """
+ response = client.embeddings.create(
+ model=model,
+ input=texts,
+ **extra_model_kwargs
+ )
+
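+        # with encoding_format=base64 the embeddings come back as base64-encoded float32 buffers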
+ if 'encoding_format' in extra_model_kwargs and extra_model_kwargs['encoding_format'] == 'base64':
+ return ([list(np.frombuffer(base64.b64decode(embedding.embedding), dtype=np.float32)) for embedding in response.data], response.usage.total_tokens)
+
+ return [data.embedding for data in response.data], response.usage.total_tokens
+
+ def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
+ """
+ Calculate response usage
+
+ :param model: model name
+ :param credentials: model credentials
+ :param tokens: input tokens
+ :return: usage
+ """
+ input_price_info = self.get_price(
+ model=model,
+ credentials=credentials,
+ tokens=tokens,
+ price_type=PriceType.INPUT
+ )
+
+ usage = EmbeddingUsage(
+ tokens=tokens,
+ total_tokens=tokens,
+ unit_price=input_price_info.unit_price,
+ price_unit=input_price_info.unit,
+ total_price=input_price_info.total_amount,
+ currency=input_price_info.currency,
+ latency=time.perf_counter() - self.started_at
+ )
+
+ return usage
diff --git a/api/core/model_runtime/model_providers/upstage/upstage.py b/api/core/model_runtime/model_providers/upstage/upstage.py
new file mode 100644
index 0000000000..56c91c0061
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/upstage.py
@@ -0,0 +1,32 @@
+import logging
+
+from core.model_runtime.entities.model_entities import ModelType
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.model_provider import ModelProvider
+
+logger = logging.getLogger(__name__)
+
+
+class UpstageProvider(ModelProvider):
+
+ def validate_provider_credentials(self, credentials: dict) -> None:
+ """
+        Validate provider credentials.
+        If validation fails, raise an exception.
+
+        :param credentials: provider credentials, following the form defined in `provider_credential_schema`.
+ """
+ try:
+ model_instance = self.get_model_instance(ModelType.LLM)
+
+ model_instance.validate_credentials(
+ model="solar-1-mini-chat",
+ credentials=credentials
+ )
+        except CredentialsValidateFailedError as e:
+            raise e
+        except Exception as e:
+            logger.exception(f'{self.get_provider_schema().provider} credentials validate failed')
+            raise e
+
diff --git a/api/core/model_runtime/model_providers/upstage/upstage.yaml b/api/core/model_runtime/model_providers/upstage/upstage.yaml
new file mode 100644
index 0000000000..837667cfa9
--- /dev/null
+++ b/api/core/model_runtime/model_providers/upstage/upstage.yaml
@@ -0,0 +1,49 @@
+provider: upstage
+label:
+ en_US: Upstage
+description:
+ en_US: Models provided by Upstage, such as Solar-1-mini-chat.
+ zh_Hans: Upstage 提供的模型,例如 Solar-1-mini-chat.
+icon_small:
+ en_US: icon_s_en.svg
+icon_large:
+ en_US: icon_l_en.svg
+background: "#FFFFFF"
+help:
+ title:
+ en_US: Get your API Key from Upstage
+ zh_Hans: 从 Upstage 获取 API Key
+ url:
+ en_US: https://console.upstage.ai/api-keys
+supported_model_types:
+ - llm
+ - text-embedding
+configurate_methods:
+ - predefined-model
+model_credential_schema:
+ model:
+ label:
+ en_US: Model Name
+ zh_Hans: 模型名称
+ placeholder:
+ en_US: Enter your model name
+ zh_Hans: 输入模型名称
+ credential_form_schemas:
+ - variable: upstage_api_key
+ label:
+ en_US: API Key
+ type: secret-input
+ required: true
+ placeholder:
+ zh_Hans: 在此输入您的 API Key
+ en_US: Enter your API Key
+provider_credential_schema:
+ credential_form_schemas:
+ - variable: upstage_api_key
+ label:
+ en_US: API Key
+ type: secret-input
+ required: true
+ placeholder:
+ zh_Hans: 在此输入您的 API Key
+ en_US: Enter your API Key
diff --git a/api/core/model_runtime/model_providers/xinference/rerank/rerank.py b/api/core/model_runtime/model_providers/xinference/rerank/rerank.py
index b361806bcd..4e7543fd99 100644
--- a/api/core/model_runtime/model_providers/xinference/rerank/rerank.py
+++ b/api/core/model_runtime/model_providers/xinference/rerank/rerank.py
@@ -51,17 +51,22 @@ class XinferenceRerankModel(RerankModel):
server_url = server_url[:-1]
auth_headers = {'Authorization': f'Bearer {api_key}'} if api_key else {}
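+        # build the rerank arguments once so the compatibility fallback below can reuse them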
+ params = {
+ 'documents': docs,
+ 'query': query,
+ 'top_n': top_n,
+ 'return_documents': True
+ }
try:
handle = RESTfulRerankModelHandle(model_uid, server_url, auth_headers)
- response = handle.rerank(
- documents=docs,
- query=query,
- top_n=top_n,
- return_documents=True
- )
+ response = handle.rerank(**params)
except RuntimeError as e:
- raise InvokeServerUnavailableError(str(e))
+ if "rerank hasn't support extra parameter" not in str(e):
+ raise InvokeServerUnavailableError(str(e))
+            # compatibility with xinference servers between v0.10.1 and v0.12.1, which do not support extra parameters
+ handle = RESTfulRerankModelHandleWithoutExtraParameter(model_uid, server_url, auth_headers)
+ response = handle.rerank(**params)
rerank_documents = []
for idx, result in enumerate(response['results']):
@@ -167,8 +172,40 @@ class XinferenceRerankModel(RerankModel):
),
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_type=ModelType.RERANK,
- model_properties={ },
+ model_properties={},
parameter_rules=[]
)
return entity
+
+
+class RESTfulRerankModelHandleWithoutExtraParameter(RESTfulRerankModelHandle):
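+    """
+    Compatibility shim for xinference servers between v0.10.1 and v0.12.1:
+    posts to /v1/rerank directly with only the standard fields, avoiding the
+    extra keyword parameters those servers reject.
+    """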
+
+ def rerank(
+ self,
+ documents: list[str],
+ query: str,
+ top_n: Optional[int] = None,
+ max_chunks_per_doc: Optional[int] = None,
+ return_documents: Optional[bool] = None,
+ **kwargs
+ ):
+        import requests
+
+        url = f"{self._base_url}/v1/rerank"
+        request_body = {
+            "model": self._model_uid,
+            "documents": documents,
+            "query": query,
+            "top_n": top_n,
+            "max_chunks_per_doc": max_chunks_per_doc,
+            "return_documents": return_documents,
+        }
+
+        response = requests.post(url, json=request_body, headers=self.auth_headers)
+ if response.status_code != 200:
+ raise InvokeServerUnavailableError(
+ f"Failed to rerank documents, detail: {response.json()['detail']}"
+ )
+ response_data = response.json()
+ return response_data
diff --git a/api/core/model_runtime/model_providers/xinference/tts/tts.py b/api/core/model_runtime/model_providers/xinference/tts/tts.py
index c106e38781..a564a021b1 100644
--- a/api/core/model_runtime/model_providers/xinference/tts/tts.py
+++ b/api/core/model_runtime/model_providers/xinference/tts/tts.py
@@ -1,11 +1,7 @@
import concurrent.futures
-from functools import reduce
-from io import BytesIO
from typing import Optional
-from flask import Response
-from pydub import AudioSegment
-from xinference_client.client.restful.restful_client import Client, RESTfulAudioModelHandle
+from xinference_client.client.restful.restful_client import RESTfulAudioModelHandle
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType
@@ -19,6 +15,7 @@ from core.model_runtime.errors.invoke import (
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.tts_model import TTSModel
+from core.model_runtime.model_providers.xinference.xinference_helper import XinferenceHelper
class XinferenceText2SpeechModel(TTSModel):
@@ -26,7 +23,12 @@ class XinferenceText2SpeechModel(TTSModel):
def __init__(self):
# preset voices, need support custom voice
self.model_voices = {
- 'chattts': {
+ '__default': {
+ 'all': [
+ {'name': 'Default', 'value': 'default'},
+ ]
+ },
+ 'ChatTTS': {
'all': [
{'name': 'Alloy', 'value': 'alloy'},
{'name': 'Echo', 'value': 'echo'},
@@ -36,7 +38,7 @@ class XinferenceText2SpeechModel(TTSModel):
{'name': 'Shimmer', 'value': 'shimmer'},
]
},
- 'cosyvoice': {
+ 'CosyVoice': {
'zh-Hans': [
{'name': '中文男', 'value': '中文男'},
{'name': '中文女', 'value': '中文女'},
@@ -77,18 +79,21 @@ class XinferenceText2SpeechModel(TTSModel):
if credentials['server_url'].endswith('/'):
credentials['server_url'] = credentials['server_url'][:-1]
- # initialize client
- client = Client(
- base_url=credentials['server_url']
+ extra_param = XinferenceHelper.get_xinference_extra_parameter(
+ server_url=credentials['server_url'],
+ model_uid=credentials['model_uid']
)
- xinference_client = client.get_model(model_uid=credentials['model_uid'])
-
- if not isinstance(xinference_client, RESTfulAudioModelHandle):
+ if 'text-to-audio' not in extra_param.model_ability:
raise InvokeBadRequestError(
- 'please check model type, the model you want to invoke is not a audio model')
+ 'please check model type, the model you want to invoke is not a text-to-audio model')
- self._tts_invoke(
+ if extra_param.model_family and extra_param.model_family in self.model_voices:
+ credentials['audio_model_name'] = extra_param.model_family
+ else:
+ credentials['audio_model_name'] = '__default'
+
+ self._tts_invoke_streaming(
model=model,
credentials=credentials,
content_text='Hello Dify!',
@@ -110,7 +115,7 @@ class XinferenceText2SpeechModel(TTSModel):
:param user: unique user id
:return: text translated to audio file
"""
- return self._tts_invoke(model, credentials, content_text, voice)
+ return self._tts_invoke_streaming(model, credentials, content_text, voice)
def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
"""
@@ -161,13 +166,15 @@ class XinferenceText2SpeechModel(TTSModel):
}
def get_tts_model_voices(self, model: str, credentials: dict, language: Optional[str] = None) -> list:
+ audio_model_name = credentials.get('audio_model_name', '__default')
for key, voices in self.model_voices.items():
- if key in model.lower():
- if language in voices:
+ if key in audio_model_name:
+ if language and language in voices:
return voices[language]
elif 'all' in voices:
return voices['all']
- return []
+
+ return self.model_voices['__default']['all']
def _get_model_default_voice(self, model: str, credentials: dict) -> any:
return ""
@@ -181,60 +188,55 @@ class XinferenceText2SpeechModel(TTSModel):
def _get_model_workers_limit(self, model: str, credentials: dict) -> int:
return 5
- def _tts_invoke(self, model: str, credentials: dict, content_text: str, voice: str) -> any:
+ def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str,
+ voice: str) -> any:
"""
- _tts_invoke text2speech model
+ _tts_invoke_streaming text2speech model
:param model: model name
:param credentials: model credentials
- :param voice: model timbre
:param content_text: text content to be translated
+ :param voice: model timbre
:return: text translated to audio file
"""
if credentials['server_url'].endswith('/'):
credentials['server_url'] = credentials['server_url'][:-1]
- word_limit = self._get_model_word_limit(model, credentials)
- audio_type = self._get_model_audio_type(model, credentials)
- handle = RESTfulAudioModelHandle(credentials['model_uid'], credentials['server_url'], auth_headers={})
-
try:
- sentences = list(self._split_text_into_sentences(org_text=content_text, max_length=word_limit))
- audio_bytes_list = []
+ handle = RESTfulAudioModelHandle(credentials['model_uid'], credentials['server_url'], auth_headers={})
- with concurrent.futures.ThreadPoolExecutor(max_workers=min((3, len(sentences)))) as executor:
+ model_support_voice = [x.get("value") for x in
+ self.get_tts_model_voices(model=model, credentials=credentials)]
+ if not voice or voice not in model_support_voice:
+ voice = self._get_model_default_voice(model, credentials)
+ word_limit = self._get_model_word_limit(model, credentials)
+ if len(content_text) > word_limit:
+ sentences = self._split_text_into_sentences(content_text, max_length=word_limit)
+ executor = concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sentences)))
futures = [executor.submit(
- handle.speech, input=sentence, voice=voice, response_format="mp3", speed=1.0, stream=False)
- for sentence in sentences]
- for future in futures:
- try:
- if future.result():
- audio_bytes_list.append(future.result())
- except Exception as ex:
- raise InvokeBadRequestError(str(ex))
+ handle.speech,
+ input=sentences[i],
+ voice=voice,
+ response_format="mp3",
+ speed=1.0,
+ stream=False
+ )
+ for i in range(len(sentences))]
- if len(audio_bytes_list) > 0:
- audio_segments = [AudioSegment.from_file(
- BytesIO(audio_bytes), format=audio_type) for audio_bytes in
- audio_bytes_list if audio_bytes]
- combined_segment = reduce(lambda x, y: x + y, audio_segments)
- buffer: BytesIO = BytesIO()
- combined_segment.export(buffer, format=audio_type)
- buffer.seek(0)
- return Response(buffer.read(), status=200, mimetype=f"audio/{audio_type}")
+ for future in futures:
+ response = future.result()
+ for i in range(0, len(response), 1024):
+ yield response[i:i + 1024]
+ else:
+ response = handle.speech(
+ input=content_text.strip(),
+ voice=voice,
+ response_format="mp3",
+ speed=1.0,
+ stream=False
+ )
+
+ for i in range(0, len(response), 1024):
+ yield response[i:i + 1024]
except Exception as ex:
raise InvokeBadRequestError(str(ex))
-
- def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str, voice: str) -> any:
- """
- _tts_invoke_streaming text2speech model
-
- Attention: stream api may return error [Parallel generation is not supported by ggml]
-
- :param model: model name
- :param credentials: model credentials
- :param voice: model timbre
- :param content_text: text content to be translated
- :return: text translated to audio file
- """
- pass
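
The rewritten `_tts_invoke_streaming` turns the method into a generator: long inputs are split into sentences, synthesized in parallel with at most three workers, and each audio payload is yielded in 1 KiB slices in sentence order. A standalone sketch of that fan-out/yield shape, with a fake `speech` call standing in for the Xinference audio handle:

```python
import concurrent.futures

def fake_speech(sentence: str) -> bytes:
    # Stand-in for RESTfulAudioModelHandle.speech(); returns raw audio bytes.
    return sentence.encode("utf-8") * 100

def stream_tts(sentences: list[str], chunk_size: int = 1024):
    # Fan out synthesis across at most three workers, then yield each
    # result in fixed-size chunks, preserving sentence order.
    with concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sentences))) as pool:
        futures = [pool.submit(fake_speech, s) for s in sentences]
        for future in futures:
            audio = future.result()
            for i in range(0, len(audio), chunk_size):
                yield audio[i:i + chunk_size]

total = sum(len(chunk) for chunk in stream_tts(["Hello Dify!", "Second sentence."]))
print(f"streamed {total} bytes")
```

Unlike the hunk above, the sketch uses the executor as a context manager, which guarantees the pool is shut down once all futures resolve.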
diff --git a/api/core/model_runtime/model_providers/xinference/xinference.yaml b/api/core/model_runtime/model_providers/xinference/xinference.yaml
index aca076b6e1..be9073c1ca 100644
--- a/api/core/model_runtime/model_providers/xinference/xinference.yaml
+++ b/api/core/model_runtime/model_providers/xinference/xinference.yaml
@@ -33,7 +33,7 @@ model_credential_schema:
label:
zh_Hans: 服务器URL
en_US: Server url
- type: text-input
+ type: secret-input
required: true
placeholder:
zh_Hans: 在此输入Xinference的服务器地址,如 http://192.168.1.100:9997
@@ -51,7 +51,7 @@ model_credential_schema:
label:
zh_Hans: API密钥
en_US: API key
- type: text-input
+ type: secret-input
required: false
placeholder:
zh_Hans: 在此输入您的API密钥
diff --git a/api/core/model_runtime/model_providers/xinference/xinference_helper.py b/api/core/model_runtime/model_providers/xinference/xinference_helper.py
index 9a3fc9b193..7db483a485 100644
--- a/api/core/model_runtime/model_providers/xinference/xinference_helper.py
+++ b/api/core/model_runtime/model_providers/xinference/xinference_helper.py
@@ -1,5 +1,6 @@
from threading import Lock
from time import time
+from typing import Optional
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, MissingSchema, Timeout
@@ -15,9 +16,11 @@ class XinferenceModelExtraParameter:
context_length: int = 2048
support_function_call: bool = False
support_vision: bool = False
+ model_family: Optional[str]
def __init__(self, model_format: str, model_handle_type: str, model_ability: list[str],
- support_function_call: bool, support_vision: bool, max_tokens: int, context_length: int) -> None:
+ support_function_call: bool, support_vision: bool, max_tokens: int, context_length: int,
+ model_family: Optional[str]) -> None:
self.model_format = model_format
self.model_handle_type = model_handle_type
self.model_ability = model_ability
@@ -25,6 +28,7 @@ class XinferenceModelExtraParameter:
self.support_vision = support_vision
self.max_tokens = max_tokens
self.context_length = context_length
+ self.model_family = model_family
cache = {}
cache_lock = Lock()
@@ -78,9 +82,16 @@ class XinferenceHelper:
model_format = response_json.get('model_format', 'ggmlv3')
model_ability = response_json.get('model_ability', [])
+ model_family = response_json.get('model_family', None)
if response_json.get('model_type') == 'embedding':
model_handle_type = 'embedding'
+ elif response_json.get('model_type') == 'audio':
+ model_handle_type = 'audio'
+ if model_family and model_family in ['ChatTTS', 'CosyVoice']:
+ model_ability.append('text-to-audio')
+ else:
+ model_ability.append('audio-to-text')
elif model_format == 'ggmlv3' and 'chatglm' in response_json['model_name']:
model_handle_type = 'chatglm'
elif 'generate' in model_ability:
@@ -88,7 +99,7 @@ class XinferenceHelper:
elif 'chat' in model_ability:
model_handle_type = 'chat'
else:
- raise NotImplementedError(f'xinference model handle type {model_handle_type} is not supported')
+ raise NotImplementedError('xinference model handle type is not supported')
support_function_call = 'tools' in model_ability
support_vision = 'vision' in model_ability
@@ -103,5 +114,6 @@ class XinferenceHelper:
support_function_call=support_function_call,
support_vision=support_vision,
max_tokens=max_tokens,
- context_length=context_length
- )
\ No newline at end of file
+ context_length=context_length,
+ model_family=model_family
+ )
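
With `model_family` now carried on the extra-parameter object, audio models are routed by family: ChatTTS and CosyVoice gain the `text-to-audio` ability, while every other model under `model_type == 'audio'` is treated as `audio-to-text`. A small sketch of that routing (names mirror the diff; the response dict is illustrative):

```python
from typing import Optional

TTS_FAMILIES = ('ChatTTS', 'CosyVoice')

def classify_audio_model(response_json: dict) -> tuple[list[str], Optional[str]]:
    # Mirrors the branch added above: derive abilities from model_family.
    model_ability = list(response_json.get('model_ability', []))
    model_family = response_json.get('model_family')
    if response_json.get('model_type') == 'audio':
        if model_family in TTS_FAMILIES:
            model_ability.append('text-to-audio')
        else:
            model_ability.append('audio-to-text')
    return model_ability, model_family

print(classify_audio_model({'model_type': 'audio', 'model_family': 'ChatTTS'}))
# (['text-to-audio'], 'ChatTTS')
```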
diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index cb86396420..c520fe2aa9 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -65,12 +65,13 @@ class LangFuseDataTrace(BaseTraceInstance):
def workflow_trace(self, trace_info: WorkflowTraceInfo):
trace_id = trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id
+ user_id = trace_info.metadata.get("user_id")
if trace_info.message_id:
trace_id = trace_info.message_id
name = f"message_{trace_info.message_id}"
trace_data = LangfuseTrace(
id=trace_info.message_id,
- user_id=trace_info.tenant_id,
+ user_id=user_id,
name=name,
input=trace_info.workflow_run_inputs,
output=trace_info.workflow_run_outputs,
@@ -95,7 +96,7 @@ class LangFuseDataTrace(BaseTraceInstance):
else:
trace_data = LangfuseTrace(
id=trace_id,
- user_id=trace_info.tenant_id,
+ user_id=user_id,
name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}",
input=trace_info.workflow_run_inputs,
output=trace_info.workflow_run_outputs,
diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py
index 4f6ab2fb94..61279e3f5f 100644
--- a/api/core/ops/ops_trace_manager.py
+++ b/api/core/ops/ops_trace_manager.py
@@ -153,27 +153,12 @@ class OpsTraceManager:
def get_ops_trace_instance(
cls,
app_id: Optional[Union[UUID, str]] = None,
- message_id: Optional[str] = None,
- conversation_id: Optional[str] = None
):
"""
Get ops trace through model config
:param app_id: app_id
- :param message_id: message_id
- :param conversation_id: conversation_id
:return:
"""
- if conversation_id is not None:
- conversation_data: Conversation = db.session.query(Conversation).filter(
- Conversation.id == conversation_id
- ).first()
- if conversation_data:
- app_id = conversation_data.app_id
-
- if message_id is not None:
- record: Message = db.session.query(Message).filter(Message.id == message_id).first()
- app_id = record.app_id
-
if isinstance(app_id, UUID):
app_id = str(app_id)
@@ -286,6 +271,7 @@ class TraceTask:
message_id: Optional[str] = None,
workflow_run: Optional[WorkflowRun] = None,
conversation_id: Optional[str] = None,
+ user_id: Optional[str] = None,
timer: Optional[Any] = None,
**kwargs
):
@@ -293,17 +279,22 @@ class TraceTask:
self.message_id = message_id
self.workflow_run = workflow_run
self.conversation_id = conversation_id
+ self.user_id = user_id
self.timer = timer
self.kwargs = kwargs
self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")
+ self.app_id = None
+
def execute(self):
return self.preprocess()
def preprocess(self):
preprocess_map = {
TraceTaskName.CONVERSATION_TRACE: lambda: self.conversation_trace(**self.kwargs),
- TraceTaskName.WORKFLOW_TRACE: lambda: self.workflow_trace(self.workflow_run, self.conversation_id),
+ TraceTaskName.WORKFLOW_TRACE: lambda: self.workflow_trace(
+ self.workflow_run, self.conversation_id, self.user_id
+ ),
TraceTaskName.MESSAGE_TRACE: lambda: self.message_trace(self.message_id),
TraceTaskName.MODERATION_TRACE: lambda: self.moderation_trace(
self.message_id, self.timer, **self.kwargs
@@ -326,7 +317,7 @@ class TraceTask:
def conversation_trace(self, **kwargs):
return kwargs
- def workflow_trace(self, workflow_run: WorkflowRun, conversation_id):
+ def workflow_trace(self, workflow_run: WorkflowRun, conversation_id, user_id):
workflow_id = workflow_run.workflow_id
tenant_id = workflow_run.tenant_id
workflow_run_id = workflow_run.id
@@ -371,6 +362,7 @@ class TraceTask:
"total_tokens": total_tokens,
"file_list": file_list,
"triggered_form": workflow_run.triggered_from,
+ "user_id": user_id,
}
workflow_trace_info = WorkflowTraceInfo(
@@ -667,13 +659,12 @@ trace_manager_batch_size = int(os.getenv("TRACE_QUEUE_MANAGER_BATCH_SIZE", 100))
class TraceQueueManager:
- def __init__(self, app_id=None, conversation_id=None, message_id=None):
+ def __init__(self, app_id=None, user_id=None):
global trace_manager_timer
self.app_id = app_id
- self.conversation_id = conversation_id
- self.message_id = message_id
- self.trace_instance = OpsTraceManager.get_ops_trace_instance(app_id, conversation_id, message_id)
+ self.user_id = user_id
+ self.trace_instance = OpsTraceManager.get_ops_trace_instance(app_id)
self.flask_app = current_app._get_current_object()
if trace_manager_timer is None:
self.start_timer()
@@ -683,6 +674,7 @@ class TraceQueueManager:
global trace_manager_queue
try:
if self.trace_instance:
+ trace_task.app_id = self.app_id
trace_manager_queue.put(trace_task)
except Exception as e:
logging.debug(f"Error adding trace task: {e}")
@@ -721,9 +713,7 @@ class TraceQueueManager:
for task in tasks:
trace_info = task.execute()
task_data = {
- "app_id": self.app_id,
- "conversation_id": self.conversation_id,
- "message_id": self.message_id,
+ "app_id": task.app_id,
"trace_info_type": type(trace_info).__name__,
"trace_info": trace_info.model_dump() if trace_info else {},
}
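
After this refactor the queue manager no longer looks up `app_id` from conversation or message records; callers pass `user_id` up front, and each `TraceTask` is stamped with its own `app_id` at enqueue time so batch processing does not depend on manager-level state. A runnable miniature of that stamping pattern (the class names here are simplified stand-ins, not the real manager):

```python
from queue import Queue

class Task:
    def __init__(self, payload: dict):
        self.payload = payload
        self.app_id = None  # stamped by the manager at enqueue time

class Manager:
    def __init__(self, app_id: str, user_id: str):
        self.app_id = app_id
        self.user_id = user_id
        self.queue: Queue = Queue()

    def add(self, task: Task) -> None:
        # Mirror of add_trace_task: the task carries its own app_id, so the
        # batch runner reads task.app_id instead of shared manager fields.
        task.app_id = self.app_id
        self.queue.put(task)

m = Manager(app_id="app-123", user_id="user-456")
m.add(Task({"trace_info_type": "WorkflowTraceInfo"}))
print(m.queue.get().app_id)  # app-123
```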
diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py
index a69fcffbb4..2c5d920a9a 100644
--- a/api/core/rag/retrieval/dataset_retrieval.py
+++ b/api/core/rag/retrieval/dataset_retrieval.py
@@ -322,23 +322,26 @@ class DatasetRetrieval:
for thread in threads:
thread.join()
- if reranking_enable:
- # do rerank for searched documents
- data_post_processor = DataPostProcessor(tenant_id, reranking_mode,
- reranking_model, weights, False)
+ with measure_time() as timer:
+ if reranking_enable:
+ # do rerank for searched documents
+ data_post_processor = DataPostProcessor(
+ tenant_id, reranking_mode,
+ reranking_model, weights, False
+ )
- with measure_time() as timer:
all_documents = data_post_processor.invoke(
query=query,
documents=all_documents,
score_threshold=score_threshold,
top_n=top_k
)
- else:
- if index_type == "economy":
- all_documents = self.calculate_keyword_score(query, all_documents, top_k)
- elif index_type == "high_quality":
- all_documents = self.calculate_vector_score(all_documents, top_k, score_threshold)
+ else:
+ if index_type == "economy":
+ all_documents = self.calculate_keyword_score(query, all_documents, top_k)
+ elif index_type == "high_quality":
+ all_documents = self.calculate_vector_score(all_documents, top_k, score_threshold)
+
self._on_query(query, dataset_ids, app_id, user_from, user_id)
if all_documents:
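
The behavioral change in this hunk is the wider `measure_time()` scope: the timer now covers the keyword- and vector-scoring branches as well, not only the rerank path. A sketch of a comparable context-manager timer — an illustration only, not Dify's actual `measure_time` implementation:

```python
import time
from contextlib import contextmanager

@contextmanager
def measure_time():
    # Yields a dict that is completed when the block exits, so callers can
    # read start/end after the with-statement, as dataset retrieval does.
    timer = {"start": time.perf_counter(), "end": None}
    try:
        yield timer
    finally:
        timer["end"] = time.perf_counter()

with measure_time() as timer:
    sum(range(100_000))
print(f"elapsed: {timer['end'] - timer['start']:.4f}s")
```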
diff --git a/api/core/tools/provider/_position.yaml b/api/core/tools/provider/_position.yaml
index 3a3ff64426..25d9f403a0 100644
--- a/api/core/tools/provider/_position.yaml
+++ b/api/core/tools/provider/_position.yaml
@@ -2,6 +2,7 @@
- bing
- duckduckgo
- searchapi
+- serper
- searxng
- dalle
- azuredalle
diff --git a/api/core/tools/provider/builtin/serper/_assets/icon.svg b/api/core/tools/provider/builtin/serper/_assets/icon.svg
new file mode 100644
index 0000000000..3f973a552e
--- /dev/null
+++ b/api/core/tools/provider/builtin/serper/_assets/icon.svg
@@ -0,0 +1,12 @@
+
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/serper/serper.py b/api/core/tools/provider/builtin/serper/serper.py
new file mode 100644
index 0000000000..2a42109373
--- /dev/null
+++ b/api/core/tools/provider/builtin/serper/serper.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin.serper.tools.serper_search import SerperSearchTool
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class SerperProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict[str, Any]) -> None:
+ try:
+ SerperSearchTool().fork_tool_runtime(
+ runtime={
+ "credentials": credentials,
+ }
+ ).invoke(
+ user_id='',
+ tool_parameters={
+ "query": "test",
+ "result_type": "link"
+ },
+ )
+ except Exception as e:
+ raise ToolProviderCredentialValidationError(str(e))
diff --git a/api/core/tools/provider/builtin/serper/serper.yaml b/api/core/tools/provider/builtin/serper/serper.yaml
new file mode 100644
index 0000000000..b3b2d76c4b
--- /dev/null
+++ b/api/core/tools/provider/builtin/serper/serper.yaml
@@ -0,0 +1,31 @@
+identity:
+ author: zhuhao
+ name: serper
+ label:
+ en_US: Serper
+ zh_Hans: Serper
+ pt_BR: Serper
+ description:
+ en_US: Serper is a powerful real-time search engine tool API that provides structured data from Google Search.
+ zh_Hans: Serper 是一个强大的实时搜索引擎工具API,可提供来自 Google 搜索引擎搜索的结构化数据。
+ pt_BR: Serper is a powerful real-time search engine tool API that provides structured data from Google Search.
+ icon: icon.svg
+ tags:
+ - search
+credentials_for_provider:
+ serperapi_api_key:
+ type: secret-input
+ required: true
+ label:
+ en_US: Serper API key
+ zh_Hans: Serper API key
+ pt_BR: Serper API key
+ placeholder:
+ en_US: Please input your Serper API key
+ zh_Hans: 请输入你的 Serper API key
+ pt_BR: Please input your Serper API key
+ help:
+ en_US: Get your Serper API key from Serper
+ zh_Hans: 从 Serper 获取您的 Serper API key
+ pt_BR: Get your Serper API key from Serper
+ url: https://serper.dev/api-key
diff --git a/api/core/tools/provider/builtin/serper/tools/serper_search.py b/api/core/tools/provider/builtin/serper/tools/serper_search.py
new file mode 100644
index 0000000000..24facaf4ec
--- /dev/null
+++ b/api/core/tools/provider/builtin/serper/tools/serper_search.py
@@ -0,0 +1,44 @@
+from typing import Any, Union
+
+import requests
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+SERPER_API_URL = "https://google.serper.dev/search"
+
+
+class SerperSearchTool(BuiltinTool):
+
+ def _parse_response(self, response: dict) -> dict:
+ result = {}
+ if "knowledgeGraph" in response:
+ result["title"] = response["knowledgeGraph"].get("title", "")
+ result["description"] = response["knowledgeGraph"].get("description", "")
+ if "organic" in response:
+ result["organic"] = [
+ {
+ "title": item.get("title", ""),
+ "link": item.get("link", ""),
+ "snippet": item.get("snippet", "")
+ }
+ for item in response["organic"]
+ ]
+ return result
+
+ def _invoke(self,
+ user_id: str,
+ tool_parameters: dict[str, Any],
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ params = {
+ "q": tool_parameters['query'],
+ "gl": "us",
+ "hl": "en"
+ }
+ headers = {
+ 'X-API-KEY': self.runtime.credentials['serperapi_api_key'],
+ 'Content-Type': 'application/json'
+ }
+ response = requests.get(url=SERPER_API_URL, params=params, headers=headers)
+ response.raise_for_status()
+ valuable_res = self._parse_response(response.json())
+ return self.create_json_message(valuable_res)
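
A standalone sketch of the same request/parse flow against google.serper.dev, runnable with only `requests` (the call shape and headers mirror `_invoke` above; the API key is a placeholder):

```python
import requests

SERPER_API_URL = "https://google.serper.dev/search"

def serper_search(query: str, api_key: str) -> dict:
    # Same call shape as SerperSearchTool._invoke: GET with q/gl/hl params
    # and the key in the X-API-KEY header.
    response = requests.get(
        SERPER_API_URL,
        params={"q": query, "gl": "us", "hl": "en"},
        headers={"X-API-KEY": api_key, "Content-Type": "application/json"},
    )
    response.raise_for_status()
    data = response.json()
    # Keep only the fields the tool forwards to the model.
    return {
        "organic": [
            {"title": item.get("title", ""), "link": item.get("link", "")}
            for item in data.get("organic", [])
        ]
    }

# serper_search("dify", api_key="YOUR_SERPER_API_KEY")  # requires a real key
```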
diff --git a/api/core/tools/provider/builtin/serper/tools/serper_search.yaml b/api/core/tools/provider/builtin/serper/tools/serper_search.yaml
new file mode 100644
index 0000000000..e1c0a056e6
--- /dev/null
+++ b/api/core/tools/provider/builtin/serper/tools/serper_search.yaml
@@ -0,0 +1,27 @@
+identity:
+ name: serper
+ author: zhuhao
+ label:
+ en_US: Serper
+ zh_Hans: Serper
+ pt_BR: Serper
+description:
+ human:
+ en_US: A tool for performing a Google search and extracting snippets and webpages. Input should be a search query.
+ zh_Hans: 一个用于执行 Google 搜索并提取片段和网页的工具。输入应该是一个搜索查询。
+ pt_BR: A tool for performing a Google search and extracting snippets and webpages. Input should be a search query.
+ llm: A tool for performing a Google search and extracting snippets and webpages. Input should be a search query.
+parameters:
+ - name: query
+ type: string
+ required: true
+ label:
+ en_US: Query string
+ zh_Hans: 查询语句
+ pt_BR: Query string
+ human_description:
+ en_US: used for searching
+ zh_Hans: 用于搜索网页内容
+ pt_BR: used for searching
+ llm_description: key words for searching
+ form: llm
diff --git a/api/core/workflow/nodes/base_node.py b/api/core/workflow/nodes/base_node.py
index f42cee4ccd..d8c812e7ef 100644
--- a/api/core/workflow/nodes/base_node.py
+++ b/api/core/workflow/nodes/base_node.py
@@ -49,6 +49,8 @@ class BaseNode(ABC):
callbacks: Sequence[WorkflowCallback]
+ is_answer_previous_node: bool = False
+
def __init__(self, tenant_id: str,
app_id: str,
workflow_id: str,
@@ -110,6 +112,7 @@ class BaseNode(ABC):
text=text,
metadata={
"node_type": self.node_type,
+ "is_answer_previous_node": self.is_answer_previous_node,
"value_selector": value_selector
}
)
diff --git a/api/core/workflow/workflow_engine_manager.py b/api/core/workflow/workflow_engine_manager.py
index 32f0dbba06..bd2b3eafa7 100644
--- a/api/core/workflow/workflow_engine_manager.py
+++ b/api/core/workflow/workflow_engine_manager.py
@@ -177,6 +177,19 @@ class WorkflowEngineManager:
graph = workflow.graph_dict
try:
+ answer_prov_node_ids = []
+ for node in graph.get('nodes', []):
+ if node.get('id', '') == 'answer':
+ try:
+ answer_prov_node_ids.append(node.get('data', {})
+ .get('answer', '')
+ .replace('#', '')
+ .replace('.text', '')
+ .replace('{{', '')
+ .replace('}}', '').split('.')[0])
+ except Exception as e:
+ logger.error(e)
+
predecessor_node: BaseNode | None = None
current_iteration_node: BaseIterationNode | None = None
has_entry_node = False
@@ -301,6 +314,9 @@ class WorkflowEngineManager:
else:
next_node = self._get_node(workflow_run_state=workflow_run_state, graph=graph, node_id=next_node_id, callbacks=callbacks)
+ if next_node and next_node.node_id in answer_prov_node_ids:
+ next_node.is_answer_previous_node = True
+
# run workflow, run multiple target nodes in the future
self._run_workflow_node(
workflow_run_state=workflow_run_state,
@@ -854,6 +870,10 @@ class WorkflowEngineManager:
raise ValueError(f"Node {node.node_data.title} run failed: {node_run_result.error}")
+ if node.is_answer_previous_node and not isinstance(node, LLMNode):
+ if not node_run_result.metadata:
+ node_run_result.metadata = {}
+ node_run_result.metadata["is_answer_previous_node"] = True
workflow_nodes_and_result.result = node_run_result
# node run success
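
The chained `.replace()` calls in the earlier hunk recover the source node id from an answer template such as `{{#1711528915.text#}}`: stripping `#`, `{{`, `}}`, and `.text`, then splitting on the dot, leaves the node id. A regex-based sketch of the same extraction — a hypothetical helper shown only to make the parsing explicit, which also tolerates surrounding text and multiple variables:

```python
import re

ANSWER_VAR = re.compile(r'\{\{#([^.#]+)\.text#\}\}')

def answer_source_node_ids(answer_template: str) -> list[str]:
    # Equivalent to the replace-chain for the single-variable case,
    # returning every referenced node id in template order.
    return ANSWER_VAR.findall(answer_template)

print(answer_source_node_ids('{{#1711528915.text#}}'))                 # ['1711528915']
print(answer_source_node_ids('A: {{#llm.text#}} B: {{#code.text#}}'))  # ['llm', 'code']
```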
diff --git a/api/migrations/README b/api/migrations/README
index 0e04844159..220678df7a 100644
--- a/api/migrations/README
+++ b/api/migrations/README
@@ -1 +1,2 @@
Single-database configuration for Flask.
+
diff --git a/api/poetry.lock b/api/poetry.lock
index b30cc8ce68..8cf952411c 100644
--- a/api/poetry.lock
+++ b/api/poetry.lock
@@ -2761,64 +2761,66 @@ test = ["cffi (>=1.12.2)", "coverage (>=5.0)", "dnspython (>=1.16.0,<2.0)", "idn
[[package]]
name = "gmpy2"
-version = "2.1.5"
-description = "gmpy2 interface to GMP/MPIR, MPFR, and MPC for Python 2.7 and 3.5+"
+version = "2.2.1"
+description = "gmpy2 interface to GMP, MPFR, and MPC for Python 3.7+"
optional = false
-python-versions = "*"
+python-versions = ">=3.7"
files = [
- {file = "gmpy2-2.1.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:d8e531a799f09cc66bd2de16b867cf19ce981bbc005bd026fa8d9af46cbdc08b"},
- {file = "gmpy2-2.1.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:eec3b3c9413dd1ea4413af57fc9c92ccbb4d5bb8336da5efbbda8f107fd90eec"},
- {file = "gmpy2-2.1.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:55dcf08d4278b439c1ba37d9b6893bb77bc34b55ccc9b1ad8645d4596a12700e"},
- {file = "gmpy2-2.1.5-cp27-cp27m-win_amd64.whl", hash = "sha256:8947f3b8a1c90f5bae26caf83b9ba2313e52cd06472f7c2be7a5b3a32bdc1bdd"},
- {file = "gmpy2-2.1.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:3459447d579dd0620a09c2aa4a9c1dbfc46cc8084b6928b901607e8565f04a83"},
- {file = "gmpy2-2.1.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:931adb3006afb55562094e9a866a1db584c11bc9b4a370d1f4719b551b5403fe"},
- {file = "gmpy2-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df404ae9a97b9f399d9ca6890b02bef175a373f87e317f93cbaae00f68774e11"},
- {file = "gmpy2-2.1.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cd876ee5232b0d70dd0bae2b39f54a75f6cc9bbf1dd90b8f0fda8c267fa383a2"},
- {file = "gmpy2-2.1.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4877978256fbb6d6b51cc3892183327171c174fbf60671962ab7aa5e70af8eb3"},
- {file = "gmpy2-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565d0444f0d174d84bcbcb0da8feede0ce09733dabd905b63343b94d666e46c0"},
- {file = "gmpy2-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:85614559144edad1223a46cae4a3e965818022cb2bb44438f3c42406395a9eb7"},
- {file = "gmpy2-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:29441b7d31ea60c93249667c6ef33f2560d34ce3cf284d7e4e32e91ed1f9ac1b"},
- {file = "gmpy2-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:8946dc912c647f7cd29a587339c9e79860d9b34a3a59cbdc04d6d6fe20cfff39"},
- {file = "gmpy2-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58097d7ef48f3eabc86e55ca078d3eee5fa3574d9d585f944ee7bc0f00900864"},
- {file = "gmpy2-2.1.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fa312ec90e643c8ed2224e204f43239c2e27d14261b349c84912c8858a54c5d5"},
- {file = "gmpy2-2.1.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ac72073e7938c2307e7e4645367709a32036787f5e176c4acf881c7d8efff28"},
- {file = "gmpy2-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3245fd34217649f6c48515ef42da67eb43794f24a20fc961dc2c0c99bb8ebb39"},
- {file = "gmpy2-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9d64c1e1e66a2137617c361714022da3de75787d51bd1aed205eb28ddb362c"},
- {file = "gmpy2-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:81004086f2543399b6b425989fc96cc02dd38ab74dcbfd3acb324af1a6770eaf"},
- {file = "gmpy2-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:03beaccf3843c9e9d9cf70102a74cd1e617e792337b64ae73a417b80bf96b385"},
- {file = "gmpy2-2.1.5-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:131d441cc0e77620d88a900eaa6eee8648ba630621b8337b966cda76964e7662"},
- {file = "gmpy2-2.1.5-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:b6a04cfa85607b47e86eefe102b1124c6d0a8981f4197a3afd7071f0719ac9b6"},
- {file = "gmpy2-2.1.5-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:09800f5a7566093d74702ad31f775f176df539f1138f4475ba8edf11903a2b2b"},
- {file = "gmpy2-2.1.5-cp35-cp35m-win_amd64.whl", hash = "sha256:a3a61cd88aca0a891e26ada53f2bf3f4433d4fb1c771f12dec97e8edc17f9f7e"},
- {file = "gmpy2-2.1.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:411d1ea2f5a04d8857a7fe1e59d28d384f19232cb7519f29565c087bda364685"},
- {file = "gmpy2-2.1.5-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5fa902b1c6911d41e6045c94eac57cf2ea76f71946ca65ab65ae8f5d20b2aae"},
- {file = "gmpy2-2.1.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51b95d8e2d6914552118d0316c8ce566441b709e001e66c5db16495be1a429ac"},
- {file = "gmpy2-2.1.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7bbe8d39d83e96b5f81b26e65f99a3e8794cf1edfd891e154a233757a26764fb"},
- {file = "gmpy2-2.1.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e7f324dd859a1324bbc5d5375f431f1ac81c6487035a34cba12fbe8658a888f0"},
- {file = "gmpy2-2.1.5-cp36-cp36m-win_amd64.whl", hash = "sha256:c9e9909d12d06697867568007e9b945246f567116fa5b830513f72766ca8b0c7"},
- {file = "gmpy2-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4957d9164a8b2a93263e8a43f99c635a84c1a4044a256e1a496503dd624376a8"},
- {file = "gmpy2-2.1.5-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:efda6e0508d0c7fe79d0fc3fccd3bab90937dba05384224cbc08398856805ce6"},
- {file = "gmpy2-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8f59c69dd138d84d471530e0907c254429855a839b93b00c7e9fa7ec766feae"},
- {file = "gmpy2-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:943078c7abef7757758bb0f313b4346cf9b0c91f93039b5980d22f2ee0d53177"},
- {file = "gmpy2-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c935af5fcd2fbd2ed89d0e0cf1c7fd11603101293dbddb46fd1325c56363573f"},
- {file = "gmpy2-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:18233c35d5bbddfe2ec8c269e216dc841ce24ba5f2b00e79e8278ba843eb22dc"},
- {file = "gmpy2-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6300a0e427bb8b12442db2629b7b271d4d0cd3dbffe2e3880c408932993d31ba"},
- {file = "gmpy2-2.1.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8d2299682455ee22830f7c0f5851a86ae121ccc5fca2f483be7229a91a2f3be5"},
- {file = "gmpy2-2.1.5-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f50d91d779fe24e7dd3feaa1c06e47e11452a73d0a8c67daeea055a6d58cf233"},
- {file = "gmpy2-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c398e6e5bb470f0529ca4e2490d5a396bc9c50c860818f297f47486e51e86673"},
- {file = "gmpy2-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cfdb61f87edf9a7897e7c3e9204f141ddb1de68ecb7038edf0c676bdea815ef2"},
- {file = "gmpy2-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:356c986799a3b34bdcf845961976398556bcfe104e115379effefc50b2cce320"},
- {file = "gmpy2-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:c40ed4d68e0b54efa53a9d9fe62662342dd85212f08382b852ca9effab2e7666"},
- {file = "gmpy2-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6131ccb4f34849b0fa54b9dd8261c00b16fcf4c3332696cb16469a21c217f884"},
- {file = "gmpy2-2.1.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2fee8bb2934300173d8de0ce670bdfedbb5b09817db94c2467aafa18380a1286"},
- {file = "gmpy2-2.1.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c23c98db9cccb63872dd32bdd98275c9503809117d8a23ddd683d8baa3e3ee67"},
- {file = "gmpy2-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2764dfc443c364b918506ecad8973a61b76ca0b5afdf460f940134166a2a3e7"},
- {file = "gmpy2-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4fccf90d28f934f76cc4252007d2e94cc38700ed016d3fd787974f79819381fd"},
- {file = "gmpy2-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0baf36b2724e154bf98ea17f4ff8234543dc7af7297ce3a0a7098bca0209b768"},
- {file = "gmpy2-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:8739ca54323ff28bc317920ed96723a13558a3c442ef77ac325eb3cdd5d32d05"},
- {file = "gmpy2-2.1.5.tar.gz", hash = "sha256:bc297f1fd8c377ae67a4f493fc0f926e5d1b157e5c342e30a4d84dc7b9f95d96"},
+ {file = "gmpy2-2.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:431d599e1542b6e0b3618d3e296702c25215c97fb461d596e27adbe69d765dc6"},
+ {file = "gmpy2-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e51848975837751d1038e82d006e8bb488b179f093ba7fc8a59e1d8a2c61663"},
+ {file = "gmpy2-2.2.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89bdf26520b0bf39e148f97a7c9dd17e163637fdcd5fa3699fd70b5e9c246531"},
+ {file = "gmpy2-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a187cf303b94efb4c8915106406acac16e8dbaa3cdb6e856fa096673c3c02f1b"},
+ {file = "gmpy2-2.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d26806e518dadd9ed6cf57fc5fb67e8e6ca533bd9a77fd079558ffadd57150c8"},
+ {file = "gmpy2-2.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416d2f1c4a1af3c00946a8f85b4547ba2bede3903cae3095be12fbc0128f9f5f"},
+ {file = "gmpy2-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:b3cb0f02570f483d27581ea5659c43df0ff7759aaeb475219e0d9e10e8511a80"},
+ {file = "gmpy2-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:98e947491c67523d3147a500f377bb64d0b115e4ab8a12d628fb324bb0e142bf"},
+ {file = "gmpy2-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ccd319a3a87529484167ae1391f937ac4a8724169fd5822bbb541d1eab612b0"},
+ {file = "gmpy2-2.2.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:827bcd433e5d62f1b732f45e6949419da4a53915d6c80a3c7a5a03d5a783a03a"},
+ {file = "gmpy2-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7131231fc96f57272066295c81cbf11b3233a9471659bca29ddc90a7bde9bfa"},
+ {file = "gmpy2-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1cc6f2bb68ee00c20aae554e111dc781a76140e00c31e4eda5c8f2d4168ed06c"},
+ {file = "gmpy2-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ae388fe46e3d20af4675451a4b6c12fc1bb08e6e0e69ee47072638be21bf42d8"},
+ {file = "gmpy2-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:8b472ee3c123b77979374da2293ebf2c170b88212e173d64213104956d4678fb"},
+ {file = "gmpy2-2.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:90d03a1be1b1ad3944013fae5250316c3f4e6aec45ecdf189a5c7422d640004d"},
+ {file = "gmpy2-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd09dd43d199908c1d1d501c5de842b3bf754f99b94af5b5ef0e26e3b716d2d5"},
+ {file = "gmpy2-2.2.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3232859fda3e96fd1aecd6235ae20476ed4506562bcdef6796a629b78bb96acd"},
+ {file = "gmpy2-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30fba6f7cf43fb7f8474216701b5aaddfa5e6a06d560e88a67f814062934e863"},
+ {file = "gmpy2-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9b33cae533ede8173bc7d4bb855b388c5b636ca9f22a32c949f2eb7e0cc531b2"},
+ {file = "gmpy2-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:954e7e1936c26e370ca31bbd49729ebeeb2006a8f9866b1e778ebb89add2e941"},
+ {file = "gmpy2-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c929870137b20d9c3f7dd97f43615b2d2c1a2470e50bafd9a5eea2e844f462e9"},
+ {file = "gmpy2-2.2.1-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:a3859ef1706bc631ee7fbdf3ae0367da1709fae1e2538b0e1bc6c53fa3ee7ef4"},
+ {file = "gmpy2-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6468fc604d5a322fe037b8880848eef2fef7e9f843872645c4c11eef276896ad"},
+ {file = "gmpy2-2.2.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a845a7701217da4ff81a2e4ae8df479e904621b7953d3a6b4ca0ff139f1fa71f"},
+ {file = "gmpy2-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0b1e14ef1793a1e0176e7b54b29b44c1d93cf8699ca8e4a93ed53fdd16e2c52"},
+ {file = "gmpy2-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:13b0e00170c14ed4cd1e007cc6f1bcb3417b5677d2ef964d46959a1833aa84ab"},
+ {file = "gmpy2-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:831280e3943897ae6bf69ebd868dc6de2a46c078230b9f2a9f66b4ad793d0440"},
+ {file = "gmpy2-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74235fcce8a1bee207bf8d43955cb04563f71ba8231a3bbafc6dd7869503d05c"},
+ {file = "gmpy2-2.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67aa03a50ad85687193174875a72e145114946fc3aa64b1c9d4a724b70afc18d"},
+ {file = "gmpy2-2.2.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1854e35312088608880139d06326683a56d7547d68a5817f472ac9046920b7c8"},
+ {file = "gmpy2-2.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c35081bc42741fe5d491cffcff2c71107970b85b6687e6b0001db5fcc70d644"},
+ {file = "gmpy2-2.2.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:152e8aaec5046fd4887e45719ab5ea5fac90df0077574c79fc124dc93fd237c0"},
+ {file = "gmpy2-2.2.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:31826f502cd575898ef1fd5959b48114b3e91540385491ab9303ffa04d88a6eb"},
+ {file = "gmpy2-2.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:98f5c85177225f91b93caf64e1876e081108c5dd1d53f0b79f917561935fb389"},
+ {file = "gmpy2-2.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:235f69d2e83d7418252871f1950bf8fb8e80bf2e572c30859c85d7ee14196f3d"},
+ {file = "gmpy2-2.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5079db302762e2669e0d664ea8fb56f46509514dd0387d98951e399838d9bb07"},
+ {file = "gmpy2-2.2.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e387faa6e860424a934ac23152803202980bd0c30605d8bd180bb015d8b09f75"},
+ {file = "gmpy2-2.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:887471cf563c5fc96456c404c805fb4a09c7e834123d7725b22f5394a48cff46"},
+ {file = "gmpy2-2.2.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:1adf779213b9bbf4b0270d1dea1822e3865c433ae02d4b97d20db8be8532e2f8"},
+ {file = "gmpy2-2.2.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2ef74ffffbb16a84243098b51672b584f83baaa53535209639174244863aea8c"},
+ {file = "gmpy2-2.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:6699b88068c2af9abaf28cd078c876892a917750d8bee6734d8dfa708312fdf3"},
+ {file = "gmpy2-2.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:623e0f701dc74690d15037951b550160d24d75bf66213fc6642a51ac6a2e055e"},
+ {file = "gmpy2-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31b9bfde30478d3b9c85641b4b7146554af16d60320962d79c3e45d724d1281d"},
+ {file = "gmpy2-2.2.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:674da3d7aeb7dbde52abc0adc0a285bf1b2f3d142779dad15acdbdb819fe9bc2"},
+ {file = "gmpy2-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23505c2ab66734f8a1b1fc5c4c1f8bbbd489bb02eef5940bbd974de69f2ddc2d"},
+ {file = "gmpy2-2.2.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:99f515dbd242cb07bf06e71c93e69c99a703ad55a22f5deac198256fd1c305ed"},
+ {file = "gmpy2-2.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1c2daa0bb603734e6bee6245e275e57ed305a08da50dc3ce7b48eedece61216c"},
+ {file = "gmpy2-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:fbe36fcc45a591d4ef30fe38ac8db0afa35edfafdf325dbe4fe9162ceb264c0d"},
+ {file = "gmpy2-2.2.1.tar.gz", hash = "sha256:e83e07567441b78cb87544910cb3cc4fe94e7da987e93ef7622e76fb96650432"},
]
+[package.extras]
+docs = ["sphinx (>=4)", "sphinx-rtd-theme (>=1)"]
+tests = ["cython", "hypothesis", "mpmath", "pytest", "setuptools"]
+
[[package]]
name = "google-ai-generativelanguage"
version = "0.6.1"
@@ -3287,133 +3289,151 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4
[[package]]
name = "grpcio"
-version = "1.58.0"
+version = "1.62.2"
description = "HTTP/2-based RPC framework"
optional = false
python-versions = ">=3.7"
files = [
- {file = "grpcio-1.58.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:3e6bebf1dfdbeb22afd95650e4f019219fef3ab86d3fca8ebade52e4bc39389a"},
- {file = "grpcio-1.58.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:cde11577d5b6fd73a00e6bfa3cf5f428f3f33c2d2878982369b5372bbc4acc60"},
- {file = "grpcio-1.58.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a2d67ff99e70e86b2be46c1017ae40b4840d09467d5455b2708de6d4c127e143"},
- {file = "grpcio-1.58.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ed979b273a81de36fc9c6716d9fb09dd3443efa18dcc8652501df11da9583e9"},
- {file = "grpcio-1.58.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:458899d2ebd55d5ca2350fd3826dfd8fcb11fe0f79828ae75e2b1e6051d50a29"},
- {file = "grpcio-1.58.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc7ffef430b80345729ff0a6825e9d96ac87efe39216e87ac58c6c4ef400de93"},
- {file = "grpcio-1.58.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5b23d75e5173faa3d1296a7bedffb25afd2fddb607ef292dfc651490c7b53c3d"},
- {file = "grpcio-1.58.0-cp310-cp310-win32.whl", hash = "sha256:fad9295fe02455d4f158ad72c90ef8b4bcaadfdb5efb5795f7ab0786ad67dd58"},
- {file = "grpcio-1.58.0-cp310-cp310-win_amd64.whl", hash = "sha256:bc325fed4d074367bebd465a20763586e5e1ed5b943e9d8bc7c162b1f44fd602"},
- {file = "grpcio-1.58.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:652978551af02373a5a313e07bfef368f406b5929cf2d50fa7e4027f913dbdb4"},
- {file = "grpcio-1.58.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:9f13a171281ebb4d7b1ba9f06574bce2455dcd3f2f6d1fbe0fd0d84615c74045"},
- {file = "grpcio-1.58.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:8774219e21b05f750eef8adc416e9431cf31b98f6ce9def288e4cea1548cbd22"},
- {file = "grpcio-1.58.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09206106848462763f7f273ca93d2d2d4d26cab475089e0de830bb76be04e9e8"},
- {file = "grpcio-1.58.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62831d5e251dd7561d9d9e83a0b8655084b2a1f8ea91e4bd6b3cedfefd32c9d2"},
- {file = "grpcio-1.58.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:212f38c6a156862098f6bdc9a79bf850760a751d259d8f8f249fc6d645105855"},
- {file = "grpcio-1.58.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4b12754af201bb993e6e2efd7812085ddaaef21d0a6f0ff128b97de1ef55aa4a"},
- {file = "grpcio-1.58.0-cp311-cp311-win32.whl", hash = "sha256:3886b4d56bd4afeac518dbc05933926198aa967a7d1d237a318e6fbc47141577"},
- {file = "grpcio-1.58.0-cp311-cp311-win_amd64.whl", hash = "sha256:002f228d197fea12797a14e152447044e14fb4fdb2eb5d6cfa496f29ddbf79ef"},
- {file = "grpcio-1.58.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:b5e8db0aff0a4819946215f156bd722b6f6c8320eb8419567ffc74850c9fd205"},
- {file = "grpcio-1.58.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:201e550b7e2ede113b63e718e7ece93cef5b0fbf3c45e8fe4541a5a4305acd15"},
- {file = "grpcio-1.58.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:d79b660681eb9bc66cc7cbf78d1b1b9e335ee56f6ea1755d34a31108b80bd3c8"},
- {file = "grpcio-1.58.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ef8d4a76d2c7d8065aba829f8d0bc0055495c998dce1964ca5b302d02514fb3"},
- {file = "grpcio-1.58.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6cba491c638c76d3dc6c191d9c75041ca5b8f5c6de4b8327ecdcab527f130bb4"},
- {file = "grpcio-1.58.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6801ff6652ecd2aae08ef994a3e49ff53de29e69e9cd0fd604a79ae4e545a95c"},
- {file = "grpcio-1.58.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:24edec346e69e672daf12b2c88e95c6f737f3792d08866101d8c5f34370c54fd"},
- {file = "grpcio-1.58.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7e473a7abad9af48e3ab5f3b5d237d18208024d28ead65a459bd720401bd2f8f"},
- {file = "grpcio-1.58.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:4891bbb4bba58acd1d620759b3be11245bfe715eb67a4864c8937b855b7ed7fa"},
- {file = "grpcio-1.58.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:e9f995a8a421405958ff30599b4d0eec244f28edc760de82f0412c71c61763d2"},
- {file = "grpcio-1.58.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:2f85f87e2f087d9f632c085b37440a3169fda9cdde80cb84057c2fc292f8cbdf"},
- {file = "grpcio-1.58.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb6b92036ff312d5b4182fa72e8735d17aceca74d0d908a7f08e375456f03e07"},
- {file = "grpcio-1.58.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d81c2b2b24c32139dd2536972f1060678c6b9fbd106842a9fcdecf07b233eccd"},
- {file = "grpcio-1.58.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:fbcecb6aedd5c1891db1d70efbfbdc126c986645b5dd616a045c07d6bd2dfa86"},
- {file = "grpcio-1.58.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92ae871a902cf19833328bd6498ec007b265aabf2fda845ab5bd10abcaf4c8c6"},
- {file = "grpcio-1.58.0-cp38-cp38-win32.whl", hash = "sha256:dc72e04620d49d3007771c0e0348deb23ca341c0245d610605dddb4ac65a37cb"},
- {file = "grpcio-1.58.0-cp38-cp38-win_amd64.whl", hash = "sha256:1c1c5238c6072470c7f1614bf7c774ffde6b346a100521de9ce791d1e4453afe"},
- {file = "grpcio-1.58.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:fe643af248442221db027da43ed43e53b73e11f40c9043738de9a2b4b6ca7697"},
- {file = "grpcio-1.58.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:128eb1f8e70676d05b1b0c8e6600320fc222b3f8c985a92224248b1367122188"},
- {file = "grpcio-1.58.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:039003a5e0ae7d41c86c768ef8b3ee2c558aa0a23cf04bf3c23567f37befa092"},
- {file = "grpcio-1.58.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f061722cad3f9aabb3fbb27f3484ec9d4667b7328d1a7800c3c691a98f16bb0"},
- {file = "grpcio-1.58.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0af11938acf8cd4cf815c46156bcde36fa5850518120920d52620cc3ec1830"},
- {file = "grpcio-1.58.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d4cef77ad2fed42b1ba9143465856d7e737279854e444925d5ba45fc1f3ba727"},
- {file = "grpcio-1.58.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:24765a627eb4d9288ace32d5104161c3654128fe27f2808ecd6e9b0cfa7fc8b9"},
- {file = "grpcio-1.58.0-cp39-cp39-win32.whl", hash = "sha256:f0241f7eb0d2303a545136c59bc565a35c4fc3b924ccbd69cb482f4828d6f31c"},
- {file = "grpcio-1.58.0-cp39-cp39-win_amd64.whl", hash = "sha256:dcfba7befe3a55dab6fe1eb7fc9359dc0c7f7272b30a70ae0af5d5b063842f28"},
- {file = "grpcio-1.58.0.tar.gz", hash = "sha256:532410c51ccd851b706d1fbc00a87be0f5312bd6f8e5dbf89d4e99c7f79d7499"},
+ {file = "grpcio-1.62.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:66344ea741124c38588a664237ac2fa16dfd226964cca23ddc96bd4accccbde5"},
+ {file = "grpcio-1.62.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:5dab7ac2c1e7cb6179c6bfad6b63174851102cbe0682294e6b1d6f0981ad7138"},
+ {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:3ad00f3f0718894749d5a8bb0fa125a7980a2f49523731a9b1fabf2b3522aa43"},
+ {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e72ddfee62430ea80133d2cbe788e0d06b12f865765cb24a40009668bd8ea05"},
+ {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53d3a59a10af4c2558a8e563aed9f256259d2992ae0d3037817b2155f0341de1"},
+ {file = "grpcio-1.62.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a1511a303f8074f67af4119275b4f954189e8313541da7b88b1b3a71425cdb10"},
+ {file = "grpcio-1.62.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b94d41b7412ef149743fbc3178e59d95228a7064c5ab4760ae82b562bdffb199"},
+ {file = "grpcio-1.62.2-cp310-cp310-win32.whl", hash = "sha256:a75af2fc7cb1fe25785be7bed1ab18cef959a376cdae7c6870184307614caa3f"},
+ {file = "grpcio-1.62.2-cp310-cp310-win_amd64.whl", hash = "sha256:80407bc007754f108dc2061e37480238b0dc1952c855e86a4fc283501ee6bb5d"},
+ {file = "grpcio-1.62.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:c1624aa686d4b36790ed1c2e2306cc3498778dffaf7b8dd47066cf819028c3ad"},
+ {file = "grpcio-1.62.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:1c1bb80299bdef33309dff03932264636450c8fdb142ea39f47e06a7153d3063"},
+ {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:db068bbc9b1fa16479a82e1ecf172a93874540cb84be69f0b9cb9b7ac3c82670"},
+ {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2cc8a308780edbe2c4913d6a49dbdb5befacdf72d489a368566be44cadaef1a"},
+ {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0695ae31a89f1a8fc8256050329a91a9995b549a88619263a594ca31b76d756"},
+ {file = "grpcio-1.62.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88b4f9ee77191dcdd8810241e89340a12cbe050be3e0d5f2f091c15571cd3930"},
+ {file = "grpcio-1.62.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2a0204532aa2f1afd467024b02b4069246320405bc18abec7babab03e2644e75"},
+ {file = "grpcio-1.62.2-cp311-cp311-win32.whl", hash = "sha256:6e784f60e575a0de554ef9251cbc2ceb8790914fe324f11e28450047f264ee6f"},
+ {file = "grpcio-1.62.2-cp311-cp311-win_amd64.whl", hash = "sha256:112eaa7865dd9e6d7c0556c8b04ae3c3a2dc35d62ad3373ab7f6a562d8199200"},
+ {file = "grpcio-1.62.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:65034473fc09628a02fb85f26e73885cf1ed39ebd9cf270247b38689ff5942c5"},
+ {file = "grpcio-1.62.2-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d2c1771d0ee3cf72d69bb5e82c6a82f27fbd504c8c782575eddb7839729fbaad"},
+ {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:3abe6838196da518863b5d549938ce3159d809218936851b395b09cad9b5d64a"},
+ {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5ffeb269f10cedb4f33142b89a061acda9f672fd1357331dbfd043422c94e9e"},
+ {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404d3b4b6b142b99ba1cff0b2177d26b623101ea2ce51c25ef6e53d9d0d87bcc"},
+ {file = "grpcio-1.62.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:262cda97efdabb20853d3b5a4c546a535347c14b64c017f628ca0cc7fa780cc6"},
+ {file = "grpcio-1.62.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17708db5b11b966373e21519c4c73e5a750555f02fde82276ea2a267077c68ad"},
+ {file = "grpcio-1.62.2-cp312-cp312-win32.whl", hash = "sha256:b7ec9e2f8ffc8436f6b642a10019fc513722858f295f7efc28de135d336ac189"},
+ {file = "grpcio-1.62.2-cp312-cp312-win_amd64.whl", hash = "sha256:aa787b83a3cd5e482e5c79be030e2b4a122ecc6c5c6c4c42a023a2b581fdf17b"},
+ {file = "grpcio-1.62.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:cfd23ad29bfa13fd4188433b0e250f84ec2c8ba66b14a9877e8bce05b524cf54"},
+ {file = "grpcio-1.62.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:af15e9efa4d776dfcecd1d083f3ccfb04f876d613e90ef8432432efbeeac689d"},
+ {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:f4aa94361bb5141a45ca9187464ae81a92a2a135ce2800b2203134f7a1a1d479"},
+ {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82af3613a219512a28ee5c95578eb38d44dd03bca02fd918aa05603c41018051"},
+ {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55ddaf53474e8caeb29eb03e3202f9d827ad3110475a21245f3c7712022882a9"},
+ {file = "grpcio-1.62.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c79b518c56dddeec79e5500a53d8a4db90da995dfe1738c3ac57fe46348be049"},
+ {file = "grpcio-1.62.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a5eb4844e5e60bf2c446ef38c5b40d7752c6effdee882f716eb57ae87255d20a"},
+ {file = "grpcio-1.62.2-cp37-cp37m-win_amd64.whl", hash = "sha256:aaae70364a2d1fb238afd6cc9fcb10442b66e397fd559d3f0968d28cc3ac929c"},
+ {file = "grpcio-1.62.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:1bcfe5070e4406f489e39325b76caeadab28c32bf9252d3ae960c79935a4cc36"},
+ {file = "grpcio-1.62.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:da6a7b6b938c15fa0f0568e482efaae9c3af31963eec2da4ff13a6d8ec2888e4"},
+ {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:41955b641c34db7d84db8d306937b72bc4968eef1c401bea73081a8d6c3d8033"},
+ {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c772f225483905f675cb36a025969eef9712f4698364ecd3a63093760deea1bc"},
+ {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07ce1f775d37ca18c7a141300e5b71539690efa1f51fe17f812ca85b5e73262f"},
+ {file = "grpcio-1.62.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:26f415f40f4a93579fd648f48dca1c13dfacdfd0290f4a30f9b9aeb745026811"},
+ {file = "grpcio-1.62.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:db707e3685ff16fc1eccad68527d072ac8bdd2e390f6daa97bc394ea7de4acea"},
+ {file = "grpcio-1.62.2-cp38-cp38-win32.whl", hash = "sha256:589ea8e75de5fd6df387de53af6c9189c5231e212b9aa306b6b0d4f07520fbb9"},
+ {file = "grpcio-1.62.2-cp38-cp38-win_amd64.whl", hash = "sha256:3c3ed41f4d7a3aabf0f01ecc70d6b5d00ce1800d4af652a549de3f7cf35c4abd"},
+ {file = "grpcio-1.62.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:162ccf61499c893831b8437120600290a99c0bc1ce7b51f2c8d21ec87ff6af8b"},
+ {file = "grpcio-1.62.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:f27246d7da7d7e3bd8612f63785a7b0c39a244cf14b8dd9dd2f2fab939f2d7f1"},
+ {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:2507006c8a478f19e99b6fe36a2464696b89d40d88f34e4b709abe57e1337467"},
+ {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a90ac47a8ce934e2c8d71e317d2f9e7e6aaceb2d199de940ce2c2eb611b8c0f4"},
+ {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99701979bcaaa7de8d5f60476487c5df8f27483624f1f7e300ff4669ee44d1f2"},
+ {file = "grpcio-1.62.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:af7dc3f7a44f10863b1b0ecab4078f0a00f561aae1edbd01fd03ad4dcf61c9e9"},
+ {file = "grpcio-1.62.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fa63245271920786f4cb44dcada4983a3516be8f470924528cf658731864c14b"},
+ {file = "grpcio-1.62.2-cp39-cp39-win32.whl", hash = "sha256:c6ad9c39704256ed91a1cffc1379d63f7d0278d6a0bad06b0330f5d30291e3a3"},
+ {file = "grpcio-1.62.2-cp39-cp39-win_amd64.whl", hash = "sha256:16da954692fd61aa4941fbeda405a756cd96b97b5d95ca58a92547bba2c1624f"},
+ {file = "grpcio-1.62.2.tar.gz", hash = "sha256:c77618071d96b7a8be2c10701a98537823b9c65ba256c0b9067e0594cdbd954d"},
]
[package.extras]
-protobuf = ["grpcio-tools (>=1.58.0)"]
+protobuf = ["grpcio-tools (>=1.62.2)"]
[[package]]
name = "grpcio-status"
-version = "1.58.0"
+version = "1.62.2"
description = "Status proto mapping for gRPC"
optional = false
python-versions = ">=3.6"
files = [
- {file = "grpcio-status-1.58.0.tar.gz", hash = "sha256:0b42e70c0405a66a82d9e9867fa255fe59e618964a6099b20568c31dd9099766"},
- {file = "grpcio_status-1.58.0-py3-none-any.whl", hash = "sha256:36d46072b71a00147709ebce49344ac59b4b8960942acf0f813a8a7d6c1c28e0"},
+ {file = "grpcio-status-1.62.2.tar.gz", hash = "sha256:62e1bfcb02025a1cd73732a2d33672d3e9d0df4d21c12c51e0bbcaf09bab742a"},
+ {file = "grpcio_status-1.62.2-py3-none-any.whl", hash = "sha256:206ddf0eb36bc99b033f03b2c8e95d319f0044defae9b41ae21408e7e0cda48f"},
]
[package.dependencies]
googleapis-common-protos = ">=1.5.5"
-grpcio = ">=1.58.0"
+grpcio = ">=1.62.2"
protobuf = ">=4.21.6"
[[package]]
name = "grpcio-tools"
-version = "1.58.0"
+version = "1.62.2"
description = "Protobuf code generator for gRPC"
optional = false
python-versions = ">=3.7"
files = [
- {file = "grpcio-tools-1.58.0.tar.gz", hash = "sha256:6f4d80ceb591e31ca4dceec747dbe56132e1392a0a9bb1c8fe001d1b5cac898a"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:60c874908f3b40f32f1bb0221f7b3ab65ecb53a4d0a9f0a394f031f1b292c177"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:1852e798f31e5437ca7b37abc910e028b34732fb19364862cedb87b1dab66fad"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:149fb48f53cb691a6328f68bed8e4036c730f7106b7f98e92c2c0403f0b9e93c"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba3d383e5ca93826038b70f326fce8e8d12dd9b2f64d363a3d612f7475f12dd2"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6997511e9d2979f7a2389479682dbb06823f21a904e8fb0a5c6baaf1b4b4a863"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8de0b701da479643f71fad71fe66885cddd89441ae16e2c724939b47742dc72e"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43cc23908b63fcaefe690b10f68a2d8652c994b5b36ab77d2271d9608c895320"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-win32.whl", hash = "sha256:2c2221123d010dc6231799e63a37f2f4786bf614ef65b23009c387cd20d8b193"},
- {file = "grpcio_tools-1.58.0-cp310-cp310-win_amd64.whl", hash = "sha256:df2788736bdf58abe7b0e4d6b1ff806f7686c98c5ad900da312252e3322d91c4"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:b6ea5578712cdb29b0ff60bfc6405bf0e8d681b9c71d106dd1cda54fe7fe4e55"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:c29880f491581c83181c0a84a4d11402af2b13166a5266f64e246adf1da7aa66"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:32d51e933c3565414dd0835f930bb28a1cdeba435d9d2c87fa3cf8b1d284db3c"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ad9d77f25514584b1ddc981d70c9e50dfcfc388aa5ba943eee67520c5267ed9"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4882382631e6352819059278a5c878ce0b067008dd490911d16d5616e8a36d85"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d84091a189d848d94645b7c48b61734c12ec03b0d46e5fc0049343a26989ac5c"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:85ac28a9621e9b92a3fc416288c4ce45542db0b4c31b3e23031dd8e0a0ec5590"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-win32.whl", hash = "sha256:7371d8ea80234b29affec145e25569523f549520ed7e53b2aa92bed412cdecfd"},
- {file = "grpcio_tools-1.58.0-cp311-cp311-win_amd64.whl", hash = "sha256:6997df6e7c5cf4d3ddc764240c1ff6a04b45d70ec28913b38fbc6396ef743e12"},
- {file = "grpcio_tools-1.58.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:ac65b8d6e3acaf88b815edf9af88ff844b6600ff3d2591c05ba4f655b45d5fb4"},
- {file = "grpcio_tools-1.58.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:88e8191d0dd789bebf42533808728f5ce75d2c51e2a72bdf20abe5b5e3fbec42"},
- {file = "grpcio_tools-1.58.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:a3dbece2a121761499a659b799979d4b738586d1065439053de553773eee11ca"},
- {file = "grpcio_tools-1.58.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1086fe240c4c879b9721952b47d46996deb283c2d9355a8dc24a804811aacf70"},
- {file = "grpcio_tools-1.58.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7ae3dca059d5b358dd03fb63277428fa7d771605d4074a019138dd38d70719a"},
- {file = "grpcio_tools-1.58.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3f8904ac7fc3da2e874f00b3a986e8b7e004f499344a8e7eb213c26dfb025041"},
- {file = "grpcio_tools-1.58.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:aadbd8393ae332e49731adb31e741f2e689989150569b7acc939f5ea43124e2d"},
- {file = "grpcio_tools-1.58.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1cb6e24194786687d4f23c64de1f0ce553af51de22746911bc37340f85f9783e"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:6ec43909095c630df3e479e77469bdad367067431f4af602f6ccb978a3b78afd"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:4be49ed320b0ebcbc21d19ef555fbf229c1c452105522b728e1171ee2052078e"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:28eefebddec3d3adf19baca78f8b82a2287d358e1b1575ae018cdca8eacc6269"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ef8c696e9d78676cc3f583a92bbbf2c84e94e350f7ad22f150a52559f4599d1"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9aeb5949e46558d21c51fd3ec3eeecc59c94dbca76c67c0a80d3da6b7437930c"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f7144aad9396d35fb1b80429600a970b559c2ad4d07020eeb180fe83cea2bee"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4ee26e9253a721fff355737649678535f76cf5d642aa3ac0cd937832559b90af"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-win32.whl", hash = "sha256:343f572312039059a8797d6e29a7fc62196e73131ab01755660a9d48202267c1"},
- {file = "grpcio_tools-1.58.0-cp38-cp38-win_amd64.whl", hash = "sha256:cd7acfbb43b7338a78cf4a67528d05530d574d92b7c829d185b78dfc451d158f"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:46628247fbce86d18232eead24bd22ed0826c79f3fe2fc2fbdbde45971361049"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:51587842a54e025a3d0d37afcf4ef2b7ac1def9a5d17448665cb424b53d6c287"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:a062ae3072a2a39a3c057f4d68b57b021f1dd2956cd09aab39709f6af494e1de"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eec3c93a08df11c80ef1c29a616bcbb0d83dbc6ea41b48306fcacc720416dfa7"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b63f823ac991ff77104da614d2a2485a59d37d57830eb2e387a6e2a3edc7fa2b"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:579c11a9f198847ed48dbc4f211c67fe96a73320b87c81f01b044b72e24a7d77"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6ca2fc1dd8049d417a5034d944c9df05cee76f855b3e431627ab4292e7c01c47"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-win32.whl", hash = "sha256:453023120114c35d3d9d6717ea0820e5d5c140f51f9d0b621de4397ff854471b"},
- {file = "grpcio_tools-1.58.0-cp39-cp39-win_amd64.whl", hash = "sha256:b6c896f1df99c35cf062d4803c15663ff00a33ff09add28baa6e475cf6b5e258"},
+ {file = "grpcio-tools-1.62.2.tar.gz", hash = "sha256:5fd5e1582b678e6b941ee5f5809340be5e0724691df5299aae8226640f94e18f"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:1679b4903aed2dc5bd8cb22a452225b05dc8470a076f14fd703581efc0740cdb"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:9d41e0e47dd075c075bb8f103422968a65dd0d8dc8613288f573ae91eb1053ba"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:987e774f74296842bbffd55ea8826370f70c499e5b5f71a8cf3103838b6ee9c3"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40cd4eeea4b25bcb6903b82930d579027d034ba944393c4751cdefd9c49e6989"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6746bc823958499a3cf8963cc1de00072962fb5e629f26d658882d3f4c35095"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2ed775e844566ce9ce089be9a81a8b928623b8ee5820f5e4d58c1a9d33dfc5ae"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bdc5dd3f57b5368d5d661d5d3703bcaa38bceca59d25955dff66244dbc987271"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-win32.whl", hash = "sha256:3a8d6f07e64c0c7756f4e0c4781d9d5a2b9cc9cbd28f7032a6fb8d4f847d0445"},
+ {file = "grpcio_tools-1.62.2-cp310-cp310-win_amd64.whl", hash = "sha256:e33b59fb3efdddeb97ded988a871710033e8638534c826567738d3edce528752"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:472505d030135d73afe4143b0873efe0dcb385bd6d847553b4f3afe07679af00"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:ec674b4440ef4311ac1245a709e87b36aca493ddc6850eebe0b278d1f2b6e7d1"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:184b4174d4bd82089d706e8223e46c42390a6ebac191073b9772abc77308f9fa"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c195d74fe98541178ece7a50dad2197d43991e0f77372b9a88da438be2486f12"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a34d97c62e61bfe9e6cff0410fe144ac8cca2fc979ad0be46b7edf026339d161"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cbb8453ae83a1db2452b7fe0f4b78e4a8dd32be0f2b2b73591ae620d4d784d3d"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f989e5cebead3ae92c6abf6bf7b19949e1563a776aea896ac5933f143f0c45d"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-win32.whl", hash = "sha256:c48fabe40b9170f4e3d7dd2c252e4f1ff395dc24e49ac15fc724b1b6f11724da"},
+ {file = "grpcio_tools-1.62.2-cp311-cp311-win_amd64.whl", hash = "sha256:8c616d0ad872e3780693fce6a3ac8ef00fc0963e6d7815ce9dcfae68ba0fc287"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:10cc3321704ecd17c93cf68c99c35467a8a97ffaaed53207e9b2da6ae0308ee1"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:9be84ff6d47fd61462be7523b49d7ba01adf67ce4e1447eae37721ab32464dd8"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:d82f681c9a9d933a9d8068e8e382977768e7779ddb8870fa0cf918d8250d1532"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:04c607029ae3660fb1624ed273811ffe09d57d84287d37e63b5b802a35897329"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72b61332f1b439c14cbd3815174a8f1d35067a02047c32decd406b3a09bb9890"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8214820990d01b52845f9fbcb92d2b7384a0c321b303e3ac614c219dc7d1d3af"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:462e0ab8dd7c7b70bfd6e3195eebc177549ede5cf3189814850c76f9a340d7ce"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-win32.whl", hash = "sha256:fa107460c842e4c1a6266150881694fefd4f33baa544ea9489601810c2210ef8"},
+ {file = "grpcio_tools-1.62.2-cp312-cp312-win_amd64.whl", hash = "sha256:759c60f24c33a181bbbc1232a6752f9b49fbb1583312a4917e2b389fea0fb0f2"},
+ {file = "grpcio_tools-1.62.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:45db5da2bcfa88f2b86b57ef35daaae85c60bd6754a051d35d9449c959925b57"},
+ {file = "grpcio_tools-1.62.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:ab84bae88597133f6ea7a2bdc57b2fda98a266fe8d8d4763652cbefd20e73ad7"},
+ {file = "grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:7a49bccae1c7d154b78e991885c3111c9ad8c8fa98e91233de425718f47c6139"},
+ {file = "grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7e439476b29d6dac363b321781a113794397afceeb97dad85349db5f1cb5e9a"},
+ {file = "grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ea369c4d1567d1acdf69c8ea74144f4ccad9e545df7f9a4fc64c94fa7684ba3"},
+ {file = "grpcio_tools-1.62.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4f955702dc4b530696375251319d05223b729ed24e8673c2129f7a75d2caefbb"},
+ {file = "grpcio_tools-1.62.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3708a747aa4b6b505727282ca887041174e146ae030ebcadaf4c1d346858df62"},
+ {file = "grpcio_tools-1.62.2-cp37-cp37m-win_amd64.whl", hash = "sha256:2ce149ea55eadb486a7fb75a20f63ef3ac065ee6a0240ed25f3549ce7954c653"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:58cbb24b3fa6ae35aa9c210fcea3a51aa5fef0cd25618eb4fd94f746d5a9b703"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:6413581e14a80e0b4532577766cf0586de4dd33766a31b3eb5374a746771c07d"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:47117c8a7e861382470d0e22d336e5a91fdc5f851d1db44fa784b9acea190d87"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9f1ba79a253df9e553d20319c615fa2b429684580fa042dba618d7f6649ac7e4"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04a394cf5e51ba9be412eb9f6c482b6270bd81016e033e8eb7d21b8cc28fe8b5"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3c53b221378b035ae2f1881cbc3aca42a6075a8e90e1a342c2f205eb1d1aa6a1"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c384c838b34d1b67068e51b5bbe49caa6aa3633acd158f1ab16b5da8d226bc53"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-win32.whl", hash = "sha256:19ea69e41c3565932aa28a202d1875ec56786aea46a2eab54a3b28e8a27f9517"},
+ {file = "grpcio_tools-1.62.2-cp38-cp38-win_amd64.whl", hash = "sha256:1d768a5c07279a4c461ebf52d0cec1c6ca85c6291c71ec2703fe3c3e7e28e8c4"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:5b07b5874187e170edfbd7aa2ca3a54ebf3b2952487653e8c0b0d83601c33035"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:d58389fe8be206ddfb4fa703db1e24c956856fcb9a81da62b13577b3a8f7fda7"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:7d8b4e00c3d7237b92260fc18a561cd81f1da82e8be100db1b7d816250defc66"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe08d2038f2b7c53259b5c49e0ad08c8e0ce2b548d8185993e7ef67e8592cca"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19216e1fb26dbe23d12a810517e1b3fbb8d4f98b1a3fbebeec9d93a79f092de4"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b8574469ecc4ff41d6bb95f44e0297cdb0d95bade388552a9a444db9cd7485cd"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4f6f32d39283ea834a493fccf0ebe9cfddee7577bdcc27736ad4be1732a36399"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-win32.whl", hash = "sha256:76eb459bdf3fb666e01883270beee18f3f11ed44488486b61cd210b4e0e17cc1"},
+ {file = "grpcio_tools-1.62.2-cp39-cp39-win_amd64.whl", hash = "sha256:217c2ee6a7ce519a55958b8622e21804f6fdb774db08c322f4c9536c35fdce7c"},
]
[package.dependencies]
-grpcio = ">=1.58.0"
+grpcio = ">=1.62.2"
protobuf = ">=4.21.6,<5.0dev"
setuptools = "*"
@@ -6280,17 +6300,6 @@ python-dotenv = ">=0.21.0"
toml = ["tomli (>=2.0.1)"]
yaml = ["pyyaml (>=6.0.1)"]
-[[package]]
-name = "pydub"
-version = "0.25.1"
-description = "Manipulate audio with an simple and easy high level interface"
-optional = false
-python-versions = "*"
-files = [
- {file = "pydub-0.25.1-py2.py3-none-any.whl", hash = "sha256:65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6"},
- {file = "pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f"},
-]
-
[[package]]
name = "pygments"
version = "2.18.0"
@@ -9501,4 +9510,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
-content-hash = "50acbb78f2a273dfa8683d9d292596e89d13a420c6ecb1afad331f2c38dd1423"
+content-hash = "ca0efc924f1f20acdfba068aa571015a10f0e185427dc3b22333be252d706de2"
diff --git a/api/pyproject.toml b/api/pyproject.toml
index c2c1d56403..05e36405d7 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -1,5 +1,5 @@
[project]
-requires-python = ">=3.10"
+requires-python = ">=3.10,<3.13"
[build-system]
requires = ["poetry-core"]
@@ -73,6 +73,7 @@ quote-style = "single"
[tool.pytest_env]
OPENAI_API_KEY = "sk-IamNotARealKeyJustForMockTestKawaiiiiiiiiii"
+UPSTAGE_API_KEY = "up-aaaaaaaaaaaaaaaaaaaa"
AZURE_OPENAI_API_BASE = "https://difyai-openai.openai.azure.com"
AZURE_OPENAI_API_KEY = "xxxxb1707exxxxxxxxxxaaxxxxxf94"
ANTHROPIC_API_KEY = "sk-ant-api11-IamNotARealKeyJustForMockTestKawaiiiiiiiiii-NotBaka-ASkksz"
@@ -123,7 +124,7 @@ flask-migrate = "~4.0.5"
flask-restful = "~0.3.10"
Flask-SQLAlchemy = "~3.1.1"
gevent = "~23.9.1"
-gmpy2 = "~2.1.5"
+gmpy2 = "~2.2.1"
google-ai-generativelanguage = "0.6.1"
google-api-core = "2.18.0"
google-api-python-client = "2.90.0"
@@ -151,7 +152,6 @@ pycryptodome = "3.19.1"
pydantic = "~2.8.2"
pydantic-settings = "~2.3.4"
pydantic_extra_types = "~2.9.0"
-pydub = "~0.25.1"
pyjwt = "~2.8.0"
pypdfium2 = "~4.17.0"
python = ">=3.10,<3.13"
@@ -178,6 +178,8 @@ yarl = "~1.9.4"
zhipuai = "1.0.7"
rank-bm25 = "~0.2.2"
openpyxl = "^3.1.5"
+kaleido = "0.2.1"
+
############################################################
# Tool dependencies required by tool implementations
############################################################
diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py
index d5a54ba731..9052a0b785 100644
--- a/api/services/dataset_service.py
+++ b/api/services/dataset_service.py
@@ -197,6 +197,28 @@ class DatasetService:
f"{ex.description}"
)
+ @staticmethod
+ def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, embedding_model: str):
+ try:
+ model_manager = ModelManager()
+ model_manager.get_model_instance(
+ tenant_id=tenant_id,
+ provider=embedding_model_provider,
+ model_type=ModelType.TEXT_EMBEDDING,
+ model=embedding_model
+ )
+ except LLMBadRequestError:
+ raise ValueError(
+ "No Embedding Model available. Please configure a valid provider "
+ "in the Settings -> Model Provider."
+ )
+ except ProviderTokenNotInitError as ex:
+ raise ValueError(
+ f"The dataset in unavailable, due to: "
+ f"{ex.description}"
+ )
+
+
@staticmethod
def update_dataset(dataset_id, data, user):
data.pop('partial_member_list', None)
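A hedged usage sketch of the new `check_embedding_model_setting` helper above; the call site, payload keys, and wrapper function are illustrative assumptions, not part of this patch:

    # Hypothetical call site (not in this patch): validate a dataset's
    # embedding model before persisting updated settings. The helper
    # raises ValueError with a user-facing message when the provider is
    # missing or its credentials are not initialized.
    from services.dataset_service import DatasetService

    def validate_embedding_settings(tenant_id: str, payload: dict) -> None:
        DatasetService.check_embedding_model_setting(
            tenant_id=tenant_id,
            embedding_model_provider=payload['embedding_model_provider'],  # assumed key
            embedding_model=payload['embedding_model'],  # assumed key
        )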
diff --git a/api/services/file_service.py b/api/services/file_service.py
index c686b190fe..9139962240 100644
--- a/api/services/file_service.py
+++ b/api/services/file_service.py
@@ -109,7 +109,7 @@ class FileService:
tenant_id=current_user.current_tenant_id,
storage_type=dify_config.STORAGE_TYPE,
key=file_key,
- name=text_name + '.txt',
+ name=text_name,
size=len(text),
extension='txt',
mime_type='text/plain',
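The rename above avoids a doubled extension in the stored display name; a minimal sketch of the before/after behavior, assuming `text_name` already carries its extension:

    # Assuming the caller passes a name that already includes '.txt':
    text_name = 'notes.txt'
    old_name = text_name + '.txt'  # before the fix: 'notes.txt.txt'
    new_name = text_name           # after the fix:  'notes.txt'
    assert (old_name, new_name) == ('notes.txt.txt', 'notes.txt')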
diff --git a/api/tasks/ops_trace_task.py b/api/tasks/ops_trace_task.py
index 1d33609205..6b4cab55b3 100644
--- a/api/tasks/ops_trace_task.py
+++ b/api/tasks/ops_trace_task.py
@@ -22,10 +22,8 @@ def process_trace_tasks(tasks_data):
trace_info = tasks_data.get('trace_info')
app_id = tasks_data.get('app_id')
- conversation_id = tasks_data.get('conversation_id')
- message_id = tasks_data.get('message_id')
trace_info_type = tasks_data.get('trace_info_type')
- trace_instance = OpsTraceManager.get_ops_trace_instance(app_id, conversation_id, message_id)
+ trace_instance = OpsTraceManager.get_ops_trace_instance(app_id)
if trace_info.get('message_data'):
trace_info['message_data'] = Message.from_dict(data=trace_info['message_data'])
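With the simplified lookup above, resolving a trace instance needs only the app id; a sketch of the producer side, where the payload values are hypothetical:

    # Illustrative payload after this change: conversation_id and
    # message_id are no longer read when resolving the trace instance.
    from tasks.ops_trace_task import process_trace_tasks

    tasks_data = {
        'trace_info': {},              # assumed minimal shape
        'app_id': 'app-1234',          # hypothetical id
        'trace_info_type': 'message',  # hypothetical type name
    }
    process_trace_tasks(tasks_data)    # or .delay(tasks_data) via Celery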
diff --git a/api/tests/integration_tests/model_runtime/upstage/__init__.py b/api/tests/integration_tests/model_runtime/upstage/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/tests/integration_tests/model_runtime/upstage/test_llm.py b/api/tests/integration_tests/model_runtime/upstage/test_llm.py
new file mode 100644
index 0000000000..c35580a8b1
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/upstage/test_llm.py
@@ -0,0 +1,245 @@
+import os
+from collections.abc import Generator
+
+import pytest
+
+from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
+from core.model_runtime.entities.message_entities import (
+ AssistantPromptMessage,
+ PromptMessageTool,
+ SystemPromptMessage,
+ UserPromptMessage,
+)
+from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
+from core.model_runtime.model_providers.upstage.llm.llm import UpstageLargeLanguageModel
+
+"""FOR MOCK FIXTURES, DO NOT REMOVE"""
+from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
+
+
+def test_predefined_models():
+ model = UpstageLargeLanguageModel()
+ model_schemas = model.predefined_models()
+
+ assert len(model_schemas) >= 1
+ assert isinstance(model_schemas[0], AIModelEntity)
+
+@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
+def test_validate_credentials_for_chat_model(setup_openai_mock):
+ model = UpstageLargeLanguageModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ # use gpt-3.5-turbo here because the mocked OpenAI endpoint expects an OpenAI model name
+ model.validate_credentials(
+ model='gpt-3.5-turbo',
+ credentials={
+ 'upstage_api_key': 'invalid_key'
+ }
+ )
+
+ model.validate_credentials(
+ model='solar-1-mini-chat',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY')
+ }
+ )
+
+@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
+def test_invoke_chat_model(setup_openai_mock):
+ model = UpstageLargeLanguageModel()
+
+ result = model.invoke(
+ model='solar-1-mini-chat',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY')
+ },
+ prompt_messages=[
+ SystemPromptMessage(
+ content='You are a helpful AI assistant.',
+ ),
+ UserPromptMessage(
+ content='Hello World!'
+ )
+ ],
+ model_parameters={
+ 'temperature': 0.0,
+ 'top_p': 1.0,
+ 'presence_penalty': 0.0,
+ 'frequency_penalty': 0.0,
+ 'max_tokens': 10
+ },
+ stop=['How'],
+ stream=False,
+ user="abc-123"
+ )
+
+ assert isinstance(result, LLMResult)
+ assert len(result.message.content) > 0
+
+@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
+def test_invoke_chat_model_with_tools(setup_openai_mock):
+ model = UpstageLargeLanguageModel()
+
+ result = model.invoke(
+ model='solar-1-mini-chat',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY')
+ },
+ prompt_messages=[
+ SystemPromptMessage(
+ content='You are a helpful AI assistant.',
+ ),
+ UserPromptMessage(
+ content="what's the weather today in London?",
+ )
+ ],
+ model_parameters={
+ 'temperature': 0.0,
+ 'max_tokens': 100
+ },
+ tools=[
+ PromptMessageTool(
+ name='get_weather',
+ description='Determine weather in my location',
+ parameters={
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state e.g. San Francisco, CA"
+ },
+ "unit": {
+ "type": "string",
+ "enum": [
+ "c",
+ "f"
+ ]
+ }
+ },
+ "required": [
+ "location"
+ ]
+ }
+ ),
+ PromptMessageTool(
+ name='get_stock_price',
+ description='Get the current stock price',
+ parameters={
+ "type": "object",
+ "properties": {
+ "symbol": {
+ "type": "string",
+ "description": "The stock symbol"
+ }
+ },
+ "required": [
+ "symbol"
+ ]
+ }
+ )
+ ],
+ stream=False,
+ user="abc-123"
+ )
+
+ assert isinstance(result, LLMResult)
+ assert isinstance(result.message, AssistantPromptMessage)
+ assert len(result.message.tool_calls) > 0
+
+@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
+def test_invoke_stream_chat_model(setup_openai_mock):
+ model = UpstageLargeLanguageModel()
+
+ result = model.invoke(
+ model='solar-1-mini-chat',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY')
+ },
+ prompt_messages=[
+ SystemPromptMessage(
+ content='You are a helpful AI assistant.',
+ ),
+ UserPromptMessage(
+ content='Hello World!'
+ )
+ ],
+ model_parameters={
+ 'temperature': 0.0,
+ 'max_tokens': 100
+ },
+ stream=True,
+ user="abc-123"
+ )
+
+ assert isinstance(result, Generator)
+
+ for chunk in result:
+ assert isinstance(chunk, LLMResultChunk)
+ assert isinstance(chunk.delta, LLMResultChunkDelta)
+ assert isinstance(chunk.delta.message, AssistantPromptMessage)
+ assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
+ if chunk.delta.finish_reason is not None:
+ assert chunk.delta.usage is not None
+ assert chunk.delta.usage.completion_tokens > 0
+
+
+def test_get_num_tokens():
+ model = UpstageLargeLanguageModel()
+
+ num_tokens = model.get_num_tokens(
+ model='solar-1-mini-chat',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY')
+ },
+ prompt_messages=[
+ UserPromptMessage(
+ content='Hello World!'
+ )
+ ]
+ )
+
+ assert num_tokens == 13
+
+ num_tokens = model.get_num_tokens(
+ model='solar-1-mini-chat',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY')
+ },
+ prompt_messages=[
+ SystemPromptMessage(
+ content='You are a helpful AI assistant.',
+ ),
+ UserPromptMessage(
+ content='Hello World!'
+ )
+ ],
+ tools=[
+ PromptMessageTool(
+ name='get_weather',
+ description='Determine weather in my location',
+ parameters={
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state e.g. San Francisco, CA"
+ },
+ "unit": {
+ "type": "string",
+ "enum": [
+ "c",
+ "f"
+ ]
+ }
+ },
+ "required": [
+ "location"
+ ]
+ }
+ ),
+ ]
+ )
+
+ assert num_tokens == 106
diff --git a/api/tests/integration_tests/model_runtime/upstage/test_provider.py b/api/tests/integration_tests/model_runtime/upstage/test_provider.py
new file mode 100644
index 0000000000..c33eef49b2
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/upstage/test_provider.py
@@ -0,0 +1,23 @@
+import os
+
+import pytest
+
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.upstage.upstage import UpstageProvider
+from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
+
+
+@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
+def test_validate_provider_credentials(setup_openai_mock):
+ provider = UpstageProvider()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ provider.validate_provider_credentials(
+ credentials={}
+ )
+
+ provider.validate_provider_credentials(
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY')
+ }
+ )
diff --git a/api/tests/integration_tests/model_runtime/upstage/test_text_embedding.py b/api/tests/integration_tests/model_runtime/upstage/test_text_embedding.py
new file mode 100644
index 0000000000..54135a0e74
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/upstage/test_text_embedding.py
@@ -0,0 +1,67 @@
+import os
+
+import pytest
+
+from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.upstage.text_embedding.text_embedding import UpstageTextEmbeddingModel
+from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
+
+
+@pytest.mark.parametrize('setup_openai_mock', [['text_embedding']], indirect=True)
+def test_validate_credentials(setup_openai_mock):
+ model = UpstageTextEmbeddingModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model='solar-embedding-1-large-passage',
+ credentials={
+ 'upstage_api_key': 'invalid_key'
+ }
+ )
+
+ model.validate_credentials(
+ model='solar-embedding-1-large-passage',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY')
+ }
+ )
+
+@pytest.mark.parametrize('setup_openai_mock', [['text_embedding']], indirect=True)
+def test_invoke_model(setup_openai_mock):
+ model = UpstageTextEmbeddingModel()
+
+ result = model.invoke(
+ model='solar-embedding-1-large-passage',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY'),
+ },
+ texts=[
+ "hello",
+ "world",
+ " ".join(["long_text"] * 100),
+ " ".join(["another_long_text"] * 100)
+ ],
+ user="abc-123"
+ )
+
+ assert isinstance(result, TextEmbeddingResult)
+ assert len(result.embeddings) == 4
+ assert result.usage.total_tokens == 2
+
+
+def test_get_num_tokens():
+ model = UpstageTextEmbeddingModel()
+
+ num_tokens = model.get_num_tokens(
+ model='solar-embedding-1-large-passage',
+ credentials={
+ 'upstage_api_key': os.environ.get('UPSTAGE_API_KEY'),
+ },
+ texts=[
+ "hello",
+ "world"
+ ]
+ )
+
+ assert num_tokens == 5
diff --git a/dev/pytest/pytest_model_runtime.sh b/dev/pytest/pytest_model_runtime.sh
index 2e113346c7..aba13292ab 100755
--- a/dev/pytest/pytest_model_runtime.sh
+++ b/dev/pytest/pytest_model_runtime.sh
@@ -5,4 +5,6 @@ pytest api/tests/integration_tests/model_runtime/anthropic \
api/tests/integration_tests/model_runtime/azure_openai \
api/tests/integration_tests/model_runtime/openai api/tests/integration_tests/model_runtime/chatglm \
api/tests/integration_tests/model_runtime/google api/tests/integration_tests/model_runtime/xinference \
- api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py
+ api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py \
+ api/tests/integration_tests/model_runtime/upstage
+
diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml
index 9c2efdd817..807946f3fe 100644
--- a/docker-legacy/docker-compose.yaml
+++ b/docker-legacy/docker-compose.yaml
@@ -2,7 +2,7 @@ version: '3'
services:
# API service
api:
- image: langgenius/dify-api:0.6.15
+ image: langgenius/dify-api:0.6.16
restart: always
environment:
# Startup mode, 'api' starts the API server.
@@ -224,7 +224,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
- image: langgenius/dify-api:0.6.15
+ image: langgenius/dify-api:0.6.16
restart: always
environment:
CONSOLE_WEB_URL: ''
@@ -390,7 +390,7 @@ services:
# Frontend web application.
web:
- image: langgenius/dify-web:0.6.15
+ image: langgenius/dify-web:0.6.16
restart: always
environment:
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
diff --git a/docker/README.md b/docker/README.md
index 86c367a63f..1223a58024 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -7,18 +7,14 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T
- **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.
For more information, refer to `docker/certbot/README.md`.
-- **Persistent Environment Variables
- **: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
+- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
> What is `.env`?
> The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration across deployments.
-- **Unified Vector Database Services
- **: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
-- **Mandatory .env File
- **: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
-- **Legacy Support
- **: Previous deployment files are now located in the `docker-legacy` directory and will no longer be maintained.
+- **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
+- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
+- **Legacy Support**: Previous deployment files are now located in the `docker-legacy` directory and will no longer be maintained.
### How to Deploy Dify with `docker-compose.yaml`
@@ -29,7 +25,7 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T
- Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
3. **Running the Services**:
- Execute `docker compose up` from the `docker` directory to start the services.
- - To specify a vector database, set the `VECTOR_store` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
+ - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
4. **SSL Certificate Setup**:
- Refer to `docker/certbot/README.md` to set up SSL certificates using Certbot.
@@ -56,10 +52,8 @@ For users migrating from the `docker-legacy` setup:
#### Key Modules and Customization
-- **Vector Database Services
- **: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
-- **Storage Services
- **: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
+- **Vector Database Services**: Depending on the type of vector database used (`VECTOR_STORE`), users can set specific endpoints, ports, and authentication details.
+- **Storage Services**: Depending on the storage type (`STORAGE_TYPE`), users can configure specific settings for S3, Azure Blob, Google Storage, etc.
- **API and Web Services**: Users can define URLs and other settings that affect how the API and web frontends operate.
#### Other notable variables
@@ -99,9 +93,7 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w
### Additional Information
-- **Continuous Improvement Phase
- **: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.
-- **Support
- **: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory.
+- **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.
+- **Support**: For detailed configuration options and environment variable settings, refer to the `.env.example` file and the Docker Compose configuration files in the `docker` directory.
-This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.
\ No newline at end of file
+This README aims to guide you through the deployment process using the new Docker Compose setup. For any issues or further assistance, please refer to the official documentation or contact support.
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 1caf244fb1..1864542668 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -182,7 +182,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
- image: langgenius/dify-api:0.6.15
+ image: langgenius/dify-api:0.6.16
restart: always
environment:
# Use the shared environment variables.
@@ -202,7 +202,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
- image: langgenius/dify-api:0.6.15
+ image: langgenius/dify-api:0.6.16
restart: always
environment:
# Use the shared environment variables.
@@ -221,7 +221,7 @@ services:
# Frontend web application.
web:
- image: langgenius/dify-web:0.6.15
+ image: langgenius/dify-web:0.6.16
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
diff --git a/web/app/(commonLayout)/datasets/DatasetCard.tsx b/web/app/(commonLayout)/datasets/DatasetCard.tsx
index d4b83f8a1f..ed8d680c19 100644
--- a/web/app/(commonLayout)/datasets/DatasetCard.tsx
+++ b/web/app/(commonLayout)/datasets/DatasetCard.tsx
@@ -1,7 +1,7 @@
'use client'
import { useContext } from 'use-context-selector'
-import Link from 'next/link'
+import { useRouter } from 'next/navigation'
import { useCallback, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import {
@@ -33,6 +33,8 @@ const DatasetCard = ({
}: DatasetCardProps) => {
const { t } = useTranslation()
const { notify } = useContext(ToastContext)
+ const { push } = useRouter()
+
const { isCurrentWorkspaceDatasetOperator } = useAppContext()
const [tags, setTags] = useState(dataset.tags)
@@ -107,10 +109,13 @@ const DatasetCard = ({
return (
<>
- <Link
- href={`/datasets/${dataset.id}/documents`}
+ <div
+ onClick={(e) => {
+ e.preventDefault()
+ push(`/datasets/${dataset.id}/documents`)
+ }}
>
- </Link>
+ </div>
{showRenameModal && (
= ({
validated={validatedSuccess}
placeholder={placeholder?.[language] || placeholder?.en_US}
disabled={disabed}
- type={formSchema.type === FormTypeEnum.textNumber ? 'number' : formSchema.type === FormTypeEnum.secretInput ? 'password' : 'text'}
+ type={formSchema.type === FormTypeEnum.textNumber ? 'number' : 'text'}
{...(formSchema.type === FormTypeEnum.textNumber ? { min: (formSchema as CredentialFormSchemaNumberInput).min, max: (formSchema as CredentialFormSchemaNumberInput).max } : {})}
/>
{fieldMoreInfo?.(formSchema)}
diff --git a/web/app/components/workflow/constants.ts b/web/app/components/workflow/constants.ts
index de72ce6401..1f2c7b9116 100644
--- a/web/app/components/workflow/constants.ts
+++ b/web/app/components/workflow/constants.ts
@@ -313,7 +313,7 @@ export const NODE_WIDTH = 240
export const X_OFFSET = 60
export const NODE_WIDTH_X_OFFSET = NODE_WIDTH + X_OFFSET
export const Y_OFFSET = 39
-export const MAX_TREE_DEEPTH = 50
+export const MAX_TREE_DEPTH = 50
export const START_INITIAL_POSITION = { x: 80, y: 282 }
export const AUTO_LAYOUT_OFFSET = {
x: -42,
diff --git a/web/app/components/workflow/hooks/use-checklist.ts b/web/app/components/workflow/hooks/use-checklist.ts
index 142f96ed2a..7f45769acd 100644
--- a/web/app/components/workflow/hooks/use-checklist.ts
+++ b/web/app/components/workflow/hooks/use-checklist.ts
@@ -16,7 +16,7 @@ import {
} from '../utils'
import {
CUSTOM_NODE,
- MAX_TREE_DEEPTH,
+ MAX_TREE_DEPTH,
} from '../constants'
import type { ToolNodeType } from '../nodes/tool/types'
import { useIsChatMode } from './use-workflow'
@@ -119,8 +119,8 @@ export const useChecklistBeforePublish = () => {
maxDepth,
} = getValidTreeNodes(nodes.filter(node => node.type === CUSTOM_NODE), edges)
- if (maxDepth > MAX_TREE_DEEPTH) {
- notify({ type: 'error', message: t('workflow.common.maxTreeDepth', { depth: MAX_TREE_DEEPTH }) })
+ if (maxDepth > MAX_TREE_DEPTH) {
+ notify({ type: 'error', message: t('workflow.common.maxTreeDepth', { depth: MAX_TREE_DEPTH }) })
return false
}
diff --git a/web/app/components/workflow/nodes/_base/components/variable/utils.ts b/web/app/components/workflow/nodes/_base/components/variable/utils.ts
index f1a93f9988..b3840dd458 100644
--- a/web/app/components/workflow/nodes/_base/components/variable/utils.ts
+++ b/web/app/components/workflow/nodes/_base/components/variable/utils.ts
@@ -432,12 +432,12 @@ export const getVarType = ({
else {
(valueSelector as ValueSelector).slice(1).forEach((key, i) => {
const isLast = i === valueSelector.length - 2
- curr = curr.find((v: any) => v.variable === key)
+ curr = curr?.find((v: any) => v.variable === key)
if (isLast) {
type = curr?.type
}
else {
- if (curr.type === VarType.object)
+ if (curr?.type === VarType.object)
curr = curr.children
}
})
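The optional-chaining guards above keep the traversal from dereferencing a missing intermediate variable; a rough Python mirror of the guarded walk, with the data shape assumed:

    # Hypothetical mirror of the guarded selector walk: stop deriving a
    # type as soon as any segment of the selector fails to resolve.
    def resolve_var_type(children, selector):
        curr, var_type = children, None
        for i, key in enumerate(selector[1:]):
            is_last = i == len(selector) - 2
            curr = next((v for v in (curr or []) if v.get('variable') == key), None)
            if is_last:
                var_type = curr.get('type') if curr else None
            elif curr and curr.get('type') == 'object':
                curr = curr.get('children')
        return var_type

    # e.g. resolve_var_type([{'variable': 'a', 'type': 'string'}], ['node', 'a'])
    # -> 'string'; an unknown key now yields None instead of crashing.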
diff --git a/web/app/components/workflow/nodes/knowledge-retrieval/default.ts b/web/app/components/workflow/nodes/knowledge-retrieval/default.ts
index d533cd5b3c..f66cab6ca9 100644
--- a/web/app/components/workflow/nodes/knowledge-retrieval/default.ts
+++ b/web/app/components/workflow/nodes/knowledge-retrieval/default.ts
@@ -1,6 +1,7 @@
import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import type { KnowledgeRetrievalNodeType } from './types'
+import { RerankingModeEnum } from '@/models/datasets'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { DATASET_DEFAULT } from '@/config'
import { RETRIEVE_TYPE } from '@/types/app'
@@ -35,7 +36,7 @@ const nodeDefault: NodeDefault<KnowledgeRetrievalNodeType> = {
if (!errorMessages && (!payload.dataset_ids || payload.dataset_ids.length === 0))
errorMessages = t(`${i18nPrefix}.errorMsg.fieldRequired`, { field: t(`${i18nPrefix}.nodes.knowledgeRetrieval.knowledge`) })
- if (!errorMessages && payload.retrieval_mode === RETRIEVE_TYPE.multiWay && !payload.multiple_retrieval_config?.reranking_model?.provider)
+ if (!errorMessages && payload.retrieval_mode === RETRIEVE_TYPE.multiWay && payload.multiple_retrieval_config?.reranking_mode === RerankingModeEnum.RerankingModel && !payload.multiple_retrieval_config?.reranking_model?.provider)
errorMessages = t(`${i18nPrefix}.errorMsg.fieldRequired`, { field: t(`${i18nPrefix}.errorMsg.fields.rerankModel`) })
if (!errorMessages && payload.retrieval_mode === RETRIEVE_TYPE.oneWay && !payload.single_retrieval_config?.model?.provider)
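The narrowed condition above only demands a rerank model when multi-way retrieval explicitly selects reranking-model mode; a Python sketch of the predicate, with the enum string values assumed:

    # Hypothetical mirror of the narrowed validation: an error fires only
    # when multi-way retrieval uses reranking-model mode and no provider
    # is configured. Enum values 'multiple' / 'reranking_model' are assumed.
    def missing_rerank_model(payload: dict) -> bool:
        multi = payload.get('multiple_retrieval_config') or {}
        return (
            payload.get('retrieval_mode') == 'multiple'
            and multi.get('reranking_mode') == 'reranking_model'
            and not (multi.get('reranking_model') or {}).get('provider')
        )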
diff --git a/web/package.json b/web/package.json
index f567f57b2a..2d6cd0a511 100644
--- a/web/package.json
+++ b/web/package.json
@@ -1,6 +1,6 @@
{
"name": "dify-web",
- "version": "0.6.15",
+ "version": "0.6.16",
"private": true,
"engines": {
"node": ">=18.17.0"