From cae73b9a329cb8edc9dae8ef53593fa29f5c2d9f Mon Sep 17 00:00:00 2001 From: Hiroshi Fujita Date: Mon, 23 Sep 2024 10:05:02 +0900 Subject: [PATCH 01/64] Make WORKFLOW_* configurable as environment variables. (#8644) --- docker/.env.example | 5 +++++ docker/docker-compose.yaml | 3 +++ 2 files changed, 8 insertions(+) diff --git a/docker/.env.example b/docker/.env.example index c892c15636..7eaaceb928 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -563,6 +563,11 @@ CODE_MAX_STRING_ARRAY_LENGTH=30 CODE_MAX_OBJECT_ARRAY_LENGTH=30 CODE_MAX_NUMBER_ARRAY_LENGTH=1000 +# Workflow runtime configuration +WORKFLOW_MAX_EXECUTION_STEPS=500 +WORKFLOW_MAX_EXECUTION_TIME=1200 +WORKFLOW_CALL_MAX_DEPTH=5 + # SSRF Proxy server HTTP URL SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 # SSRF Proxy server HTTPS URL diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index e72c3724f9..16bef279bc 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -202,6 +202,9 @@ x-shared-env: &shared-api-worker-env CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} + WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} + WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} + WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} From 03fdf5e7f8e44950cce683fc8c6ef4b89b90cf02 Mon Sep 17 00:00:00 2001 From: Shota Totsuka <153569547+totsukash@users.noreply.github.com> Date: Mon, 23 Sep 2024 10:06:01 +0900 Subject: [PATCH 02/64] chore: Enable Japanese descriptions for Tools (#8646) --- api/core/tools/entities/common_entities.py | 5 ++++- api/core/tools/provider/builtin/arxiv/arxiv.yaml | 2 ++ .../tools/provider/builtin/arxiv/tools/arxiv_search.yaml 
| 4 ++++ api/services/tools/tools_transform_service.py | 2 ++ web/i18n/language.ts | 6 ++---- 5 files changed, 14 insertions(+), 5 deletions(-) diff --git a/api/core/tools/entities/common_entities.py b/api/core/tools/entities/common_entities.py index 37a926697b..b52119fdc4 100644 --- a/api/core/tools/entities/common_entities.py +++ b/api/core/tools/entities/common_entities.py @@ -10,6 +10,7 @@ class I18nObject(BaseModel): zh_Hans: Optional[str] = None pt_BR: Optional[str] = None + ja_JP: Optional[str] = None en_US: str def __init__(self, **data): @@ -18,6 +19,8 @@ class I18nObject(BaseModel): self.zh_Hans = self.en_US if not self.pt_BR: self.pt_BR = self.en_US + if not self.ja_JP: + self.ja_JP = self.en_US def to_dict(self) -> dict: - return {"zh_Hans": self.zh_Hans, "en_US": self.en_US, "pt_BR": self.pt_BR} + return {"zh_Hans": self.zh_Hans, "en_US": self.en_US, "pt_BR": self.pt_BR, "ja_JP": self.ja_JP} diff --git a/api/core/tools/provider/builtin/arxiv/arxiv.yaml b/api/core/tools/provider/builtin/arxiv/arxiv.yaml index d26993b336..25aec97bb7 100644 --- a/api/core/tools/provider/builtin/arxiv/arxiv.yaml +++ b/api/core/tools/provider/builtin/arxiv/arxiv.yaml @@ -4,9 +4,11 @@ identity: label: en_US: ArXiv zh_Hans: ArXiv + ja_JP: ArXiv description: en_US: Access to a vast repository of scientific papers and articles in various fields of research. zh_Hans: 访问各个研究领域大量科学论文和文章的存储库。 + ja_JP: 多様な研究分野の科学論文や記事の膨大なリポジトリへのアクセス。 icon: icon.svg tags: - search diff --git a/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.yaml b/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.yaml index 7439a48658..afc1925df3 100644 --- a/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.yaml +++ b/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.yaml @@ -4,10 +4,12 @@ identity: label: en_US: Arxiv Search zh_Hans: Arxiv 搜索 + ja_JP: Arxiv 検索 description: human: en_US: A tool for searching scientific papers and articles from the Arxiv repository. 
Input can be an Arxiv ID or an author's name. zh_Hans: 一个用于从Arxiv存储库搜索科学论文和文章的工具。 输入可以是Arxiv ID或作者姓名。 + ja_JP: Arxivリポジトリから科学論文や記事を検索するためのツールです。入力はArxiv IDまたは著者名にすることができます。 llm: A tool for searching scientific papers and articles from the Arxiv repository. Input can be an Arxiv ID or an author's name. parameters: - name: query @@ -16,8 +18,10 @@ parameters: label: en_US: Query string zh_Hans: 查询字符串 + ja_JP: クエリ文字列 human_description: en_US: The Arxiv ID or author's name used for searching. zh_Hans: 用于搜索的Arxiv ID或作者姓名。 + ja_JP: 検索に使用されるArxiv IDまたは著者名。 llm_description: The Arxiv ID or author's name used for searching. form: llm diff --git a/api/services/tools/tools_transform_service.py b/api/services/tools/tools_transform_service.py index 7ae1b9f231..2bc48c4185 100644 --- a/api/services/tools/tools_transform_service.py +++ b/api/services/tools/tools_transform_service.py @@ -74,12 +74,14 @@ class ToolTransformService: en_US=provider_controller.identity.description.en_US, zh_Hans=provider_controller.identity.description.zh_Hans, pt_BR=provider_controller.identity.description.pt_BR, + ja_JP=provider_controller.identity.description.ja_JP, ), icon=provider_controller.identity.icon, label=I18nObject( en_US=provider_controller.identity.label.en_US, zh_Hans=provider_controller.identity.label.zh_Hans, pt_BR=provider_controller.identity.label.pt_BR, + ja_JP=provider_controller.identity.label.ja_JP, ), type=ToolProviderType.BUILT_IN, masked_credentials={}, diff --git a/web/i18n/language.ts b/web/i18n/language.ts index fde69328bd..c2e23cc19a 100644 --- a/web/i18n/language.ts +++ b/web/i18n/language.ts @@ -31,10 +31,8 @@ export const languages = data.languages export const LanguagesSupported = languages.filter(item => item.supported).map(item => item.value) export const getLanguage = (locale: string) => { - if (locale === 'zh-Hans') - return locale.replace('-', '_') - - return LanguagesSupported[0].replace('-', '_') + const supportedLocale = LanguagesSupported.find(lang => 
lang.startsWith(locale.split('-')[0])) + return (supportedLocale || LanguagesSupported[0]).replace('-', '_') } export const NOTICE_I18N = { From 3618a97c20e386aa49a39c1f6af55ce7414ffc5c Mon Sep 17 00:00:00 2001 From: Aaron Ji <127167174+DresAaron@users.noreply.github.com> Date: Mon, 23 Sep 2024 13:45:09 +0800 Subject: [PATCH 03/64] feat: extend api params for Jina Embeddings V3 (#8657) --- .../model_providers/jina/jina.yaml | 43 +++++++++++++++++++ .../jina/text_embedding/text_embedding.py | 42 ++++++++++++++---- 2 files changed, 76 insertions(+), 9 deletions(-) diff --git a/api/core/model_runtime/model_providers/jina/jina.yaml b/api/core/model_runtime/model_providers/jina/jina.yaml index 23e18ad75f..9c70d6ff33 100644 --- a/api/core/model_runtime/model_providers/jina/jina.yaml +++ b/api/core/model_runtime/model_providers/jina/jina.yaml @@ -67,3 +67,46 @@ model_credential_schema: required: false type: text-input default: '8192' + - variable: task + label: + zh_Hans: 下游任务 + en_US: Downstream task + placeholder: + zh_Hans: 选择将使用向量模型的下游任务。模型将返回针对该任务优化的向量。 + en_US: Select the downstream task for which the embeddings will be used. The model will return the optimized embeddings for that task. + required: false + type: select + options: + - value: retrieval.query + label: + en_US: retrieval.query + - value: retrieval.passage + label: + en_US: retrieval.passage + - value: separation + label: + en_US: separation + - value: classification + label: + en_US: classification + - value: text-matching + label: + en_US: text-matching + - variable: dimensions + label: + zh_Hans: 输出维度 + en_US: Output dimensions + placeholder: + zh_Hans: 输入您的输出维度 + en_US: Enter output dimensions + required: false + type: text-input + - variable: late_chunking + label: + zh_Hans: 后期分块 + en_US: Late chunking + placeholder: + zh_Hans: 应用后期分块技术来利用模型的长上下文功能来生成上下文块向量化。 + en_US: Apply the late chunking technique to leverage the model's long-context capabilities for generating contextual chunk embeddings. 
+ required: false + type: switch diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py index ceb79567d5..6c96699ea2 100644 --- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py @@ -27,6 +27,38 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): api_base: str = "https://api.jina.ai/v1" + def _to_payload(self, model: str, texts: list[str], credentials: dict) -> dict: + """ + Parse model credentials + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :return: parsed credentials + """ + + def transform_jina_input_text(model, text): + if model == "jina-clip-v1": + return {"text": text} + return text + + data = {"model": model, "input": [transform_jina_input_text(model, text) for text in texts]} + + task = credentials.get("task") + dimensions = credentials.get("dimensions") + late_chunking = credentials.get("late_chunking") + + if task is not None: + data["task"] = task + + if dimensions is not None: + data["dimensions"] = int(dimensions) + + if late_chunking is not None: + data["late_chunking"] = late_chunking + + return data + def _invoke( self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None ) -> TextEmbeddingResult: @@ -49,15 +81,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): url = base_url + "/embeddings" headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - def transform_jina_input_text(model, text): - if model == "jina-clip-v1": - return {"text": text} - return text - - data = {"model": model, "input": [transform_jina_input_text(model, text) for text in texts]} - - if model == "jina-embeddings-v3": - data["task"] = "text-matching" + data = self._to_payload(model=model, texts=texts, credentials=credentials) try: response = 
post(url, headers=headers, data=dumps(data)) From 4c7beb9d7b5e566209600ea0975677c7d46a6926 Mon Sep 17 00:00:00 2001 From: haike-1213 <2100797950@qq.com> Date: Mon, 23 Sep 2024 07:23:52 +0000 Subject: [PATCH 04/64] fix: Assignment exception (#8663) Co-authored-by: fum --- api/core/app/apps/base_app_generate_response_converter.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/api/core/app/apps/base_app_generate_response_converter.py b/api/core/app/apps/base_app_generate_response_converter.py index c6855ac854..62e79ec444 100644 --- a/api/core/app/apps/base_app_generate_response_converter.py +++ b/api/core/app/apps/base_app_generate_response_converter.py @@ -75,10 +75,10 @@ class AppGenerateResponseConverter(ABC): :return: """ # show_retrieve_source + updated_resources = [] if "retriever_resources" in metadata: - metadata["retriever_resources"] = [] for resource in metadata["retriever_resources"]: - metadata["retriever_resources"].append( + updated_resources.append( { "segment_id": resource["segment_id"], "position": resource["position"], @@ -87,6 +87,7 @@ class AppGenerateResponseConverter(ABC): "content": resource["content"], } ) + metadata["retriever_resources"] = updated_resources # show annotation reply if "annotation_reply" in metadata: From 86f90fd9ff4905e5c1b85cd445466cfb3f2c457c Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Mon, 23 Sep 2024 15:28:57 +0800 Subject: [PATCH 05/64] chore: skip PLR6201 linter rule (#8666) --- api/core/tools/provider/builtin/firecrawl/tools/crawl.py | 4 ++-- api/core/tools/provider/builtin/firecrawl/tools/scrape.py | 4 ++-- api/pyproject.toml | 1 - 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/api/core/tools/provider/builtin/firecrawl/tools/crawl.py b/api/core/tools/provider/builtin/firecrawl/tools/crawl.py index 9675b8eb91..15ab510c6c 100644 --- a/api/core/tools/provider/builtin/firecrawl/tools/crawl.py +++ b/api/core/tools/provider/builtin/firecrawl/tools/crawl.py @@ -35,10 +35,10 @@ 
class CrawlTool(BuiltinTool): scrapeOptions["excludeTags"] = get_array_params(tool_parameters, "excludeTags") scrapeOptions["onlyMainContent"] = tool_parameters.get("onlyMainContent", False) scrapeOptions["waitFor"] = tool_parameters.get("waitFor", 0) - scrapeOptions = {k: v for k, v in scrapeOptions.items() if v not in {None, ""}} + scrapeOptions = {k: v for k, v in scrapeOptions.items() if v not in (None, "")} payload["scrapeOptions"] = scrapeOptions or None - payload = {k: v for k, v in payload.items() if v not in {None, ""}} + payload = {k: v for k, v in payload.items() if v not in (None, "")} crawl_result = app.crawl_url(url=tool_parameters["url"], wait=wait_for_results, **payload) diff --git a/api/core/tools/provider/builtin/firecrawl/tools/scrape.py b/api/core/tools/provider/builtin/firecrawl/tools/scrape.py index 538b4a1fcb..f00a9b31ce 100644 --- a/api/core/tools/provider/builtin/firecrawl/tools/scrape.py +++ b/api/core/tools/provider/builtin/firecrawl/tools/scrape.py @@ -29,10 +29,10 @@ class ScrapeTool(BuiltinTool): extract["schema"] = get_json_params(tool_parameters, "schema") extract["systemPrompt"] = tool_parameters.get("systemPrompt") extract["prompt"] = tool_parameters.get("prompt") - extract = {k: v for k, v in extract.items() if v not in {None, ""}} + extract = {k: v for k, v in extract.items() if v not in (None, "")} payload["extract"] = extract or None - payload = {k: v for k, v in payload.items() if v not in {None, ""}} + payload = {k: v for k, v in payload.items() if v not in (None, "")} crawl_result = app.scrape_url(url=tool_parameters["url"], **payload) markdown_result = crawl_result.get("data", {}).get("markdown", "") diff --git a/api/pyproject.toml b/api/pyproject.toml index 506f379aaf..066b4772a9 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -28,7 +28,6 @@ select = [ "PLR0402", # manual-from-import "PLR1711", # useless-return "PLR1714", # repeated-equality-comparison - "PLR6201", # literal-membership "RUF019", # 
unnecessary-key-check "RUF100", # unused-noqa "RUF101", # redirected-noqa From b37954b9661067977f45524dce0bdb8863309b9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Mon, 23 Sep 2024 15:33:06 +0800 Subject: [PATCH 06/64] fix: png avatar upload as jpeg (#8665) --- .../components/base/app-icon-picker/index.tsx | 2 +- .../components/base/app-icon-picker/utils.ts | 21 ++++++++++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/web/app/components/base/app-icon-picker/index.tsx b/web/app/components/base/app-icon-picker/index.tsx index 8254815475..ba375abdd9 100644 --- a/web/app/components/base/app-icon-picker/index.tsx +++ b/web/app/components/base/app-icon-picker/index.tsx @@ -88,7 +88,7 @@ const AppIconPicker: FC = ({ if (!imageCropInfo) return setUploading(true) - const blob = await getCroppedImg(imageCropInfo.tempUrl, imageCropInfo.croppedAreaPixels) + const blob = await getCroppedImg(imageCropInfo.tempUrl, imageCropInfo.croppedAreaPixels, imageCropInfo.fileName) const file = new File([blob], imageCropInfo.fileName, { type: blob.type }) handleLocalFileUpload(file) } diff --git a/web/app/components/base/app-icon-picker/utils.ts b/web/app/components/base/app-icon-picker/utils.ts index 0c90e96feb..14c9ae3f28 100644 --- a/web/app/components/base/app-icon-picker/utils.ts +++ b/web/app/components/base/app-icon-picker/utils.ts @@ -11,6 +11,23 @@ export function getRadianAngle(degreeValue: number) { return (degreeValue * Math.PI) / 180 } +export function getMimeType(fileName: string): string { + const extension = fileName.split('.').pop()?.toLowerCase() + switch (extension) { + case 'png': + return 'image/png' + case 'jpg': + case 'jpeg': + return 'image/jpeg' + case 'gif': + return 'image/gif' + case 'webp': + return 'image/webp' + default: + return 'image/jpeg' + } +} + /** * Returns the new bounding area of a rotated rectangle. 
*/ @@ -31,12 +48,14 @@ export function rotateSize(width: number, height: number, rotation: number) { export default async function getCroppedImg( imageSrc: string, pixelCrop: { x: number; y: number; width: number; height: number }, + fileName: string, rotation = 0, flip = { horizontal: false, vertical: false }, ): Promise { const image = await createImage(imageSrc) const canvas = document.createElement('canvas') const ctx = canvas.getContext('2d') + const mimeType = getMimeType(fileName) if (!ctx) throw new Error('Could not create a canvas context') @@ -93,6 +112,6 @@ export default async function getCroppedImg( resolve(file) else reject(new Error('Could not create a blob')) - }, 'image/jpeg') + }, mimeType) }) } From c66cecaa555722b7f82e069f58c3874bf39c771f Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:18:55 +0800 Subject: [PATCH 07/64] add Qwen model translate (#8674) --- .../tongyi/llm/farui-plus.yaml | 4 +++ .../tongyi/llm/qwen-coder-turbo-0919.yaml | 4 +++ .../tongyi/llm/qwen-coder-turbo-latest.yaml | 4 +++ .../tongyi/llm/qwen-coder-turbo.yaml | 4 +++ .../model_providers/tongyi/llm/qwen-long.yaml | 4 +++ .../tongyi/llm/qwen-math-plus-0816.yaml | 4 +++ .../tongyi/llm/qwen-math-plus-0919.yaml | 4 +++ .../tongyi/llm/qwen-math-plus-latest.yaml | 4 +++ .../tongyi/llm/qwen-math-plus.yaml | 4 +++ .../tongyi/llm/qwen-math-turbo-0919.yaml | 4 +++ .../tongyi/llm/qwen-math-turbo-latest.yaml | 4 +++ .../tongyi/llm/qwen-math-turbo.yaml | 4 +++ .../tongyi/llm/qwen-max-0107.yaml | 4 +++ .../tongyi/llm/qwen-max-0403.yaml | 4 +++ .../tongyi/llm/qwen-max-0428.yaml | 4 +++ .../tongyi/llm/qwen-max-0919.yaml | 4 +++ .../tongyi/llm/qwen-max-latest.yaml | 4 +++ .../tongyi/llm/qwen-max-longcontext.yaml | 4 +++ .../model_providers/tongyi/llm/qwen-max.yaml | 4 +++ .../tongyi/llm/qwen-plus-0206.yaml | 4 +++ .../tongyi/llm/qwen-plus-0624.yaml | 4 +++ .../tongyi/llm/qwen-plus-0723.yaml | 4 +++ .../tongyi/llm/qwen-plus-0806.yaml 
| 4 +++ .../tongyi/llm/qwen-plus-0919.yaml | 4 +++ .../tongyi/llm/qwen-plus-chat.yaml | 4 +++ .../tongyi/llm/qwen-plus-latest.yaml | 4 +++ .../model_providers/tongyi/llm/qwen-plus.yaml | 4 +++ .../tongyi/llm/qwen-turbo-0206.yaml | 4 +++ .../tongyi/llm/qwen-turbo-0624.yaml | 4 +++ .../tongyi/llm/qwen-turbo-0919.yaml | 4 +++ .../tongyi/llm/qwen-turbo-chat.yaml | 4 +++ .../tongyi/llm/qwen-turbo-latest.yaml | 4 +++ .../tongyi/llm/qwen-turbo.yaml | 4 +++ .../tongyi/llm/qwen-vl-max-0809.yaml | 30 +++++++++++++++++++ .../tongyi/llm/qwen-vl-max.yaml | 30 +++++++++++++++++++ .../tongyi/llm/qwen-vl-plus-0201.yaml | 30 +++++++++++++++++++ .../tongyi/llm/qwen-vl-plus-0809.yaml | 30 +++++++++++++++++++ .../tongyi/llm/qwen-vl-plus.yaml | 30 +++++++++++++++++++ .../tongyi/llm/qwen2-math-1.5b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2-math-72b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2-math-7b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2.5-0.5b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2.5-1.5b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2.5-14b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2.5-32b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2.5-3b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2.5-72b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2.5-7b-instruct.yaml | 4 +++ .../tongyi/llm/qwen2.5-coder-7b-instruct.yaml | 4 +++ 49 files changed, 326 insertions(+) diff --git a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml index aad07f5673..e5de586c1c 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: 
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml index ebba565d57..6ab39cde2d 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml index 361e2c2373..be6d9a0e07 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml index f4032a4dd3..d2aca4f514 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml index dbe7d024a5..a59a3350f6 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml @@ -63,6 +63,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -70,6 +71,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml index 89d1302abe..cab7233c98 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml index 032b3c970d..f82fba0c01 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml index 31dd9f6972..e2fb6e0e55 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml index 1a51d57f78..8803e747e5 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml index 1894eea417..0dc5a066f0 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml index b8365618b0..2ac0e4692a 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml index 8d346d691e..9a7f1312e9 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml index c0ad12b85e..c0eef37557 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml index b00fb44d29..c12444bd7b 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml index 1848dcc07d..173c55b6b9 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml index 238882bb12..692a38140d 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml index 9d7d3c2fcb..afd7fb4b77 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml index a7bdc42f73..d02ba7af18 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml index 57888406af..c6a64dc507 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml index 1e0b816617..1111298c37 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml index f70c373922..ef8dd083ad 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml index c6007e9164..87a4417df5 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml index 2f53c43336..967f258fa9 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml index 90b54ca52e..9d44852ac9 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml index 59e8851240..df9448ae04 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml index 2a821dbcfe..32ccb8d615 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml index 626884f4b2..f3fce30209 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml index 844fced77a..bf976b518a 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml index 0152f75579..060e7fb4c9 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml index 19c6c8d293..97cd34929b 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml index f557f311ef..8d77ba7a2a 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml index be2475847e..4458c706aa 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml index 90f13dc19f..33f05967c2 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml @@ -62,6 +62,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -69,6 +70,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml index 41d45966e9..12e9e0dd56 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml @@ -9,6 +9,15 @@ model_properties: mode: chat context_size: 32000 parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. 
Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - name: top_p use_template: top_p type: float @@ -50,6 +59,27 @@ parameter_rules: en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: response_format use_template: response_format + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + zh_Hans: 重复惩罚 + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. 
When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. + - name: response_format + use_template: response_format pricing: input: '0.02' output: '0.02' diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml index 78d0509374..b811fdece4 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml @@ -9,6 +9,15 @@ model_properties: mode: chat context_size: 32000 parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - name: top_p use_template: top_p type: float @@ -50,6 +59,27 @@ parameter_rules: en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. 
When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: response_format use_template: response_format + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + zh_Hans: 重复惩罚 + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
+ - name: response_format + use_template: response_format pricing: input: '0.02' output: '0.02' diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml index 8944388b1e..188dea389a 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml @@ -9,6 +9,15 @@ model_properties: mode: chat context_size: 8000 parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected, and the generated results are more certain. - name: top_p use_template: top_p type: float @@ -50,6 +59,27 @@ parameter_rules: en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
- name: response_format use_template: response_format + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + zh_Hans: 重复惩罚 + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. + - name: response_format + use_template: response_format pricing: input: '0.02' output: '0.02' diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml index 869e0ea71c..bc623e2f03 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml @@ -9,6 +9,15 @@ model_properties: mode: chat context_size: 32768 parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. 
Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - name: top_p use_template: top_p type: float @@ -50,6 +59,27 @@ parameter_rules: en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: response_format use_template: response_format + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + zh_Hans: 重复惩罚 + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. 
When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. + - name: response_format + use_template: response_format pricing: input: '0.008' output: '0.008' diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml index da11bacc64..8977e12e4f 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml @@ -9,6 +9,15 @@ model_properties: mode: chat context_size: 8000 parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 + en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - name: top_p use_template: top_p type: float @@ -50,6 +59,27 @@ parameter_rules: en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. 
When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: response_format use_template: response_format + - name: repetition_penalty + required: false + type: float + default: 1.1 + label: + zh_Hans: 重复惩罚 + en_US: Repetition penalty + help: + zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
+ - name: response_format + use_template: response_format pricing: input: '0.008' output: '0.008' diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml index cfe4b5a666..de237842af 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml index e541c197b0..1fda35abaf 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml index ba4514e3d6..06fd33c5f4 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml index e5596041af..ebf8099553 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml index 4004c59417..e9bc99339d 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml index d8f53666ce..3ed85dade8 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml index 890f7e6e4e..328519c168 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml index 6d3d2dd5bb..d1ed3c2a73 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml index 17d0eb5b35..0e88c24aa8 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml index 435b3f90a2..35313cd1f7 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml index 435b3f90a2..35313cd1f7 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml @@ -60,6 +60,7 @@ parameter_rules: type: float default: 1.1 label: + zh_Hans: 重复惩罚 en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 @@ -67,6 +68,9 @@ parameter_rules: - name: enable_search type: boolean default: false + label: + zh_Hans: 联网搜索 + en_US: Web Search help: zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
From 3554a803e70152e1e5064e2163d5dc13d389264d Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:19:42 +0800 Subject: [PATCH 08/64] add zhipuai web search (#8668) --- .../model_providers/zhipuai/llm/glm-4-0520.yaml | 9 +++++++++ .../model_providers/zhipuai/llm/glm-4-air.yaml | 9 +++++++++ .../model_providers/zhipuai/llm/glm-4-airx.yaml | 9 +++++++++ .../model_providers/zhipuai/llm/glm-4-flash.yaml | 9 +++++++++ .../model_providers/zhipuai/llm/glm_3_turbo.yaml | 9 +++++++++ .../model_runtime/model_providers/zhipuai/llm/glm_4.yaml | 9 +++++++++ .../model_providers/zhipuai/llm/glm_4_long.yaml | 9 +++++++++ .../model_providers/zhipuai/llm/glm_4_plus.yaml | 9 +++++++++ .../model_providers/zhipuai/llm/glm_4v.yaml | 9 +++++++++ .../model_providers/zhipuai/llm/glm_4v_plus.yaml | 9 +++++++++ 10 files changed, 90 insertions(+) diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml index b1f9b7485c..7fcf692202 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml @@ -46,6 +46,15 @@ parameter_rules: default: 1024 min: 1 max: 4095 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
pricing: input: '0.1' output: '0.1' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml index 4e7d5fd3cc..fcd7c7768c 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml @@ -46,6 +46,15 @@ parameter_rules: default: 1024 min: 1 max: 4095 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. pricing: input: '0.001' output: '0.001' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml index 14f17db5d6..c9ae5abf19 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml @@ -46,6 +46,15 @@ parameter_rules: default: 1024 min: 1 max: 4095 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. 
When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. pricing: input: '0.01' output: '0.01' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml index 3361474d73..98c4f72c72 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml @@ -46,6 +46,15 @@ parameter_rules: default: 1024 min: 1 max: 4095 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. pricing: input: '0' output: '0' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml index bf0135d198..0b5391ce2f 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml @@ -46,6 +46,15 @@ parameter_rules: default: 1024 min: 1 max: 8192 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. pricing: input: '0.001' output: '0.001' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml index ab4b32dd82..62f453fb77 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml @@ -46,6 +46,15 @@ parameter_rules: default: 1024 min: 1 max: 4095 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
pricing: input: '0.1' output: '0.1' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml index d1b01731f5..350b080c3f 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml @@ -49,6 +49,15 @@ parameter_rules: default: 1024 min: 1 max: 4095 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. pricing: input: '0.001' output: '0.001' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml index 9ede308f18..2d7ebd71cf 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml @@ -46,6 +46,15 @@ parameter_rules: default: 1024 min: 1 max: 4095 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. 
When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. pricing: input: '0.05' output: '0.05' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml index 28286580a7..3a1120ff37 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml @@ -44,6 +44,15 @@ parameter_rules: default: 1024 min: 1 max: 1024 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. pricing: input: '0.05' output: '0.05' diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml index 4c5fa24034..14b9623e5a 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml +++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml @@ -44,6 +44,15 @@ parameter_rules: default: 1024 min: 1 max: 1024 + - name: web_search + type: boolean + label: + zh_Hans: 联网搜索 + en_US: Web Search + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. 
This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. pricing: input: '0.01' output: '0.01' From a126d535cf146ce5473974bfbe500d209a5b1693 Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:39:46 +0800 Subject: [PATCH 09/64] add Spark Max-32K (#8676) --- .../model_providers/spark/llm/_client.py | 3 +- .../model_providers/spark/llm/_position.yaml | 1 + .../spark/llm/spark-max-32k.yaml | 33 +++++++++++++++++++ 3 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 api/core/model_runtime/model_providers/spark/llm/spark-max-32k.yaml diff --git a/api/core/model_runtime/model_providers/spark/llm/_client.py b/api/core/model_runtime/model_providers/spark/llm/_client.py index b99a657e71..48911f657a 100644 --- a/api/core/model_runtime/model_providers/spark/llm/_client.py +++ b/api/core/model_runtime/model_providers/spark/llm/_client.py @@ -25,6 +25,7 @@ class SparkLLMClient: "spark-pro": {"version": "v3.1", "chat_domain": "generalv3"}, "spark-pro-128k": {"version": "pro-128k", "chat_domain": "pro-128k"}, "spark-max": {"version": "v3.5", "chat_domain": "generalv3.5"}, + "spark-max-32k": {"version": "max-32k", "chat_domain": "max-32k"}, "spark-4.0-ultra": {"version": "v4.0", "chat_domain": "4.0Ultra"}, } @@ -32,7 +33,7 @@ class SparkLLMClient: self.chat_domain = model_api_configs[model]["chat_domain"] - if model == "spark-pro-128k": + if model in ["spark-pro-128k", "spark-max-32k"]: self.api_base = f"wss://{domain}/{endpoint}/{api_version}" else: self.api_base = f"wss://{domain}/{api_version}/{endpoint}" diff --git a/api/core/model_runtime/model_providers/spark/llm/_position.yaml b/api/core/model_runtime/model_providers/spark/llm/_position.yaml 
index 458397f2aa..73f39cb119 100644 --- a/api/core/model_runtime/model_providers/spark/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/spark/llm/_position.yaml @@ -1,3 +1,4 @@ +- spark-max-32k - spark-4.0-ultra - spark-max - spark-pro-128k diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-max-32k.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-max-32k.yaml new file mode 100644 index 0000000000..1a1ab6844c --- /dev/null +++ b/api/core/model_runtime/model_providers/spark/llm/spark-max-32k.yaml @@ -0,0 +1,33 @@ +model: spark-max-32k +label: + en_US: Spark Max-32K +model_type: llm +model_properties: + mode: chat +parameter_rules: + - name: temperature + use_template: temperature + default: 0.5 + help: + zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。 + en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question. + - name: max_tokens + use_template: max_tokens + default: 4096 + min: 1 + max: 8192 + help: + zh_Hans: 模型回答的tokens的最大长度。 + en_US: Maximum length of tokens for the model response. + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + default: 4 + min: 1 + max: 6 + help: + zh_Hans: 从 k 个候选中随机选择一个(非等概率)。 + en_US: Randomly select one from k candidates (non-equal probability). 
+ required: false From c7eacd1aacf7c67e9ec10c885d6f46bee1333fc2 Mon Sep 17 00:00:00 2001 From: Nam Vu Date: Mon, 23 Sep 2024 17:40:40 +0700 Subject: [PATCH 10/64] chore: Optimize I18nObject class for better performance and readability (#8681) --- api/core/tools/entities/common_entities.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/api/core/tools/entities/common_entities.py b/api/core/tools/entities/common_entities.py index b52119fdc4..924e6fc0cf 100644 --- a/api/core/tools/entities/common_entities.py +++ b/api/core/tools/entities/common_entities.py @@ -1,6 +1,6 @@ from typing import Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field class I18nObject(BaseModel): @@ -8,19 +8,16 @@ class I18nObject(BaseModel): Model class for i18n object. """ - zh_Hans: Optional[str] = None - pt_BR: Optional[str] = None - ja_JP: Optional[str] = None en_US: str + zh_Hans: Optional[str] = Field(default=None) + pt_BR: Optional[str] = Field(default=None) + ja_JP: Optional[str] = Field(default=None) def __init__(self, **data): super().__init__(**data) - if not self.zh_Hans: - self.zh_Hans = self.en_US - if not self.pt_BR: - self.pt_BR = self.en_US - if not self.ja_JP: - self.ja_JP = self.en_US + self.zh_Hans = self.zh_Hans or self.en_US + self.pt_BR = self.pt_BR or self.en_US + self.ja_JP = self.ja_JP or self.en_US def to_dict(self) -> dict: return {"zh_Hans": self.zh_Hans, "en_US": self.en_US, "pt_BR": self.pt_BR, "ja_JP": self.ja_JP} From 11d09a92d0cd954aa81e6cbcfad961e6d8ddb815 Mon Sep 17 00:00:00 2001 From: Hash Brown Date: Mon, 23 Sep 2024 18:44:09 +0800 Subject: [PATCH 11/64] fix: send message error when last sent message not succeeded (#8682) --- .../debug/debug-with-single-model/index.tsx | 13 ++++--------- .../base/chat/chat-with-history/chat-wrapper.tsx | 13 ++++--------- .../base/chat/embedded-chatbot/chat-wrapper.tsx | 13 ++++--------- web/app/components/base/chat/types.ts | 2 +- 
web/app/components/base/chat/utils.ts | 10 ++++++++++ .../panel/debug-and-preview/chat-wrapper.tsx | 13 ++++--------- 6 files changed, 27 insertions(+), 37 deletions(-) diff --git a/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx b/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx index 5faef46d98..2de500a3a6 100644 --- a/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx +++ b/web/app/components/app/configuration/debug/debug-with-single-model/index.tsx @@ -22,6 +22,7 @@ import { import Avatar from '@/app/components/base/avatar' import { useAppContext } from '@/context/app-context' import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' +import { getLastAnswer } from '@/app/components/base/chat/utils' type DebugWithSingleModelProps = { checkCanSend?: () => boolean @@ -83,17 +84,11 @@ const DebugWithSingleModel = forwardRef { diff --git a/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx b/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx index 225bbac714..9d7b360f38 100644 --- a/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx +++ b/web/app/components/base/chat/chat-with-history/chat-wrapper.tsx @@ -6,6 +6,7 @@ import type { OnSend, } from '../types' import { useChat } from '../chat/hooks' +import { getLastAnswer } from '../utils' import { useChatWithHistoryContext } from './context' import Header from './header' import ConfigPanel from './config-panel' @@ -67,17 +68,11 @@ const ChatWrapper = () => { }, []) const doSend: OnSend = useCallback((message, files, last_answer) => { - const lastAnswer = chatListRef.current.at(-1) - const data: any = { query: message, inputs: currentConversationId ? currentConversationItem?.inputs : newConversationInputs, conversation_id: currentConversationId, - parent_message_id: last_answer?.id || (lastAnswer - ? lastAnswer.isOpeningStatement - ? 
null - : lastAnswer.id - : null), + parent_message_id: last_answer?.id || getLastAnswer(chatListRef.current)?.id || null, } if (appConfig?.file_upload?.image.enabled && files?.length) @@ -111,13 +106,13 @@ const ChatWrapper = () => { const prevMessages = chatList.slice(0, index) const question = prevMessages.pop() - const lastAnswer = prevMessages.at(-1) + const lastAnswer = getLastAnswer(prevMessages) if (!question) return handleUpdateChatList(prevMessages) - doSend(question.content, question.message_files, (!lastAnswer || lastAnswer.isOpeningStatement) ? undefined : lastAnswer) + doSend(question.content, question.message_files, lastAnswer) }, [chatList, handleUpdateChatList, doSend]) const chatNode = useMemo(() => { diff --git a/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx b/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx index ed2f24274d..b97c940eec 100644 --- a/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx +++ b/web/app/components/base/chat/embedded-chatbot/chat-wrapper.tsx @@ -6,6 +6,7 @@ import type { OnSend, } from '../types' import { useChat } from '../chat/hooks' +import { getLastAnswer } from '../utils' import { useEmbeddedChatbotContext } from './context' import ConfigPanel from './config-panel' import { isDify } from './utils' @@ -69,17 +70,11 @@ const ChatWrapper = () => { }, []) const doSend: OnSend = useCallback((message, files, last_answer) => { - const lastAnswer = chatListRef.current.at(-1) - const data: any = { query: message, inputs: currentConversationId ? currentConversationItem?.inputs : newConversationInputs, conversation_id: currentConversationId, - parent_message_id: last_answer?.id || (lastAnswer - ? lastAnswer.isOpeningStatement - ? 
null - : lastAnswer.id - : null), + parent_message_id: last_answer?.id || getLastAnswer(chatListRef.current)?.id || null, } if (appConfig?.file_upload?.image.enabled && files?.length) @@ -113,13 +108,13 @@ const ChatWrapper = () => { const prevMessages = chatList.slice(0, index) const question = prevMessages.pop() - const lastAnswer = prevMessages.at(-1) + const lastAnswer = getLastAnswer(prevMessages) if (!question) return handleUpdateChatList(prevMessages) - doSend(question.content, question.message_files, (!lastAnswer || lastAnswer.isOpeningStatement) ? undefined : lastAnswer) + doSend(question.content, question.message_files, lastAnswer) }, [chatList, handleUpdateChatList, doSend]) const chatNode = useMemo(() => { diff --git a/web/app/components/base/chat/types.ts b/web/app/components/base/chat/types.ts index 489dbb44cf..0bc50518eb 100644 --- a/web/app/components/base/chat/types.ts +++ b/web/app/components/base/chat/types.ts @@ -63,7 +63,7 @@ export type ChatItem = IChatItem & { conversationId?: string } -export type OnSend = (message: string, files?: VisionFile[], last_answer?: ChatItem) => void +export type OnSend = (message: string, files?: VisionFile[], last_answer?: ChatItem | null) => void export type OnRegenerate = (chatItem: ChatItem) => void diff --git a/web/app/components/base/chat/utils.ts b/web/app/components/base/chat/utils.ts index e851c4c463..305df5995d 100644 --- a/web/app/components/base/chat/utils.ts +++ b/web/app/components/base/chat/utils.ts @@ -19,6 +19,15 @@ function getProcessedInputsFromUrlParams(): Record { return inputs } +function getLastAnswer(chatList: ChatItem[]) { + for (let i = chatList.length - 1; i >= 0; i--) { + const item = chatList[i] + if (item.isAnswer && !item.isOpeningStatement) + return item + } + return null +} + function appendQAToChatList(chatList: ChatItem[], item: any) { // we append answer first and then question since will reverse the whole chatList later chatList.push({ @@ -71,5 +80,6 @@ function 
getPrevChatList(fetchedMessages: any[]) { export { getProcessedInputsFromUrlParams, + getLastAnswer, getPrevChatList, } diff --git a/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx b/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx index 86519af603..230b2d7fa0 100644 --- a/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx +++ b/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx @@ -25,6 +25,7 @@ import { stopChatMessageResponding, } from '@/service/debug' import { useStore as useAppStore } from '@/app/components/app/store' +import { getLastAnswer } from '@/app/components/base/chat/utils' type ChatWrapperProps = { showConversationVariableModal: boolean @@ -76,19 +77,13 @@ const ChatWrapper = forwardRef(({ showConv ) const doSend = useCallback((query, files, last_answer) => { - const lastAnswer = chatListRef.current.at(-1) - handleSend( { query, files, inputs: workflowStore.getState().inputs, conversation_id: conversationId, - parent_message_id: last_answer?.id || (lastAnswer - ? lastAnswer.isOpeningStatement - ? null - : lastAnswer.id - : null), + parent_message_id: last_answer?.id || getLastAnswer(chatListRef.current)?.id || null, }, { onGetSuggestedQuestions: (messageId, getAbortController) => fetchSuggestedQuestions(appDetail!.id, messageId, getAbortController), @@ -103,13 +98,13 @@ const ChatWrapper = forwardRef(({ showConv const prevMessages = chatList.slice(0, index) const question = prevMessages.pop() - const lastAnswer = prevMessages.at(-1) + const lastAnswer = getLastAnswer(prevMessages) if (!question) return handleUpdateChatList(prevMessages) - doSend(question.content, question.message_files, (!lastAnswer || lastAnswer.isOpeningStatement) ? 
undefined : lastAnswer) + doSend(question.content, question.message_files, lastAnswer) }, [chatList, handleUpdateChatList, doSend]) useImperativeHandle(ref, () => { From 52da5b16e7002fc15343d2df73838b3f8220cccd Mon Sep 17 00:00:00 2001 From: Likename Haojie Date: Mon, 23 Sep 2024 18:44:24 +0800 Subject: [PATCH 12/64] fixbug tts(stream) not work on ios safari(17.1+) (#8645) Co-authored-by: crazywoola <427733928@qq.com> --- web/app/components/base/audio-btn/audio.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/web/app/components/base/audio-btn/audio.ts b/web/app/components/base/audio-btn/audio.ts index a61fd085d4..baf675d0be 100644 --- a/web/app/components/base/audio-btn/audio.ts +++ b/web/app/components/base/audio-btn/audio.ts @@ -12,7 +12,7 @@ export default class AudioPlayer { mediaSource: MediaSource | null audio: HTMLAudioElement audioContext: AudioContext - sourceBuffer?: SourceBuffer + sourceBuffer?: any cacheBuffers: ArrayBuffer[] = [] pauseTimer: number | null = null msgId: string | undefined @@ -33,7 +33,7 @@ export default class AudioPlayer { this.callback = callback // Compatible with iphone ios17 ManagedMediaSource - const MediaSource = window.MediaSource || window.ManagedMediaSource + const MediaSource = window.ManagedMediaSource || window.MediaSource if (!MediaSource) { Toast.notify({ message: 'Your browser does not support audio streaming, if you are using an iPhone, please update to iOS 17.1 or later.', @@ -43,6 +43,10 @@ export default class AudioPlayer { this.mediaSource = MediaSource ? new MediaSource() : null this.audio = new Audio() this.setCallback(callback) + if (!window.MediaSource) { // if use ManagedMediaSource + this.audio.disableRemotePlayback = true + this.audio.controls = true + } this.audio.src = this.mediaSource ? 
URL.createObjectURL(this.mediaSource) : '' this.audio.autoplay = true From 4f69adc8ab0b67d45ff5f578502c521086f0ba13 Mon Sep 17 00:00:00 2001 From: Vikey Chen Date: Mon, 23 Sep 2024 18:45:10 +0800 Subject: [PATCH 13/64] fix: document_create_args_validate (#8569) --- api/services/dataset_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 30c010ef29..e96f06ed40 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -1100,8 +1100,8 @@ class DocumentService: DocumentService.data_source_args_validate(args) DocumentService.process_rule_args_validate(args) else: - if ("data_source" not in args and not args["data_source"]) and ( - "process_rule" not in args and not args["process_rule"] + if ("data_source" not in args or not args["data_source"]) and ( + "process_rule" not in args or not args["process_rule"] ): raise ValueError("Data source or Process rule is required") else: From d7aada38a111028ade07b597ff2f9e201d57e35b Mon Sep 17 00:00:00 2001 From: ice yao Date: Mon, 23 Sep 2024 19:57:21 +0800 Subject: [PATCH 14/64] Add nomic embedding model provider (#8640) --- .../model_providers/nomic/__init__.py | 0 .../nomic/_assets/icon_l_en.svg | 13 ++ .../nomic/_assets/icon_s_en.png | Bin 0 -> 25814 bytes .../model_providers/nomic/_common.py | 28 +++ .../model_providers/nomic/nomic.py | 26 +++ .../model_providers/nomic/nomic.yaml | 29 +++ .../nomic/text_embedding/__init__.py | 0 .../text_embedding/nomic-embed-text-v1.5.yaml | 8 + .../text_embedding/nomic-embed-text-v1.yaml | 8 + .../nomic/text_embedding/text_embedding.py | 170 ++++++++++++++++++ api/poetry.lock | 78 +++++++- api/pyproject.toml | 2 + .../model_runtime/__mock/nomic_embeddings.py | 59 ++++++ .../model_runtime/nomic/__init__.py | 0 .../model_runtime/nomic/test_embeddings.py | 62 +++++++ .../model_runtime/nomic/test_provider.py | 22 +++ dev/pytest/pytest_model_runtime.sh | 3 +- 17 files 
changed, 506 insertions(+), 2 deletions(-) create mode 100644 api/core/model_runtime/model_providers/nomic/__init__.py create mode 100644 api/core/model_runtime/model_providers/nomic/_assets/icon_l_en.svg create mode 100644 api/core/model_runtime/model_providers/nomic/_assets/icon_s_en.png create mode 100644 api/core/model_runtime/model_providers/nomic/_common.py create mode 100644 api/core/model_runtime/model_providers/nomic/nomic.py create mode 100644 api/core/model_runtime/model_providers/nomic/nomic.yaml create mode 100644 api/core/model_runtime/model_providers/nomic/text_embedding/__init__.py create mode 100644 api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.5.yaml create mode 100644 api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.yaml create mode 100644 api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py create mode 100644 api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py create mode 100644 api/tests/integration_tests/model_runtime/nomic/__init__.py create mode 100644 api/tests/integration_tests/model_runtime/nomic/test_embeddings.py create mode 100644 api/tests/integration_tests/model_runtime/nomic/test_provider.py diff --git a/api/core/model_runtime/model_providers/nomic/__init__.py b/api/core/model_runtime/model_providers/nomic/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/nomic/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/nomic/_assets/icon_l_en.svg new file mode 100644 index 0000000000..6c4a1058ab --- /dev/null +++ b/api/core/model_runtime/model_providers/nomic/_assets/icon_l_en.svg @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/api/core/model_runtime/model_providers/nomic/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/nomic/_assets/icon_s_en.png new file mode 100644 index 
0000000000000000000000000000000000000000..3eba3b82bc1e3fcb6b27cd9038786e19b7c2adcf GIT binary patch literal 25814 zcmce8c{o*H`?qc#G9K{_ZDQ;ou+Q{u_24=Hr{Y2>&m?^_GR7 z`1k~x9X4&V0E2h$-VF~A9~c;DY;3HotYk14=gysriHW&(?OIh;)$`}i-@JKq`SRtr zZ{IdGHPzJAE zO=o9k5n*8~E32fWq>mpzDk>?l+3YiC&gkjsMMg#<2$GhTW^QgiciucYo&M<2qoJXp zg9i^vN=T@xsumU&E?&I2y1M$ri4!MJo@{S#-??+=yLayn95}#Yv0PnU@87?F<;s=$ z^XD5G8Ch6ZJbwIm=B!yGBO^zT9t{l*6%rDvudmnD)g2rh%*@P`m6c^OnGgg;M@Rel z`Ptdog@lBxSh2#!#>U;<-O|!hO-=32ojV^sd=M28xpe8$g9i^98XEfg`lO_!UcP)e zIy$PUsX1%*>_dkRJ$(4^#*G{Q{PRyjLPAPPijR-a_3PJ%hlkg!S<}|mrlhP~P*8C8 z?AdMGw#CNAmXwrqc6R#u`l_p|8yFarm6Zhq1UNZ4xwyEjTenV9Qu683r!6fl$;ruK zVPQHtI@`Bz-?eL3U|?WkVxpj+U`|fX#fum7^Yb@u+}P34v1-+-O`A5=*4DPRwq|5x zWM^lanwoCdut8ZxB`z+GLZJi)2XEfIIVdRT`0?YrckfP5PtVHAl2=ezzkdDfIdiP7 zttG_8U%YrBCMxRh?>|po{`KqEMMXs@ist3ztz5bC!i5XX&CR!N-I_CZE)2uHy}f(( z>{+^W>5?T&jvP56ARyr7<;Bm>udS_ZW@aWVuD=o3;-@29?6&g#{@wqZ_n*4oWq;^LcUAUq_vZa67j00{( zd|LNnJnF6&mE3uB#kT~0Q8V0OxAN+uAK4+ox=lZ58BR05T)cVjw9F2LrT!LD zX>kp=a?J==Mp5R|VYe1mOgKBFzV5g3KS^{{i%N47S~8fTH^;{476;!ilsPtej^)uL+RE-Z8X_a|`auUSr+RpEgcUro*5gR1BbyM%C}bPd9}i7dkv3LBv3 z(p(1E!tg>iTd=R;p7JESJNb<{GF)oe*j9|it;WKH=_kjPkBsQD40UI0f=Z;hWiHlG zpVsf=Lk2-*PinRsMtF0Kc4auSs_uW@M~~8tbGwEaERGP*VhvFAktA%*YW^oC=c4S2 zZwYULG+M@(g>aJn5=d6-^~EG#`o0;LFh!-&J=s+6(x|QWQ3nUST%lvItvfQox4cP# zcEum*QTfq7^TCY=vXX&O-laDOCLuQEF;%uuZKPU9-kM9;3)AT2d@7pm{Pn)<>t4iJQBZ%qb3N6Msewul zEKq$os%n>8D3b^O@^j4SWO8rD&x1ckjemdaLoX8b<$U_$BWl}!yZ)xFH57FT{p|Yc z+fPMjewNk5KBg{Oa%gRfujzTo&H`9>2Q(?^3*KGlZ{N!1GcZxQ4V@j?xkrbAY5((X zTwZQ*9Hy;(46d!N#yZ_mxHhO+h%G&8jikM})(_hZ&ieU00)h0F*>6R9%7;2e_ctPG z<}B_LWbLf)3#&D}>57>GbQc-t=3qe|j@!Z-bAS7U*5fuEn5Kp+LvSU+=~;C zX|{jU+Y^nx2jx0ljq9mLfOKr{O}KqO{=1?#+Aa&JJ#lexqneEny4+<@o%n7SdQ2I1MPc+FP%?dHra9eNp-Zmn zq)+^}JwGj(PGe(>H&1_KeouW>NrG<(w2sB4=a?Mu;%r}3dT{1rr=A5jV8_$YCw!|q zjs9B4r-LDB>sVbH>60xdu{#c*ki2yh58iI`M_DU-qpYA-_CGI9(s!*c%|#_T*(usZ;=8vxIv&WuB6P+b&$+uFBXf)4;ngAU`8F^0l?~mWreN6K zz4!)fc^di-_EZubKX-dkvCh5wNNr-0r;J?g4J_zs_-4KFEy-bvy-H&TD5k(N=a#=2 
z{0!}{54ak84>67*Uc8POXs)I#Pg+W!5JC@ZS>U!(+`i!=_Ne;9tIy8+A1=L^eh-1n z7TJZOk+RqI-$fZZz4SVP-BJG7P^GT?WnZXyD_j2is|cxa2@lUQ?8jAAxSMw23R1i1 z_cyKnqB%iV81V6j_3rL=_Ald^hMCT@MlY$9x+`xy<;yWxcjXgO+c#RqSlIcVzRCjH zF8f<>weB9XR<=oJ&!T}ig|Sul+5YJB`8ndHaM$47D;A+)`+hiXWmbt(UDhm#ezgJm zQnp5ZrNaF^q1BjCX5rZqxM$3*aou-&726h9!Fu>2Ft{#&Sg}ZdJD?(Tvi{kddfJ6! z$b{uD-L%a^9L|ClQr*nbt)eEC~WNy$DhBk zt$#IaqjD_Nhvjlr_4`EzJQ0?p)xT72!yet>w3d%j?fM=O$h31N+=hV5W~Ry#iU|(UMXmH8tAD_4k{L zS(AEKtBlU-qrza@SZx1oEMr5Bv$46GMlDyv+9J~*pQmXSxQA7BZ@+l($2V0`+v&fL zuG{{!D`;+F9<|fMOK<1m1A2Nc(2`91zJv6#p*YozA>yHCWyx98NAt|4}zKp8mjo_X~d%Rv&P5xgw|i!K{Y1&LS5pCpPuFmJ70=tFn@hk2yZw&$5t7kl!1`j4)h`*iE0(z}13xRM3un?z|X zypGMxxB*-L^WmiTgc;;_K2X1`0w zg2N9z)3w`%!t?If*lTnhR}2>ylg*dc*bH5ZhRy%& z{xc}0_R6LU*zpfw#wDay>h{_}&*170+jm5}DU4oE+lxL_8C_(iacHrx?6~+<`(F?o zeSFE6m9;Lm>ef(cP|VFUmyonmeVG?H_D#a7Pd7kM1In@2GTbeIsa;V22wHby?hw?+ z`JS;5{WdB3QI{1xIq&NitCX*NnKQ;*im0*kcS3ujp(jAxC9vQ%% z`Q9A4D&;p?(mDOUvJY?7j^50cNU^{v2<5 zFqm_)o&BWwd3Sc1KrkvY1VX&P)z1VHZ;2qygG8#Xi3-ty&X{VyZ|>%PSj+& zc)0&Q#j}XChg&zY4=S_>!k&e!Yb$Sq&|-d`t-Y4uaW-3V2$^4uC@ zSxQ?ILKRlq42^~t#}vRi>cNlRMsk`5eeUjU@zz&BBv;ARn9&s(f-XD@9?*B!?mARF?xYpsc0Y!0bMLkd8x#qeZek4Hd_P{91k9*y@i zSzPse`IsHzV)r`PT7a@6$7XzZ%S}$Ve(R<4+F!?mX&A+dzPhh1yd`JO*xlZ*p4dMN zeK>~)jo!&?WMK*U5sLfP9T$_gy7G05Iqy()sxFdt4>3P(XmG4_0lloNhk3;^3;R)# z9&movW@v){GW%eg#Q67!-=2>iH?**|Kc9x8(F-@@FBieD_zwk@c?ZsJ{&*Gf>$nY|j3H0o_sz~4%3m!Un)BDuV$XQ;P=%`X=s*~o##mn-zLsD=J8B; zei0uh+FgqWT-g9GWIyn>h9<+pPH3%1-hBJGcq?@CG!z07a{SEvC20ES zti9^3lu{?d%ZTKv-bQ8A8oFp?>&-cqsU{gBzCjJuZD@Bb7;t@0F0~Od{2{dS4D<1D zhwTk)zBG3wK#7z^c3Y7zlZ$Wv5058q+9#aa0Nr-&d+Vdq?5`6H;PB?x`nmA0=cd+k z)zHt!0+WhhT0PtgFp$;RwiK-R&RG+=x9YoFDN%mtLxoZFa;>m;{EYnNsLM#wUVehu zZlvc#lB|17@A3`Md7}f~oHdyXB@+n7JK1zN{_?ujgH^Yj6EOKhNGltg+NlKo)p#{x zLC9)Iw3V{J&Ka4wYJWkPB+r*_Z&#PN7r#=2At*k(iaejTAyQ zvEFp)Oid5o9GwP#op6$+Ja|!9)_HY;qf%I{9{x#kbP2Qje0Jq&LN<}uKT_N=Qe++o zRAhfqmr$bwbDl?asu`h1#WvU9CYf4aZYg-_pt_pPu4e{Z?~&5Cg*?CSy&TV5YMm5U z038&X8|p*nIL|;n(jH)N~v%%v_KAr 
z87Vw?xac!J>pgRjo4Uq3zN;v0J{p#sbTg_nDTuFZ}mg#4Zv zTx@?S#6;={cyYFO=@0)>FH)BI@Z+%iUvsGYOAkI}IziP=I2hCfYh}E;mPlB@KBS1+ zyG)T#$#d`0yV>@N-tpZW1S+YT8oA?YQ!QC_k#b~(pa1T zF!0OPG=#8+b1Wx%wbL5X9@^P{nI>I)CBUX1B1Hzh%SfAE?nB?1X+p^L-;X)-d5_;A zMXLZCS9jxVY=-QTln7M^pdE^%b4Zt&%c_dDr|=$6X1(y|Aa1iYa1+?o?i#)?)nr02 z9qbEM((hfSO2!!eIoRltsmE^q z7woQul5!}759}TzJW?L?lU8YbTv%z)upAdeAvM!{LA=l!8%Rov87v7#4(jqo;9qwr z~Q5xB&MZaqO7kR@_>O{n{daQ&xR zxGP+Tw~l-vRHKHN-`E*9EBO4o^DFT=HUc2oQ+ON6QPmDzuBYy0M(<6*PCWRWwE>#^ z@{F8FLnbm?x>iI(bzYqC9QgF>qnq>KsOMjl`Hv)H@-)O;8>IY%7WRc!SMx)+HQU(8 zdy;xJG+k=<1Eo>*Y&a>GH!uJBpGD2bM(ds!$Ey_2w}I9YYv%JdbjMR6-UKAq%b4WF zoJ)AZD+9LJ<)Ezdl-c#FaZQseZL4=Ye;&Mn?1QbE{WxYF3j3uKiWg~f%^|yOis#-~ zT*G8oF9;2TmQuKt8*7<{KfSAE^RFrmlts)_@#W2HXY1ANOWiEsCbBRj@6Rap;MC~z zx_ymX1$1N=-n#W?^s=hrElTq?#DmXA=Y_v4p2(#ie$H}$CVOH@mKlhoUaufXJ_K8! z^ZAGrlLKm@FnSFbZRs^5*NZ+)$NUH)qkMFp`pc^D*4YM?>XO7q`N*F2`@QJV2a|o5 zoDj*Gk7Kr=lc|R?aw9T;UF?8<8~j-F$X`|-o$Pses_sq>tOk_Qk@2mG!L+*j&OCy} zRq0>%dw{YMnRxLsx$ab}Yl4yMY+w3~1Eg>5fZ_$_{gA~qn(g5vFmEHFonpB3<$J=g zBUvCWJpWl%)!aQzf`*NHsLo!5nZG@!!Q49IefvEwy zY!7L$)!2L~?kApB&w+l(6MZ7XO5XqplZrTQG|1JFLtD}?1NUJGsbc)+Fpps$i6Z>XE1xP$-~S@6et_~aim z1O-p`N)i&B!+d19TX=T6qB-NkhbAJXFTr!A4IVjwMck)BwL9PMoa3d5>TYOpMaiy` zuV&ml5u~<_x6%bHPBjb-irZPG)c5PV#XP}@3%FN;xn|5~q!e=Cn24Ck7y z5YZ;w7QB$hi*5_OIg9%Lxr^%=@^0yiCNvyEpV`7SAnLw6G~bAuj|GYe;Gft7?Wn z{McV7kSB>*sKPI`jnK2&K8x#^k2KyHAbh#3gfQ_Hot>{o6dhQb6h(&~{?(pX9%O-`SfS9+^Jro0!G86s`Ib@qe^TRn* zk<|$FZD>(tJ=LAb{aEN5L0V9xm+Uyd(R1Dgao7vt&K)v%NV~Ol9=|UU6C;@1mX@v^ zDm<^whLa;!rPRY$c?7DVVItKHQwzMDKJr{ZyQXh3NqhJNqt>7PJ+*O;)bBP#9Rm zEY1O*^*(85x7Mvp!=sb)vAQ+xmkGCKU@}75wNK_Z#n2W2x#eL;>)~fSeb7)9(cK1* z=aW*uf!#4ZcKP`Xp-iQ|q2Z{`Kb*Hy#!g0jSl?4%+sMn%0v(jGL&byAnteB7j`>?3x@}|fh9s*H){*Clyo;TiJugL;8_~kf zjdBv;DZZ03+`K^zh)B&1dL`_bLYRE8sE)8Bk;VCa;{&}GHiHT)p%gBPNY2F(UvNi% zZrw&AMY^NFvQ1jm_3Gmt*qQ*GOe9YIvUR^)8iGS`O0jio0|4c}_l#9bM;;py+ zqQ-fA`nW*7n}VyIa7pe0aPR!#gGvdWja`~xYKF$#4a#uO 
z{o%0%@c8y#Ze#=nY`+d2m*zhGJ2a*jvdvqK5lxi%4N2IpHt=XpZig5Ht_HtyP$x{#Qcm_vPZEs-V(Fn<2jI(e1V*i9pUn2uDHTu7UG6%nzcf%c^#15bvP>W+iKj_yS_!QB1{LMql5jg zXGsY1F9$g!7x@CpXxzzDULOTpyiE`F+M~t*y+Fne$czI2a#FZ;d{vF#;Fl&6EZuv2%-xhu(4L6Sa$ZMWM$vE4uO+51X2ZsiRinbjWW=@ zfe}TlZ(0wx;xTTEvQ8^AT3Im!+Nnki?S{1fJ7hIAM5%{Y)1K=baZJD!Z2|CA=F~G2dRRo~woRandwiaVQH$IoBmPS}kM}g^ShQgTZm$>v2RWsKemW4g)HnRup~ubNm5L%Fyp~UjzdM{xjsDm=Nh>J zl(t$wAUBIf6ZDRcjKs7BH|9ul_w&!u8P--O@cVjHJt|Q&Mu1-$>?49ysE#J%AQG1t z2*|Oa|7=!cL^2=Ot%K_PIgA_!(O7Mxe?ZOyg_)!%nyjKIt`BTb4Kb##mmVgHm*-~` z2l-c#^6bc%AcY=z3i~D2UTvh__K*gf#TAg7Z$s6iErU!`J!aFw0DY&h1oXC^dJ^8% z^IZ}*uMm$T&qB6+pC9pqXdwktOCD7*y?&aI-xknd-ksq#Q>*~=;Q&aaN6fDn z*noE$aQ2US(X%hm@U?-`r7h=lD{7t~e7!UKoFCupL>#+=!yz_L%!`okbpyQddS9y@X~@sDVI>2u5a$4}5`A%OH)Oe-2%JT-(%{cUKjAOp01>nx#o zzU|Ef;NQD<4r%7#tm|HHiJF{)E8^jvO>G>=c*0{^8-eI+)$oZRJ{4v+j;ySaScBJ8 zwdZdHEG^R^5&}Sp<1LbyrkD_(@8rEXFzs@;gW6uE=+CHSgkF!Jt5p@nw}Cd3JUAR= zJj-xkP$M@M#cMC%4FkzjE~|fg0@J&*F`)hl&IOi%&zA*)aP$O@*K?l0 zwI>oTaT*H+77Nm`R58+c)CW>@YBQe%K_8Ia!74cBHxtn-a6K5-tiqrEPc znoVH1`Q{)zFUI)W*YZ6C%H5AtD2!gFe%eIrat6!0BObA69GC6P6_yu`8b|Js#%2ZS2>4POrG?r-4^#~iw3q#k4sFaP5MaJmSf*S*60U>kj>hF^a*+3aQb3TnYi)eOG@r2swzJ~~qiMiA!z(Ffe zt2vr1%N^lqyrGq?*3%dbB{hA66ojIAP?^4 z(O`on%W$ReY7M03hPI41ykJp?#Zg>}e5*yzV{w+*BwzMW9N;S%ESiT*2s!wD$BimX zAa*`s&n9>zp(M@|D*)X!8ncSBYh)ecu)1StE>t=*1lj55I74k()M9i~tnpz8ik{iqxfcZ_^=I3U51!{hbToyWlqt9cQ z-PuE{90=GR;|K7@TuRCeG0kNl%yqf4{hN;Ql;%g|o^*aADMm5i$`#aaE}tga=7xoT z&2n3qOQ!EiV9d>5^aS|#0lUMHMi3}?*>T`Hi~~%3dk77iQE>G2#Z?*tzIQHNTt~!_ zanSD7pGg8ZZhp?WE`d4u1nOs5FZm_3?IMwSKrtZXxL-)%IuOkWD^;&o5AXluG1d)7 z#!dO6#E>$xYLP?07;f+fF382PfAKS_wp0&~kjUv25f*Jknw9ckKFX#kT58`KZIM4=jRa-PJ2=ZG1 z3bm05C5I!QaT!c5Uv+|*dzF=X9C#M!?Sh)9p*>57&*Bw4^f6e_>28AJzy?MEQM@if zCLRste&7W~ZKPIQv*svnC|g0H3O8t`v7SFLG*vBwdHRZIN`?ig@&A!#e!iA4lVe+UAKkp@+>`)KnlK zeByfOgKDFRRKbtRQ(bqYs|_ZrQzj#?6CMUij5lP3SO=;)e-1}dXL4f5ri~XHHCr$J-)W&Ipmp~8>dOj2` z&4V82n1hFa(7{Xnxzr=ogZK#~uC2>hM0OdrL5e^mN4+`AXno_H>XL}zI~Q8q2$$H0 
z95&LQSpfR0pIX_vV7PR}nBdV(mf=Kby6BCArYI(jW&yZN#hM2)+IW$29|DQ*fRFD| zV`Kqaw}oEfR)D6qx$>4ZKfbYPlO*m`AanNU=6{N!4U{li*ioy|U?B4cpP8D66iZz(`SqEMmZ5rf!>36_K{~Q06sVUqp?s}KmvHT9NL$$T_MC|`PiFa zA4hIa;QH5gcxx$LKt^7o-Hai3%_iHrcp*uYd}Jv-U*-S5Q=UW`nxaBwEl=N1&4riizL zR`gLHjwqP@De4)Uw6D&LZ@lE4k2zXau0D~h2NA_^)()r>_})8EUSuG8@?_5jlo#NH zVka%~B8|2hsJjSbpB$-3GJID_)W;S#u^U1XBNyewG(jU8RKNiTKR-?g3mU8F?bHx_ zNYv<>os@u4@279nA_D-uPAD}VEHHT3`M;aqK!#l+{FqUa<;LXDZs-v&7Tf{1TDrYN zyY>Z>yWYai17vy~h%S`H)!Un`Qk>AYlMvlITy!Qe@A9Xohym)6q~p zfOGNEHULG1wpGt0U-|A+>QCNVz+K}OC<5ERe5TP>0ah!pU*|`TXokO_YvI91G7g(5 zLvDJNRi#zlc1oT^!0X@1KGt9tUG93^gV2?{R}%)(_$A;CJ0aLMu|#H1 z+lf1bB39rHj)lLX4e?QcfwP6bnZ^1Aie+jKO=vo};iO6mqUx$A5k(MlaZUPiLiZM# zZ}>i|8(ihaPaGK39$;Pn0IAYZ^c0v2&G|~UW2~X?K*_p*iz-awG_;79sdsoaedYO& z3sh=_tw}i9WNu5M_PK~2U;} z#8{wvm=>q$O;j=9jKf8`eTOJO#TP(m_AL$OEAQr4c4#LXC4blc)WsG}crMc~XyHP( zF|w$QJP!#+sWE;*d`)u$2_C=+gceZpMxbOl6yDa(uMj8o`eW7&9ex#}WCb;(7DG%? zOcPN2smt}fe<^;Ap9K^(3stvh&Xi+ZWIm2yz;yN6^hfrMS2hyo1M8uD0Si|?0D1uv zLTE|sNRXm$>ZoKy2XfVrG-u@ProDHQj4GJ#c7s+LPL3r~frl%w9m9*W0Us=%K8X9z z90?&%fyp~CyCw|E2gE~btKaNq#PDXDn+n#Lf>;U`==b8x-djJ*D0lk!pVj-l=ySl} zj%kDI(LKPUPXW<`k8a}qjtYFbtYv;#@(5Ai-`@!B=lS~?%yB6_3Eb{bMZY-;ARCAy ztz2pau#`2x+Am=26C+1w(3j9P367VcC4%jd6cHIvxXLFmqeIgnU8jqN7l{sDGM1q5 z{M-gLMjSAQ9N0HOMd$UEvD2jLS~I$V>QsR0tRa}I?(EtyA@zeiN!kQCPEixIawy8A z=+ITV;@2MWA)>W+>M~Xg!U4GDDzyXYk?tN_7o;~toU}^n0W2$_9vHC_keY%8t*^YD zpdvlBYCPX5ipWR4eLx6W=preu77w4SXk%}oU4tXn&=deUwh-MCGkKJ@9Gs(pM&*5y z8hp|jETz)xh+z4k?!hvzD*fgSM5HiA6?jT-W(FJMgH_;Y5D#1A$TlFEW}wbFc+5Fh-+3|0kWy`HVMgk1(9wKEHZ&eugQ1zcocqZS6#c*wSfq{a?P z6-FL&zir(E|V*w8X%K(%|L>N70?E3xtv7Vwi}}@>SQx9Lzz8 z=wQn%a7NubHu7ef-`V@Mpsj?<1rJYdVtL|t63nG_vMbYLD_OpOy88+|A`@6u`#rN)p^79ij*tj&p*et*y=X~i zx7Vs#OVaH+GVTGl1D7KCcS7m))OsF}@?%w*IvjZL>TQGsFuL_nh4jRM))voM^`;en z&b5o+t&u(p0Pjly?=KN3E4yjd=`d1lMbs#uHhgL&_CDzAW#5-%W_yGz1IQ4s@Z5b% zx!uhY*ffPgUJkMDOF?DqCbOV;Xn#%uPT0X-ItPs5{RuEOk)U0h_TRB$&?f|AZ3)`Om#2@xeC@`p 
zB0kW)$4jh@Jt-Xdmv2=nZEpyX?dMZ0%u+RS)$2O~)qv`wixKy+Co52MKxlAdkN>{h`vtph#l3BkQbmI1J^1GFRy7;xm@F2ggH zp`_+Z76l)nF}b>LGGwKPJ|`TCkLR5)-ro%k^9h%Z?vT|2oU}Ky0|bw0dG?y~R_#&M zdEf$i5X}T7oCeM&ztkwT3Wl3PSlB@ z>8~PkQ5n?TWuR^4U=68ewD;Fz(uB|k7;rfd+Cy;Gu@Onr?>)>rjk|9SSxjZU&fp>A z&#PIPhYIeR@?wAq)9DCU&=D+X13`8Lck|7|2);qbU@hbQyZX6;zQDd%OhahhWVg&W z0^m!sc))fD`6qdp~2K5s{SRtGbnUXoKqe40fpNC z*|5Eh{k=l##u46tDmsT3`9P`hEdSf9$LD_c<$vGDyDjpm#TCRka5Nmh)&hk+i@!sm z=SZflpu3{iWp+vwgGzW?JKNObA@}nVax+yloo^ew|I-%tTwp|5*j(k{J>d?IWYlEJK6d!}t|qyzglR3G<$>Vg+~IMuJy&kY|`l*cfncT#$0;WJ@9U z>=&QRp=}fKJSzQLU4H>8*8wVR(dXfxY99*@4-$tDk{YOr@I{r|Gk1Whx}Xrwc%w}U z3-m2MnCeLH7T3b*?_l@*pnqAqoRF0UdVueuN+>WAP$)ZWhd85mdy1HZ(_LkzhLg`h z6VGq6IjKI2;psHe8sbBIu z0fcS~>F}MhS1FVE&$}E7co%rsK9D`UJ8xP05S1|2+Np$57f_ z--Cn*&OHCFOt!I$K!$0hoN~_)pZi$S45jjKqm)VQ042{9UEjSq>lkC1c{+lQVBska zjw3y)1t$FNnX;LQvjfu?1G$dQ@ncltEM+XYwVltrrm0Uhn4oK?+COk!224bvQKs_*{ z4BWI(LpO^NjrC9MY-I+<1k+a$2x3`4)}`|Kyx=8=hG|q_duXUPeT94M|6|7@EC3G% z(|5dq7|+l&4U{ee*9oR}1X(Vz_qD9RL!HcWksSa_f-?;%zfBRkt0$*WljWG~@B*h%tV?1^N-5<`}|R zu>G8N(Wl`n1HlRQUqdK?@i+1A*^~=aVRt0kGnstV1kjt+Wlxfnr^Y0PRhoRD<#d(WO(fA9o!O57uh8p;YhZG3lQw2nkUVa`hIf1nT7XkNX zr<-P2Rq?|sw3vrZg5ZkHU*?6tAXc-eBfvm7ORCg^=RI5fF2lP{FRzoIi_QdP(QRliy?FL6kQv)lQRZ7hs zEf4Eh(q{02^ zzjOfC_4H1vk68Q$=m&ovG^HPbba8-}E*M^*13&hAuB!xIDon)%Bg6)@?W;kK<9(Y*94<^5e;js4;Kfrq)e>y|}vGeBK*bEWH=XYhQH%+6E;my%xwEP^7Xq=wo zuV6CZRZy?u?*gP^1Z+P}kZ1e0kP=_)d)XP0|C$1NnwO6V$i(($Yv89!3;ra-es5Xa znKw?`v)`Mez}RY!EWkcm{>iuZ+Snen{J~u6Kcxpjiaa&z;q14c$Q%Dh5$AbB%wqh@ zt=K=12k@iew~IgBRp(!usrkqPr0_4F1pOUPM)JQVpD2K-k&eW7WoIx7u%vfuc_-I$ z;5*-TZUO!&+Vriu{B}_#m^H5W$7@p*uWhG*TFQSqXt>u-@%MT=NPI9h<+z*B1L9vV z;CHpSZptaplT63YhPzsf*Qgo1*=-{LlHOtNl8-bw#{A<;?{MMDY@t zo;)2`;0RxQ`V)Uvuxj%GePH41A+vrUO;Mh~g1~QBaZriB&vJT@1h1Qtum8!X!cNfP zh=bYFqewM4cO5bS-y;6?#vnxn7-WQjJ^#-2ZZtl?5)ewWCAOAe)GYF z)7@?-)X@EG!Sr1kkTglA61dwrE&R~U>W|ZE6Q}Rvhw?{T#sQQBJi!x$-Jeptuq17uL4Tp{K&Yz>4G_fvG*c}4lq0nr_0$55*1Xboi69^ zh_UR@O8|C3@*W2QRI_R)j#=+@xsDSo|FHxMVKiNUIWirPf;5zVpwTlTC4KtNbHD{E 
zn^)5{w|OlTEV$fw`nF5GO20R6AUcVvhq5k>E#UFi-t79}U)zF+Y^KEhsn=WqZlq2z zoOK%07C9CKQ0!D52d$#hv?lQR_*5E;QAD8GL)}1;fVT?3n~5X|Ouzu>PEFSva0l<< zye58Iy8`kXnJEHOkXP5CIzQH%H}}ueePe%Rq!jG(>LDM$_}ITT$@gKG7e0FG^bBngf5q4 zs*t_<%V3wQ;#0f4DyJJOc^S|CL)4{1DMJ3pu|;T~5HSOI?usau)1l#HyeR}eA_foC z(2keKmZM>kL&j3XytPQt&2PJq9?|I!0ztG29GaNJ;H)6RhvNV+|2e=!C<|UlwX=7V z&s!ixn*PuXjPq$@o7HocyQpvL*|ja9Lv}bA)pKFMm{2MUr>`G;`WFr5Uv||XQ@Sg`glG7gmLF9jDzA)t(YXauu2WMd!0=r~MKm7AR zgm?gApa?(%2l)UR(0%=E6Ivq8q=3VXKXSJ~)A^@A0K`{)XPU}-QRV*9bPgGY=L}KB z{yw>a7z+~*fH_0JoNwXxiGZ-oSyc<0A-TVTUB%)GGPr7l-4d#qfrho^2XYSn>!GiY z4tKh~t|L7ZFH^q1`&~yS^qNEYz`Ee>0tmM!Oa?dO;wwaI<)^<0ghR)?`-b~O^i?DN za=9I)O8n;mXl9U>zX4iR>FNYE@%QDE7xFbh1ssik+aZ5we=&AYsK%beh`3-?TG(Nx zC4+~k&0+YVqIW!DJ%Oy?HUD`~rSqQ$slm930PS;txW=gG4yvIUGN3TMxa$vn>QNJ@|kh{G!T4 z#ILPnc*%hkol8r=XN~`PurKmI54vf;3(3IH>Ka?J0O`4x|4n{yuQD!15cR1*Y7JY% zdF!*IBlxzGag*5K1}w|RpGU$Ff=3r<;=d0b^G|;x&Wp1ovs3btq|l`kgxszGuPMeO z4N&_Nr14LZCCvQ}mhjx3?CJ3SjS0@&^sfg&qSGG$Jyx2Q%nJ}paK}$vd3yZDs#-uk zSP$6)D;pCc)Z}Rij(~jB?l1gn#e!a(RN6`Fzw!`mX5akR10bLW`QMCW`DQHst6B;T zDiS0f0EsOF6073bStW)E%-=+IRB#Tc5!f-h(8^7JU7C6V`1te(fCY`PT{^F=)#}&3 zM0#Wzn31+c(h-h5;0Iw&;lLZK6n-7rQqc98RvI09%vSvb)+w@b)5#LpCUTQ;Hxm)} z3M{lO+KhuoZ%!NSs%Pvm6~3=)o)vV>p6|txB9@(SQfpgdP}0f2YqAKu3oc-#<50h4 zo}LqQSN+aM@VhjTc0#C15-1Fvp$6l_bJ84(9(3O!_D>Ody?bw)PdKAP$8VRdU<=#8 z4xabfQSE`yOT_%Wilv!mL+y)k8@)FK39=!k&tnZrS0 zuT8+O_k^gAR(P+9l|!5b#dP^AF}Y>oeR&=6k&Zc;*6X3h!vMa8UUfFf1mD+%e)33j zr1S4mm<&IrsLl*Fl>0{jTqgYz_v_f)bUVv6i4<=wP%GuZ9T7&8Nw&KtufLWAMQ}9W zED6*XIJwz(Pg`hgC>wM>qDWLF7(=Ha?Pq zp5?@?90CX7pyi9MRn42owa8XVv~w#T(vOe_mzZt|$GH2--m;qft?PBtc|J;ngCsL2K89pe6wOSRTAL|0NQ|mY7s&oVXqFbsP#&6$9KQmhUK6E-M92|66G6 zEBJjU!AaFFv`|Tx8LV_f_R%$8PG;gZkyqP#XMXsSF=h&4_bD%ALNOu#?!J)Bwgl7P zyV;&A&S2V~eeRq|ON7eAYOvAuTasl>;!gf5KYZcdX^=7{T9{Q@V1~_S!RRvXkMz05 zitDA&Z|wNqlZS?o9=UVde^&JhUNaDN9udRAyDh}!3;8iwva!dV^G$krkp3C)jfrH9 zq~tYUS!iUkNTT+(RNy%4NgSwmz}j6C%5nwYyQm{D`&Hw9n>O(*>QHI-n`TxQq;hdM zvV3u7UEbda$|U(;H%-|vV8#hB{*{}+{0hEf_c&Z+}rpI@t4Px 
z_J#Xa*v8@v*g_x%+{VCfEp;-&E8pE%Jj()Vh6dELq^{i#$F%$CgoewKl0^Ur*)!G(X# zD7!QGYIc?v-SeyK*CH>@PW_(6JKvd|O8H%I j#hd$5Bz3DL**g^>vTwt4UwW3_V z9g*2R>O21oCZBnk?{}ph__eriGFJ8I<;}{+$~)xER=s-Z0Cez~87+G?fIZxBZ>{9Z z-cXO2v*0_c{@Tq}5a(1|?(f1t-_T*p!7ZS?oh{(QQBZ3Ypjhk3bd8#M+^w)!-@?8Q zx`v(2uJ?D|iC>g*TuKMUI!5@(mobju<+%wzhOfz#%t=L`l?M(cq33a-SMsFoeZR;`@M#t*)#b`jq7@u+jXr)bhU8noYUdfC4+(5|EITW|A#6K<7A_< zMN8STjYJ}3Q#x5O)+LqLO-!G48J9_oq*+~D<}hw;YE?t`*v6tr%0x4U#v!6wng|(& z&QOf4=`_TgL&MCz{)zqheLm+rpXc*F?{oXU-*?)hna(B5r>}uTes#IS+xpwSROg31 zOoLCkjvGU@i0iH~Y|p*etrRW8SjVTR9C1RYlp|3-B~K^z8e;J*)Gb{av4``)a<(dY z^L1+Ciz|)-u2SLrM<0Xb1v{H{b48pd4S?&BDbIq>&Quiy9 zxEaxyTGi#ZC1=MNf^3(#ENigg^TaODWDsvC&Hmy+|9RIAgb~y9&||e^U+5xE$ztqr z!fsvu66j8&L@+2|JqWf5_pO%ce=hLk5vvGe+7kok@=^rdw~!mXiczWv0x}dZ?r2Kc zu0BqPz%(J@;Qq!^e8}g=;7P2qqj@@=&Q9Runa9op4`lI6a$hcN*5fY#M9Wh(fjy+- z!X16tjR0`(5sE#?A)jr%V!sI1zm2?otqRx&BQ$+#M~YiuWEpVyT8S_7UELZzd%wQm zwb+_W^hUk}R?;RGc!*PUJ5KTLj1R?s>jfX)oH@mGDTYc*@h<-{1dUnm5UV@T+iz8jT>j`~`_2;7NvKD4LkZ=HaF^=O+pbeLIBk+F<^y`X(X2&pT^fdvME4;m5cJ9mrWO(yMw5T28?}RWT3{D*_2)NHjWQj zVR`hOFw(8j@)G_1?D-1Nz4!KgWO=f1K@1mCHMUbHGiHfTk^unywv@c4GQ!xzR|gh_ zW?RJ*=$dNl*~sj$;h}Mwc@DdHT>F{V?lP9vNFR! zJEKpLFEI^H2U0ee689*)bb7+~bD46bkuQvth!Jv5Z4Mc)^Ow zHI7+{^Mk=Njw+FR#CgqcLdhnOITq#)tndf&H@E3!GgybP3wEIA+L@0tk);B?b!=Ix zw7gailx6Q*>z36;YJ->^Uy`*%synCt*|dtx2fY}dB3GbmHp_64YoqW4*N;Nxsh-8p{~bnJJUYSp)SZ{xD=Au)hy9vq(sbd!`!dQlMcj(4<=<&jVmPYjcDH$22;RV`KF{;)-h%9}ACh=h`Y=U2n0 z7aOK6V4Iza!(P(XN@z!9_al<4lZHPCQkTJ|^ z=W+^HPlQM{akHN={{93IH0WXQB)I8G@`J^C?fY8_Ny2y)L{ekrGf=7~t^Rn4K5e#! 
z47xqdMwX+v|J) literal 0 HcmV?d00001 diff --git a/api/core/model_runtime/model_providers/nomic/_common.py b/api/core/model_runtime/model_providers/nomic/_common.py new file mode 100644 index 0000000000..406577dcd7 --- /dev/null +++ b/api/core/model_runtime/model_providers/nomic/_common.py @@ -0,0 +1,28 @@ +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) + + +class _CommonNomic: + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + """ + Map model invoke error to unified error + The key is the error type thrown to the caller + The value is the error type thrown by the model, + which needs to be converted into a unified error type for the caller. + + :return: Invoke error mapping + """ + return { + InvokeConnectionError: [InvokeConnectionError], + InvokeServerUnavailableError: [InvokeServerUnavailableError], + InvokeRateLimitError: [InvokeRateLimitError], + InvokeAuthorizationError: [InvokeAuthorizationError], + InvokeBadRequestError: [KeyError, InvokeBadRequestError], + } diff --git a/api/core/model_runtime/model_providers/nomic/nomic.py b/api/core/model_runtime/model_providers/nomic/nomic.py new file mode 100644 index 0000000000..d4e5da2e98 --- /dev/null +++ b/api/core/model_runtime/model_providers/nomic/nomic.py @@ -0,0 +1,26 @@ +import logging + +from core.model_runtime.entities.model_entities import ModelType +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.model_provider import ModelProvider + +logger = logging.getLogger(__name__) + + +class NomicAtlasProvider(ModelProvider): + def validate_provider_credentials(self, credentials: dict) -> None: + """ + Validate provider credentials + + if validate failed, raise exception + + :param credentials: provider credentials, credentials form defined 
in `provider_credential_schema`. + """ + try: + model_instance = self.get_model_instance(ModelType.TEXT_EMBEDDING) + model_instance.validate_credentials(model="nomic-embed-text-v1.5", credentials=credentials) + except CredentialsValidateFailedError as ex: + raise ex + except Exception as ex: + logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") + raise ex diff --git a/api/core/model_runtime/model_providers/nomic/nomic.yaml b/api/core/model_runtime/model_providers/nomic/nomic.yaml new file mode 100644 index 0000000000..60dcf1facb --- /dev/null +++ b/api/core/model_runtime/model_providers/nomic/nomic.yaml @@ -0,0 +1,29 @@ +provider: nomic +label: + zh_Hans: Nomic Atlas + en_US: Nomic Atlas +icon_small: + en_US: icon_s_en.png +icon_large: + en_US: icon_l_en.svg +background: "#EFF1FE" +help: + title: + en_US: Get your API key from Nomic Atlas + zh_Hans: 从Nomic Atlas获取 API Key + url: + en_US: https://atlas.nomic.ai/data +supported_model_types: + - text-embedding +configurate_methods: + - predefined-model +provider_credential_schema: + credential_form_schemas: + - variable: nomic_api_key + label: + en_US: API Key + type: secret-input + required: true + placeholder: + zh_Hans: 在此输入您的 API Key + en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/__init__.py b/api/core/model_runtime/model_providers/nomic/text_embedding/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.5.yaml b/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.5.yaml new file mode 100644 index 0000000000..111452df57 --- /dev/null +++ b/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.5.yaml @@ -0,0 +1,8 @@ +model: nomic-embed-text-v1.5 +model_type: text-embedding +model_properties: + context_size: 8192 +pricing: + input: "0.1" + unit: "0.000001" + currency: USD 
diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.yaml b/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.yaml new file mode 100644 index 0000000000..ac59f106ed --- /dev/null +++ b/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.yaml @@ -0,0 +1,8 @@ +model: nomic-embed-text-v1 +model_type: text-embedding +model_properties: + context_size: 8192 +pricing: + input: "0.1" + unit: "0.000001" + currency: USD diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py new file mode 100644 index 0000000000..6cccff6d46 --- /dev/null +++ b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py @@ -0,0 +1,170 @@ +import time +from functools import wraps +from typing import Optional + +from nomic import embed +from nomic import login as nomic_login + +from core.model_runtime.entities.model_entities import PriceType +from core.model_runtime.entities.text_embedding_entities import ( + EmbeddingUsage, + TextEmbeddingResult, +) +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.text_embedding_model import ( + TextEmbeddingModel, +) +from core.model_runtime.model_providers.nomic._common import _CommonNomic + + +def nomic_login_required(func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + if not kwargs.get("credentials"): + raise ValueError("missing credentials parameters") + credentials = kwargs.get("credentials") + if "nomic_api_key" not in credentials: + raise ValueError("missing nomic_api_key in credentials parameters") + # nomic login + nomic_login(credentials["nomic_api_key"]) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + return func(*args, **kwargs) + + return wrapper + + +class NomicTextEmbeddingModel(_CommonNomic, 
TextEmbeddingModel): + """ + Model class for nomic text embedding model. + """ + + def _invoke( + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + ) -> TextEmbeddingResult: + """ + Invoke text embedding model + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :param user: unique user id + :return: embeddings result + """ + embeddings, prompt_tokens, total_tokens = self.embed_text( + model=model, + credentials=credentials, + texts=texts, + ) + + # calc usage + usage = self._calc_response_usage( + model=model, credentials=credentials, tokens=prompt_tokens, total_tokens=total_tokens + ) + return TextEmbeddingResult(embeddings=embeddings, usage=usage, model=model) + + def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: + """ + Get number of tokens for given prompt messages + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :return: + """ + if len(texts) == 0: + return 0 + + _, prompt_tokens, _ = self.embed_text( + model=model, + credentials=credentials, + texts=texts, + ) + return prompt_tokens + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + # call embedding model + self.embed_text(model=model, credentials=credentials, texts=["ping"]) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + @nomic_login_required + def embed_text(self, model: str, credentials: dict, texts: list[str]) -> tuple[list[list[float]], int, int]: + """Call out to Nomic's embedding endpoint. + + Args: + model: The model to use for embedding. + texts: The list of texts to embed. + + Returns: + List of embeddings, one for each text, and tokens usage. 
+ """ + embeddings: list[list[float]] = [] + prompt_tokens = 0 + total_tokens = 0 + + response = embed.text( + model=model, + texts=texts, + ) + + if not (response and "embeddings" in response): + raise ValueError("Embedding data is missing in the response.") + + if not (response and "usage" in response): + raise ValueError("Response usage is missing.") + + if "prompt_tokens" not in response["usage"]: + raise ValueError("Response usage does not contain prompt tokens.") + + if "total_tokens" not in response["usage"]: + raise ValueError("Response usage does not contain total tokens.") + + embeddings = [list(map(float, e)) for e in response["embeddings"]] + total_tokens = response["usage"]["total_tokens"] + prompt_tokens = response["usage"]["prompt_tokens"] + return embeddings, prompt_tokens, total_tokens + + def _calc_response_usage(self, model: str, credentials: dict, tokens: int, total_tokens: int) -> EmbeddingUsage: + """ + Calculate response usage + + :param model: model name + :param credentials: model credentials + :param tokens: prompt tokens + :param total_tokens: total tokens + :return: usage + """ + # get input price info + input_price_info = self.get_price( + model=model, + credentials=credentials, + price_type=PriceType.INPUT, + tokens=tokens, + ) + + # transform usage + usage = EmbeddingUsage( + tokens=tokens, + total_tokens=total_tokens, + unit_price=input_price_info.unit_price, + price_unit=input_price_info.unit, + total_price=input_price_info.total_amount, + currency=input_price_info.currency, + latency=time.perf_counter() - self.started_at, + ) + + return usage diff --git a/api/poetry.lock b/api/poetry.lock index 78816683d8..184cdb9e81 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -4135,6 +4135,20 @@ files = [ {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, ] +[[package]] +name = "jsonlines" +version = "4.0.0" +description = "Library with helpers for the jsonlines file format" 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonlines-4.0.0-py3-none-any.whl", hash = "sha256:185b334ff2ca5a91362993f42e83588a360cf95ce4b71a73548502bda52a7c55"}, + {file = "jsonlines-4.0.0.tar.gz", hash = "sha256:0c6d2c09117550c089995247f605ae4cf77dd1533041d366351f6f298822ea74"}, +] + +[package.dependencies] +attrs = ">=19.2.0" + [[package]] name = "jsonpath-ng" version = "1.6.1" @@ -4469,6 +4483,24 @@ files = [ {file = "llvmlite-0.43.0.tar.gz", hash = "sha256:ae2b5b5c3ef67354824fb75517c8db5fbe93bc02cd9671f3c62271626bc041d5"}, ] +[[package]] +name = "loguru" +version = "0.7.2" +description = "Python logging made (stupidly) simple" +optional = false +python-versions = ">=3.5" +files = [ + {file = "loguru-0.7.2-py3-none-any.whl", hash = "sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb"}, + {file = "loguru-0.7.2.tar.gz", hash = "sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""} +win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} + +[package.extras] +dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"] + [[package]] name = "lxml" version = "5.3.0" @@ -5320,6 +5352,36 @@ plot = ["matplotlib"] tgrep = ["pyparsing"] twitter = ["twython"] +[[package]] +name = "nomic" +version = "3.1.2" +description = "The official Nomic python client." 
+optional = false +python-versions = "*" +files = [ + {file = "nomic-3.1.2.tar.gz", hash = "sha256:2de1ab1dcf2429011c92987bb2f1eafe1a3a4901c3185b18f994bf89616f606d"}, +] + +[package.dependencies] +click = "*" +jsonlines = "*" +loguru = "*" +numpy = "*" +pandas = "*" +pillow = "*" +pyarrow = "*" +pydantic = "*" +pyjwt = "*" +requests = "*" +rich = "*" +tqdm = "*" + +[package.extras] +all = ["nomic[aws,local]"] +aws = ["boto3", "sagemaker"] +dev = ["black (==24.3.0)", "cairosvg", "coverage", "isort", "mkautodoc", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]", "myst-parser", "nomic[all]", "pandas", "pillow", "pylint", "pyright", "pytest", "pytorch-lightning", "twine"] +local = ["gpt4all (>=2.5.0,<3)"] + [[package]] name = "novita-client" version = "0.5.7" @@ -9919,6 +9981,20 @@ files = [ beautifulsoup4 = "*" requests = ">=2.0.0,<3.0.0" +[[package]] +name = "win32-setctime" +version = "1.1.0" +description = "A small Python utility to set file creation time on Windows" +optional = false +python-versions = ">=3.5" +files = [ + {file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"}, + {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"}, +] + +[package.extras] +dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] + [[package]] name = "wrapt" version = "1.16.0" @@ -10422,4 +10498,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "eb7ef7be5c7790e214f37f17f92b69407ad557cb80055ef7e49e36eb51b3fca6" +content-hash = "17c4108d92c415d987f8b437ea3e0484c5601a05bfe175339a8546c93c159bc5" diff --git a/api/pyproject.toml b/api/pyproject.toml index 066b4772a9..41244f516c 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -100,6 +100,7 @@ exclude = [ OPENAI_API_KEY = "sk-IamNotARealKeyJustForMockTestKawaiiiiiiiiii" UPSTAGE_API_KEY = "up-aaaaaaaaaaaaaaaaaaaa" 
FIREWORKS_API_KEY = "fw_aaaaaaaaaaaaaaaaaaaa" +NOMIC_API_KEY = "nk-aaaaaaaaaaaaaaaaaaaa" AZURE_OPENAI_API_BASE = "https://difyai-openai.openai.azure.com" AZURE_OPENAI_API_KEY = "xxxxb1707exxxxxxxxxxaaxxxxxf94" ANTHROPIC_API_KEY = "sk-ant-api11-IamNotARealKeyJustForMockTestKawaiiiiiiiiii-NotBaka-ASkksz" @@ -217,6 +218,7 @@ azure-ai-inference = "^1.0.0b3" volcengine-python-sdk = {extras = ["ark"], version = "^1.0.98"} oci = "^2.133.0" tos = "^2.7.1" +nomic = "^3.1.2" [tool.poetry.group.indriect.dependencies] kaleido = "0.2.1" rank-bm25 = "~0.2.2" diff --git a/api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py b/api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py new file mode 100644 index 0000000000..281e866e45 --- /dev/null +++ b/api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py @@ -0,0 +1,59 @@ +import os +from collections.abc import Callable +from typing import Any, Literal, Union + +import pytest + +# import monkeypatch +from _pytest.monkeypatch import MonkeyPatch +from nomic import embed + + +def create_embedding(texts: list[str], model: str, **kwargs: Any) -> dict: + texts_len = len(texts) + + foo_embedding_sample = 0.123456 + + combined = { + "embeddings": [[foo_embedding_sample for _ in range(768)] for _ in range(texts_len)], + "usage": {"prompt_tokens": texts_len, "total_tokens": texts_len}, + "model": model, + "inference_mode": "remote", + } + + return combined + + +def mock_nomic( + monkeypatch: MonkeyPatch, + methods: list[Literal["text_embedding"]], +) -> Callable[[], None]: + """ + mock nomic module + + :param monkeypatch: pytest monkeypatch fixture + :return: unpatch function + """ + + def unpatch() -> None: + monkeypatch.undo() + + if "text_embedding" in methods: + monkeypatch.setattr(embed, "text", create_embedding) + + return unpatch + + +MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true" + + +@pytest.fixture +def setup_nomic_mock(request, monkeypatch): + methods = request.param if 
hasattr(request, "param") else [] + if MOCK: + unpatch = mock_nomic(monkeypatch, methods=methods) + + yield + + if MOCK: + unpatch() diff --git a/api/tests/integration_tests/model_runtime/nomic/__init__.py b/api/tests/integration_tests/model_runtime/nomic/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/integration_tests/model_runtime/nomic/test_embeddings.py b/api/tests/integration_tests/model_runtime/nomic/test_embeddings.py new file mode 100644 index 0000000000..52dc96ee95 --- /dev/null +++ b/api/tests/integration_tests/model_runtime/nomic/test_embeddings.py @@ -0,0 +1,62 @@ +import os + +import pytest + +from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.nomic.text_embedding.text_embedding import NomicTextEmbeddingModel +from tests.integration_tests.model_runtime.__mock.nomic_embeddings import setup_nomic_mock + + +@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True) +def test_validate_credentials(setup_nomic_mock): + model = NomicTextEmbeddingModel() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials( + model="nomic-embed-text-v1.5", + credentials={ + "nomic_api_key": "invalid_key", + }, + ) + + model.validate_credentials( + model="nomic-embed-text-v1.5", + credentials={ + "nomic_api_key": os.environ.get("NOMIC_API_KEY"), + }, + ) + + +@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True) +def test_invoke_model(setup_nomic_mock): + model = NomicTextEmbeddingModel() + + result = model.invoke( + model="nomic-embed-text-v1.5", + credentials={ + "nomic_api_key": os.environ.get("NOMIC_API_KEY"), + }, + texts=["hello", "world"], + user="foo", + ) + + assert isinstance(result, TextEmbeddingResult) + assert result.model == "nomic-embed-text-v1.5" + assert len(result.embeddings) == 2 + assert 
result.usage.total_tokens == 2 + + +@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True) +def test_get_num_tokens(setup_nomic_mock): + model = NomicTextEmbeddingModel() + + num_tokens = model.get_num_tokens( + model="nomic-embed-text-v1.5", + credentials={ + "nomic_api_key": os.environ.get("NOMIC_API_KEY"), + }, + texts=["hello", "world"], + ) + + assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/nomic/test_provider.py b/api/tests/integration_tests/model_runtime/nomic/test_provider.py new file mode 100644 index 0000000000..6cad400c06 --- /dev/null +++ b/api/tests/integration_tests/model_runtime/nomic/test_provider.py @@ -0,0 +1,22 @@ +import os + +import pytest + +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.nomic.nomic import NomicAtlasProvider +from core.model_runtime.model_providers.nomic.text_embedding.text_embedding import NomicTextEmbeddingModel +from tests.integration_tests.model_runtime.__mock.nomic_embeddings import setup_nomic_mock + + +@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True) +def test_validate_provider_credentials(setup_nomic_mock): + provider = NomicAtlasProvider() + + with pytest.raises(CredentialsValidateFailedError): + provider.validate_provider_credentials(credentials={}) + + provider.validate_provider_credentials( + credentials={ + "nomic_api_key": os.environ.get("NOMIC_API_KEY"), + }, + ) diff --git a/dev/pytest/pytest_model_runtime.sh b/dev/pytest/pytest_model_runtime.sh index 4c1c6bf4f3..4c0083a2de 100755 --- a/dev/pytest/pytest_model_runtime.sh +++ b/dev/pytest/pytest_model_runtime.sh @@ -7,4 +7,5 @@ pytest api/tests/integration_tests/model_runtime/anthropic \ api/tests/integration_tests/model_runtime/google api/tests/integration_tests/model_runtime/xinference \ api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py \ 
api/tests/integration_tests/model_runtime/upstage \ - api/tests/integration_tests/model_runtime/fireworks + api/tests/integration_tests/model_runtime/fireworks \ + api/tests/integration_tests/model_runtime/nomic From 8cc9e683631e155e9c7c1c282e91f9933f2858ba Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:00:34 +0900 Subject: [PATCH 15/64] fix: prompt for the follow-up suggestions (#8685) --- api/core/llm_generator/prompts.py | 1 - 1 file changed, 1 deletion(-) diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index c40b6d1808..e5b6784516 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -65,7 +65,6 @@ SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = ( "Please help me predict the three most likely questions that human would ask, " "and keeping each question under 20 characters.\n" "MAKE SURE your output is the SAME language as the Assistant's latest response" - "(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n" "The output must be an array in JSON format following the specified schema:\n" '["question1","question2","question3"]\n' ) From bef83a4d2ecae95b5b07be68b10fe5181cfa0ac1 Mon Sep 17 00:00:00 2001 From: Nam Vu Date: Mon, 23 Sep 2024 20:32:58 +0700 Subject: [PATCH 16/64] fix: typos and improve naming conventions: (#8687) --- api/commands.py | 2 +- .../api_resource/chat/async_completions.py | 2 +- .../api_resource/chat/completions.py | 2 +- .../zhipuai/zhipuai_sdk/core/_base_models.py | 3 +-- .../zhipuai/zhipuai_sdk/core/_http_client.py | 12 +++++----- .../zhipuai_sdk/core/_legacy_response.py | 2 +- .../zhipuai/zhipuai_sdk/core/_response.py | 4 ++-- .../types/knowledge/document/__init__.py | 4 ++-- .../types/knowledge/document/document.py | 6 ++--- .../nodes/end/end_stream_processor.py | 22 +++++++++---------- web/app/components/base/chat/chat/hooks.ts | 4 ++-- 
.../base/image-uploader/image-preview.tsx | 4 ++-- .../develop/template/template_workflow.en.mdx | 2 +- .../workflow/hooks/use-workflow-run.ts | 6 ++--- 14 files changed, 37 insertions(+), 38 deletions(-) diff --git a/api/commands.py b/api/commands.py index b8fc81af67..7ef4aed7f7 100644 --- a/api/commands.py +++ b/api/commands.py @@ -652,7 +652,7 @@ where sites.id is null limit 1000""" app_was_created.send(app, account=account) except Exception as e: failed_app_ids.append(app_id) - click.echo(click.style("FFailed to fix missing site for app {}".format(app_id), fg="red")) + click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red")) logging.exception(f"Fix app related site missing issue failed, error: {e}") continue diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py index d8ecc31064..05510a3ec4 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py @@ -57,7 +57,7 @@ class AsyncCompletions(BaseAPI): if temperature <= 0: do_sample = False temperature = 0.01 - # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)") # noqa: E501 + # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperature不生效)") # noqa: E501 if temperature >= 1: temperature = 0.99 # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间") diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py index 1c23473a03..8e5bb454e6 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py +++ 
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py @@ -60,7 +60,7 @@ class Completions(BaseAPI): if temperature <= 0: do_sample = False temperature = 0.01 - # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)") # noqa: E501 + # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperature不生效)") # noqa: E501 if temperature >= 1: temperature = 0.99 # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间") diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py index 5e9a7e0a98..6d8ba700b7 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py @@ -630,8 +630,7 @@ def validate_type(*, type_: type[_T], value: object) -> _T: return cast(_T, _validate_non_model_type(type_=type_, value=value)) -# our use of subclasssing here causes weirdness for type checkers, -# so we just pretend that we don't subclass +# Subclassing here confuses type checkers, so we treat this class as non-inheriting. if TYPE_CHECKING: GenericModel = BaseModel else: diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py index d0f933d814..ffdafb85d5 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py @@ -169,7 +169,7 @@ class BaseSyncPage(BasePage[_T], Generic[_T]): # Pydantic uses a custom `__iter__` method to support casting BaseModels # to dictionaries. e.g. dict(model). # As we want to support `for item in page`, this is inherently incompatible - # with the default pydantic behaviour. 
It is not possible to support both + # with the default pydantic behavior. It is not possible to support both # use cases at once. Fortunately, this is not a big deal as all other pydantic # methods should continue to work as expected as there is an alternative method # to cast a model to a dictionary, model.dict(), which is used internally @@ -356,16 +356,16 @@ class HttpClient: **kwargs, ) - def _object_to_formfata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]: + def _object_to_formdata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]: items = [] if isinstance(value, Mapping): for k, v in value.items(): - items.extend(self._object_to_formfata(f"{key}[{k}]", v)) + items.extend(self._object_to_formdata(f"{key}[{k}]", v)) return items if isinstance(value, list | tuple): for v in value: - items.extend(self._object_to_formfata(key + "[]", v)) + items.extend(self._object_to_formdata(key + "[]", v)) return items def _primitive_value_to_str(val) -> str: @@ -385,7 +385,7 @@ class HttpClient: return [(key, str_data)] def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]: - items = flatten(list(starmap(self._object_to_formfata, data.items()))) + items = flatten(list(starmap(self._object_to_formdata, data.items()))) serialized: dict[str, object] = {} for key, value in items: @@ -620,7 +620,7 @@ class HttpClient: stream: bool, stream_cls: type[StreamResponse] | None, ) -> ResponseT: - # _legacy_response with raw_response_header to paser method + # _legacy_response with raw_response_header to parser method if response.request.headers.get(RAW_RESPONSE_HEADER) == "true": return cast( ResponseT, diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py index 47183b9eee..51bf21bcdc 100644 --- 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py @@ -87,7 +87,7 @@ class LegacyAPIResponse(Generic[R]): For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. - You can customise the type that the response is parsed into through + You can customize the type that the response is parsed into through the `to` argument, e.g. ```py diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py index 45443da662..92e6018055 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py @@ -252,7 +252,7 @@ class APIResponse(BaseAPIResponse[R]): For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. - You can customise the type that the response is parsed into through + You can customize the type that the response is parsed into through the `to` argument, e.g. ```py @@ -363,7 +363,7 @@ class StreamAlreadyConsumed(ZhipuAIError): # noqa: N818 # ^ error ``` - If you want this behaviour you'll need to either manually accumulate the response + If you want this behavior you'll need to either manually accumulate the response content or call `await response.read()` before iterating over the stream. 
""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py index 32e23e6dab..59cb41d712 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py @@ -1,8 +1,8 @@ -from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessinfo +from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessInfo __all__ = [ "DocumentData", "DocumentObject", - "DocumentSuccessinfo", + "DocumentSuccessInfo", "DocumentFailedInfo", ] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py index b9a1646391..980bc6f4a7 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py @@ -2,10 +2,10 @@ from typing import Optional from ....core import BaseModel -__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessinfo", "DocumentFailedInfo"] +__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessInfo", "DocumentFailedInfo"] -class DocumentSuccessinfo(BaseModel): +class DocumentSuccessInfo(BaseModel): documentId: Optional[str] = None """文件id""" filename: Optional[str] = None @@ -24,7 +24,7 @@ class DocumentFailedInfo(BaseModel): class DocumentObject(BaseModel): """文档信息""" - successInfos: Optional[list[DocumentSuccessinfo]] = None + successInfos: Optional[list[DocumentSuccessInfo]] = None """上传成功的文件信息""" failedInfos: Optional[list[DocumentFailedInfo]] = None """上传失败的文件信息""" diff --git 
a/api/core/workflow/nodes/end/end_stream_processor.py b/api/core/workflow/nodes/end/end_stream_processor.py index 0366d7965d..1aecf863ac 100644 --- a/api/core/workflow/nodes/end/end_stream_processor.py +++ b/api/core/workflow/nodes/end/end_stream_processor.py @@ -22,8 +22,8 @@ class EndStreamProcessor(StreamProcessor): for end_node_id, _ in self.end_stream_param.end_stream_variable_selector_mapping.items(): self.route_position[end_node_id] = 0 self.current_stream_chunk_generating_node_ids: dict[str, list[str]] = {} - self.has_outputed = False - self.outputed_node_ids = set() + self.has_output = False + self.output_node_ids = set() def process(self, generator: Generator[GraphEngineEvent, None, None]) -> Generator[GraphEngineEvent, None, None]: for event in generator: @@ -34,11 +34,11 @@ class EndStreamProcessor(StreamProcessor): yield event elif isinstance(event, NodeRunStreamChunkEvent): if event.in_iteration_id: - if self.has_outputed and event.node_id not in self.outputed_node_ids: + if self.has_output and event.node_id not in self.output_node_ids: event.chunk_content = "\n" + event.chunk_content - self.outputed_node_ids.add(event.node_id) - self.has_outputed = True + self.output_node_ids.add(event.node_id) + self.has_output = True yield event continue @@ -53,11 +53,11 @@ class EndStreamProcessor(StreamProcessor): ) if stream_out_end_node_ids: - if self.has_outputed and event.node_id not in self.outputed_node_ids: + if self.has_output and event.node_id not in self.output_node_ids: event.chunk_content = "\n" + event.chunk_content - self.outputed_node_ids.add(event.node_id) - self.has_outputed = True + self.output_node_ids.add(event.node_id) + self.has_output = True yield event elif isinstance(event, NodeRunSucceededEvent): yield event @@ -124,11 +124,11 @@ class EndStreamProcessor(StreamProcessor): if text: current_node_id = value_selector[0] - if self.has_outputed and current_node_id not in self.outputed_node_ids: + if self.has_output and current_node_id not in 
self.output_node_ids: text = "\n" + text - self.outputed_node_ids.add(current_node_id) - self.has_outputed = True + self.output_node_ids.add(current_node_id) + self.has_output = True yield NodeRunStreamChunkEvent( id=event.id, node_id=event.node_id, diff --git a/web/app/components/base/chat/chat/hooks.ts b/web/app/components/base/chat/chat/hooks.ts index dfb5a1b685..64c238f9d1 100644 --- a/web/app/components/base/chat/chat/hooks.ts +++ b/web/app/components/base/chat/chat/hooks.ts @@ -334,9 +334,9 @@ export const useChat = ( const newChatList = produce(chatListRef.current, (draft) => { const index = draft.findIndex(item => item.id === responseItem.id) if (index !== -1) { - const requestion = draft[index - 1] + const question = draft[index - 1] draft[index - 1] = { - ...requestion, + ...question, } draft[index] = { ...draft[index], diff --git a/web/app/components/base/image-uploader/image-preview.tsx b/web/app/components/base/image-uploader/image-preview.tsx index e5bd4c1bbc..096facabfd 100644 --- a/web/app/components/base/image-uploader/image-preview.tsx +++ b/web/app/components/base/image-uploader/image-preview.tsx @@ -88,7 +88,7 @@ const ImagePreview: FC = ({ }) } - const imageTobase64ToBlob = (base64: string, type = 'image/png'): Blob => { + const imageBase64ToBlob = (base64: string, type = 'image/png'): Blob => { const byteCharacters = atob(base64) const byteArrays = [] @@ -109,7 +109,7 @@ const ImagePreview: FC = ({ const shareImage = async () => { try { const base64Data = url.split(',')[1] - const blob = imageTobase64ToBlob(base64Data, 'image/png') + const blob = imageBase64ToBlob(base64Data, 'image/png') await navigator.clipboard.write([ new ClipboardItem({ diff --git a/web/app/components/develop/template/template_workflow.en.mdx b/web/app/components/develop/template/template_workflow.en.mdx index 2bd0fe9daf..5c712c2c29 100644 --- a/web/app/components/develop/template/template_workflow.en.mdx +++ b/web/app/components/develop/template/template_workflow.en.mdx 
@@ -424,7 +424,7 @@ Workflow applications offers non-session support and is ideal for translation, a /> - Returns worklfow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order. + Returns workflow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order. ### Query diff --git a/web/app/components/workflow/hooks/use-workflow-run.ts b/web/app/components/workflow/hooks/use-workflow-run.ts index e1da503f38..68c3ff0a4b 100644 --- a/web/app/components/workflow/hooks/use-workflow-run.ts +++ b/web/app/components/workflow/hooks/use-workflow-run.ts @@ -185,7 +185,7 @@ export const useWorkflowRun = () => { draft.forEach((edge) => { edge.data = { ...edge.data, - _runned: false, + _run: false, } }) }) @@ -292,7 +292,7 @@ export const useWorkflowRun = () => { const newEdges = produce(edges, (draft) => { draft.forEach((edge) => { if (edge.target === data.node_id && incomeNodesId.includes(edge.source)) - edge.data = { ...edge.data, _runned: true } as any + edge.data = { ...edge.data, _run: true } as any }) }) setEdges(newEdges) @@ -416,7 +416,7 @@ export const useWorkflowRun = () => { const edge = draft.find(edge => edge.target === data.node_id && edge.source === prevNodeId) if (edge) - edge.data = { ...edge.data, _runned: true } as any + edge.data = { ...edge.data, _run: true } as any }) setEdges(newEdges) From 7f1b0288409626fcb87bee2fe12643ebfa0f3059 Mon Sep 17 00:00:00 2001 From: Sa Zhang <55871322+Nick17t@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:39:26 +0800 Subject: [PATCH 17/64] fix: change the brand name to Jina AI (#8691) Co-authored-by: sa zhang --- .../model_providers/jina/jina.yaml | 6 +++--- .../tools/provider/builtin/jina/jina.yaml | 20 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/api/core/model_runtime/model_providers/jina/jina.yaml b/api/core/model_runtime/model_providers/jina/jina.yaml index 9c70d6ff33..2f2d6e6daa 100644 --- 
a/api/core/model_runtime/model_providers/jina/jina.yaml +++ b/api/core/model_runtime/model_providers/jina/jina.yaml @@ -1,6 +1,6 @@ -provider: jina +provider: Jina AI label: - en_US: Jina + en_US: Jina AI description: en_US: Embedding and Rerank Model Supported icon_small: @@ -11,7 +11,7 @@ background: "#EFFDFD" help: title: en_US: Get your API key from Jina AI - zh_Hans: 从 Jina 获取 API Key + zh_Hans: 从 Jina AI 获取 API Key url: en_US: https://jina.ai/ supported_model_types: diff --git a/api/core/tools/provider/builtin/jina/jina.yaml b/api/core/tools/provider/builtin/jina/jina.yaml index 06f23382d9..9ce5cbd6d1 100644 --- a/api/core/tools/provider/builtin/jina/jina.yaml +++ b/api/core/tools/provider/builtin/jina/jina.yaml @@ -1,10 +1,10 @@ identity: author: Dify - name: jina + name: Jina AI label: - en_US: Jina - zh_Hans: Jina - pt_BR: Jina + en_US: Jina AI + zh_Hans: Jina AI + pt_BR: Jina AI description: en_US: Convert any URL to an LLM-friendly input or perform searches on the web for grounding information. Experience improved output for your agent and RAG systems at no cost. 
zh_Hans: 将任何URL转换为LLM易读的输入或在网页上搜索引擎上搜索引擎。 @@ -22,11 +22,11 @@ credentials_for_provider: zh_Hans: API 密钥(可留空) pt_BR: Chave API (deixe vazio se você não tiver uma) placeholder: - en_US: Please enter your Jina API key - zh_Hans: 请输入你的 Jina API 密钥 - pt_BR: Por favor, insira sua chave de API do Jina + en_US: Please enter your Jina AI API key + zh_Hans: 请输入你的 Jina AI API 密钥 + pt_BR: Por favor, insira sua chave de API do Jina AI help: - en_US: Get your Jina API key from Jina (optional, but you can get a higher rate) - zh_Hans: 从 Jina 获取您的 Jina API 密钥(非必须,能得到更高的速率) - pt_BR: Obtenha sua chave de API do Jina na Jina (opcional, mas você pode obter uma taxa mais alta) + en_US: Get your Jina AI API key from Jina AI (optional, but you can get a higher rate) + zh_Hans: 从 Jina AI 获取您的 Jina AI API 密钥(非必须,能得到更高的速率) + pt_BR: Obtenha sua chave de API do Jina AI na Jina AI (opcional, mas você pode obter uma taxa mais alta) url: https://jina.ai From 21e9608b239036c98367193e32ade356c886de99 Mon Sep 17 00:00:00 2001 From: themanforfree Date: Tue, 24 Sep 2024 10:20:06 +0800 Subject: [PATCH 18/64] feat: add xinference sd web ui api tool (#8385) Signed-off-by: themanforfree --- .../builtin/xinference/_assets/icon.png | Bin 0 -> 57667 bytes .../xinference/tools/stable_diffusion.py | 412 ++++++++++++++++++ .../xinference/tools/stable_diffusion.yaml | 87 ++++ .../provider/builtin/xinference/xinference.py | 18 + .../builtin/xinference/xinference.yaml | 40 ++ 5 files changed, 557 insertions(+) create mode 100644 api/core/tools/provider/builtin/xinference/_assets/icon.png create mode 100644 api/core/tools/provider/builtin/xinference/tools/stable_diffusion.py create mode 100644 api/core/tools/provider/builtin/xinference/tools/stable_diffusion.yaml create mode 100644 api/core/tools/provider/builtin/xinference/xinference.py create mode 100644 api/core/tools/provider/builtin/xinference/xinference.yaml diff --git a/api/core/tools/provider/builtin/xinference/_assets/icon.png 
b/api/core/tools/provider/builtin/xinference/_assets/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..e58cacbd123b5887b34fc8414d8b57aa801bb690 GIT binary patch literal 57667 zcmeFZg;!N=&_2BB=Fp+Qp;5X7Bo84eDIwh=NOyB60Z~d2kZ$Qlx=Xr2Iz+m=f7?gj z^}YYXx7N#A2R7$y_8l|#%r)1{5T>FegM;}96951lIaw(+06?Pt^Fl`j-qfUpS;Gt#o@*O}lzgVthoC1P{6Ynx zQlXK`k%$=049MFe{_XPgaD5}vh8aSyfgzu>m(X#y>7V*qni_A`RXr>y_N8wUWz1ek zOV=jVC3#7k`HG!o($WpI57KgvEi|83FAWE&8F# z1N{HRY^c)zXS_!Ed)IMd+`hUh@Sbch%CE=*B0D(kN1J{th zRuC=rKjJChof~Tn1zgXJr;xK9wUK^CFZP{_{k=7$lD?)UuWr z9!qT-_zHh-YPiCk!FWpg!)qVgBa&{9ea7(LJbnd{P!}fsdf3v{aYu#@zoQw$;8MwK z4~Nj5BkczZ`icCq4J4;O1l)+Gx8g#okadwpt2mjy@z#cms@^C{cXOJ?Eu&HT(t)IP! zazplSzhgI9wXh0}2YR~Zh#Gwp0}{QYlHyW`=S+v@D56(68f90}!Bzf`?5P=&P2wx{ zwulGkG08amt&&AiKS zj&0W0z^=t14%(k-$%c|pc$&67-}BB!X-EwW1pYz@IK%O_{&9<&L66r&%N3RC{S;P;Lq0lMM8{7!sg zocf9MnzK?OZN4TY_ESRK#u(VMFU5k95OHz;!4A~29r3t&rSB{UHUbW!Ut19n&@ldO zkFEO7uNE&wv;?h(d+67u+(8UW536ayF29pX*#<0J3aWnq2&M%7)t^T79y5SkO8%eK=#WdDb}ur4*nEHgv}?hc34w{wFgyQrP^r zC)M~-jeMlGyfZ(}HiN>@f7)gjZ}yU@*2!jmq#+ZiU7mx0i-RM{*_&$$K_~q#OV54> zELdvbuimG~4an#=tttyEt!6ulqH-c5p^KF)eUvdteJDSR_KQYR zS+r`zWN9QAWvT)Ebpix?mB03>)t}b=jP?g;zWU>E1->)-qg$$6hB19RT2a*H!)ZhH}C^6X= z#?&x> z7X5JELvP0()#w@@5FB@z5ZSw{Wpjr8B2>UDwlSY;*Ao|p^;d>7T5pezeiIk?-H61H z+Pz|QCZ;JS4#k8}(jAOiW{%Pz0zrQj!pN{}&-=;I%EKk*XuHnF2R<&S>KSdNVQ@sK zE6l*13>O_>`=2{Um_-Bl#WMA_RzuzU5S})=M*zUZ-;IVDb|Cz|hp?;Iuj5`p7gRxa zbJzZaw1e?CT|uLy$U(W~aV}H)L<0b){QI=@($T~cgC;!ow)!TiCLkO_LSuqm4h5?I zEx1|2R#SF}{d00+-ZlT?BqDt+pNwPH?A?o8p;W@>4ic}uiJC8YDewTnzZM*R-wru_ zAJ8_4Qt)uJv*P5CNo~=2noU5HhYplI{wtDGv`N9qmTM9er;FZ)ZlOL5O9$bXtIsQX zeJ3@Qg`kb6+qZfdGl&?+tJysbvL`SELh@ z#SW#=n{ASyix|5C`x^(m#jj#m&5MP)4bB65>#nbjx^1O?l^s#eBje9#Z5KBdSVeb(O)HGjAF6mENvK;m%7l_s7^ z1?*Cjd>#JL?U(5ipgJ~`C`m%4n3v_S@8H`S<5hINFPQN=EGUKtVt1XjkcOSKn^Tn*iWE|BOlOx36`7YLdy z98^o+i#a{?Rp0D?d|S)xSr<X46Pbz`S*1 z4}DCZSNWzsDyN(Xy-K%NNh4EODZg8+aa}Csc<*!chy=TV`r2jjPIp{&RH5$CMe(`; z-F{pUc&0gpy05EDwSGMdNAK9Q_610Di<2n{NvQ5M-G>ohVnv4? 
zCJ?WmmK}c6>U70xf}aEF98nZTCG(Iy?^D8Agtn3mAb~nN_VoY&L*+$Vg!-~oF|hkE zjT}94V@IC7TJ|ELN@BS%G;I>@8%f!`<6Po&Qd{<3OgcjNzfk_$8?IJz0 zOp__egp5N;;Cz`aL-$H5OQ!Q22Z-XR_B%@9?4Ps?zR>5Qtls^`guPAwWP?sjP_c_c zSTR^RC955415_4ByTCt(S%-jz43A}9ME_PV?!5O2#*tEgYz3=k%PbVbJaA)uX2n2j z<1E?_n6;1@E;Bop$Vm%}Du$|fifH5;a zEMK_HhdkW7G@V-ADDy;*8|$#|M&PTKUTPU;`#;Pr(F5@3wcdzEp+lk?knu&ni=Jg4 z9)WQpEWC*EWmffM#_!F<(Hk}WtX+{HP{K@cu+2z4IcW6ZLj#`h){I$wp zfFIt-F`L|l?Tgzwda~P%RATvT0oOS$?l3XAn|oT!ijOv1uHJh=$tR~u`#n%nUf*O> zIZR&PmkVN#_EpF93Q$^o^X^kFs^(N5Y~5SJ6GlU2@$>Kvdqf$fdVFswJAHvDdY%UwRmM9ErNGF1?6ZzwbJWqc3<45-OJzb$(Q9zQ&!(_*Ouh4)e zgm{jXtfsYO%b{v-q5AWO{ukY@T1ds&0T27{jyOVzhMldM$RDZ%pG}bgSulPOI6MT7 zrfv|57vr3_gnT!>8>v*9XkDyj*cHquaEJ8yZ49{(L{7;Yb}=~?bQ`H`(VaGg{o>ox z!UI`FC&+$(lzFw?EmW*q^! zHtPBM`vcye)R1(4+|*y#$&hqB(5%H@T~4U~6jV;v=#e>8n-dfsvXBHzYxV!_ja4!n zJcMS^WOXMbOBO(mk+nOq;$!$CKGG3|Qj`K-}~9gWv5tcLWv6TJQqnbL1!T&eeT zJmazB_RoNNrmvo|MmMiV_$U0_Q@8!D>q(%Ew|xxVwAo?hbhJW^_8VNU-9i2TyL>!W zJsb{ZH+ED(ugN$ls;9?*$h!OC8-IN{p32k^o+!R^xf#Jb9 zL!px~aCln{vnZ{;#)`w%S9H$h+YHk z99FKN>2{Ecvj`nN!L+872$!JLVpd)(A-605cZR{?K0i{h{0(%@w(TZPbN!*d)ok5# z+d4Cl%3}`-L0kJ&1ttQG?}FbxUD_|9Sx!!jeLD1NymYv{yq%Un;J$Mf&pY-xWZ-D@ ztNYf~v%F?64LdIbM!mfCa4#-rx@yoZeQ2|q%{GI|4vo-|fha0;yfThuqWZ=vt8evOG!qo>x_cPAf+Ou0dA`h$;2)7bF;!Z#{T@B}R^V zFnK-Isozox_@jhAOc7(ghJW)iefhCDnnIyY%biRX!n z6o&wkc8t8Yz>ZX(W5`S>L^t5k=(V@N*Lr>@KlQH16vtF_pDltH!&3BP@+%EY-CWV} z@cap=8|USFyl=POePVb`>B`6NO3CU9J&>kSl6_7@4FGPS<~5r*X>U{Z+ciG5ytjgB z8Q{FVUxV{$nVVQpe;0T7pnWzYT5FFwhH5TEF~j8g30u^3?qa&w*GcAh1tWtWje;uj zU3%Q%%_HsVb$g28UuZE_kCU%HoEq?{bvIYhRkO(zx?Ls2kk}#f9)3x0Q(ga%eE#-r zz~*69Ui{mmH=zvAR)mDNwgz;TjXJhm)@MmH{7&A@B4F4KSc;DkeYllNHci}Qvztlv#d5DL&|9Hp$K z+v46*i9FI*cJ{33?>Nun*P_3ACO#Ig1E2W*L=7&rGCgVY>|-M496E&8Rd{5-_msz< zghoI3*5i}$Qj%Qg|&MM0@kQ7%^g;KyCmz^b@#H_o`~?zOYB zWPRMlgZDfgM*nyrC850(F2P_`eYMza^L2&u%&2gVQ27ZN_jMOziTq2EpT5oE+ZV9t4{3fn zcMkFBz%q%RG|aFpLoc$fUoAZHXZZPyYB?o; zo0oOaMZs;*NProKwjs8R@(xx8PkTV}fk!?qRzz`}3f#I_mQA!~D!93{B*Yecj~YGR 
zG}vUlnMZ=OT|{~+zJ1s~-m-i~&{K&SAwaA#5HatkId05J`6mMET{r;D(`CQ3`h^rl zIcB$O=Br`V=CIcT`fa{R;GL=5_RnP{ZHkmPX7cX4dTYvKjV(2%34+v3hvWy$5&1n0 zA@L!|l%jj1t>~kpAtFuAAG%(6WYciRj&r4>{$!PK&rR!kpZ6kc>0OK3P3^yzyHdepDunJO@4Z)VSdd0(y|!2zt1NBMc>rd z#)Hf!)4w*KEBmoW-kOPj_t~#Zy%&~dEZnG7&PdePNiku{+zb(K=8rB$d0A<1*@Np_=$Wr{o*9hQR!a+Vro0ekA>E68 zqN?@^abw@y5R2GgzYFnKuVluu(MBf=-i!sZmaQd11)DG~+3GKP&>;5Wfn(<_FV9Y1 zadxlmiDAAy@`Ujmpn8(H7P@?1XiAQYe9D;;7da3R8^U9><^7s+?$;&x>B=cnyZBML zOuP1V79^xUTS=6B5fTx6gthBgmZi#CK8Jrla0WSseeHTN(p%OQfzcCPMyS1jN%`Y< zck1NX2>wq!48tDVu!vyQebrY9Z{xQpD58C76B4JnsLl?W9_RkN>(&L&S%iqFj>C%c zYp5jIR3(RVHV5#5A;iR zgwP*a31m(4-)yOdU=ofFqSiMi`ecTpXXylFk^xknT=RtiD5yQ|FU2T_>Q7(^Pk9d% z$nY9WcHdr-M^a&kjXQmM9hFic_O{J${hcIJ5m-E#~#<*KQ^O?fnquUQ4rpR$DNk z$^4g(?p%py`3fmxr6A)>4FLf`Y6n6|GOn|W()$}Pih1*%bR0@WY6%){P6FF|es1#p zA6!8>A|g24re}N=Ti!+dt0HhzkSx3c>9})b5{l%XR#!FIewPZw1zH{!C!wUL5>tf& zW3_}O6$V|#(H{LCb>fpNlG)>HsorDb-+4IDzY6Y*_Nv$7Q-RR5u_ffebJN#;H{|_8 z)hf5*&rYp`358{)U1mGYn1)ndAteRMh&$MhA`HKBwL6TnnJOF^N_Z#;=Dq5_*(l_aMQmkljdJKFaJ38)ntl zThj8Vu}K0#T{|EPVm@E&_-xnuMdBAa|{6fTco4K(fbL+pi(DqC()7W{A9`H zy1mwUt1As@Q|Ba6^#0)}=(Zw&=Fabkx%Qkg1JuGU{GhwA5dO_Y+ zW1n>Ywd3UbK{e*jQo)R8dh7>dh|JqT)hA?sk`KTYNQXLZhYkyDN>f16}WQa&c zNa8E%l#w?38S*jeV1{FYPv;HV)Mz-Z2yx(4(;%P`mRwh<+F{KVE9CXpku4 z40VwB7-6hEj4}uE+F|=de%B2DvHq}T5RJ^&@>iZ<#^&%unQ4mRTh52^9zE4_F zlqsC9QGKK`6yhi#->7?TR{QR`9aXPTEFlF1NCv6nBAe)6*jl%>k4qN9YK;!6Y!a#7 z$)wc_&<;A$g+q|jkfMa}sHyqy@3q?1BgN?!8vE~P$D+b6Wxc)$cduD?hA<>cU-&D# zJ7318E8kaV4mrF0{@m-20Do6?a&&j`q?SADfRduz!OmWS6CbrC8IR>6=cb})b+}n- z5#vvQ%@01_>9-i}w|XUYyM60aRlWB=6h7_ru(fdGDQjDzm){xcNjrCctVPI{Tl6q}olZ8cobQr5bIx_AXsZA%n zXL{lDQ0E89dkpAxtOhRNrfh8&mVVaL$ymOg-~W)mfcA3*u5Z8P+!?JBCL%DDwXAhy^zIU~FDhPERO=qk4A(mhCWEg^UEr(Txdbhv9Z?ebk(RC(&g?2F@8GyUj`E0%@U*LiGF8*vR2+co3d}U#2VR^W7j7T;GKIF;3*aOZU#OHv8HEZxwSG#7y&6J^cx}O5DEfy3FY}NS{mp$5N zNM0qMwv8TFD4pe{oqZFQI&D4K_EpzkcF;tiCcK>B6I${v zt`3*#C`42BCnCRdIdFbG`!!UcKa7yit@hkif_9FU1%Fk;9hdtCO;Fmz_vlx_6M`0^N 
zA4M_TH}UmaeA>3eOcKe|y0d*NjYdx-8Y_E#2=OcO3WAY&6B;6N_U}cu{PDdZ{TSy9 zdD!IOZl%GN36=%*k@-l5kn!DpFU8zUB=36KWfvl4Dv7f^MkGifd%OsR0D7xwEd{|c zcMWzSk8y_BzBX}!SwFVfqrlIU!;iW1&?*eWczRC9SOlp5TMG3vRv1YM3U48D@BF$M zmRHILh}N#ZP;^Je;NvZ|`&$l@tu|7R&*IrXQK+#3Af;Q1DDMpYB$sqk@L6BTGJlLP zFK6vX(Z5zVLOA{*gAR;5Ho64Vc)V!RSxT?fwQX#<9-km$O}AFeG&X~2v$6;6P-F|A zUh10Qlt?r~){#*s3A7}vNC3cHc?}_Ik4VAWrDl5e`a> zeq*JJ7*!MT&=uP8hhDkUEbaOwJ!>6*nb7NOd54%uUiN~|MI;u z)^z*EtLI2xDvvdL_`1p)OxC>3$mdM-7B$ZgErbOx5(5?w);6Cn?onV80;Kky`uWKb zvu;a_4}pm%N7}i10;mp3vS11-l5lb=LcJ-)z-$KOi70Oz_G5>9 zQGiVmY8d>P+WYTk9{D^*N4bGU4_tx_o0q0kpQ!M81yGL=;4Hu1Xb4fZp^65KL{VkGVk3VuFmz}f&4oGp)9={MW^iruX5V?O5 zNp&L;tt7^@IZ;VPEdS?B!Y?ikK_qhh?%NJ9YZN@0q$otBR(p+^z3-nh->s8|_JJwNQ!4O`K5`WnShsbNywl6Emi}!t`~(N% zQ6jc}%+V5~z=F)3zHB)yiH&14E)IBtH5^7S^%bWkC-!EOVtg-$ zeRRG?3o$Od{JB%4#mG@vkVJb$Ec-3w(hZxBMaZWTnMR zd!b}ge@G~XxmluSl5oq7XGxS~{WF6CnIH|mjYr?zVXClXs-pw|G+`m5b4vI4BFhI^ z%gyS%*zNJnQeoWlJY)>~qY*tdC!@9g*rT|za!W{nCn-3&fogtlkT!&~d(Y)HXl189 zkEF=W{ec-LGgVki`X=G5FMX!S_7Pf+DxahJNO=f~$ayfL&ym;@;}9o6t-Zf6Aj#>M zA1~p)mFV|x{Nxk#y|R;WwKI65IzMc5^DE7A93%uO!Yd7k{f^0*y;muSQ~iR#Y2v(p z1pM~}7N{a2nnZiD1)dbCfTBFzd#s##1W6yUd}jwGio?xcB%WJu&T-a?RXU!T^8u^ z`j*x&A4P-?L;(>;={hz-4H_zzKUFy8jjE<-(9USl&SG2#tj%UMbw%QBs;61vWn-}f z*W$>?vDd@^Km>w}9%{bM4)XFbw#|=6+Rv!xe&k}KZj>}cSwfH9{XX$^M_^o^E-{aV zF(E681x_Xa^e+~5GiD8MLx0WFPBx#;#{WKGPxuNWIGrjTmrB%fU zwIddeXgAxhAJTlj0KgprI9it$uB^=~s|&iqbSM2}zRfx0p8cTEC*dBJtTp|mCjF1r zC7x5FxUfKiwM##&Q>a6L8zNWXs*^VIlKPx!^N(LBx?-8~r>i4)32BsOX)g}M?8MOk zd(1xzaZwrzwtIT9X60&eJ6~a-T~*HH6Dm{_D6JsxP9em!8S5xwa6f5sYXJc^L-llW z1|PQ5-X7t5^P1kb3!{xV&~~mT7UdL|l+)Op#TY+H1vdy()XfONIyCP2D{mJAHFLU5 z+Yj0vOhd4*=_g-~S-V$JBqE@SRrMhDQythV9swe$lw_Uy2{21w`c*k1&oIdoX|zF%XiM*q1%$Xwj*q*;@VX zQ()(kpRfk-DQuW3_pD%a%BXe69=hL?%Im7=3_1UZyUTtK%Olm>t$1(*uwV@w;k@wdg`6%+Mf6?YuFDkZqy8La z#`W%!K+9^Pa%g_PqJasSA&;ZfLauChWSb;1z?oeaShs{neeF;^Wf0DdPvfv3DChk` ztj1gog3Av0V1PS>Y0*-DZz|`;p-@mXx){~Ayl{=zUeP?>z0kJhI_#4R>w%tZuB5@H 
zSO9CIjllfdH9z84JpSkoVRD?cEKHr5Y(F!eJbNvH30neF1@&tBOgfE2M^OAYsUB#9 z@Tg`Z6mdBCNa3>^!wc`ha{<|fcrY2^t~ZkLxz+^9NZ#HlBHK}L+b;D$FJq0mQ8B1W zbYYK7bzN@zOkSFK+QwnG2dSrbZ6;DCSMDP=SZwhChT8htDUi+W9Z@#D14oTsM~bA7 zOW8TJ9);XJOxDO!?8sH>(KSQnu~u{h06V|s%9!lrNXT!rNi7*YtSBBoY)*bzgZ{Pc z)XF$l0RJP_`_9T=DG4JN0Q?>q5qVbv)oN;(X;ue=GOD_PvV`^NoApX()Q_fOLE6>y z0y}|ap7rna7msc?pNrGI3V6@d4gxP=^b+bTXb?)z8VE5)XvAMx zfW<^`jB2rac9aXv-WGdBk#C7jZw;T*D6*PJIy!@f_ztYOn0evydGkC3VWGj?ce0`$ zP9})N_n$&M!+!-9x5b`XJ%0Xb#^5`{dEmvJz*$jV;0OYcGTM-6r%^M$d(Bg6oXb)Z zs9WxUZMFl!kdn=;sh@BnHzb(OL#EBXnkW$*#9XH(P8C)#aSp{y6Ob`63k3%T4;DDE z{@C70%$I`%47|KbEpnX2@zXYWZ!@V?#uuCLGT`WN8W*Nj1*k^I+=U#dwI*RNCiA$7 z)|Z=dvd>}m789BfU@yK?3Pqu~QFY&seh+wnM*9xuJ-rcWXOAy^V%&iN z&2h%#v2$^<7G4jnvCy)b3OWeZBO%7Omt`aVn_!(q*~o}OvFL4Rz}yLup3UybsJ*Dk zgA!qvu~`BGi&D_3y{>-8(uL6&8W6>XM{PtJ_e}2l;^!*vIoIdCYSh1IM90pJA@ig= z$9xgO^lwjPpMapLVI@biakt<+%kBHc^+;`UTO=V3yIdJ^Ujzvg{YlMi1OhMv22F7N ziNb6iL!07fC1g+0y7;C)b&CId?=!Ia=yx{6;b6Fi11C3)8RiX)Aoxi?hw@wGmjs0@ zE_mBISOj;c4W8T21)G)?8q8HNCM7^Vds^y!oT=55(E&Wj@PVOYg;u9IYIEmJ(zz=d9xzo_yGWJ zkiZ1iCv{KBYpH6?(e6z8r31ZTPtgVAmmMW}I6`!+n8oo1Ic)qMVdmb7PNk8bl|9uT z(F3@`f+cH<%qWe&_k&%MZ$>}a^*EvdjjU#A!NQrGej%ns7@cL64 zbWRM4J;@%;nYKkfeV9BEy>x#t=QTj_dgjWgEn^<{!HRAQNx|zH@!;}Tgcs0ykAR45 zy{6SMh&D}Xo>`0CVN!X0BeJ6rkI0mkvO17wk z67joIa32Zy3%P5IkILO8$7XMbEWi20pA{6h`}TTHW(#NEzxv%nCx(HBO!4xy^Ms+7 z%+hr86Mnt=x&4PPO-XXp%vyAcE`f%vdZ|qdUZ*!;Zv>h_A)||q5Tvp1_F%yW%^ACZtbJq1 zFKev}&DfoCgbi>%JSo7b;Pk**^)zsMO}yBzH>c0krV3d%LyEK%l|)6<_(n9aNcE3jeTIL;8F-&$MC@lW6GEtr{wOg3 z3GtjiZt42-;r;gqXAAMnpB_K!MxqOistjcdY6N%d#8bArD4E15KYQ{ih5CMM#RG5= zLCHT}#7HrPUHT^RBAMb}jgaSGFWY-04vw+pt+Npcpk_<`skrlExi+vR>vor7v$Oib z?||=Z!|D1)Jcb$8^e&CwMR?_`?;soYbL%9y@XILEy3 zLrq^=+10%E^`Y|)sMMddx?q_RK*d1qZs?<^F{hV!`Gw!*av__xRp#KVEzwxKw0j>UE75rPX@jK2g+Wq#bdG}>IHO0<}L6B~E z_JL_;*b_`VX!^rwIn|`#6~8TEaY=CGPaKTfP14_#Cq|CR38p{3i1yUyhs|0!Na+dC z6rZX2PMMgzOlkb!c}2YREePNPa53?yVTO-yzp78WN+QIxkxl1c$|`UOK)={fc)Wg# zul58F@KL@ihR7}IXU~53mGZA&>RunCTGPHJQVMm^#SPW{vR=XQRBUH8Tsg|kI_87l 
z)w&8CBoMH{2WJgN!UR&{hOLqpS+q+*ws2J6Qj-n00;?St9bx3Vy(>#yO@3Dh3DqaE zC>y=Op`?kqz{?n${LgaI1$_8p*Un7Gw?zm58ZMZ~?uJhUIl?k!ntMItN_@Ydr3f&* z-Hf7!=!OS~=H4%Yy$gf8uA(tfiG*XI<|I^#01kh<8Bqg>-t<{+Akm4r)-nPU&Z(;E;MTQy9fscV$DUoRw@|N2Tkdr@d# z>Rn3mtd*_7Tkss{QAj7Fb9orilx^;sFP`m|s zgl5@GRdV)=k(Az?FavkLr#C88*p)_Y0Ky%ttR~k~*lOvTX0rJ%>rhVQ_28D-sqi$r97>S1&$}+=Le^0y!@)H1 z1P(6xruoeJq21s%%;{lUaou}ho?{|#l0EQB*i_Vdc9vcFsf5XFj7>>ksGw%*!BPd= zQ7hLmZlvnUPnq`1$Ug<*6feQOOF-#g+Jk4O#JP!coYIWvQiMWsdDAJPXk1~-{_w?M zexg&c+Lvb#_UMO|^#$TgsnDnFs4HP$DZXCJ<(*roWx1%db5r|M2AIJ4THSwI$_{EN z;U??r3+SH|7OIbhe`Tvv;18X(X8Qb*Cu{m69vpa^kdbhZ#$*-!!Z?HE5@?RC<6zp-UCs~~c_g1{F}4FiIw6Mk~rf!;e9-?NhvUCdzS(^BPd#)V)YLUOCTFBhYH?u>6cmNrq&PRY z%}8DK4(vC*foEKeu=wQOC-ib;tX&3Q*!}JT=kK-SXH5;yu^`&TZhndr1J~zJxOE}j zK&0_2I~ie_c2nmf(v@H6^1qmO-35m*wr29!%^PN|3R$Ik^0H=+yJfzmQ{b63Ap*Rh zOT9*vNc7E#O~vXw?hL*9Mr5B(5v)irW8GvnM+`Fv9h6)hHvyX1{L=Pv*VTDAsw zQcfIEub5MGw#IE#Xsy&|w zyX)t%xS7bLutx-Bm@d=*rc>4Bx>LF$*WM!~a7f-191IMPQ2xFu0@nPZDXXpoGo2l? zWCVeoZJb~xM#|{CG3}`F5VbY6bqKA2GI_7S#Yw!~W1X5#`n@bg+5{K%Ef4VDay&0~ z!A!$@rh1J~A=tn;8pDo;o==E&>1@6&fyNXA&PR;(H~2uE>diSs&Q{~wbuo1F#N|1)Og36+i`EH^gB<VOPpho9ACeskw8#wl0K?Dj_4gDpD6{W zvZFN>R-+mA9v7e6*kAA4{TBb3&-!q4U1fUz$1CE>FtCN0swGE1YO$epil|2PS#4bN7O=5UD&^{8>m1h z`x$uqBKE^d1*)t#%x9Rg?j#7R zt1Bzqt_9-!j9ssV~c;H&R_9_NdmTViop=$#0shgwoG$-kHAB;9GU0XYh0aI?Ilh?jJgM8yY)2 zid|c%RR=ouw}tKvk~@ui-I?eMnt~1r-_>d8Vx-`8coz3t2X`cpF79Mq_Eier5Cede z|K$HB_G(thPoJ*Ci5_pcSALZ_h%3_4-??tAjCnp!U;udADg^0_;rh={+yF z0~CMo+fYy!7j+|tY-X!<2$GX-J>-tT0=4~^b<^s>sFRhGk7iKGLU1EbMUIH zQNqzD{F|m$dzSfU@+1ZshqfUPR=Zi-ej|^}9ntlx`&TvQtWttiO!5qwj$Dlh031O; z7jDxSlP<}GGFhn4+==t?h7xVnwDej<|8rH2uOGS;nkB~mz|*60c@Tdh9hbVz>Xj*Y zOE9gXHdFTTgoFG7O|i1s%YX~0VyDGHBO4ZD_qWsaczm!ci4*LRv0n4Ipp4Sr_2)6F zzGbGn==yEmUIEcO;6M#yeND}Vvmt3~ZHbWC5~&)fnPd)Qv^3O>s8$Fv8`C7sHy!_S zB08G-onda40M#bUG2u*}dXF7w#rqGT`@VdMRZvu;pLlGK%1x2X#4yeHicj-rl#Z}O z5Ih^X5@%yAvRVGjO^nw)SduAQvZ>P`SVK4Qij8uXHYD3#!g`0CLXv_i@0i2eA8fe+ zyCUqN>)u3Y9Y$~B&GxWg*AY#jpss&eBb{4`2pd^5cdBN?KtL>te*Ad$?9gsljk6{D 
z)!u$9FD}jAZljz?VFHBPnxz}VVfKr7N^MPMpUv!tLMB(f$)|i^nKuO$oiYaae+bI{ z1bMGmF&>Hi9tH2(Ru-?`_P~O8%ae14CosyxEaeuyNUloB_IQH5z4@Sh0Nb$sd#t`a1v*F9 ztQuj~q~ZE1OlP?tYJqDaoN)Vb-7ACo=GaQZ1WfeYv zHqE}qIVMQM*du>>Pxt}vbIlBb$0m^no#ezxuf2JSA(sB#7~rzFg9${jfKnjM;T|oQ zL1sH5Gv_PaYCy&9k})}N_4(=Ywx927!?Yc1{?byqI%0OnF8K%{ZwiEj`V88la24GI zB5=n%S~D3Idx;vu0*a^@N7yxjf%E`kc0G+t1+E>po(VQE^M_Mmm+wPJbcd<9?A$x? zRUT_ccam@VMj`h^hq{6_^nEKn=OPS94j)wFfR*H+3XAS zDG%BdO4*%5uX=4Z(zOns4bPQt$lo}u_)DwgUSBqhe$WYuc; zjD$uoSGA71@%BS78j~)lfJO(cY0#M$4)D#xr{X7C!IR5+uQkg)-SxKnlt&DgZ3dp* zH~~V`UQda{4@SVlbA?i1s4^s@J`e)xUVlPJt-1Eo6u1;vGt)(4<5D!u^5@(6R#h5D z=C!&eZZA*dTbeOD>T|JzB-s?`>z7!aK|mSWA4abXiQ8c|EBZ%w+>=NI zo4&*c;mw>Pt%aG_5{#^PnJEwkVS)3ktdmh3(#lhIDOvfV7on5UNoczgw8l|*q_mw? zPhDYPW1q}x_@T|Ycgobh{39XeY92KTqJ&DYLk*;_8n_XlZNWUz{+)y1;x&nX;LBp^YB#ZTBjac*F<%*xk|ho&u1Z@TQ!0%`R%=w7 z-cZvj_3xJN;?+5~K0A27$O>rQIgJ15l;+~q#4b_LU;cqZOu@wD<8_Lhi_PQIDCeC! zc)-_ershnk$*6P_Rr*mg01oz*!u1b*sp&oon-Z0zn7awe!{Lh=6-tSNPXg~@%xx=6 z38O)8- zE0O^bV%wv$nb}@E6{bzKYuTF;bC6MDn-s{}T9Ufl<@3Gdze>r9=r&y2cxfViK&^7OSg7MZ(flCE zAZ1lWX|`+fW?^!-QB~yU7Y2EH0cr21z2|-#`%*!KCXS7IV(+YOip+c3$Knlg?|P#{ zu!trY2KCY3zKh!|UMkt@NxOrff=NP=|0V$!6=3|v30!0uezOg_7)%*$YlE+eRGo0O zz<}CL(D~d%`>Wm?ZdRSVaA{6{=*>%)2H^jXrE8AMvwiz<)w0{dU9FbgmR+lsmhIZT zY}>}NZQHhO+pXog(|hgtz5TyF*UnBH$5%(%KhER>(LMo&Q0X-f_!`0wRMH^UG1vOs z4j+Iecv*L+vKK~6r^Z<7RO);y>FB(0ubZi9*v!Wk#&p<;)Yp___v183i}v?8NunGD z=+947eMJnmH|mXxNM1{oXcDn@nM^0?GhTmBl~*(pZpzP`PpWRt1u5o)piUv%$mJdMO)#~)eOljNfxQlk0rGhL7PBNOOx?UukkGf>5D~Jwx(I@QZ40vwOVorGEea)ay&Fwi#>IVFyNN)U;nS-6nG9M{ZIl{<2g1gT_td8#p2! 
zGHfuP>*J=d3Lcc!7Uut;Tc|ytCc+ra1~j5?3O*`_l-eHU`K0*_>G^DW_U4KZ(o^L| zSgpqnIgubZqG5dNgFf#xWpA(3}DasXG%b*!U)Rs*wjDPW!DoYrZu`<&awQiePDQ`gEt)o6zbV zyJHzXzqFnRGmm)p2(87-A5gT{^=D(@G(?+6cqcNH@L$1&C@V3Xm4wXW~4H=&nk9w8z7haYZ$Z$LpJ=$EZ324~q`m?klqsR}YdxR2jalD1ul3 z%EQ9Ds`$}#?4b_cXJu@pP{$HSQ}EJbi23iD^P_6dzHzlN!`ys0@#M-p-S$N(Clt%f z?0=VkKHSjvd)Elv+;BT#d&*FLS&~sV>DACu@LYl+ror{=(?cyk^^@n4!bdb>jBaj` zKrcI@w2pdmlfOTlE%(_EN$e4!RDh1%uwdf7>+>~y%T-C`J)B9*e)QMrtu>aH=hp+q zcXiDaM+d9=pA%;@yOe7Zkq$mi3}!xa&2PR%r|H$e6|D+-w)%cj;2|m)N@zg&DT;Md zyjvLiQ2Uj;g*f4&^kK$M$sK|TbH9z$DBh`-6(J^x-|_jz2(6XW%n26KU8#C}a}A&G z0FNq%be;qSH3qzi199`i&PKvNT$q=|Sfs#R4@(6EcP#RNZA|xObnK)-Sw|KBM-!-B zuBxphQA?6i*!<^MD>nc~*JPwOnuHld8qcbn*?b%wzB_%ATTb9$sra@chDC{jz}-FZ zZqu2qXXPigV9H!WFkZ#Gla1VJ!&eY#+=F|^I+9d+c%IlsI$Qyx`=dg>~G*3yR0Z<60wGvF{2 z(NI7w*qa#9*c2uF{mVSpldX>mn36%e)9WRb@8Wbd86TOQRXqnYGLiQ^6&w_xTlHqi zCsUwO&#C@&;u!wREP1iBK}mwgT8?u>b{D0OSh_C`EF8*j@9HI#egvooy;ME<%KUCe z+QqEz8R6}moCZCX${wl2WrOuKi@}F7C+ZzoP34!}C^lSyhfd!P zSL1~qtR{6Q-lLE9y!CuMh{+?gg4jQYz{Ceig2U+bI)$OgyFVZh^s*hf^IS!X4^Ft) z-@ga<+Om5j`#n_gsda`#S^jO=*Svsd_tg5~Ar&a~KJdz!tt$?)Pq*@j_-9PvfWna& z%gFRZoQmS{_m_|~jNRNulBQloB>vMLLv)2pnm$ia`5&M16(@0vd9nVK_U-s3*0Vm_ zFuD^><=B49cs7yzBpy(~rZm8X=S{tZ3u6{I;KHTS#4+@hg$}Mv>;0+k!#C$K{n*Em zhE9V)pj>KI866_qQMb_vtgrtJ7~M*cseDL;Xa|9SnWdMl%wm-=PoHfk>8}GztR*@` zffgU;*}5GvTgdIq!(8ub560iMBgm26^+KJ0q*FE2>zkED9Q0dbGJj4le&ghOIlp(; z&>wW6TVq3pn)(1}wZ@NFgbFigiejyIk%F-KLi&OXZOup8rg74DjA2vn=fc-vc*tT7 zD)w0CQBeL~tYxogS}DGa;ND)-!;i}#XisS+;3{UMNrc3fF(i7q1NE_QgVAbv`x+`{ zX1l&7J^yzmKB*lbGRXjQ+kbV+Kg(p_pU4An%eQ?C9Ig4Ib9MZjIe$D6)Um-sQM99l zEXcup3thG?ttLK+bnY)o=J-ziNEYndLZLznA61OvopYEF1gzBpex<)iV}|&FvRZm5!3nIZ+__ASEE^7 zc|CJGt36nX0@6S9F3^!j{6xh;mqpANQ(Ml4QTgRh`)!(#z?M$8wxinA)Jx10^*G1|8IXWo&YjF!}oM0T$)Q=Of_3lVBO(;Y)9kTQWV=y7Jt z-}7;9n$<3jk|rL}#4B6cI-Gtbl$OK9?1_`*DWJ(#*xvA(m(Ow*AZsJ3jJQKTxd zvJ8dhicGBknXCbA_jvxg4~a4*zsgT85{e(>0Y?wmBoUci&3N7_|Guw{OkW+&$iKnG?@} ze$&fdGeX#;>29W`ykXOPWU{4RtKkbd_k!W2y!$a%Iy^FkU!tyzfF|iNUE;1scB1}e 
zOzA>YnwM5<>F2|_$Gd~5fRsFBd)iItAgM&?BQQ{{8QidzDGDZCybeej-s6z~;5w!R+68hCwso4NlOBaqbbS5#Z9{^He3Ns8HkOb$Ndka~umiiH)2{gMMNKPl(?qCM`yti`>&FXZ%ienJgsjay)YB z=1EuGbPw}Q4hI4gR+PS>Ehisq+?eonRfGe>ypiOpLC(nNC8xY#r?|oaCa}3 z#G=GzejZe;p3xtSZR2}g?gl2^(brjooxU7ol&izlysK8cb&WT4e7MJ->bHV$EAQRR zN{pu>)qhmpbQa+uZM*rtVCGo6bdJ~WsJ?3THKKG%LGBwDS@FvEEq@BjQJZ#04i?6* z6hfM-nUS%RE$uphmHdI)V;k3~(-zTc!0muCsx2mu2j3r$JYE(_{bOfkl-E|N+2>6- znKV3h{`2Eu9;kn*UZgBKyQ^&O5>rQUT_JUHdWj_s#f)*M!iBpGQ5Zvr>XoNm*Lo%< z;4LmFuHK2DpnX-@ZKNgur0HhKPE@M@gESLzE4C37g6$(wV)-w~3#bW0V8hUCl&jToFaz630{Y8^jRy}DkHWUKZzyDb= zn?Co(puolS1Ivu;&`+~63m9G=($-gNER#I z6;D<|Nr7@sxI=lRnNO+Em?WMG7S1D)a)C3Sd%pk)Q>61zjaCaD%xL^dO_ZrNZIPCdRZ7knZ$;8tZLc`^5i6kt5Q z3sO)YWBu*++iamUQqD*)bW9V-rC{HVYrA*}kwb6anNF7LhL#*1sC;uhgrWM7yh3s@ z60<*rgqfa-@pX{7ViAup20SL-=sI9ATxI*tqF%LrnaThDg_{4K!es;+db!6J(0ZqE zemWWpgHLJ2nOc`>olVu=fgbCT@5n0~L_ zmA@llFRSsEk=mZ$d-D}4%}oSRZ?k4{p{}3NP~~w_H?WBg$Zx(6kifNZ-h%RF!?qqs zmJ8^(-oJQsb-Ibtxh-xmt_oah2rqi8)k%&^)%$&Gx~LcdS}NB_y}t!>KYEJ~l3?1O z$>$kW8=rSR*8fGow?Q0tU8p^=1!qeQW>Jc$oJu(oe^S;Q_iJ2b(zbq77mpuY_}yAQ zjtGO@SZ5<&_Jm&jckM%DIz}o#mxM$Ph0;?`I0fYdUOpskkx|KnU2P^=YErh8Rm3dPP09f ztj(K7)-toUoYpVC$9VYJZ>7n#H~~xYKKyC(tZ?+C8x=?o?fV-8Avs}+1Z$AWXG{+IA(1u?+Rkrq-cK?a(8v(uB@m-w| z=tZN6hx=;QDe(Sb>i&EpC~RM5+?5eL=A?%6ffLY|=dnLLEydnNK)5w2TgHQdP^KVwLmz=ceXPgP zCh$;~YUdLZ3V0@ln5QkZogiAbymYcnVh{uf<7An6N(01i-8N-1Y&k5q%vhOa=TtNk z%Cz?_k4PQJAx3U{2hb?XwUaHx%L;$)=zD^006oZ~T_x#VE&?a}`SgQiPIDN4`8YzI z4U;SQ=FY13dEfhcukXf7J2z_hd)HU1Enbv=vP4q0z|U~;@vuNLTs6Gte1kIW0IU~2 zr}aDO5(noH7mr^4r8r6(U(R>_Oi8C+fD}!yo?YRTjeTtWO(pA`HCx|8@#KKyj|TiA9ar3N&W5fq_?Q@t(pvI zMJRy39wUqCsJwYF76^Xd^>6v}qblMV-?Q2L6K(pf1cdidr7$8N?Up>U+joB#G-qP1 zHFZ-9|B)Bx=OT<=Cs0GOim<;DW*{zheTA1unD7p!9z_Ave2-T(qE^E@Z00rf-|F0X zvc_@cz(YIQer>R2A~&|Bt}+oQrjHXzsYInO_uzQS5e0v>hW$Py78AOXwLFGd!+c1Y z$$eOW#G;CjKTifa8paU=Cz_E25@Ls3?}@pf=X$PdJTVj6cPc0diF<_5Pj*ENe#kPp zMY%-F^LiJApg4i@7P>h&${!)Um0)TmEiYu2M1~)OvG?j7lbpM9e@hCnT|nY+Hk4m_ zbmvSInx_Uc|K7Zk_QqQ03H_5Ivx0ZxvQBU;1~JVMcJm$H<)op{CKhOW#*;GkrWElT 
z5v=wU$2KldFsw(jJ4wy}zPU;h{$Eu`Tj8p&V>0jY@PzU6a8o~Am$9)XlZ=Y`FV*2H z%hlnGrZa_HU{jjX)kKyyNJs6rlz-!sr~S?0A&rcF5spRrNd(R{Y(~#{lHBMf2>@R{ zv<%}de*N&bLt2zj3YtL+>6Zkt8G#1&87#1I4tW0#`|^jNf!-rnjuKwZM)7UC1BT5B zNVNHK%5t^S+Idcir<^h+9(V)U9?R#-+UHxrmGf+SfY)^a9;n`FW5IwTBlrOh-%nXl zD;Q(3kBhu`(@Rs&L(XfVT+|)hB2TMu9B^0%wVggGbRkcI3vpEAB1;{GZ)x8@orn7B zrJuIoYL={ujtF^}=O*SKmqj!X`cKHsTBwF5morF$B-c9XWUKZToy~@Ejdcw%5mi}x zVmtQ}3UF)e$+sJwbhH1ZaZ<*(0p0RieWV8s5Kyts@c3*8z)_Q49T`G0IaSOTdpP za=*LYIGh1!8>OUISLEdya5+EA`sWP@``6vl;1;j4Mwwuqd3QY?ArC|bun=;1oy`v9 zJf3QJJ+)CdzQ)MSD@Xm@xH8#cN5(+e7JT!yVj4n~Jsb%yPR{D0($GC}&u?&eCn~D` zGdNPf(IuyRsU|99BW52=pgMt$Un$x0S^$)9p|E6vzRNt^*Yg$Q?<{sOc21hZT_jdj zwA#a0LUG87H#!Y5XFg?q8T7=NNT>99;fDlnOGaj1*SWP-Qx_Gip(Il1v_TI&Wpm&@NY)=lh6y;3s>`&8~`7V~KVUXn1T2GLw+W;)2*|N zDlLkAr&k?x)*s}69T#5Ts<1A+Jw#{_L5vE%z9W>ml<#zBLE(4m{ui#5)1G$K-!cv- z8*HMyk0o~O@lzFeC+hyP8upes|ZhL)Ur=RR97G6)|}bK zdRSkKj=bXn?-$2X1vsSUoi+>fOni(^oqEL%a?V|e-(7!Zc^9(sU9Hm0YX$OWiRIUe zv;R5J-VT3jUb>81XCYc-dt9gc&cqwG6vh8@FIe2CQfm+WV<>xnpN56@%*eT*9E`R{ zy(~5uncW2t+1#K5i{w;5g0fgfl7Z*ym(PQbbb~qS1hdja_3I@g?=sdmRHUvF4(L{S z>;StY;)QeMQq*fcFIEdFJVNOqXQZuEy5HgfjK;%IZZ`|%uL(e0{7+1R?Q9h--uWY| z+*mTP6HF38WiY}`n1;VBfz+*7Wg=xCwjNbAhZM zpFi7gO32IBIfYHXcym|KV(_{_%2E@RSN4GQS|vTyPt@=G8cHc)7!i~!aoTWK#t&*_ z?$;!Mcz}bTIF_558r4~^s|vW??8z0gO!Ftj#nX-IFVH!l3o>#KQA z2svRJYUK*{zj;`~;ZvkQhf!NqCY{*M?e{OvUzGkpJG!|hiI9Hpy5&;IdE0O}N<7GGt_mcR*pui^uW04$iY`=#quOlyCy~%yy)<%>@ja*~3Ov2{ zc@5g#f4Jv!UCS+3mMp!7hObA%Wgt5J9JUA(;~6p0rZ7hcvbApci|CyJsL{`mfI`|X zM=GCdAj+_z0Hi-XR23Y@*U{{OV{2!}bIZ61b@Zj}Q!E@L^CvB22dZ_zveIqdp_mQ# z%A-sVAqxRG`fl$vrzc;ue*a0kKOVXFXcysTx(qQ!PlG;NFVt4aG*!qj zR#<2-+yIrQ%*1Ubd%0mV%!-oQ7-2lKuRiQQg(44xEaD<-j zBS&am(YxA<=hG+kYIE>@`qt0~iK)0&e2p@)22U@GC>Ig>gL*IdMd*aihfLHT0vKcK z*(;Ce#h$w#=PR|9RWFjv%`LFwYwE)__!G%szGE zjOUr=ZOX=;xnpf@pg-~M0N*4{b?@jrA<~aLe&s!!)I~blxD^Bm6yckk`Db<8jGg&x z0G5a|E_bYhwxcvM|9b@Jmw=nz>6wM^SwFWRhr(25x6E5@3i1MrfX?;G+po7h{G0BX zueBZz&&;zOK_;-NU4h9DQ`mo5%Mt-C>1n%nMaLgSW0{$gw8zu*KF>TG+@T$u&ES`+ 
zW53w@!%#+q&O7c-CdtY-m8Q7ke%<5oS>&)tJ;wx7{K?sc#?I_Sz2!bK`_%NWk*L+l zEQzg=9j&r+EYhdVY3Ub7TXi_x4`r*T`!%`}Y1jt#^sKH{`IGHAi8}F}SCr6=tV-}cT4o#v-ihc&+4wM#^p?|q_ zDiQfqq|#Eb2Cv7YKRxXV`-Ci~y=lavsoGN+m|Xp=agiC--3>7Q0A1UsMMB1XRh2dQ z(n{HKr;%m&zCmb#j!OHej$-qszo42y_syzjU%D`p?#N-NFyoZr%P|wf%t7WG%W!=B zpZJ`g#fCrUBhsTqE%x{PljbC2O-9(i_RVx#x9Ca?p6TpuARYezZ_(*eTnyp(ybNz; z|4kI&ESM0-6`GHjC?1QDZPun`n1nm|=Bi+|z3WZ#s`dL@l-^JD8?=Y7F#l`d|Ml-j zG=NXq_1FsU_^)JU!bQ=CGsQd`?su8{$N7=)}zXD(KZsfR1tQcRDgXv7ZtPsCcEu zUYP!=&f`XU!|UBs0Q*Ut=T-c-lXZQVr_2z9_apGzW@&Psx3Bo3XOL`I`NNw8Dyug2 z!sJGOogLoCtUQrI-osQ9?|A5#VkpgvU_;+uFOpR9OkB_VZjAOBLvLPKXyj_;8Y z$*nEIV)01JTynMZ-S}qi`PaHr%!f;{5=Y(?oWm(;JS%?*BiS#|XU5bJyL4dweZr?$;Z4pkh4#4~qf| zETrF#mX9m0_m7`uXTseUv&GY*l`M>JmIr}{Tk?-Tj=*i2Ex#<;^*prUUQr;gMxde4XIVFPM=@F|e?>f6A%*Ol z;GrN{gu@WIOOv0;vmFAgf_-xqbZTuAJo#?iB>szhi9xlU57kG}si_ctHSKC5f&moe zR525U70X7k%NG_4x{3@sAhENMcQq%8o54ct^{1l8f+M+TE6JH>IBJl9n_8T8G<5Kc1B3xi!vN43vAM zNb#Qer}_WcL1L` zu>?H)Vn}Jlpym*rX`aNteGkh*devM2J@u|YQ&4V}C=C?|EBI@Yy(=WWVB(ZM7m|8z zvyVb?d}&Dt}awLdjYp-~b`#5B-FFWT{TdaE`v zbKrBWZlI**=S{iO2BSNRWTL^NT3LWf;q%)S{W)wqlW_L%>d-(($>}^C&ynvhj%4S9 z@LO1&nnM@~N{%3N%3|_~r-JTez_(E6cU^JX5uir!iE_rM;@B!fa3#c)CC_kASsZ!q zdhK*-f5>a>hkThWiTSi`5S@^KvZD9N$B)znys_w+D?1pEZF@x%_5C|G&Ooh;B0cvA zljk?4_L7eKNJr6UO%5I<3cnB6XX()kwACIpmUKC9j4Q1DQ|V1|fwB^s?Mj{7Lk*5h zqo&-Td^vu=p^1?6yH_|Dxq ze@FQS!;f~_qvu7BmRhVI6@6$w6*xg3<{P;kzad!hmFZg1ca|W!-bbC>Y)O&G2*jTA z1``lubu+!;meAezW~Kt7Cx_Fe=^pBWjknUX8f!_ zkK=2`29Pjq^GCLih|Yo!A7C^zKfnB`pKOt@G?PZGum7ODQGPT<;Bw|}5BcK*D`hj3 zJ7bOSrnUA~ZRmo(({Uvvb7EX=<^-dWG3>&M4h1-3A>oGT$lSIKf_90pD{AC?-uvDy zN}M<#^;;j-Ge01W86F{b^R|eS8(eMV}2V|0^}Cjt?yJoY??zykMPs`{Y44wr7 zzb{^;p@6Hb!?!lT`DDcN3{!40N0kFcLpN?+u89WW?prS*S?+0QQX%S7QZd^1*&rNZ zO-+iHOT$cG?&-c{?}13fb6aC1|&c#?|)LZZ0*l&P0~VU*9&BnnKR;=8!iQ%4I|D9q+Y{OX{H_UnMb$_}Nf znk#%uH63z7Hy4|-3vvzS4ZGL!VieT`IXl4~(ApkJeRC;}Y^jFYUhhk=nvaJfYXl%V zE(fytYBuhsd3`Z8FJqM%h;QXAog5$aQI#vB8T5_}aXz$z88=z?j<6H7foK#LfbRyI zvDy*eEg^+)y(?S~xQ1@o#xXl5;K@>uQ96n1@@z4U%k{9r 
zz%d!}Ec(<|VyEythnMI)2Of39WHsAMO?ifdGf}AAycuSYo|4=jPFKgXuFf7?I&_6HCQ&+16Ij&RVOE-9!QUFHPCWOP+SbLOyw8NfUKe|TrSyMu%JmH z^p&8KAMkzEIbBrqj-D`gJcOmBM|ZP#=FSYu;cyK8j?UM^<5$e|yT;JhyGZ4HWwB|} z9o)=ZmRMH4bWtU#S?%DjoH-3JrgOYKqDic{3&2Gbs zg{jTR+Kv=aw&|#^9Sx`bDhh|%4in`Sk}0l5S&G+aGJAF#@CfLfqA@JcHnabz|%7KmEWL{qnjwG&#SMgt60vK`Gid#XLRo9$L?jgV#T1(Z8#ERn)RQeb(>2Z~)KXPIP31ypuk_Cab-B!cXavTd8er4`fO6t_Ucih+br7s z%sV`SC+3(G67KRVL~4jsN-5_>I0u+EBMBHI#qOAP4ePxYTa53&U#d4Io=Rk1+R*a7 z;NX&ojA-i#acFXmJTmh)cyJAFue(x(!*g9KMU|n#J^?QHs!9R@+MCc18zR1jNP=F9 zt9x(xcrltI!9@oI3ERY6A2)#T0Cb^b(vLcp`!X^oXR1&)bVAxQ2Yk)#KpdbtFd~N_ z-^WAbdi8m_lz+Z7=JnxCM^QF7ue^TZc6?#rPH>^L4zP9cCr$Kl9f>p+-pF>t3l(PG z-@J>{eORST;Y^PrjgEgmY$|qvjkTtkccN((t@TAM!PxcZ{H?!G`rdxEZ1DJ{(PC~Y z=#!16`W*H7B+Ji1E+|uhnQV7k9(5elvt8B+~If!)!)a@w_+(? zgD%`;2707z5#iBA=D(>E6s*!v??7j+)^~JGpf0=&&eQs0JsqL0(L^r1${)2_0?6Lb zfV)Z8_C~9SoByekUcos1NcT;^G@b6-#7L?v9U)(OnPtL+IADL{U)f$FaTM;jGgJP^ zb26RymWvf3y>DkjFYrb#UtW-C2p@K>T?K+>cThaJP(K|bwCHgJji;Sc@{ zOG57zLG|ql#0sL`*p`;4jryDq+|rU;;St;_)gk*_Y(nu5EhSk65c^y4jtzplzaPf- zpd0^fP04Da43!`>7bR-_4%g1E?%!?v$!vx$Ivt&=j{FkqH}o8X{!mjeY3R{$I`)7( zwlNQj+*;7v4RBR;93*F=vN@0Gyr2BD`FJ5rC91{=N(QKUI=NH#XlU{R%aqsDxKyPJ z+i=Yb2;_D0O=@2n1sM`!D;$V^X!o1Q`H~HiAqH4b;x9aV$B<005-CRmuxIiafJm$r zEFy6C1^8G~no0W( zAgV-_Maol_~+kjJAR9l7n!RQ`#YsP2#dW{Yy(JwqKADZ`Xl{&|f#NWP^F z9XH!1$apmPW`iTWD7D~89fM^0RMFuYY!gL`U&``y2 zHmE#$6*I++O=U~(ucM`mTJ`A>;FC9gq-mLfb8TD*;Z_-hw(n`!IiWO_QphuKx_#Al zU$D7NaWy*fY@nlWhhf}mFU=dC>9tqJ0(Ge_4yUwOLgtx2=`)?Lp+SIrLF=rColV9Q z522qMs5c?@4h;?(v2S5b@Ap^=WXSkyD5MU1`>Qu!{g@l5emz4vt}^3jm{M5CxY0zb z&;64yc$gE2b)ut#8yv?TQ_&V8R=?8-Yw54698P*}jq0Kh|1;M|7L{Wvqcq^11lC^L zB|_@ING^IE&F+baf^}#WOnz{6amikJzW`}!oIh~pI68j32LMqLQea}-H%xv<{pY&D+-#0lp=;CfQ-1QtZsJiMhhOa)W zdaO*V$jVA6rN>!(snrVE*5?Fu(eT=k`{8`&9ldTqe8X{`G~EabNyn&!yM&(cOP}iY z$!(Fezf5(|(2c%9cr&p`yw>O8Y}8IMVm)eb;5Uavi@FCVT`er8a+5af^cJ8`zM9L2 zMP))Z4-ft8u&6JqUngIy@j$QjQN2{V6Lp`#J9k?V?1Q02ROBPlQJC6teQ~KopTyeX zEKpz9Ccpl9qlMmz5qJ%gdwwM89$cNyJXHcf 
zvxf#CAlr2leJ0r)J{s+-`+b`fClu!fdKn3I9>IhUtzA`7A)}gVx^b>5Hh*lhKp;1Y zH~7(j<5|(~-b3-Y45D?P3|b~ygA7P&jujEaDJJhTvw)?~ zjsAv0U*lV~)Xn9~@h|{s?z$bCm;XW1>Uos(5k$HyjHev`PnwDznpZ@HdzMNx+qr=Q;tp5Fi4YU?sbVdMx%*yowRq*?&o05={XXdUcXa@K6_MJzL3od=O8i^a^j45$ zyLk|JqH(zQQ35zQecwoz9H*Nfj$kf!c&;N;%IU6S%Df{5wx{LZGOvgwZ1uRo!v+Zc zmsaa}D^FIb=;4MeLoxdb{!KYiY0=%f&o1q|Tu>Fo6#>fxG*xceC3cpfEo-Wd@v30h zl-8};2PC21*eXFmR^UGN0ZSnLR-AOLvsYXm1r;F+YXdn{t)vaHn0SX4r;-eQvFw8~wo~pd6ulv^ zRsVAO5J+T)pVk2Ehc&%a;L5~IZ2ZccKns&7!>ybNE1BI?{ZCVK3mYAE&;K$xb^?9Qjj6scM(xT)1S|C zb|Xm5m{TKKhnS&Dw<7o7lhN`~n1m(M(7`Ww9}+cp=)h)X5x=%I0KKESHWP^RCB&U6 zLx##99_fu}Y%;dfS~WtHmV!>x(tspqMDw&C$&b%u)CW7}>p||eI}Qma@t=G7jZ~!x zy*EX5suJbA4J2(tYgrtY{p8#gz3IAQt^M(R!AwP|6S+KZ0c^d0m*3wis1FC)+VkFf z&6#vF`xn5AUB4b?8__2&u=VeZ5fFu-r}A45e9-w?RZo9h`lcM80r}421l{9vv>)Z(Hgx+T2<*(+il9r{ z6GJ?o6J=nVEm8e#k@{LW4wjr;7fT)7D&5yahZ(!{8GmHyJ}C?R{JfS_y|ol>xFjzG zfwvaW4nAxyIUfb%gi)M2oCgxLF~2(1T?Zw((yU@dOimQZy*qbs`~9o+195K|2@fbB zc2>#t6XSwpb`M&j` zIPrWkA(0?keiz>k0D^)-{q85K2#}z4TG~ugw;%-j!(Q(i-ty2Ts+CZ-lkoVioU=#^ zwf+?1Qg>hG!`Jis-pz|y0mZx^ubiUvsy3$VHg&R1Q&q+o0-~AjWB$Tpz({M}inPp0 z7E)zN_j}V|Q6*Prd&#E#tmNS+cgY@O560?b@JW6+iCzsr4;W7zial;)ZJqWY3*{HQ?Hykx6QQ@Fu zpo6x32roS3zH*Ef@&Xh}f`q)OY`pTMl>}1|VIO{#$@SJ_FwcYI88<~w^@9Yv8Tf&m zDl-TL)t8G>@Zqjl_cLH+%hgwozCY0Tw`?~P$07`}!E8ohL&0i(ha*VU_MjcfZU4Ty zei}!Y%&47YHYN8n78p_{+S0s4J@|wt?=#Zu2MK#mOTTm@N5K@r2Vs9GLLjc_sHd+k zi^{V^u>YH5lp9-2f~8}VLSHi)4YT%0=G>#T4JIoc1y!xJI^$c*ovZfXbDBqYE?Xw; z)Y9ei;uriv&=1QYOb-w>i$bTZWLOMD9ADDXg4-6w$cb#HD}8C(Uv!R*TgMIG7@sUA zr+h|H?l{#6!VVpcK-5S4(DqC_)mZT-&Cl+qMh;|4RRkf3FqPJrpvXfxcPm=4VDVrXx?O=2W6V||aw8m3m z85;{HnDJK+_$Uyl121r<-ndg6B8o5 zpNo}(yQ68F`?C=)2c)~c7Zb`AkB7=`*I&hKCbkS!eLIRt`C5)C5k&x6{Da^-A+({i z{6#I>z8b^_hmgx5h6_~y*bnuFdNUcPF2(s=&ohx*u7D(QWb+EqG;77N5yDCDkR%9R zt=hu=aY-tCIu%NlefKQ~Ov|l~;I%3rH{^#8$xDp5q4@m}lFKtqjz;?T!yeM(6E~~V zC5(+eW~@*6xs(;cpEE8PX}fmf!D>q>N0YpEou<7KYX}Z5$VIwv4L1l46UwC~ z=X0&fA2=MPRwZU$EAWo{lJczi-@wpQ{)qIOqhQ+zDG^g^uITOI4`;~0Hf8n)_W}Gj 
z%`ae99R%yA+Z`J)I__p~ThTW(-}@xKeKl@^5E)@uMm*2+v;d1#1nx|qN)k+*n$qV9 z(v#C0%&KYdssItq_7Ku6%ssv5xTJ(Kz*P7io�oZP%!}7y``yDY3s??a+BUanitz z?SdZuayqXw;EBkZs=qqQ&;CA)QYe6R8a9)Dst7AH^C3THBD!BVj)Q3pdFe&(>)UhG zjmJLECbN6Cdi~H6Mya7a)dEsA=fcF*#LIzm)J&{g;KZO|REfNT`UO8auqhjQWDq)^ z-YTYc#Gx(LkR8p1f2H2iA|p`9gFYV4C}z^m6wBcqFJW(F^pE?zLW6ao==JXn_0-m* zuAr3a7!0AGB3xxos*pFTlb9yjJX32s&nOLkIS=v$i{nT^uCaPW9@c;$tJ!vX&LsxD zMZL|bAu#5g3#)8M88_76>n?{ z%0|pK7w@a9$y6%*{y|A=#n?5uI?j6q*Ycb%Q1KaVXcGuaY=7Lh6dfXf5P{;UcEbC0 z^xW%?eJ&eS`H!l6f0j@=Js!6a-ro!*#v@-nkSzCRS#_xscvA#VAzj9eYLTeCA#GAS z;@vRuwhpo-;!fz~68%m|>84j{GP3FmvTW{0`t3&;G3d6KsbjSINC1pHyTfyoO})gj z%{1yV4WQbtgRMc|B(z2c)JS2vzYvScqE9U_aFy7 zGCit7jwkr#*OSd+Y=6xZTVmz&s#v?pK60l*j*c$}b6MLftViYurp>;8-<;jKf7Q|! zDzQs!cXuY_J#^J#+jV709WVCOxsyk`pELIFgSO0tN9JP@t!K$ypF&xmw3g?9&M$&s zT^ia;IoB98Z`rL<=>N6YL#V)H3C<TNA4fokEa(v>Y@Ip ztB9|o_Bab`|1#MtD{&To7QOosif>SSvvj&A-n-(Z zilk+iapH+49evg8`(R<~99O|w)Fn0v-}5p#u?yUCl^}QvSec|6*}6}_@n?R;i3fQ`f2$=@yRQ=JuGNAbj{U1$V9Ta8%y}fjIODrkf zARW?;k|HGy0@A%KDIn5~5+dCV4~=w}ba%tju;lOd`_9WB%rLXV-f=$XT&J$<=;QIu zIrKNnD`9%BjJR1}7Y%d|$&_Lu`lLJB9Voy@(S~djvdLFdGM5{T7lK9H|P%?$!r$FBP>z@ zC4=j~@}dHq`v4q1q@IB{nb5n-aIxucJD$WiSWpkgPhos3B0C5LR^Oc<8DTFrFSx*9 z#TmS#UH)p;N&lda(gOhAG#R&$*;3qoUJ@`iELP_g@zrBx(m@az?>LTVh!ZM^g4dVP zD6%_5(;BWP?92BmtublIB-8=;xF~3`&Aq`pdHSUG16kyOd{k|W#Jhx5mM-jt7%<`A_7rlbO+yl-r}r7FLcnlY|=J-;7) z>~$Gayc%Af9=C%y{8j5{lcMo9g2!m)=LA}>o*&Fx$n=LgXAD&L`&u}K4>6S{3+fA&IJv)YGE-B4j<2+L+@A15+9I>dkFM51kCtKoH z^yhn%iih!2;}~qIu)ZZ&A=X!i>NpyMY(fkO%5l6zh26#3=ZPB_-q#u<=p(&8qDa7+ zSQ9UkI+)-2%~T&$F8T7<#v>6}KA3n+mc}589m++(iSZp`V(`kg!~CJ46yG~Afsazk zF8W5UqCB*k&P78}j-$WAmvu|^hu9DMJqzqHw0%zRLbsTJq6$_tOJ1M{nmuO$5NJ7% zMI9lD(PltDXb^x}q_NRKBIh;%l_~%kxzqEo@2v{`MUGEf8df~O#K@18qI(>!3s&PQ zz__p4Iv`fvkIA^)Uj`!82v$}zNmu$Ljp6Tij>o@Ed`@&RQ%FzAKJ>aqq`4+q92Z;- zSlmOeq#k220!>0Jr0#A`VY+s^oY{xE#YmGD--K;|ab}nP7#3i(*YmJDJ@vZ){8P{v zv{|}4@k0oTCorfIn2`pZ)h1&SynC#!vgvzi z7jM0@3Rf9nmP{K}MW9iR6NAN;Qq0UkC03Cc=lUeS6ULD!B{n?x#G3XKhu?+*5b5IZ 
z6>?Eqw2)L#(Cx?6fo!rO{?gwV@lq8+EGfEdGu;#Tz{{=VvigP)%%wEZKY<|ghU4E_ zI>>c%A)ECO>+j>v?^R~Y0|hT;#`wDd(B3(PKZm4NdFB@jE4>oK%v~u$_br@Q4PX=S zAB32e4OZ~8+*|KT5C4{{qoOHQ3D$4F^q|gYzs_{Br8AeyKwo$tyZPXLMgGk^9yQ>Q z9a*}v#ub<_5IQ<^5(n7jt1H|>{=V@Ad;U+_Li1}nk4_UXs6F;k0kUaSF&kgL2m0_(Wb4gu2K+lQb zi}ip(4QBqT!H*qki5xPIRH<`BzF~x&;#c!zL=z%7AzsL3j z1)J&$#ttARu>itTXR$i|;Z>?tsZ`<+`j7*f0K~dMB3iE)eNXqkiwbTn=hOd_3Ew#K z5ISCph@vVN@8QE036N5zazV{0QehC<7WM^psc#oW&hGz<$w6+dicjIGvs< zOP2HKQtPkpS9fe|&ibfsH$042S}Vu!k?wTZ{S2Cav*zA7=vy|0gL9QKxqHy%^M zB+wG}OA%jfYYSB+-IwqV%YL!ew@JPehOyJpuSCSwRfA*3(;NKH{GSt37cq(*RoJl6 zFYv$PzFK#{?WF0{7_a~|79RT%R-UV*OE0HX0xeGKQB+mA$P${-k z&?;|cB1shZ*4AC+Y8m%X9ii)%ic$-HGuJpQY_Hd!Z%{y=*H+%Av8QtwkM0lph>lqN zq6b_`tv}tb@?=`Sxj%nG6P!kI9 z6ntTj`A%?1hae-*Bu!&SK&t{l=0pr#iE~7#iFPpfG6Vn1E&)$k&0i6NYT0xk{vuld zRF^VulR-@zM?nn-7o}<$u{ZY=)Ac0GR9Ib!_PL5DE1FdSe<-K}Y1Su{V@)1+=z(?1 zckfaG%F%<>^0B|&O(<4{oQ2jP`MF71(5K68#Wq;me*zUnys;GdVL5v*Sw+i7KxaI|zt}7U`<+t`ReV?RU$Jb_ zTNXIKP;o($LRHh-x?uvl;!>00F-5ln5>OjDcl0(>ZD}LY$1E3O?{>Umec`6CLgu+M zjG)*5z78KCtk};t<(wQ5``o}%^mnmUCuHnhgs>ax1?JsY4PMGzm0F64)p5JKZbqwu7z+G?*o2f7Uobdy-IeoE*FOU{%oDD zt)Pc7#bPSKVzE0D(A zQfp#h7)4^D^}Jz%-@)DsBQP!uWYD}yoWDWx6hT;#Pj_t;i-qsWk6Ab!E#s93h11U@ z8~#R`O;u{4Xt;rvFMA+XmJt2VR96iu6G>Py?9JJ3nOU6{r zt<-#mW4dzat8c7vZYzpp)r=~vYC}6&jdOUg$?@>%)l!1larlAg>BYOd?s*r2zR*^! z30`_U{{kf@L1?z{%2IIX-`k5Ux<5aD(3j5){1jC&|0KPFib07QX!2FNpyOPV{bOL+ za!Ox7<+qI30kYFF2j5lL8P9BWgNWyYwgt0xh1T@tQt< zWg|7k3j)>s`RT{|T|XaJ3oHgiY`(Txuzk75XJe1c*M;bT(42}4yVdjMe%v*MyL#s? 
z)W0jgNPx)ao933y3P`n!H=hY+j|H@8u-BS}m3`^K6_M}aOLT4doOvYXDp@wnF|WPH zfM~MXSuG357_<>iR1sF$n6lU+_#KLv{ARj(rUM09ma#J5Z_bt0@p2dcmXpLQioa%S zm+T+*F8QSw5A?%+{uCym_WoC{GV|^nXcPmZ3J7z#zY_&zenP16;eLdk-G+7uStz1J zmn=;xN?hQN<4Ci8#@P$$4Ue(GCs+K5&w|GIi^ns}83PFiAHd1rcPs0(lD`|FjS50D z9Jklt@RkChL3h0@Dq!CIB;??$*U7U;_G!pO2LFc$b7KkII?3n@>Q@C^g< z5-^|rE4JyvID0k;>4NOVqu$N4jKgN<=ZtSB#s0+vNB6MF&7U*=v8fF+Axm4MiF7hd z0iOn+Sfi))PnYGUOIn*5`@6A*v)Be%Sk29c2T5jp_G$H{UgqeMFZ1Y=Uhja2(lVO1P3M_2 z^^xJtTJv)6Fu%Lx59c@akU$EO|^Rz^uBL3$I&!r;jX3P7MOi$qmA2pycXpkPzgfIv`_qye_zfl|#UK z|A}9?vF!707Y&?h>1Vv8O-X!R{p;&hIu2VA1vzi673AdbE`8ch)W^&}>_$CON*Nic zGWt{Udz3zWNMSyym|nQ8Is}K(q9O_g5Wa(ji;I0aWfh;V7Mwzdzs#Wo=mrjP%Le ziI3abWB1Az``U*PW8~(5fi6?7OKdaNj#q- zOmGX_=B__UkatY=KJ6#ES#N9mt@JCTuc86wZVLhczyCm2y>tAD)8hk4=8dyQL-qcf z(K1NwkDXqg?*?!#ye%6z_Th2y<)mj+4_6}6ZT_$tr*87EBsuo4m^UHa-?%AnosT~OIROM2ci`>6-p&I#E{*l@!#qP4DeZTmu7wWk zKC(W~PY$}f2^kqdJx=`_4%XJ-;`}=vuG|UZOZ#BbN)(kVOlpTAje*Xhdt5e7Et+$s zB3{KDHPAL{Htg}o^l(`T<;E0yR?@~LT|Wb3R**W2r1`LxIX&=uamcY{sT|tzrcn7kzp+>g$mj03x zL|k*Gw|2_kw!X4U>dsm=C#JJEPPbA#+{ZkRWZW2p(OcRJP9ze^647ipI6DJf*H0Y` zT?Vgd9y8%dio;$JDKb5JAkW-DI4?Euv8Xu?`7SBytWVg{feercUhYOVR}zjI zW7;~tQ48gP4CKoG9ipZnxqleA4Pl5jco!uXDEkI>-sgodYI;+TZjgnMn(L~zob@8n zAO8z)ECWwl3!>33$~`TjEk14l*kTosyEubvxxT`uxl^wQ)60_s^^))YyW(8izi#7W z&&Hq-MN&Q#WBy~4adS&OowWX6_om*|rtWI*yF=ea44*P3A{%62N^!bPk;!8VRu~>f zupYN#C^^cee|itB#zFRl@o3KaAWgTA5INQtlfSJ}+FFryhp%4RR9{KFW^KeH`L}%q zqT=zkXwn|{sq4}h;4D6N){;WYWVaE~W^w)RN_gL5SB`UU|L?;5%Fbij!mz1GkRNS|5AhEla{M7V{u*R>kz|^!)Rp~d%qt|zaH3m=; z!`Hc|kDW*GT6mG9GhQjjXcY>WiPtP<-VpMVrRsFvM2cbG97RsGfh>ObG{-RF+`AdVte-)6d)urS;x>8xR;L#ksAymYA$|n* zF4qJ3gf*@v*ii&8srIkA8dtIQD+p=J%?|uL+Xe7QnHpiW4CnEOk30LL zr%n7g|Ac=)RLs?^EfEMnrrZda3=5`*x z>9+WvqNvrCBU9w}5V_kgsEY{cqDKe*j`IX&eH!T0?0k{xqc;*6ZIP*wu8;U=$!;0_ z$3CWFwDy;OD%^Crg^~Jz#gybb=X08=I)ohr(SP|Tf{nLKkzr6@ta6)TcQOnvDxEfW zbje{sWiUP@#=P~XDX}qb#}_8PYC424x!ZQ;*XZeGlhUvi8;-brb!irQ&-#9Xk}`&d zu4>$DqgtGZtNean%;V&TORM7IA0O_hXnqe#fEg<70!DF<^iS6U^lKxZtJT7{nqz;z 
zn`Y&i_Lm_nJU!CeNA^Lt=M9(a2a~UaIFZ!j?$#najRr%2c$rar1726z$JA8$8i--S z6qU|O$x(kT84_Dj>3k1TJM?IQ`R zFQu0R+zW;+lyS#Z+2P4&&MuNF0K>Lp)A!W=r#6L5yfYgVE#Ps0CY-Ba^Spx2l#GC8 zmUIxeoudh``tw-`Ij}9vt8Vu8wyVFjR1gb$b*Z2zBaTRu#P*X;(Vt?)o%hNi;W}pN z(%A$o)da|9Kb&`GTiRD!Psh+WW5`iEKv#mm%MPSI8{u`rzFi-d|KU6FSG7i17Y0-y z%13^{GQRw??-Jhcxq=g8#>unyDY$VbTDZBAA4m%_>vwT>y$4CfX!&8G!In3z_qeIZ z(Hd|8o{H0CB0e5ME}`uY%&S3};M9JZ@SaPaA1heF&3V#K&w5|J^4bgw3j1iAsl+(6QdHh^r~V4;NQ@8vjnDIgBrTzQKf z9~8A_oOW!pQ$B<|Q5mY}ho{}8cEUNC z{%B&#Vt<3Uvl`BrpjYHOI0W5$cjEl8)E1U*{>;LiFG#hXCA#4t7r^&}=qV&#X%JK{W7u%0 zh6FVSh9RMhXa;A=hYi;XtJ^!43VxHhE1yxV#57}D0~pt~vb~+CD%aope3%*_$T7v6 z>gGbTsV;F0HxwVi2M&lO~Kd&s>^3RpZ5Uw z+MVxfH$bKK-I8&@IKXh7ymyRk*_0wqB9U=D^q*_+!bb@fz1(Gwon1!`mY{Ne2b9I9 zv!vbK%y|lv=3W`A3pLviDoLU*VZsHWB5Sc!?|bG9LEqz7+6&Sed?Jo z1L(x_KNxB!RCFAa2BDb7^XOyT<1NfX+6siK5|>;UPuBM02X{}8Hug(}w+|R`^4}K2 z(E8=KKA}0gzy5g|HeKwtu=0tn>-Olgxw!?a(dS-s%cHCw5Gkk%;QRx1Mq0%Bh-#0{ zpV!dXw%$+*wlzHLF1@wjvFU`4`_5=(l+|oP3~$6ws6mdqxFmctsP^5t1b3>rcTEuZvR(SIeHNg+ z(9fW6WKJOrZV!4<_$2`Xo)0ZSH>%734q))64f^Qc722pd_#HMYrwYJJo1&zY9*{e$xlh36;W#OGvNZZu=|v( zAj6E$T7~!KpBt3hML&Ybqm>vAuNGC@qPitQmrR~8 zIVRC+wj5Tp%7?~ux=0j6sSfHoPD3LsiUv-@9)$^F+PB(9%Vf0mqY9sw{UDD#fkyY4 zqLk-6v46zoy#!E1gy;Vq8OhESM@Y2$$PUTV%tb{O_7qWDwjF^jHA2gJw<=?kp&8>8vyL0rh&zJA*=(i>a|8 zSj89JxO<4D*6HSsOF$+4?g93gvTV5cD=pmS)P6Yb701Lc(Eon^H@^*X9%6>FikGX2 z)7sk0PkFs`0Q?9XMmS^zmIv5*Ki;_s{7ld1kGsYlQt+=T1ETJeT!&MC?W`UwmLp(o z7R7QKG9Q<(oxcCnL?6xnX%gV*hlOgG`ZXoOj8cd6s==~mBZ(T8TC~oc|2&H&qD?;= zcASRl5Td0ASL>d~p_nu8yb@2*h`DptA$bfcrE)b1>Dch#pC#wIRt|Rahq&JXW1&vC z9edMx1T{Wa+>YQW0F&07$`-m+#5@fgkpT=J|J?U$HL0xw>G^{~@ixi}ycgx@*xZC4cn6M#q^rT+}m-ueoh#OQ4Ge@FeoAY4!ljY9Bu^KTag>F1XF@Ad<+sY#buRXx_|^`b0!Esh~oDKbX$ z*ecrb1$eCYHeyvOI;7gC$6M-?;q$Kg*)BgiFY92j*lP+39edV3DTyd=WM-R3nBEUp zQaxrRPK`XHf`DLXOB65tkP`y`s!R5>tSo3&Xb5G^^;18SpHI{z2ZzSPks->&)!T73k&Au;S zydm2qC0mhDMK+@%b3G9B&#$3@3>MzWdS2!Adf0CCRrVcrUA@vCG>l93p!*Mrd(AyY|v)h<-^_J6ku?bpX0SjOR$X;{5`MG_++ 
z&0u=}lMywn$om+3XK;ep$tJK~}gx}6d8C0s3SclEK*gVvz9g2BiTslhzmf#Nz&O$N!UEd@*ybo@?q*8D#+G@yxChsW5HmBB%99O1GyB3(kJkBWDj-<@zxA@$Ulri}L0#0dj& z3RBM3!hh&Qkd=aw-xAsj1)6*woPKCEm?<(T;j87lEH62QG5?O+A1W&=JIuehm4vs$ zmR`IHFRD{J>gccSozmC_P>S2meyB~&Nn!2eb{r0j2J8=wcHj|7VGn2FG0f~)Xnijc z+B^-GI&LFgigLqd5E{O`FW4ZT3X9FVZ4R^`Ke|4F@cy9~=YOnFJ0`z4_H({wjHMg> znQ?WQaaF6cenjb-YV_C}DWDH&x}ek*qbH4d!$#XlK;r1Ms1k9Js&~R<@aLjLEjVRI zvu$0qk;Ihw@y>)%^HgDK5s+#?)u`gj(1|G>8IoR1yj;e_1m|D$8KqkLqZ(*FXO?T` zuAjyG8k1Tkoc*T<ms-Sbw^5i&Wl(?PRtR`K{9r(Mkj#q<)0bqTZ*)QQWO|LD6?-zbpJyhRFY$$Zd zeB4o#DhrJsUUIZsgC%IF)Y8aS=)t#F$m%B3e)?&jJ#mRZEG5FK*}%AhDH7?Xf(IcW zM7lswxK^S5!u~4U!3H|Ae~MTDx)nYoH=xbv5#WyW`^|3jx=+5;0KO?OnRZ}RTBJh=xp z%5z23lNRQqKiA30h#!2Wlk6Jq{lPPP;eYr2?z8{ZSjEm*c;LxJ{X=ZPf1a8jcX$1( zIg+_0Q-;2#<^f9wa;ofv!nm}fL|JXr9qYrFP7#o^7qN>Ov@FOy9(B_T<6gMKTV^UN zebm>pY*MQQ+B0m~hi|PKhQ#LE_o-8i;b}u>@uy~AolN(Kv%=<5=^!kbp`IH8{5cE? zAcUBVC%T?P2h}-bf_OYfmdOp@6DtGeF<~&%Fy4ripf*O+=I~>rjpB&!nS*Wk$%-$r zvdwU7r1Lt0RpSsD-8e6?v$KIF6+i3~8ynKWT-K<02Q|_gr&jV%ch!P?i=1D|@>Tn|ast$p`iU!A;7t6w_dkNc zO3W(2@TlltNy`wCirOJY)X-V(eK#)cy7s%7#iLCu3ccdYJU>>K8dx;kofoAxXrUg= zt)#zvDP*>?YRE{ffhDM`mqL`89f7~bmydpi>A|C5vI`yBiBNw760N~YLa<)Dgy!sr zE34E03N11~V(7G;X0tD}5mOGe0YxVH43-N#7na6s!=+k&a2z7J6idP%{r(3(BKwCs z466O#zADfyucsKiZWLSmYSj_^(NOi_*SPo8K2w=y8`p4wNNmnX;$~fc+)j16Nw%fb zDune#m0v`e}^lQ-aA_TFXkKW8Yf7K;<|tm+MsH zlXRdYlnNA;w;_9FU4pyu#GUVYu=5Qi?sNC@NX@yHc+}97Cc^@hYLGbqmJZVDzBf`;egqagQ>@gmy;Ix=z<9W(W}Yc0IZ;mwJ$e-PP`tV}-Y z&EE5|BYmwK+S9SN`Zc0DjA@$fSR2x)d2cW@&z5cHuVSTxs&yh(-r7;=V`7R}9>|e# z(S(fEf)Ch|G&|AaZMTNF_d5_pUN)5lG1mQqW5CqUAIlk652~eFP0m*42-|n^{=5bE zlT)%}LK}SS)Mt7%KZtxdibST6k&uv(MN9{ZESmu@8 zFvHR{lOnyN3r>ES_-*K(kG?Q(^z_~(l$k!q3bs5>!eh##9*@UcZmb-fdMcUht$v;9 z!GEj$;)|(t*@~o5r6wR5ahQsN`j*6B~y!R)o6GooaA+Umf7b z9&TfuU(AbWHh}xxzRBf)KvaK~(dyT-^d$EKW7_WGLDnCRwz8G}tLDmF0{0{2{f*H- zc8q_ed!kg9a>Y&;g6CWEG>Nx0Q-GM8YAzVCvo@4x44;XtYor%}{%Op$XnQH7rc`Qw zt`d8E>%$JV+dg@SZ>)*4uFdFww_CqmoV3EmypC3%)+DJ8h2tCEclyhPd@>4FaU5-$8AyERlCQsMgbq;Kw=d 
zo6GVL?v6z=rK!gTMY}oJs-G0Zd(^w8tFN)c{vlkTqKz(bDx1lFI5aNWFCOyPXl(Gp zmq6*{;qcYc&GFL~pP>%Fo@Cf^@&mcb(rprzYzgTWFFUP<$=Vn-(eXok10T3dwT1FVf8O^lyWTuIB_@49_EmNhTp{`FSG0Sc3FY<+syjk8> zv|gpHQ98T4#8Yfi=3u*z9xHmNIVIllciV#GTmbK2l($=v%#7?w}az3sC}Gw_tMIyY_q9q?2ow z>u`Sx&OVc_`!>py_Z0I6#$88C27-TXg=gCFydjIEThkj1FkWIEih)Ho(|tejUEbr5 z$+uosBQUr0{y4j}Cr8zLgID5VZfg|-K5k;?8QhnpMjNq{MS%5mj#WL%Om#>&$NPLb#;x08 z+;>nu_S|?XbRZ_YLqM>Wez?5N_di0iGk(cAK}xXw!A2*xj| z56B%Ewa=pmBkaXMk;Xs2wnz~z48LLy4Bv?mL(@~Q(UsisG>A2uE1_3t8}i5Idu~@O zW7)DPP67ZMIqoCh=%7#6eUSX7rxi_i95+$-_lhij909j}h|j4QiQ7+yZUSpG5t1~7 zk96pBFy;T+Sgn41V9L8Siudu~STsK1s#N)uVqQWA&1L;7ZKJJUIu7S+q#6Y3KsR^` z5OO;gHh8&;C@+^xUt;md5kK2wPxYCQL)6~D5}Ro#5OL+}hf;wcv>DCO61hv_oF=cs ztWY*w+VTCfEK-C`s`STyF3J6LIjKCmog~0ab5{G!*6T5AF0`?%EY?s)iMcV)$$co@ zZbe1$>HNs4UO!^2E)o1*TE+VRu;&%uRoD*Y-V=Gp1@^g<+{bdC6puU+Hnof7|{*@jjW`H z5&EJZb<&e#k!1QR-q%cp#*K{Ev+o|)YGc6f`M)*1>EWmAsT}_%)FFP}P6zGf4-jTB z5I<0!tGh2Zd6OK+sX5ybS=qn%U-qN5m4weIB@L#J?Lu-;!pHjBEa8}XuHi3*{?nz5 z0EQ={_4-=a34rH*o(-7Wi$p`N$!Kpb>S4JS8Lc4T-?YotZe?vG=60ZMn&0Sm&y@U3 zboSkRiW^GX&q^}+3oVn%+WmxNsC7R;?*cXv5LX{^$DX5GkzRf}?`5e>vzSDV zx@NUDlgl#?3K&&NZ4c*o72tyD|4RJ>vr3ebd^Ft^EyECh1{ep7obSZ%)0UCcIBQIf zPgkM^8B)`kuRPZy(PC{pjudYvF_zF=zS}K-3&%=?O8>sEeAb@IyqlBQ$;*`Bi=#h4 z5b(X9QP`zVPv)_ycW{()O53z9#AmQce{;(l!W}nZ86R#M>S^BH6RDInyI6M-8XLvj zYe$o?Kn@($He%_M^J}x7DA6tD zK?nsOZl4|PR=jQGbj`-My(f#TOIPqHnvJ#4&rwgw8)Dn=sNhC}baLFiHI0h5J(WTy z9EZ;rk=`~xH`Rp!U>BABva=SG`rQU%=O}lv3iiya<#fG-dfkO!iP8;DO*O$P@`l1K zwfjW$DlAxpA(oB=v2$d8Ie$LD6c^bN)mEq5+k<637I$cyAOO!xXm+wvBOsT4G}-R1 z_)i&-ermHUf2iG%)%>rV;16wWo_{_Do%|ra%rT2W>|B0JJ!^UFD#18%d{I58Nj+Dx zO;)z|T3ox;U;24RKi8cW5<90}l7!i?fhSGbOAp2+dD2=cASY41YtU%!{SU~u%)(8r zt!Vq-T4VYGhE%9Kb=%a@{>b+6;=X!qi}=2!h|`TslAj4TU7UovL z)kG3Cg*XH&$^+y`TpozBf}DxusJq<9LrkWmn%^?t8#D7f9(KQLn575Or-jG}lY;ux zTD>E2$`k;E=#32zFYBPH93yRSmO)>!A?12WRm0p0j7QFHefA`7dd6HO8;^&-?V*DQ z!hy@kfud|n7ncyB=*mnv=<6_9+O!Z{em#cQ_&tOfdrCg=v~NXw^Kv4{NtNkmeV57` zZ$BOm`I#f2>Av$ks`Qc4Wi4*(=2WQ1pwuJXI#0J9+n_tVRz>;kP#GlZ`iblA!DtAU 
zlxnv0g!i-P&+YtW{rg^igdYBYImF^I^=<-ly?^gy%HLhT+3SnS?WzqcchezfVz5#` zw{4U^_QDs~?n4;9`Y|MHw~6qO(=Vp@dJ^xX2EN;oU?enm*Uxp5j(8LW}?F+t2{6XF8fexPnqRVO%)AxPiVLFxE zK0QS<9c{|(tzfr(5%+$pVi1+zY-(6>uy+v1f&nmO#>LgD?o&^+2Pl<-oy-8a zdtRAcL8hRl(UsajgyoLbCqqK^dLV6n-Zr626L;JOWi28zs#Z%vkC5J)y(p#rx)x(!<7Da zee`z$VwCE*0%E6>gR{W4t&3c zOTyX0h5oaX%{KeqZu|Y7P^U=!?8Hw#OwdzbZR>AiX%&s-_3xS;OXYkF47=?XSxonc56G!DzrOkVaHP-{7aS!o2CBENFU)PZ$ILnKrAMNP7sb-Z! zLGtv#KasYPT=RW#{N_G%Ebxa`^(8WANj~=>G|#_+ZVBgvi0`v0Yj)}V^6=(3x9Zm7 z&)41~qwaUY5jR+N9DWzkOw}au-=)9wVs{}2l%PNL?N9Phr>U3;Qeu&Z}`oaEDkf?av zPwWWDo35G0^(60>f@=WS`MT(-_o3!F{AIM^tne_m^aKZcXwZUnCIpA+}YQS84RX+8C;<05y z`2`cWTlj&-RX@@q0#ht(r2K-lVeo&roMVf+yS;@dhHkjMzhpZa@+;}#9<_lrcc|`` z9Qu5F=vh~KxfZij*z|~LwV{ZW##6{(I!exccJfH$+mzf<#};uAVNkMK8nJIg|4RH2 zt6+vN!d*k*8=|q|17YA*-37rkIt>dLNQw~!+#R0pG>=F23}JX95AT&sW1^Or^mw%D zT}b&&m7I)s9!ZK#{-duSu2kNcMtSSING2D4Hj_Rr^#s+=TAs5~{8;d@2LA4%afrE0 zX^BnBJ~_zWi&;k!_YO93QUHK$vzF)Py0w10Vr+SQ)hn?I2eiHocLX!BghJkLw z9LV{XISz)FWeEAgl4PPoGPs7%@q2aRQ2!Y9=>t@)+po2@c|;E`uw(^^}mLdkeiJS4FqL^3Ho zFkOk5i;3pbv=q(1k!zI^`^)r~%;E=i^yf&L&6)qPO)W%jg1`QTk`|Ql#kI|dHeI|Q z;NA9-&iYq#^RuGu_Did4ue%j?`fAg&#UAaQuP5nV$NotRoiQiEn-%Um` zlQCjqK``%UDFe3Gvf1p5ZIvumZfl*-t)FGt3J%}5xU27)yr>6>eeAqYn-KOfaMDCt zTcOOM7CAb?YrfboRcgO;VfkqvkMnvlmT@gH#SqS7`5?|5!xSFNXSjp4Q^dEvna?QU zGJE@970fHf7U&AY~vb#5nk$&BiIC zp^aa(nae0?F)^OBPwCj)rxq}dlKtj(gc`hB^X}GQo4?zo3_fr=7rGrQ4)1J(cQdjrFAH`rxhG^WHZ$C4;!4iMgrwqqHaOiY9S zazF5OY!2rOvGH{bT&rLAQEO@>=zjr+MnZfTRVvy@|9O>niS^D7`xs)4fvQFMZE8S*X5T6 zrz0xE&U<4Ww_+fe19S8LQ=9scGi_qh=)4&6=`+4GNU;vloJo&aD>RPai5+*D>vmtC zwPEQX@94x&qoaxA^5xO1w{$$6*7ZHl(<@I)Dow7e2t!rV$Namk&F^5ZlUDkP!Yp0Lj22be& zRq`{;dwI%SUB8TeQlASpyE&3-$!MT)U`JyZ$+xzJAi7XAc3xct^vjerQ?f8NCo9s9 zkhCK~h)HGFgJK77sC+l2@MTb8uNq?C9}#8Sk3;*sD;5DKS2OrXqbOq`f}1MZ71Mn6 zo!&6kT{8aN8?eGfZ;P?dRXqkBrLh>2pO7L**+cRTPWgpJ)(288xKc|SkUNwOm5D}u zI5@t}*FEEnT5NCMUy9i+Y$q77bF;7TZa@;{DF<#}S=7`xRVf^6h@Lf+n3Xlua=F5F z*XKOAL}%QXfXXqsnR$4;hreuYcZ;A_qiuii#v;D|mVV)5tb>xBM9i`O*vFmk-nIbr 
zkasbnAU~38UWd7DIN8T!*Z0ingHDbJbR@(+gx;KgkR(epGKRqQsMaF`d_;aBP7AxI-)`%1 zl%eC8Y0fw@)|srU?C)Y-yMaxVDE{T}_!P10a=&2K0Ca>B>go|M_D`S369k^v@0XaO z1d=~sP%O_Cow!nIKV2OCgStCcm)N!6kq+>GU4Qk~s_WI(gspkwFF@&UC2<|`oBZ0? zV_}ihU5!i7`w%_PQjga5`5r0t@(U{#eu$dmOCo(&`1S+TenvKYc4oMI3|eZAq--&G zE(B%)+&asLquT7W+QJm2`G#z&_fqUsbuKBNoYy@qkmEDVdQCr8zs+^IRu`L7+__>h zgr7l%UIq%jl+Z(GYP-WcKgMS?AF6s8Zam+u`c%yqGjqsDRX0X@RJRLfrea;I_XyLERFT!=_5y$^)=gJ?U{J#GfBO*%^C2LtC zgtrpeOIb@PnNOlgNY9hB4XKv3#!Y_iy<8 z_MTs!AI_a~U+3I&?>YCJ^E`j4f0KVTpF`*!d&6WnO!xSFs)y-bET8U~Wo4!~q>~R) zYX>_1#5cx3prYqFVKI0wxS$Xj9g$D(JzWSdE&C*;RsbI9DZa+?q#g<*X~O*yW8ymc zdy+YW`@sFp!L@zf^8p6bdd93>&d~n#>AZOb>?5OTs&%DI=)vYh8DAXr##K7*c;P-* zpCk=C;f6=mM$QQ+ID^=vWo3?yMePcW?(?BR_a=9)C)9ugk$>=@<$IaP> zPAoWpFAGUZk6G!z^XiXo<2_bjT=pQm%1`UBXi5II%kl>$*vD+V}B%mwj9@p4?EPzE8(!9y0;pVCn(Lob%Ft`StIa|W#v z{2x+~56gg~rwTVddGBdnwQvaGoq-#r9@k5AQSl)Dq*T#75FP^e#pBg{gGnmzH+^%1XHtLCKpF$h?Q54NQDfq^mzHwHeI^vG5rm@N$@>QA6kQh-&H5pSV3{ z-z7+wdX3DC222!PTaCUpN5R_jvbP&FEVi1EA{s_z2V$M-)p}Tr$H58?Ypw~pRT0a$)Xr)yNOc@xjcnN>KHUwePUJT@Cl25{RE@GJYD?G*saCEU-4bpTIlb_x?X|Jf@Uv04>w23?uXa;H-mL>OXJiE5j()*=!KdLqS%r3I{iNccZFHG8e_TrpVkwugA=h~yp$qVqGvY$f| zT1FL(w?>T??1DTO<&-WyH06k&csHeCsPFxElYd~2%VQ1?1sBP>!=RM*=I72rZTkuq zB{+OCd&>I^AermY0F_#oV`zn&;F--N}Xl&DF1tJpG^zx!x z)6V#(>!03KxNeHQg?@js3}qbe$_`YJ9>OWyGFmezcb=^O<5V4Rly$tKZ0?n7hfBA! 
zUq$2EW+BS7bqWPL?Uz<{P$|LXkH>qiHgLVv>gBhf(lp!QZAhK=Z#hmQZhf>WRN}o8MKwAIe(eTvXm^M@+c*I$J$2Kk`DIuTUMfQ5Cd|63L z8#xmyZM8A>rHt6F%u>95<0pCxFB{Hg#kpe-mHNB6#px~zH>KAT#G`sRi%9ZP>Xa<^8+%jBQqXfx~ zekaN*W{wwpPQoyHnLk%r$ge8OXDz~sa6xfeW%f|K>w`|UA#zT^d*)<3`?nSQ<7 z>e<$y7WF~bL8HDJH0I_h1LkDdR_ZL&k_23^DP)sa^KT=;kgaFA!Sl!I@s&#jmyu>kyk`cCXfAf3vZ%q+0GKT>4aZ9 zjrSgb4~gxxQFgXIJNy%rG0WbjvC=bV*u|M+^{W$yn=6p9H5y4~27?)~g6ZrUtv^FK zu_9zO;%Rr1PQ5ApWE8JvFihk~&R;UVcY9%LBE$LeX`0I(jAXgF|n|8S;*Z z_D=IG&22fA0KHA>lwL$zfB#q}jZOGz<~n3k-euWhZ{E=xBP#vey77tq6>sdCy_9l5 zn&4LT2OMs-kMl*0t;Wd;o6?#wyrm&sHkN&WS3yuv0;19%v?oMRa9)dO!Q9xs${n*u zSa81tJfKD}r+*(h{A3N1a5(Z-VPFG2byK*`du-sKg{8axtkD;*Wc>KWHmkV2OPE|s z%{Mnm=@i348OZtZ8^h79?@ADo67Y+V{3a!~gv`cuG5rO5QLI?MY$RbQ9`=XJF=pR;ANbdnNvVCaYB1-PwoDmWOqkPIf-t9-rn4S_@|!^q?JoVddnwePV3gNcad6sCp!qIm!Myka=dat!1Fqh-P9_enU) z7Cx81RK2NP{9K4i3uPJx1)0U${!Mnq`5~Y)V-RG7;Q>E=ty#bIU6$6xOK+|CrWnT9 z*)%2LHgX2W+lkUEr-i_gwiS(oQw+IOm*hNMK!g?O+1$MF^IvDdsC;&(GF8Und}5CpdgPt+|i~7%+E-9O#2NFJ)q+8{|gpDHfvoBKf9@ z3Ofheu(7yiYx;g5#0_xmTIseX2K*pUqVY|R_qEFx*XJr zSYq2WPeD5RLe%5y1!*->xB#GEzc}FJ4MAUyWp|ye{!Tpmp0AQ53}iT6fwRR0>Y+wL z5Dv&1L9fzf* z>)(yWO5aIYHw1W(f;cRs99oLmD*8NY-g7|c?+8UW`~GGctld3ly@*~ahBM)yudD;; zSSc>+%q10(!(-|lvA`1(NlE18a1aG`Gaf`>L+fvRDC zU@O8v3Uc7RsN4Gr%l^W7N;Uu)!%e154P~R4V*Zz+J-*4hK8AIl8Rgb>#+}7C@!S_TPxqhM` z?Xz-ZmF+G@@#jDh7qG)4+B1L8!d2w-%8540`HLaAK2C=rvduo z4qUdk+6Fs#O)VH}7xeMLTMd;y8N%SduI7;XSKU?N4+aKIa}OH4bXSp%tny%Xi!@jO z1AL>jZiRVa(9Lj42$>45Os=2`|*Zy;xy@+ z4X+XXQlEL3yLGgrTHdU-7xE$L#q{^~-p`xTyW-9B9nt-9 zDgxZtK|HFEXi$9KtRcFB6jA_6joM{4ifUQ!i2QqhO-S8CX!5U=3eb~eZ=CF)j+x1!~z!ULZE@Jcw^~<_WVDkg%b*mGh9QsgdCn1lh zRSVXoPV{KRs2`K`sFRZk(zj|u(^zm0f&*9rAmC50xY?^+nG?Rj8P~Z?oy+8t1=I9B zhD0?U-KVJ4FI-G}|Nl%DU2fb1>!>4NwLJpLvA jtAu|o;eWmzI*g&gu62;ASE;z&&1ZSR<~;G-?Wq3(I(xn5 literal 0 HcmV?d00001 diff --git a/api/core/tools/provider/builtin/xinference/tools/stable_diffusion.py b/api/core/tools/provider/builtin/xinference/tools/stable_diffusion.py new file mode 100644 index 0000000000..847f2730f2 --- 
/dev/null +++ b/api/core/tools/provider/builtin/xinference/tools/stable_diffusion.py @@ -0,0 +1,412 @@ +import io +import json +from base64 import b64decode, b64encode +from copy import deepcopy +from typing import Any, Union + +from httpx import get, post +from PIL import Image +from yarl import URL + +from core.tools.entities.common_entities import I18nObject +from core.tools.entities.tool_entities import ( + ToolInvokeMessage, + ToolParameter, + ToolParameterOption, +) +from core.tools.errors import ToolProviderCredentialValidationError +from core.tools.tool.builtin_tool import BuiltinTool + +# All commented out parameters default to null +DRAW_TEXT_OPTIONS = { + # Prompts + "prompt": "", + "negative_prompt": "", + # "styles": [], + # Seeds + "seed": -1, + "subseed": -1, + "subseed_strength": 0, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + # Samplers + "sampler_name": "DPM++ 2M", + # "scheduler": "", + # "sampler_index": "Automatic", + # Latent Space Options + "batch_size": 1, + "n_iter": 1, + "steps": 10, + "cfg_scale": 7, + "width": 512, + "height": 512, + # "restore_faces": True, + # "tiling": True, + "do_not_save_samples": False, + "do_not_save_grid": False, + # "eta": 0, + # "denoising_strength": 0.75, + # "s_min_uncond": 0, + # "s_churn": 0, + # "s_tmax": 0, + # "s_tmin": 0, + # "s_noise": 0, + "override_settings": {}, + "override_settings_restore_afterwards": True, + # Refinement Options + "refiner_checkpoint": "", + "refiner_switch_at": 0, + "disable_extra_networks": False, + # "firstpass_image": "", + # "comments": "", + # High-Resolution Options + "enable_hr": False, + "firstphase_width": 0, + "firstphase_height": 0, + "hr_scale": 2, + # "hr_upscaler": "", + "hr_second_pass_steps": 0, + "hr_resize_x": 0, + "hr_resize_y": 0, + # "hr_checkpoint_name": "", + # "hr_sampler_name": "", + # "hr_scheduler": "", + "hr_prompt": "", + "hr_negative_prompt": "", + # Task Options + # "force_task_id": "", + # Script Options + # "script_name": "", + 
"script_args": [], + # Output Options + "send_images": True, + "save_images": False, + "alwayson_scripts": {}, + # "infotext": "", +} + + +class StableDiffusionTool(BuiltinTool): + def _invoke( + self, user_id: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + """ + invoke tools + """ + # base url + base_url = self.runtime.credentials.get("base_url", None) + if not base_url: + return self.create_text_message("Please input base_url") + + if tool_parameters.get("model"): + self.runtime.credentials["model"] = tool_parameters["model"] + + model = self.runtime.credentials.get("model", None) + if not model: + return self.create_text_message("Please input model") + + # set model + try: + url = str(URL(base_url) / "sdapi" / "v1" / "options") + response = post( + url, + json={"sd_model_checkpoint": model}, + headers={"Authorization": f"Bearer {self.runtime.credentials['api_key']}"}, + ) + if response.status_code != 200: + raise ToolProviderCredentialValidationError("Failed to set model, please tell user to set model") + except Exception as e: + raise ToolProviderCredentialValidationError("Failed to set model, please tell user to set model") + + # get image id and image variable + image_id = tool_parameters.get("image_id", "") + image_variable = self.get_default_image_variable() + # Return text2img if there's no image ID or no image variable + if not image_id or not image_variable: + return self.text2img(base_url=base_url, tool_parameters=tool_parameters) + + # Proceed with image-to-image generation + return self.img2img(base_url=base_url, tool_parameters=tool_parameters) + + def validate_models(self): + """ + validate models + """ + try: + base_url = self.runtime.credentials.get("base_url", None) + if not base_url: + raise ToolProviderCredentialValidationError("Please input base_url") + model = self.runtime.credentials.get("model", None) + if not model: + raise ToolProviderCredentialValidationError("Please input model") + + 
api_url = str(URL(base_url) / "sdapi" / "v1" / "sd-models") + response = get(url=api_url, timeout=10) + if response.status_code == 404: + # try draw a picture + self._invoke( + user_id="test", + tool_parameters={ + "prompt": "a cat", + "width": 1024, + "height": 1024, + "steps": 1, + "lora": "", + }, + ) + elif response.status_code != 200: + raise ToolProviderCredentialValidationError("Failed to get models") + else: + models = [d["model_name"] for d in response.json()] + if len([d for d in models if d == model]) > 0: + return self.create_text_message(json.dumps(models)) + else: + raise ToolProviderCredentialValidationError(f"model {model} does not exist") + except Exception as e: + raise ToolProviderCredentialValidationError(f"Failed to get models, {e}") + + def get_sd_models(self) -> list[str]: + """ + get sd models + """ + try: + base_url = self.runtime.credentials.get("base_url", None) + if not base_url: + return [] + api_url = str(URL(base_url) / "sdapi" / "v1" / "sd-models") + response = get(url=api_url, timeout=120) + if response.status_code != 200: + return [] + else: + return [d["model_name"] for d in response.json()] + except Exception as e: + return [] + + def get_sample_methods(self) -> list[str]: + """ + get sample method + """ + try: + base_url = self.runtime.credentials.get("base_url", None) + if not base_url: + return [] + api_url = str(URL(base_url) / "sdapi" / "v1" / "samplers") + response = get(url=api_url, timeout=120) + if response.status_code != 200: + return [] + else: + return [d["name"] for d in response.json()] + except Exception as e: + return [] + + def img2img( + self, base_url: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + """ + generate image + """ + + # Fetch the binary data of the image + image_variable = self.get_default_image_variable() + image_binary = self.get_variable_file(image_variable.name) + if not image_binary: + return self.create_text_message("Image not found, please 
request user to generate image firstly.") + + # Convert image to RGB and save as PNG + try: + with Image.open(io.BytesIO(image_binary)) as image, io.BytesIO() as buffer: + image.convert("RGB").save(buffer, format="PNG") + image_binary = buffer.getvalue() + except Exception as e: + return self.create_text_message(f"Failed to process the image: {str(e)}") + + # copy draw options + draw_options = deepcopy(DRAW_TEXT_OPTIONS) + # set image options + model = tool_parameters.get("model", "") + draw_options_image = { + "init_images": [b64encode(image_binary).decode("utf-8")], + "denoising_strength": 0.9, + "restore_faces": False, + "script_args": [], + "override_settings": {"sd_model_checkpoint": model}, + "resize_mode": 0, + "image_cfg_scale": 0, + # "mask": None, + "mask_blur_x": 4, + "mask_blur_y": 4, + "mask_blur": 0, + "mask_round": True, + "inpainting_fill": 0, + "inpaint_full_res": True, + "inpaint_full_res_padding": 0, + "inpainting_mask_invert": 0, + "initial_noise_multiplier": 0, + # "latent_mask": None, + "include_init_images": True, + } + # update key and values + draw_options.update(draw_options_image) + draw_options.update(tool_parameters) + + # get prompt lora model + prompt = tool_parameters.get("prompt", "") + lora = tool_parameters.get("lora", "") + model = tool_parameters.get("model", "") + if lora: + draw_options["prompt"] = f"{lora},{prompt}" + else: + draw_options["prompt"] = prompt + + try: + url = str(URL(base_url) / "sdapi" / "v1" / "img2img") + response = post( + url, + json=draw_options, + timeout=120, + headers={"Authorization": f"Bearer {self.runtime.credentials['api_key']}"}, + ) + if response.status_code != 200: + return self.create_text_message("Failed to generate image") + + image = response.json()["images"][0] + + return self.create_blob_message( + blob=b64decode(image), + meta={"mime_type": "image/png"}, + save_as=self.VariableKey.IMAGE.value, + ) + + except Exception as e: + return self.create_text_message("Failed to generate image") + + 
def text2img( + self, base_url: str, tool_parameters: dict[str, Any] + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + """ + generate image + """ + # copy draw options + draw_options = deepcopy(DRAW_TEXT_OPTIONS) + draw_options.update(tool_parameters) + # get prompt lora model + prompt = tool_parameters.get("prompt", "") + lora = tool_parameters.get("lora", "") + model = tool_parameters.get("model", "") + if lora: + draw_options["prompt"] = f"{lora},{prompt}" + else: + draw_options["prompt"] = prompt + draw_options["override_settings"]["sd_model_checkpoint"] = model + + try: + url = str(URL(base_url) / "sdapi" / "v1" / "txt2img") + response = post( + url, + json=draw_options, + timeout=120, + headers={"Authorization": f"Bearer {self.runtime.credentials['api_key']}"}, + ) + if response.status_code != 200: + return self.create_text_message("Failed to generate image") + + image = response.json()["images"][0] + + return self.create_blob_message( + blob=b64decode(image), + meta={"mime_type": "image/png"}, + save_as=self.VariableKey.IMAGE.value, + ) + + except Exception as e: + return self.create_text_message("Failed to generate image") + + def get_runtime_parameters(self) -> list[ToolParameter]: + parameters = [ + ToolParameter( + name="prompt", + label=I18nObject(en_US="Prompt", zh_Hans="Prompt"), + human_description=I18nObject( + en_US="Image prompt, you can check the official documentation of Stable Diffusion", + zh_Hans="图像提示词,您可以查看 Stable Diffusion 的官方文档", + ), + type=ToolParameter.ToolParameterType.STRING, + form=ToolParameter.ToolParameterForm.LLM, + llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate" + " as a list of words as possible as detailed, the prompt must be written in English.", + required=True, + ), + ] + if len(self.list_default_image_variables()) != 0: + parameters.append( + ToolParameter( + name="image_id", + label=I18nObject(en_US="image_id", zh_Hans="image_id"), + 
human_description=I18nObject( + en_US="Image id of the image you want to generate based on, if you want to generate image based" + " on the default image, you can leave this field empty.", + zh_Hans="您想要生成的图像的图像 ID,如果您想要基于默认图像生成图像,则可以将此字段留空。", + ), + type=ToolParameter.ToolParameterType.STRING, + form=ToolParameter.ToolParameterForm.LLM, + llm_description="Image id of the original image, you can leave this field empty if you want to" + " generate a new image.", + required=True, + options=[ + ToolParameterOption(value=i.name, label=I18nObject(en_US=i.name, zh_Hans=i.name)) + for i in self.list_default_image_variables() + ], + ) + ) + + if self.runtime.credentials: + try: + models = self.get_sd_models() + if len(models) != 0: + parameters.append( + ToolParameter( + name="model", + label=I18nObject(en_US="Model", zh_Hans="Model"), + human_description=I18nObject( + en_US="Model of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", + zh_Hans="Stable Diffusion 的模型,您可以查看 Stable Diffusion 的官方文档", + ), + type=ToolParameter.ToolParameterType.SELECT, + form=ToolParameter.ToolParameterForm.FORM, + llm_description="Model of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", + required=True, + default=models[0], + options=[ + ToolParameterOption(value=i, label=I18nObject(en_US=i, zh_Hans=i)) for i in models + ], + ) + ) + + except: + pass + + sample_methods = self.get_sample_methods() + if len(sample_methods) != 0: + parameters.append( + ToolParameter( + name="sampler_name", + label=I18nObject(en_US="Sampling method", zh_Hans="Sampling method"), + human_description=I18nObject( + en_US="Sampling method of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", + zh_Hans="Stable Diffusion 的Sampling method,您可以查看 Stable Diffusion 的官方文档", + ), + type=ToolParameter.ToolParameterType.SELECT, + form=ToolParameter.ToolParameterForm.FORM, + llm_description="Sampling method of Stable 
Diffusion, you can check the official documentation" + " of Stable Diffusion", + required=True, + default=sample_methods[0], + options=[ + ToolParameterOption(value=i, label=I18nObject(en_US=i, zh_Hans=i)) for i in sample_methods + ], + ) + ) + return parameters diff --git a/api/core/tools/provider/builtin/xinference/tools/stable_diffusion.yaml b/api/core/tools/provider/builtin/xinference/tools/stable_diffusion.yaml new file mode 100644 index 0000000000..4f1d17f175 --- /dev/null +++ b/api/core/tools/provider/builtin/xinference/tools/stable_diffusion.yaml @@ -0,0 +1,87 @@ +identity: + name: stable_diffusion + author: xinference + label: + en_US: Stable Diffusion + zh_Hans: Stable Diffusion +description: + human: + en_US: Generate images using Stable Diffusion models. + zh_Hans: 使用 Stable Diffusion 模型生成图片。 + llm: draw the image you want based on your prompt. +parameters: + - name: prompt + type: string + required: true + label: + en_US: Prompt + zh_Hans: 提示词 + human_description: + en_US: Image prompt + zh_Hans: 图像提示词 + llm_description: Image prompt of Stable Diffusion, you should describe the image you want to generate as a list of words as possible as detailed, the prompt must be written in English. 
+ form: llm + - name: model + type: string + required: false + label: + en_US: Model Name + zh_Hans: 模型名称 + human_description: + en_US: Model Name + zh_Hans: 模型名称 + form: form + - name: lora + type: string + required: false + label: + en_US: Lora + zh_Hans: Lora + human_description: + en_US: Lora + zh_Hans: Lora + form: form + - name: steps + type: number + required: false + label: + en_US: Steps + zh_Hans: Steps + human_description: + en_US: Steps + zh_Hans: Steps + form: form + default: 10 + - name: width + type: number + required: false + label: + en_US: Width + zh_Hans: Width + human_description: + en_US: Width + zh_Hans: Width + form: form + default: 1024 + - name: height + type: number + required: false + label: + en_US: Height + zh_Hans: Height + human_description: + en_US: Height + zh_Hans: Height + form: form + default: 1024 + - name: negative_prompt + type: string + required: false + label: + en_US: Negative prompt + zh_Hans: Negative prompt + human_description: + en_US: Negative prompt + zh_Hans: Negative prompt + form: form + default: bad art, ugly, deformed, watermark, duplicated, discontinuous lines diff --git a/api/core/tools/provider/builtin/xinference/xinference.py b/api/core/tools/provider/builtin/xinference/xinference.py new file mode 100644 index 0000000000..7c2428cc00 --- /dev/null +++ b/api/core/tools/provider/builtin/xinference/xinference.py @@ -0,0 +1,18 @@ +import requests + +from core.tools.errors import ToolProviderCredentialValidationError +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController + + +class XinferenceProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + base_url = credentials.get("base_url") + api_key = credentials.get("api_key") + model = credentials.get("model") + res = requests.post( + f"{base_url}/sdapi/v1/options", + headers={"Authorization": f"Bearer {api_key}"}, + json={"sd_model_checkpoint": model}, + ) + if res.status_code != 200: + 
raise ToolProviderCredentialValidationError("Xinference API key is invalid") diff --git a/api/core/tools/provider/builtin/xinference/xinference.yaml b/api/core/tools/provider/builtin/xinference/xinference.yaml new file mode 100644 index 0000000000..19aaf5cbd1 --- /dev/null +++ b/api/core/tools/provider/builtin/xinference/xinference.yaml @@ -0,0 +1,40 @@ +identity: + author: xinference + name: xinference + label: + en_US: Xinference + zh_Hans: Xinference + description: + zh_Hans: Xinference 提供的兼容 Stable Diffusion web ui 的图片生成 API。 + en_US: Stable Diffusion web ui compatible API provided by Xinference. + icon: icon.png + tags: + - image +credentials_for_provider: + base_url: + type: secret-input + required: true + label: + en_US: Base URL + zh_Hans: Xinference 服务器的 Base URL + placeholder: + en_US: Please input Xinference server's Base URL + zh_Hans: 请输入 Xinference 服务器的 Base URL + model: + type: text-input + required: true + label: + en_US: Model + zh_Hans: 模型 + placeholder: + en_US: Please input your model name + zh_Hans: 请输入你的模型名称 + api_key: + type: secret-input + required: true + label: + en_US: API Key + zh_Hans: Xinference 服务器的 API Key + placeholder: + en_US: Please input Xinference server's API Key + zh_Hans: 请输入 Xinference 服务器的 API Key From 7c485f8bb80d170b5756575c150e7cdb41613145 Mon Sep 17 00:00:00 2001 From: ybalbert001 <120714773+ybalbert001@users.noreply.github.com> Date: Tue, 24 Sep 2024 10:33:30 +0800 Subject: [PATCH 19/64] fix llm integration problem: It doesn't work on docker env (#8701) Co-authored-by: Yuanbo Li --- .../model_providers/sagemaker/llm/llm.py | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/api/core/model_runtime/model_providers/sagemaker/llm/llm.py b/api/core/model_runtime/model_providers/sagemaker/llm/llm.py index 2edd13d56d..04789197ee 100644 --- a/api/core/model_runtime/model_providers/sagemaker/llm/llm.py +++ b/api/core/model_runtime/model_providers/sagemaker/llm/llm.py @@ -85,7 +85,6 @@ class 
SageMakerLargeLanguageModel(LargeLanguageModel): """ sagemaker_client: Any = None - sagemaker_sess: Any = None predictor: Any = None def _handle_chat_generate_response( @@ -213,23 +212,22 @@ class SageMakerLargeLanguageModel(LargeLanguageModel): :return: full response or stream response chunk generator result """ if not self.sagemaker_client: - access_key = credentials.get("access_key") - secret_key = credentials.get("secret_key") + access_key = credentials.get("aws_access_key_id") + secret_key = credentials.get("aws_secret_access_key") aws_region = credentials.get("aws_region") + boto_session = None if aws_region: if access_key and secret_key: - self.sagemaker_client = boto3.client( - "sagemaker-runtime", - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - region_name=aws_region, + boto_session = boto3.Session( + aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=aws_region ) else: - self.sagemaker_client = boto3.client("sagemaker-runtime", region_name=aws_region) + boto_session = boto3.Session(region_name=aws_region) else: - self.sagemaker_client = boto3.client("sagemaker-runtime") + boto_session = boto3.Session() - sagemaker_session = Session(sagemaker_runtime_client=self.sagemaker_client) + self.sagemaker_client = boto_session.client("sagemaker") + sagemaker_session = Session(boto_session=boto_session, sagemaker_client=self.sagemaker_client) self.predictor = Predictor( endpoint_name=credentials.get("sagemaker_endpoint"), sagemaker_session=sagemaker_session, From 1ecf70dca0b313966d8955bdaf43fdac5515cf99 Mon Sep 17 00:00:00 2001 From: zhuhao <37029601+hwzhuhao@users.noreply.github.com> Date: Tue, 24 Sep 2024 11:20:15 +0800 Subject: [PATCH 20/64] feat: add mixedbread as a new model provider (#8523) --- .../model_providers/_position.yaml | 1 + .../model_providers/mixedbread/__init__.py | 0 .../mixedbread/_assets/icon_l_en.png | Bin 0 -> 123637 bytes .../mixedbread/_assets/icon_s_en.png | Bin 0 -> 37303 bytes 
.../model_providers/mixedbread/mixedbread.py | 27 +++ .../mixedbread/mixedbread.yaml | 31 ++++ .../mixedbread/rerank/__init__.py | 0 .../rerank/mxbai-rerank-large-v1-en.yaml | 4 + .../mixedbread/rerank/rerank.py | 125 ++++++++++++++ .../mixedbread/text_embedding/__init__.py | 0 .../mxbai-embed-2d-large-v1-en.yaml | 8 + .../mxbai-embed-large-v1-en.yaml | 8 + .../text_embedding/text_embedding.py | 163 ++++++++++++++++++ api/pyproject.toml | 1 + .../model_runtime/mixedbread/__init__.py | 0 .../model_runtime/mixedbread/test_provider.py | 28 +++ .../model_runtime/mixedbread/test_rerank.py | 100 +++++++++++ .../mixedbread/test_text_embedding.py | 78 +++++++++ dev/pytest/pytest_model_runtime.sh | 3 +- 19 files changed, 576 insertions(+), 1 deletion(-) create mode 100644 api/core/model_runtime/model_providers/mixedbread/__init__.py create mode 100644 api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png create mode 100644 api/core/model_runtime/model_providers/mixedbread/_assets/icon_s_en.png create mode 100644 api/core/model_runtime/model_providers/mixedbread/mixedbread.py create mode 100644 api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml create mode 100644 api/core/model_runtime/model_providers/mixedbread/rerank/__init__.py create mode 100644 api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml create mode 100644 api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py create mode 100644 api/core/model_runtime/model_providers/mixedbread/text_embedding/__init__.py create mode 100644 api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml create mode 100644 api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml create mode 100644 api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py create mode 100644 api/tests/integration_tests/model_runtime/mixedbread/__init__.py create mode 100644 
api/tests/integration_tests/model_runtime/mixedbread/test_provider.py create mode 100644 api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py create mode 100644 api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py diff --git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml index 1f5f64019a..79ebd00764 100644 --- a/api/core/model_runtime/model_providers/_position.yaml +++ b/api/core/model_runtime/model_providers/_position.yaml @@ -38,3 +38,4 @@ - perfxcloud - zhinao - fireworks +- mixedbread diff --git a/api/core/model_runtime/model_providers/mixedbread/__init__.py b/api/core/model_runtime/model_providers/mixedbread/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png new file mode 100644 index 0000000000000000000000000000000000000000..2027611bd5e8b4c7d06a5d00e515a0db70e67a17 GIT binary patch literal 123637 zcmb5W1yo#Hwlxd{hv4o`;T8z)?pj!I3YXv#+}+*XJxFl(Ai-S{2o{{+@)x(eU%%Jy z8@cZv;}lgKYMisrUNYyJYj47p6{X%H5+FiAK)jWa7FUIUKrn=WfW(7`1)o`(ohSys zK{}~Qi9*y&5*>hl5eI3>m?WJgjm4%hTF$%H=ngTLNTxEE& zpdhNmuL3Wd9BYKMqhC!EmQTJuDhkrxtT|0xWSt5)r8?z25LNF>LqLChQ<2x&&AZNJ z)iz_5#cl;%Fo?<~n`}`WfzBL&+Ortip#m2TTt)Ypm^tJGRVqlNmTA^0NkQ?*7;a*N z3Y%20C6@~utEH@pL-B2IR3vYDPfL)yu}OUxxCrdeRxPHRJ;yCBX|7vzSE|^UsE=To zD$2)YxNWcy;voL7#||CjKH~H)ga1(J{HQWbE4(ZvuAc}@2SA(p&{YI|7vH4PJOZww z_?~e-!NUKm5>z_H*@-#qO`Q&b5R^+rBw7E5+kAvEC}OE5AmLNgvAu`ch{;1)AfwN`SnzDQ5az-vFwv-#E~x?@vjZh!e>o`f|#N&%s5I= zGL&a0#hp{rDQ%F55xK(o#WD!`U*yVzHLBn;Y=3WdGOKVp>=}z2pdzM))SP?s@N^^<-Kbru2dn!rr(}$m5dY$yBSw&ROucI~6&-RJd z@5Zqo9t~d3+c!#v#X$3mZuMYymaaBGvnA#I1ZfrN6wV8@74LV$F;)1h;t>D;dfvs? 
z=RGnn%e7WvJBT=61$ZM|J>hm%-gGw!VHUBA!X#H4{;_W>g;RCoeZ+#hwl`NpN^ z?z~7zlfZ(uE1Vjw``!NeiutSt?GZU72H8Ije{lr)OfK7~1y7s{^{HiZBAn0~_uTLL?u-BIs~h6l0Ef!hK*lj6Hqv^o%%pJIh) zXAd3u%AVm8eqZJ|lZXGr%hlODYi#S$J0@N4nAOuI_wd0q=}8V?@cTwbEgzpx zbosFMf%)jk-9dSM1ccD(8UjH5*FF8vdd8jbX|Se-kQ(K^a^QS1u6LD3$n{5kZh`CcW;s?Kk6-R~&$R!Znyd0ru>D84U{LwF#UTLtE>CUQL$Z{N-?1Bvv7(j*lsq4nrcHez=UJyL-ic^Z z>9+e&GzmD&x0@pcD$rF&2PgW8ZLLospBQnz|6VEfaH$^}uz8NHAjs*ox$7+?cpHAh z^+`{EObUuh_^%_56uLw)n-`O%Xj8;mD9pCtGzn8>NW3G=7iJeGFYfnyt)ctcw}f0( z6Q~(B)c=}~e;M$9BphL8w6m2N=hmKgl(yX0pInUdYIP8;1&uxVGS@i$d7GB_ceSnc zmIo3VTt&CC$M2&WG6-4I&G~D+uosdN8xd-jd_9(_jU12@{_FC>D+o!cskx9a<$H$7 zhA>Gbg3yqC$+akkzC7PkjUO;N0I{0=-Ef=825(+?A0+|0)#7c`@U;Q8!ESowP^4ma z*2;^YRdYZ2>Hf=6Au;xY{P-i?M~SZ!ShpLtw2@6rHXP$*a^4>NhZ!6*{i1O98j(m@ zrw&4XI{qNY1=5>F4l z*2!WEhw#ZTJBQYq`BcDW3$y9QnR^T#gUmmRN?BW*r=z&ISQF=c4GE6T#)$D9ohKrK zaJV^g6%lrET|^Sj+vQlBl}1Y?S%!1vZO5)W*55*DczoU2TDs`I5a(iOLMT2sea8un zCn)05UPlQ*lksHDEdAp}<@}}DP%M_>y+>kYx+ydr9#8kbSE+Jh{w5`NNP`oRK0kwA zT={{e3;j>-=V->|GIflNnbBv_Cajc>NxG*ZPrgTpeyTVCI0fFT z_{%B%G!d{w;=k92W73W#7&}v;QLsAw{E7iE5C|gf3U%*R(o*(p!yHp87&xIfu++)>%HjJcRdn}_U5T1!`+H_85Iw8WqmM$!t$_^SN8GCU7q zQr=Bp2x~4Kq^`kNPM4R%u2hb6MJ<~`nNR6&hY8QbS9ZKv=vSrp9bo*Usm*P(7_?}?Qo^Sn6eHCN_pi1*%G zy1f5xXN5bZhY^<9lYi$e)wsX)c_fY@rM7Q120{Arc7x$wp78p1`+XlOt2`7{+-K;D zhAPmh@J9cccOI+5c%$X)bBEn}8*W3@Tden^|HyAo+h%k)vizsyjHDRyvW}c|3@8^Q z3fkKw^zJi7GTwo`TbCB62NXDy8pM4p{pO-~@{GkAy^)+%iCYi7Q+|$1LU}6-y0O zXT@;EzQ|);DLc&rpUox?VJcmWjW~+Df^9jmYOU%7DEpakt+k@R)O36wVIz^%QX>0$ zvy7aJ2UPmw*gs}Lz*T*&qUDyAF_>hpZ>OyRcK4|UT_p1nBRGeHpGI89!7G&=Wd7Y! 
zAj8v_k58g>4m=mFiV(7d9N&g^75S8|9-dw@k8I5z3NX1nul9Yn_vsWP=J!d>C6y*9 z(<%I@DR4zc%lpc^33ThJ=u=8NwnFkZk%P%`#$wkp)$J7_656BX)^P=As;-=b+zGd6ZGhVIc(e6_Z zsu*^5sobY-B+&29VA1J5G43|iLA|2YJ_%%fjuXU2 z$}i=UFQ{V}lvMJmq|l}ZB3ev{2A&@obOgDDbaDl%2Tm{4r9YKYI$$;zZ0*cnz9aCq z;@iQLV07s2!6+>hC4Z*;pZV~=&E#LG=#Pw^2L%l2acf#BDV;anY0a;^Yk23Kh)~wO ztYDSU%kHRC`7rUO;+@>8X#K2okG&|)_YPj|G+B(je#C`PHEqao8t@(bq2aS)I^?n+ zLCSsiOnqLk3J?}tpuw5I&Crg5wZgo2p9wOxF-k6p{2$0w@}+|}_vMz{J?YFx0OM55 zm>%1*jN=KrrT>Y-{{hOX<}~_NI$a?KEcow`H99TRu)jLQbL8os)u&W@6>3)o80EHY?SjjuxU%TG?pi zIVxa4$z%F`+#zZ7b?r^+bIgH=Gs~%hK+X$0SuA2jFo8~9x)=z zI_oK?$V)!piz{K`y=CCECPF~6UG=G|sF1}1>^sH@=L&k89`sJLyDA^XUrrXo3ApFv z!+X$7PRUC(+TKP-Xq4MH8do2=pyM5L!vX6`K#6Bb$$w-kWNz>~HL6N6S(KMahvrH5 z>M`$1#W;T9G<~&GelpswbYj5M(Q~?bZ)F%NiWh$a-B^6NdO!aluvO4f`)mH?NiXEp zr+Rdu^T^n65lO6x%W3B3=br|2F;p{5(aOro(qf#vE7cazvo6s`0jVIWTnH?3q2rW@ zOZCt?*PG9#45y#r`b|6O8NZVKkq?7eP&7CP%!IUk!sgUI#Mr?)QsUXI`?}SxW%n`L z3h2Z-F-ti)lzfjg$LUZ4YTzeVcCysw$?){adDrsU>>laO;g4+U?+&k)8=-;({6v;| z3KfS9>PC*_+ykkSEM$LxYPcesw7#7QWGM{k9jy*7rVfN)oW4_;98|@b)F$@YBNeAA z{JxV)?zkvduT0Ty`uuoz(vUh@8&3UilKroY^>42QpAp7_Qtyd?8e;fXhwM_UX)2RJ z#AMZK)u$RL>Ea!ZR>&-uLBt54gfBCLm5BnY_rc+l(Ye<~>;6-$zAejewd{fN>+@nJgJxFfC@ zUNo#A_lwB<{QOBm#IVV2?jQZZ4|+xz%w8@1*4F6{b5zCZ9t|hu$$AdwA+fWLw3f{@ApN7l$nwuyRXPOhzOOtY!YQOdsv9F&7V;Ma z+xAzPt)mPv)PId_41NTo!Cv;DF3DL%LSk@F4Ty=P$m0*@qBuG!O=#GyhRISebc{96}hB3u5NQP+lpE z#T1i*k|d-zbQ$Qw1ILg(2KQUzRyp)*WZ1yiuem~-O?f*C;dB~sP`kUkvuOBTS6c|g z7;Jr!(4vU3bst=pBKus#O5P~uV1hYUM00*-H4G{cI9_xE!L<@9{JyBTp8^GC{E83`3_ zHpN4w6*eiE#L!W-4(wD48K9J z^7=Br-EfW^8S2B03nw(Y4@D znFYQe7%>aL!)t#>{J@i6HD|>$7Ck;=TM31;V{dDa(j093Hvx?Q8o*aUMmSVQSao?` zO>;|DW*KoPvT6cVLxs?+{jLpC5|=?*TWUWML;!zDrOy5kSHeR*i`?2ZbSy00bx6zU z@1mi>1i35nkcLO2rr*_-pwe6;9FrRd7{tPu)g+=dX{|vO$Zl>SGh1bg4D)ZL>;GlW z5nk{dUF_2ZUWhBjM&PJ0&Q_(Q`i9s*2|cjjbQ2yrqtSqgBe0FSf=FlWNAs##VE%>)H-;!qV00 zx)^B<*F*;JhA@}=oE+rTk`X9jdztGM-2Mp5uo)LKp7*}N$!~+eP^X#() zKV7-w`rw^E&>I%V*>hpNr5?o0_i|xQs-uspJyWRmj;wdy%eRRWAZ8~3Ma-(89=)w%qWv+1s&Gb7}!a;z^z#@vI|z+V%= 
zT=j4NG(>Q@CFX~DwIQIH)8ZElf=e|My`y&?iz;v-J-uFcmcc1UB~zU8$4F7qe#V`p z4x>F{YuiY4Q~|uQov>IxtsQijcr)5NB`|#uWbUexPo{@TPEJ-+zV?;AQ+AD~-UyLS zW41<6=SzqLY2UP@R7ay*>(|8)IPzsPz5nybsQwkV%$ih*F*z;;Uf=-^eizYUG6;sG zLqy_8Gl%}#Q~{x2r)NeZdig+qoLODf*CctTAM=s_rqJ2KnEz&V6bB6YJM8}F7Hf{B z{>4m8OtC->jn?svUB`BTWiD;c>E`2LG{uu%#Bp)Eo)9@y#He2`jwplSeLohI0iRwv zp}swY-zuYg$n2cR+xhc=mu0~OheW_>+G^!gbloKEdC4&*d%~TS30TF65 zL};D7w;0YTPfl26RO2I7aWSR#MiGIg&v--17bCmB!w@|%;3YH9=v56bSQ+4cE~0K- zFGvB|EcZ4u6~ReO{MKAond;OPGQEpP2SjC zLtwS}DBk>ZTfguI`Wc*e!@w4g&O?ahxaQk#ar{mJ6tvDLkOp1lNZ&<}9GgfBQw+a$ z`oJ~z@rZ0M-MiO9z?q7bXz@rvu@QBAOMu;ap-P`Pk&OoXwPXkEF^Vs5%c17PMYL&| zc_P>>)fut)=(5OED-RKbM$-yGga?l4OZn6%wKz^>aH@;Oz9C5bbK0Ux1?PZ-aSqGk z?SYQW#=ZoVIF)t{;p~6{_EIm*f@r%$%70{aDX;kNn4% z(X%Tr1W64kc*Ur$GmRT*`I**LedlAEU*N{H#*?r65ra0?Wx5!7s&0@yObtkwTGFUX zgN?x+PbF%I!ybwyQAfPKwB&9;5zJ=)2%Wd&$fv60;AqnWOgcYs41$-{y;;mx# z2qPSF+Fd;084m^_-t^ZBXFbAxo?EWQjMrX)h){TSzj*j8s3WeL;G8L-y1Ey;D#Z|W ztgwg2ohj=}idq5S8YO5di{0lUV<=&R_6#pz*c5k64tt3x;Ui9~_kLk=Oo!AP{~k(@Hhc=$JmN+oPkxFx zktGGa(c*_){0){j$}>cD!5Lt3JiVe59Fz?(Zo=V&&*w10wkE==Pq7Bs+6p94L%w^L zvy1n+h!ZREH5_yhu7U$bNPY#^?7<89?Vg7;lYq#|p~1xVQQ1a2Ow>VXP*3B?Who0_ z)V4-m2NqA?aJQh!MAjXLZ_cMMT4(cLCPjRWd&1Riw((#Y-zoj_=Y+*03QkxXg7KLg zOgWY+<3^#@K;GTDXkG%<4l88^*T@Rdb#x}Z-bW~KdF)}#ubz6Hd>h65SP8tJm>lS1 zeBv&y##bKjoTv|**T%W|qIH`F@`AxM!argHm@;v$v|u=VT8G|PKg8{1Af$ekxBP?P zlf-09%hg1EZE*W$DH7jMrDqmO#g5Gcs%5k1N7?_R`s|gmOAH0h(?b656e!Sr43rIT zmQ?pCv`lY!3)XLv+5!RsawQdqv|bVD4~Mh>m|_*@I8bnoTv(<%Np@Ohh>brLOTd23 z31QUlz1CzcatEn*Yhha}Fd*08?p(0hKG0ZboS_%Wg#z>t88l#C29k^vs<_0^kJRhK zikV)x!1)@J!FJfhliQ4HkK$BbYmfl88y8V~JQ-9i(rtT^CRoVks+`#kQilTml_u$^ zASB3C(9j2w9QOJU7ZipAA9ER@40#bi-s5yiDI{2+_w;Q%!j86|9exb0Tg>|M9n*(k zEYvVs6!QDZY&vh6cehy0)4PSwFrwvB{JDDFA%Jx2;ayxwCOl?4jK#N+{%oo4Qr2y* zV_G-w+OB`>gjIV#puLCSP-4aY@QHNn<1Im4}|6YPBL-0 zp8{Ci;KkQ;A+tisHY3UR4jEDVv$AIKIre=azf9|l z5l0>Fz@Q9SIztL>Rl11s!i?;;fwO9(JvskL{g-xPupIg&=)f_f2X(q_eMS_(GdTb| z#g*gQ&~bGT6GRBs7`$ssi;F-0jngWg1hC^jhwOu4y 
z+`F*dk+)8roc2vS6l3Ykq4b!WgjNC?W=OD8e$?*!Hd7v~YW?U1nf=OdLHu*4LzpS)zYBS3}X92sxc zoTw;>*Xs4HKtjYkm+(*i3%=jK#JubxkXID>VHDs?ISx=Ep`kM<-cuMfH&8&;E<2s#q5`cW=U zl;)!CS0j6HgM9uu_PvoFQiN8zcE+^ULT)=FKgTPg3FBb+fl7GL2p4bTGF3l(@G8ap zS&|7&PfxEXwqVb<+k86%)c@et3HGjQX7?S5%iPPeT0++4_AvaqK3?5E!==-X<5A!`0SjNcYn&X`uK5>7 z;k!e^xEo8i+`nsgf~7*RlAGW|ezfC%?rsjljgk7P*|A1xgvU1hP#i6&$%Tb+gxI!ygWRDh#k{fMHZp0bp!#)vOq^Q?rZI z39(FGs2y7+lG{-1c~>@3R!V^{abLZYl!&aYCFu#j)Y~JYs6Z`wbeTj)@@5>!(nGN@ zelBeo6~Y-?aG;g*@$JR0$ar%KM%(BH#!cg*a5GK@N!OdWoInslOJR+eoxD}E*=63a zb*_TKGUHzYc8454=k9v}ws_k1&ySp^j zPG}q^9V{CwtBhEk_05D14q(1PRqsL*cPev!g~~U zay|5zZK&d@^ZG4}cm(?P=S)sD%>?FGhd9m1rtF7Yb1PX58E>p`e-T|tYu^Eu?35Ym zWil?3N37mq*9D0Q@~COOo1Ez~Pv)v^v~CG;VF2u4rx0p1h*%p`fne9Gyp!m%K+XrU z#V+cRC;+#lW-m46pOMM~yuZv&R)DCBKqI!K!E0wG1OD=f5=f>drNP7m%fH-T3gJq! z{knfu@xceBQc;~E;)m(!TI(gDva^6 z{iuq&%7AQ-tc7?OT$bY}SpjL4??v~aQ+o^sZ*bXgDgUl8>wisq+n8wfzXwyx7UQn> z&Pz^Zd5P7J43o$7777`(e<-b1=?^6BuibJu=ga_HrtQ>!|17P>jA1s2n zWZG048*k5Cxf<=3;7Z#SS;BSjB3r|mG6{wsbrq9^E47b-n+aiqSFjJqh)P_A*jgla zGt}w{bIT*mm`=oF<(6EwquwBc7P-cIu@Ka1{my%`zQ2?1wq&5Pk}h^E!x`1%4c7fB zPsIgSj+aslg?}Hpo^Z2U>2^l!zSSE?blBPF54Ti#0sc;dK&()jTRHqb>BK6*jK+Ny zd~u$JEjVh4^mFe@9bh!^84-~%AFD|V|F^z7U%l6sI|#e)Q0H61*{!ru5+5?6ZYlY( z@mB#c8TtqgtGtW)F#&zwMvN&G7DmSfjTwfht=)(6zgNI!<(Xy z@B^cv1RPs(+k?k}FIGlI+ZJMXxOEPe&~{;ihO#$wRkZ(asrZwYOr07BGa0 zkKi(l*#R$_+A60p7%W?2M9PcNV154@Zv4Z03$tKE9B<_QradgxS+(Ks9` z`RL4|aEQ;l9MMR7QZ=c9rZ+Yam%)W&m6$Z<_9N|>P60Pvgr<;Qt0QRSJJ1~}xb-C?ylVpo*kdF1%yVdX&jV;;PUnGgoJ~@|&4Q>VEKSllZxwV3sC^k4L7bG?G?q_k`5qgWdqF)x4B8%w=P(AtBAp2E0;XvOP*tT}tS% z1nN<^Z1NRH8=0BsS9V1S?@$#*@2dt1QjrZAWr9z17vH#zqj`~$K7yObDC+W};AwJ$ z9{NsFw_@o+&UnoambIwIwwzJ?FD@?5;h8_Wg77!l-PbbqrO5=Llw&-l7>$h>fU`wH z>rgL@UYfb#rrJM;&oiGGoBNeFOmEw_RvB5MP5w3@EunmrtE$MoVAcjUwh)Dv@F06mbC#i{;~0!Ilb!PPY^inLDGP zmEFx$sr|}3mEG+hdp9%(m~E8BCyY=vlMel9=}nMA!(S3GAr}v3Q#0St>t*@7v5XTGt5{LfqP0D`;e(mv|yfhKQ9LH4V>4K3{^X6Valn=5iv;3|1H>!lmQ?SXr7_KGhVg%- zCR?;_Wg!YUh4VE5H#rUJoL7X}?F!H!1_ 
z`{TTT;RY7!LjWt`q$)K2?@|q>D!J@fv3Yb9Hv$mc+^RHIgV(g>_Y*1$)dC_%N4JJH zk0=$mV-qX$4-v+a*;=b=*?T{Cxmw`UU^ILH271f&8f{v-5>Ck)ywKxF%)y@OVxjL= z=oh>CFaXT976K*S9F5enTIW24zvYV~QU~Oj7Q`6ks;ZsmlMV6vF-&ZkQKtljm$!8OPky2r@YG0m@eFrcxk~o5)p*s)GwH+8%Q|#zmv7W z32s^sD(m9`GY{$xLzn!qjU})lY2_h;`g{aiUDt>f5s|vwVFjprjFVi(864;u$ctE< zKKngcRrm1FeU@F%6`lLbdv{2xl<+Hy$yz3@_6FG-GJi(U?1JU|@C9y8Nu&Nx64s&j zZ)9~@S|76Qn(RqZkk^PPiIMBIX{~HKaVZs})TU`HefGa}hdzu_<-y4s1CP(d{M zOCgODmVz?wJd>dH)FMN3&ofEr2#?=;4&90@9#odrjh+C2@#}x}Xa2Vg`p;$a{m{Ttaky7b2S2bM*I zb#r5tlm&oyvia~}xjTfRCn3nGu~?T$+j5srtTMno^+IgnyI@^|3u6dlV13eTC0ooe zcM(Ircr&*n)LRDX`wM?1B5&3UhoOH6Cj(@xFvA;;F$N#>^?fv=8hJj!A3xI*&>~uX z|8lkEfb#fvU2U)f$h;EvBEzpXiTsi{F4J?|=$SIWZI zzmklZ5zOn1aJMpmmYO|z+_A>#xynghs7Yr%b9;I#S<);61jZWcH^P*31m=PaeLwsqbUUyCPGUJ%Ay@E;3Slp@vaF07=J;8uv=fXU z28#TvGJot{y`u0x+op5CA{G_@gvfc&P#H@eO}c5+egI_hjFTPQoL?CBFurBRoUuuz zwgXM7smOeuzVdIY4j}npt@TE`SoVDIqx_)*R5O=QNZ%aM>earv{5cZZ`vrDyMQdkY zlVlXNF)NKmMC+2`N0?n@t+gwkEKXf*iSdoiSoE1Lu(IDWBu$?9pvE@X%?HdV8*cQi zQSI6tj?ENmwBYdQ`O(xMkM)n86~Z?N@}j^S-c1HPWI=Y{XD@@B$nlD-i(kXVaGwv8 zRQ^scB=fhSj8g?=*#yQTU8H;{@F8xTgVu_>HwnFlp63P7Wp9uRYNJcqO5M@AwQy@q zd5%JBXsq2u^O`P}SG&qdZ&$crVB>rtS!)+g9@0a>^h^<+;3T~RFltZWKz?iSv{d_; zQvXTVL3YuZx$&ghi<%=@;|pR+B}$W=f$1Hh6#2>T1$UDPe~g8Mg{OVUaRB4vftG4` zLRBc}?t5IliCwjP{>9xxVRY)-`}J>1D4)LX`^S{yiT!28>9?Q#=s`ua(ETIZxIe-+yw;7H|V;=y`cygNF|<$ z?eLY=5x;CF*<{Y;oTMO)>)EFZ3EGFU_* zWDOeT_8ve|(wO8G%NE`(Leq}0sYFr2p(=k7Lp(^KhSVNXr--`~a3R8g!7#JhT0i%l zTY6s*lanG!s%TC}>^B#>?B6Z^1z)WEHJ0Z-{uVDE#H|#<#djeHYJEF?&otO)YhI+x zY63UEWS2t9`nCcE(WLYHR|D&q@|!p=D{p0=*4+^(PJzjbw6B5CDA1v6$n}cRj2ZY< z3g=Xy?+sePHK6F`ssK*n@tJ-`1Al-_XKhy5lX>6D!Yc7>bM&_gL+&c|rp9R7&*u3X zX>!=jGSzdNjH@~Px8I&xu63_A;$4To$o*y4I3lP*<*&S@Wl-Y{B*DP|m7xg<^QOi| z{-6+SxgUlw*4SLMGHHLeZQ!pWw-tH-0|M-q)2TsNawiquT1;2gF4R>J-Uuszb$uFE z0(wT`Tf`WJC|7=9how4$15rvSubXm|B{GRdd%U9~ZF#+X)_s$9UdaKB(HLj8>31W0 zH13bU8R^%Qzf_nqJQ;CZG0cxXfd=h4>pBiD>X^Qb$DNuX6?^rzY(FKcJ~X}7dfCvNXbK+-_#|e zE)~T_rqr=$&KC-Y{2x^1DbgdKY79B`0tpQ%P&qBM?ehug#_GbB?-am=U6|O 
zbi*|t=bH52JV`SEzl-gmAFT3^Q`A0qeZm@c?H~78Zv}TB-p7|F%AgkIQTp$o)qO-2 zguK~O0oakSH=YU91T*;SD?%ZNm95;JE~B~^mEL`jFM*k9_>0Fadxt8 zQNQ1i`JmPqeL6fkx&b$*G-|+m4C3UWaDaPwF*j9e(~Cp-Q_-T^o!h#bi)f4YD75Ei z5@%A|v0F>%JJy#QA-CCIeC-ELK^$3E=55I6afFO|Bxw6j+SDwLZs1)NpAJb?_P!t7 z68y?V+GL=cut(5FK5T%(sWPkXE!6oirWrRo)DYP4YYpFF%!{CgzlHKRfP0oB9S;6i z+hK)qd)I;f_+|%6gvRGQ)}>LGG0oy%Gs!6~Rcj$KRd@<#x?SH*XqEshpV$pI4jZS0 zmK&i55L5V*=++t)dl`(-L_7eIhlVgnmd5zKsy~{5nP=^Zz3Crm*%|$@gaW?LJ8i?^ z{X_Hw_ntoNDTy+{ZGj-pd6CthK;S)fz6#zG2jxa%NpuA^kKUzR_~LOQs`y03|18`6 zJ1AK_E_O&HBqTW4(0CxDNhAGb(~}6)^z@jf-mv45I)=Fg<}=NN!HjY_81WTE%^y-_ z7sc-Eq`C9kk3j4zE>e0+vsIRt%fL$ANR5qiV}wZxV^zBzuW-H{rp>pHY?$W`QWEJ- z311X#5o+F)^-#HQ;%7mL|8bn1MDD>#PAV?65pzZ}U_0PyA|o>zu{)kRP6YvWV`NB= z+`b@d>igS+{Uf#MI=b8Jw~oF_4)4^H)OPeZcGkQu_%LX7!^P6`y&r2($m{g@V>&~^ zzZ{pn6l+uhJBej`e5v9@q4s^>OrK6lb@QRFjtvb-w&sfalpO#<*Z5#vqfnx2Lko&? z3u7dcI$@rkJ?1XGz{Q#VhYt7pD2-_se_;)cC!#&IpDM2X-6*Gms}T+B49bA@U~XQFeGJIGHvwwj z_GtR$d`W_zxe&Ki;A4ASih7QHRqL)47K4Y|9oU-lHK6)prN&Q(#Ns8e!D}O3}$prBR{+#O&`L{+A^^*(c#xBrixnZ}{73A<<0{W4TK6TJD$uBBhxUoC(y;6DK zkR&G|I_c1J`kApt#RM6}^$*87zj@l4AX`k%Od1QumgvYfbJDi zmyokgI(6=#?+DvIT|S<<>OUHgedISIiCq+nYF@B388?*`jzh^FSrlDtyxfZN9 zckZ!8l>W`oXC1tdfd_kexn2+vKn?B{UDbi*QtI;Pj^Q}{B*=A^7Z3(#nESyvy9a{- zaC|tUVRK`22~P0c?2r0R`NKhRZ_N8sq4<{@WoIu~oCcJpsR8fT0=2G&To#xcRE66+ z?ry-}&E)(PH;AVq>g=oOnEUY*oxPmq2KGGvhQo%Er+&>L#Qjb$Up{zuXLsX_;Q9ro zH7C>;5@>!_%|?tsr3i78=>H}6L-_M=nX4B3C7qem0v;kvBs;jRH}K=$VqMZH$OR+J zH<1(rq4#_G2aqQ$xP?2M~fhx z?X@G(nt0Q~fsS)jr8hVm#P9gxcKXyn+SB#bI&8+3{5o=wl5vr~)od{B6khL#xQDP; zC8h*+)?y2KJv4~v{NYJ1FRrPR6Y>oe1Qdt)Bx7%T27p^BIQ7%1I{%_#Uq08E%D$N1 zHuaM(V^ixQZL6frOG>o`pPUxr6=gK3XOs z>Ynji<+HCaa`fXh!^DY?A|a!dG5g7w%5^p7>{R%oj$5EUsTXtBUdf)igP9(sGsgms2<=#GSF zzyhZ+z8d`8^SuiR)hTbFcmS+u375Y1WOI6(cKQ@C*+p6q{7%_FeBP6Wgcl0L-HCN+ zXN@%#@4iD^xlohJs$R-LB(>i9N||6DjK^M2ftkRLf4RTXWG$44t*R#dm*(#PWilMY z1YR2c0FmK=Q!{n>ku)pfW7VfzZn z*M5L*3QSx$1pe34=rhBS_wd`3V=1E*(>vHSG^x{wh!#8f{SbG`?;bu}WlnDi^fi!9 
zj20za9eGRps^4?cEP@R6hxSa_*NzwAhjzucm2==dWrO23HyF30Sl=pKgbdG5F?o3ys&VN_9klm?19V#$u#K(vNdKL=rvbw zPlV*g9uGui8TalCbwtVMZxzBaT*ZB$rC?07_?9LAr8>};P{kEWZhrb}rlllqa)7Gva#gx=J?`S)m{{0s~-MGDYj=y3r zFsEgOPv!9b4aMnj+Si6_xJw%Y6Zt*)Gj6!!l%s9=TP-Kd3uJ~x8XkMcsg#G0=-+oX zDEB5iZ-zaYB^$yNbKdNH7;~~Ee*6SzM}LBwWys%cEjAc&0=nv?NOjdj`;WI7GMSfg495|>d)(lZ_H$3&Z7ZeJsmzI!3x0f0`+ip;FUqmR8)1)T zB3RNhfwFJ_u$RsFYqot>*yr}s}@rm@jD8-M5d1%Lytlk7QxwiR2WRT zcb92?+7O?_Y0LcGle7GWf5`|tWVk!?E?=|1wGzN;8{-oTdSvNSUHCOQBf*Oz@LcSg zeJT&=>j>GKx_sEg)uCroRUHDQ2~Q;F-W!CS4O$p$0Y9dnFHZDed4#6+b9p#l3MJb; z8fJcR<>OttdhaE7O)lR37PV5{3cRWqs8wUiC@<#lIak)(7l+nCWS_3mOO)g%K^kkWoEMQMxlT=p#$-!^T zApg4LI-M4FVn^c!RT?6}+?Y01ZMZaBsLy+9+EQFe(U){M`dwi@F;e%xTrB#zkP)Al zKIz7N+4*uBUL41uG6OpEE=&T*ZG^#~6GBCRyMwZ6f)iUA!}l zYldERSaQSWq|j)9iASY8HaSqX3a5hjF(cNat-mjNwaAh;bh%wf=hhM9^LaQO?J(B4 ze|s07mH32=-s4Rdjt~4}`Pp$cAoCHD{TXd)vEW?-h7?RyjZraFit24Nc7Mdzc``S` zM;UbZF}{KiP7hiRwCG5*yc-_S5zXV{mEpA%e;$6uf1C>u!!&&(SclxS=XB=wAWBxxckpJ-M<2iG-G zDmH;a$SmDVt!|CJB%kmdACWH@#k*1H1j!U8`G!@;%%_#l3k%+HhusoyQAR8{Dqw^Z zz$esv5~?`d+To>}YUGTQt7;4@NR1K+HwM0=y=05+i!{Ed{D#3M*pBcujm?CW@z==G zjK@B%hD9_WS=?Eg66d;Ndv~KWUi#!y!4~bQZNn)i?Gyq8Zb<0S2d(Z{ zyC-gi(=Q$S>K5M>3BEjT{KKy)JKIQdd z(XOm~JTMb@(L+O!tLJ!2V_B(pl!zNTEuPL^C%ZGd_x1}d8S1?=3Ate&q;Fm0jpjMz zU@R~0=!Plw0?P2ZFo{(`t65>JZLsQG5{w&v&9b5w~r7YXX9i3e)+jr}o=^En+6rUG}=0Oq2`m{gj(Hs!9gA~_QOLIC~M z2ZSyjYe*!Pffw-y!8m@KaykU|4S#=sm~y&KlkzV#LVlZbO~kRL8uG-j-io0Yk}g1* zY*V9e!>3{|t>mMFCGFhlTWKMiJ=O;`+_S%p0<5r7R-WGNe8G6$fxj5>`|=Te5Y5A$ zA4X6QbFZvoRS{OeZL4EUvo07uHaHb>=30?v^Eu`n>f*w}^TF>$1AD7nzlUWRh(#Ke zq8F&5ZbYAtLBKHMC+4hANC!o}BSlG?Sz3^(;ed4IH26&)<(J&B2S}PMed*>N@cIUV zZ(hJ2(N&53=kc?ao|cyQLrY%I{R;*G9@@Gs3`@$HR zuvpH?4Q5?cH;H9q%$>Nq%)~x9$w!RFPI`Xk#lA!ug~Q0*JZ6c9TJcZjk`}puywTGa zvW@M5gm6F=aqeD3+>(iky-3b`qI5}PK7O!U8s4&UKgP)6Ns%W>d~?i|5bA{YzIP1s z=IQ|KG`U?cQ%1NIeEfjSLOLYv3uj~OU(aA(^m$?)kVwV&fpHgEw}_2 z+}%lV_u%gC?(R;4ySqzZ2oN9y8Qk6d&3E6uwX5dm)TuLj_U>M*yBBrqtKnW}M<{Gz z`Xk?Yrgqnh-0e#Q0GV*h8>YeI3-x7REWNikH9P%BFF8Ki5 
z4szP<5e?YTXVu_eOT>K<`o}7~weF`ekdleQk1fjt9bbN`R*Xg}MDae6$YFh~Q8-9; z&$U7hf6-;BJ&An=u46KS|0AH-xb3fhqHV`^;F1CNRy!QI zs`3iMBMbkCyVJ5Df599&V|z4#(XIR!EL?vq{-jEGJZ%zOF3&L#(DI2)sU-?sF^73j zJt*3nF^19W^w#JjH5iyCSB!RqkA?U+Gw85K11_OU&;Wd~@ z242}h<_LoFYMo0%S59gEPyO_2J=n`|{aF2tx+D1s4Gig%Qd`U;`Qa}^l&`az8i zh(~D7f!*rdFSeTXV`F0izaQ#ljF8D~jWFo0nVtEcqZLuC=I?^-TrP0hp$(rsX?rlP zIO|hW0TqR@3n6M@+YqH%^jahhN1qE<|}-l z^nn3PQ`&x=LAP+i4dhV2cEAa6{Kl8~&l!ZbN2gO~@GM@2Vbe1=P@Lq*u#2k$7?{2( z@q648ww)ck0(Q{FhTLMP@E?^)If;>0o=`y)fe?pvDt)2o-{n7mv}XfBnSlCphcC-3 zl-1e<|IlrK5oWU&Ht`IVvvqgVnF`lQs~dM(tAc@tbepdMBB*o*q0RmlYl+H5`%|`K z@U;~pJEV~aStiBz)Kj0|DjO5OQEG|Z<{f{R9G>2-~5P_KMK9Bh9F1BVHjMfbG zY2R{3(<0O}IgZ(n<;*!D0{_o@3(=K^R5&US$v(}(8==Jh9BfS z;;qc#hcJS01m}_#7p_%L>Hw=ms#h|>n_fo5Ln<6m5p5Vl7m>TLP-Yp5g*C~r!A=o^ zDXr^YzT(h~v57R-ji%@yuFoqvAR zbNZx`Kv4ooFc|Sp1Foi)mPIGASe5MhFoCYNM<;LIn81`~avBgAs+p$hIB<_}>4&`K zo9_wqj;Of7gw{_LR)h4FV58jRJGA*KoBS#mX6|LYV1};kvXzu^>&=xV^BAlZ`qSST zm2q1OMY}IVU;4uy+5*WF_dmQKNoGuj>WY~zFeSedO3Hs3eln{mcH_&SdiPzU@7`Zk zM<~OlyAPXwG=Ie=b@wr$z&gjraSmQa{^|!+D8`lpeT{*C^=AG1J1)qc5C4A^KBkyS z(B#=+nf2(SKHF!iEsy^q&`C6@PTeCP1Xx0=IUgyN&FfMi6;B%30qFowPWr);lnkL# z6q63Ro{-?i+_@YhAWO<1-qTsDe}y~1=X>ahgv;-CjNH@6=n(VkBy+QG@Z3J~SD*^qg<=R^FZOFw=& z6A{}4!7^J7NR#>%A6Hyua4G*6apL7phx9stf^+MA+z&d{{%i!uPr&(swDD%3fK%q^ z7?Rm1gzhiERHu^kB_lPOR;>e@-rhF2kWRoU6_WJQkada5+Nb)^?ABo?GPryYzO-)x zx0HKcUR_R>YJ(3)uVnd=!Nk-Vt_}mh9>879ykkcyE!XC#vv(k>jw7l_)`5vyo6E~v zt}*3>Ye@sw#Fh)fJqT*E-t0w*q5D+`M1nCm-1c4W`i~A;1jx{YXgn zS6M${LhEJNO#Ez7RKaDHtNWqwxWtulovs(5LgIcVDy)a|dgZSJC(_St17 z+9gzxwLd1Hd+d8&_~o5y!g|EP-^2sm(7y%0FtdQJmLj*6k>({x*d0?^G7W4ykx~#S zbw(eDm44@d8=zA1RTF_tvvI>km&X46a|W;qA@c*O5SzXrmy{ZcwlJ7)+$He|&?9g_ z|3Dz}+%Ir+@uXQwZYC?jOPV2)ANlf*-QzjwjjYv|;#|pMU9D&{eW6(+ybJjEH*NV~}N9t>AQ-tbm75dvde zg;gZ8Sowpo_`bqx4yiI5)s8OtXXeJd9n2jRj=KvLc%L7(Udpy3gP(=44!|Hj0@E++ z-}+KqX)f05ho=P|?7W_$1S&~anWkR3z!ra3(oN#KQ9!BnQ(j8iq@f&09T;XDDVLbY z<_V(p|Gp^|!XNv%Ey{Itz0qdS4C-#)c*N$AFe!zZ9T4C?^OE=c1qDwcIi=u*yH;Wt 
zxGuQ&?ZY(cIw9X$z7S1|=;~13#F14EhWmH5`G8>@$xfZvnTJ;1U6mRhRoDyK23VPi zxWI!}feCo2xx*>!vj1D2H=v|oQ>hCXBJ@iRbkyRk6kst#urPf_RPKdd6G6qgFV_Ps z$-q2G4;p?=CvSPva*)6Zz}f0M z?~80Nv8Kge+FfNmw^fD0waCqLBb*Q*d1$SD6cUO-NN^=48d=Jh&G5aIh5W=jaOR3C zeP4xO4G}`ttq2KodGL%GLm#HiVKB!JIVGVEjN#~N0wQl35f1i9wa~((u;k8ZpBCFy z;3MT9)2&5>OHey;E{!s1Ut#HTBpb66;JTzl>B7uSk+SPuP?P#1oN%6V^$}eiQhwQ6 zt+YXH>0CCgWeOH{?NpUy-N9b|g;v(0RLqosb2`g|q9yl6;G6!gDfIk`61Mj~KOQ|D zgZ|7?fz|X6BD3+&W6(EyS{UWS*mC_w^1Er~Qe%tW!jnmZs#lO2sb2TsnU=gTiUj(D zsi)h)mF|7-DU;P7j`{Wg6iY?=+{|FuNscm+;#0DwA9~|X*S51juRgoDE2A)Soz9?( zY@{~gvi`TDu72$1O5^aMPQ24M?jJ7JfbR|F?ZYPcnPvGHb*Kt!RL6TP?cv-HNNzSq zZ#6V6aAN4?4147pi2HU8Tsu+b|1|pX8|l&#FCJ}2NUG=o=FVml{#cw@!;2Q~#F&Ut z`{1S9<~R9i#a!sO0Z-SNKSBkyon4hLCG#^SS+zpJZ_Dd^cvD(z+h<1vs=FBk?>ow7 z@n+S=aleHgKf7F|pD7{|j8aI+^`S75TD>fHZI#~@rt zSsMDK472B}emudt3XQID%yq2&yHTlKb{+veY|Ya(HlYpsN3v)IQjKnF6@I8mX7PK{ zwn#wiwfoAw;^acO&)%C+UsD=w-N<@^t^O>V=hnl^g@m3(h|yYXN1N6s44)T~=XA8^ zcw9!xo5EthJ1$3sHKC(jTN^a+8;t!cfjr+I#?diN-1?2M%;;UUOSeYkiJMsc*tTGc zNAR0nHWb`WziKBf&%jsGQKOS{m;&k`3uN+O1H{gi=p~DZY=o?IfScZvJds|%1MicC>VpK~!xJxVrOALxk? 
zI)#MPfEQ`eFwp-aba}&NdXR0S=Z_V$U%7pPBx6Op#gnvIr&wpSyPI@kvsPuzAf*9y0dTF_d!U*DwwxyO`VNsHf5GLqC$+p`D zL4febL`Y$Z1#W4exWth^eR5Y5x<{aMPtwhOlsi%oxYKIB9=B;b~lPF>PFjdc`Gk`k_le6Z7t^jjx+$v5gAly`0~<$+HJ zoQM$rxf?-G2PgXrb=~@=*h(*57~ql|S(1t(!%|mQS97c2N$tQ3bBh*fS~6LYaIgCW z8S2~R_<$K>8>JM&>n!VtkO^v?1|e$Yv?dJX$%&doP|3SM;GP(mHU_!42#|ba05Ngo zB+u5KZrFjY8h#gQ-$-&GQya-|I`5B(3{OlMHx;`GF%)m*Bu~C4_ThX;tWyQko{kA5 zl1g}Ffi@dzC1daddpV6jwdJ3u#ZR%$#0h`RF{+!vD)W@dv2S^o8J_UoR!N?S&Mr!w z;Y$}qAbhUj)L z$dmti`szx}GUni5(qZiN*#d&hI;vX45J zwh1zLy6N6E`N3d-?J5z6)s9pN|Kv_s^B6&40d~B(^gzdP4K+1pTI!HpXs>Ee}V(JO}$5IR(Jr zgsCXzaQ}2P9p&=mhEcW7f)7tpGCunpxQY}jKH=oms4l&n5v0hC?n*d`MHKp?;^e1W zhledD1M$>HU1Rq38O57b@{ey0c?dku3)%rWPDP!0UL_wm6cB|L84bh%eu%7Kg_?}BfeUaev9N4D~xcs_EM%}m==!Px&E6s~Fd zb<^R|{c_tJ?Np??x>_shT8|;(BSjqQAq-VDi2=iK2S4hanY^$!TA1|#>&ZHu^$u<~ z--PDX^wHAGnE)3T*KX_v6q$H&9asW4pnjg~V$eCug8JsiI0Tz#+trgf*Q;U-Q-{c@ z*-W_MQPLk{d(}*8iiKy4KIB@>;n!fkp|Y#OaGjwr4X;PnM5hHaD(;ZBTJ z`Z*eddP#=4`~i%3-`034w5nZ?7Vs+TPS&w-&VjZdyt-a(+)!+cw@ROL zOGdgH3$1X1@kQ(2$H}PK5fVVskKwg!>f&zf|lA0I^T`offGR=*hUL;@e1WFKHcci1|ld7(sM&3zI5tr6$X8eT1BT#GL6#2(mwOo#od z`JY4q*`gD;%aSi-ogPEey=Q_wY3mITMbO8QkDMH+n1krUHtksXbB$=n=cCpJQHZkN z3km#Z!ViBrR!NpQD&fhRI1!(BI^nTR%oKH z042DP##e-f-&XaNi@0R(SP9;!%6QD)r)0U}wTX(LYO z)*e|x;4;inGcA%49IE%u<<{W1u^T6~S)#MttRxX-kY{s!Y2pMTBVi00P>qKSnGMA2 z_8WO3>x830FJg0zR4(ESy~86vo))=&fGbjPVct6r>iGjEZ@8Y{H0BF2`-5vzWMkao?5tF{}Pk{lDa2aKJrpb{Jo?863|Bvq+7ZKF^Ese zbh;VP8wYRM8$bD*v$#R@8SkuN-Xifc8n;qa12*W-WpP`#AtD4jsJ3|zJjbehuMi_r z&_w3UhHYf}^YMw51n(6oL-g0;bsW1tr0S%>B}fITLtm{F{~h1~VQ5p6JRss+@rio_ zH4miVFS9Wxy>EleS=6}jPk zA^w$@uV#6qs#jE}IP>LA_V!cPys*W;oy!Wgiev-c;cJ2(6GXlHEu6#Dby=&Ik)YqD zj(TG$>az`Rr=3O|zPRSvtOE;-Q**kG8@IE85V_9WQN^uryT28j~_Kl9InOWW5rR!_B}P3FNqhC$bY;c#qU zrYXu2zgFS^G{3+{WbG8EFXzb1I{}vz_eX;AP#0v&rTu{*ta{`F7q0ttE15)jvVLuA zg+n5achYRRXixOUfd`t$>5*nWHIK|a)xCCizinpRnLtJginz_7`iMd}Bb57xt>-(n z{>{L zmcUcLa?`=6Nnh6P;CG$|CO2c*iyJ!yqvGRZkqEj6q~&IL%wPtgd_;@98)iL!-;ai- 
zhkJwK!tkM~!ei9L*Qc`ZDvrTxftihqjSK>e4Q)P|?=R%C%&^h<-9e5z4S%)%$cf|F zU6YMze5@?F5$};$uaLZbVb=I@Z?xn~knr}mwWzl=(RBPA&~aScr9LNRzesd9!Gj3m0g@rC}9fo35MB*rvuaV2pIQJ)Jg zSI|>npw)f$T6U-BG0VYP_wqqAh}9~yb?GnUV|k-{ICpNU+sN zr^msL`6%AZVn0Y*Y805XF*ET?K)f8mH zEU$^za5NlgxvrmksO_^Zudom>k*^;_5ye@5DevX5m4wiL-+8X-! z%_FO}uSXb9_AT%(5kcU{DFWkhd2FFhP7G|`KoSCVp?q2&hOuyOG9vya3McZA^L4eS zSVR#)nEuKo~NP&*^%%R^|QoHL8_Z@8Ccs*Dt7N2LswE)~p{?f~28(J4;4f z6+%f*=bo#D`AW&QfGl(b&s-O7Y5jqATh-SY_ot_c!e=D8ze$4!0KHN;PXbyRMMi~& z?$B=gSeNnp3Lv?3(0lMv))ir^5&0I1wE^f}XABjkeGUOIP+xxuT9;UKk*fUEA8EvC zSUatQgn_7@UiszVaM<&iy>j$@;nxmN1md4d(;>nGsD>A#itSa%FGIYa$0wZ|cOgVcE^xwaq#oGs`=QMlz}QO*^95Jr8kV2z zP(Nbwz@z%8(|aAbK`l4l`w=wm*^@K9{j#BPbG&ccl@~HJ%RB|_C~sO@w^%Tn)J(58 zHO%M28O_>t~H0Ut_hcye%!gF^m?qWyd z`b_#!lFjhmO;G1SbrUi0=_L(_?qEeBJT3>Wvs+J7hWNLg7M6$Z0-N%cNPuhc{Bb z=)~>rjo{w(q)%m@7JQ&a$kO+dr9TZm{3BBQ??(jMj7PDA-V@WITH#1>*E^m-@du#j zSy);g5oSh`b=$2K!_R1h9BI?Xg>x=7=?Y6CCJj;qhk>WSv8xqy7NswWec-pZXaA5F zS(CZh6a>NiB4XBNnZWrZrG{p|kK|R!qcP;iqT3t&2hS|Mek3Vj_?_kqS;R_RrIjLO zARvvOxLIDC%*rNB!(~Xqvu^cwhz=y165prnht*5UtiN{rhq>i|-i)ui3#eAN$b9P* zWli0w0TTu=x9OL5af|fhkObM%>~@BEHxbtPJ-$$N?T=XUEAMIp!K;8+L+i~j^c(7* zEZ!$T&I4I=H}Cz&b^jq;#>bc=B`ji#nB~7Vdy#M>tqMtRK>uz5Vr7_)RR^y*(r`hpgw;}Q(RgP%lSp%t?~EK-q61nICFU1 zk=fxh0~E5Nkba*Xpl^FOs$)NVYZhYSHPCU6An+_KjjQ6`TkP!qTxfNyoxg8TO2PKC z8WHEnV|!`NhcuTsJJmUJXSswKd{I9j4(=rTP}c*uQV0E^=g|p;G$HL$ zh+gL&NZ)u=Sf_Q*@g>=Yc0rW44qcx#b{rOci(74#U}OwGf5Lpt zcUe?BHPw`ff4uGRQn9cq!cXQYshP7T`u&){UI+(={noIP!imDFFHhNMoJH9E9xrz? 
zn01x3~`qsebtAvXuakfQVwD*2w#FdQSBt zxZlVTgw(TAmB&I;!CM+5=ik*27pn(5s#h~Y-<+ba&c>(-oKU6vYwW;Ajh8g5SF-blJ*^#EFCUFlV)lCpoS=r{cy z>R!TTmDp+wgt&h|`NICs17>y~g|%#AcK#r0i#NIJggv6%#aV zb*}B4af1G#_%Xngi-CD%MMN%HqPi5GGtXCEb(-Gla5vhQn5O{8wEA}v;e5-iq_9X( zc3+YC6E@`#VldNm4@a#6c>*xvgxAUyd+8|8x@Lv~KgS6Q_#1iAF&+}vOVAUMtd}dy z>{bB#gXHn%OY3mToop|dpk=sGU2~`jMLWMa3IwWG z`&92&*L9%K#~$#!9YL^<%p&(xqR`hi!!9d^DjCN&Vl7k-S=lPLyZRNv8FZH|_bkQQ zdDM_+XS`%rsBkBh?ZP@wti^cX^DIVg(%S`>MF4si&q2d9yjYvT{O?40Zeev;$E~D+ zxGE$5&F9VIF6kPlWc6(>4ubo)H`@lhrAZnxzh?9`+OvP;LZl9Kbqy!-x`?z4^qNZ+ z^xhBD)pgDvx}xVj3Bc2~#ZLA0?v#(gv_Ax+C4D1*$YMGoriMmTPx1$wE}i+G_X}7}Y1!8IeHYC> zSsOq$dN%5^NY0zJ{FCqKPt?<4x3if0PJcAwcpUifuyJfx!PC13;aGd3b-)2pJye=5 zu3l%6?30k2)Q@1?(#BY2t|-{VI1*lmksj}xoH%Hg&^Voe_u|Q3O+Mq8@1!4e?gwRS zPN01oV3G)+sHGuhv`XOzD3H4xFlrcor6N5^PItyCCoc~&p-M`9&YG&O#c|QC}!RFbast%8+ zl!JsG0-Ip;zR?l?#+vE@(jP^B{)tCfHfRbqjfEVBd`RVIj$ocx;|_V!vMdDD3bO?6 znw6N|UvbTfNv%`^!NK4kO0EhV>arq=^d?vSn|cEm~z$ww6)Q_B}08PDHTNd+=ql`Bcl`1r}! z*eJ*Afdw~0OS*vrb5|U%7-|!tBi>N_jwX*@7$^ds1OL7g;>Ps+EbAqeYF{YcaeJl5 z6pm_m2G9~|9IPR!j$Lm$*WGUQ4A?;Tm``8m$*xW@wV0ccCye$!&An(}z`g`jr$3VU zoEi9h2QI(8Ax>#IrVhGiu`?QdDV}^1oFCN!Vx9E4XR6rVujfd%8UBA%BkmgX)K!Z7 z>m^*8MrKb%bl|>cQV&Oukzp4$Pm<~i3LYM3_-_ZSn4Itw$el<%X^tueZW^B%kqQJ2 zhZ-0X<%>qC?w}p{o!f~0V#rd?nZmjsBrSXW)IhmHEX-i^vleaN7W-Lp%RDY~kZXme zB|l)lrd%P@2GO$&8onS=e85c+f+`5xpBo1WQMa~(c4hd{Z7am8Ngqo6hEImWExj6o zd5sWqKL+8Ncw2eR)L;2n%u`_(CntA#*LR5+O!CubIPDC#cA#RT)*)kIKEyWQtYO`tFx-0k1&U`e=+$U%!2O%QF3g$H`^1^-;=A(10iiOKCieB?8pT?LMh3>AWYKlaI~SFZ!o zo>denH1wl=ap>qzT6S$I+er_V?nBvA7?HRi0`_-PRWeAYU`UNOJm{vhRCaEckB%e` zz!`ZANsT)#NfrRbFpy28ngW=Q2-J=7w5`>oFHO_5P&O(a25dO5F)Ur~|Le+Xw%|)i zjmaYuT#{Hidb7Ri>MPV`BmZqlAnffzSr`qc!Z~1ZZ8st2{PlRSsDyUoRPLNbFWN<; ztK(Dq;H*+62E8(^I+y!e({S9T#Ta|bZ-H@SxVfK$j1Kg20sg9G3j>u`VZ+g9G>WKc z=s_V%=Ljlhk@F6=q~?AJyhzIli=KHr<%5~J!@@Lvymtl~ytVu9O2bVjiR55+_j;!a zrP3I40v`RhS{&YQ_rB5kj-!$$ygh`f6*!I9(W@2=3I3jcT9>8a9k(0?T?Sl=-1v&f zG(rr~YW5Odw;NTIQW0^Gl;$;=2>C%~R1e{FYE9=-s%}Kt0Tbr02p)~d`hVCkIBvhY 
z{0E`0Nmn4LV|SR1bC@OYILPEf0BqUX@yCziFq%AuFBGS;h>t|Uvqxk7Xuf`ztRwdb zkACcrPcB5yS@pQX4&OYo5+XB1M74_Ys7AL+*>wsZQY*txohXzI(n5yaQppPEIzu@o zK$pB33Ry_yHouqh&>dSsd6Qt3#)UXW6Boo2_*9kp7L+#nur-Cpwkrf}_p)JIZq&`T^ zZ?`R;%0NI?@0}OI%yBD(s}0DR`@<((j78W*05}tc2{pB~Z-M%`x>!0fO<{aDZCr;M z0SX?%Y(H+eICfwHGh!P5mTf`#6~JB4B7Q48XENb%?B-TlECGBzhSuWw|nDu(YVyL7B_CftCx|s&&&Y*=Ze^u%1^AGt{dM8cVd+Ogm=HL@cL-0K@lTLZr znRxSqLyXa^1iQP-9A8~}KC(4SBCRGhe4V1R0A@kh{mt`+s|xY6KvJ$8Re5>5*2XLK zxO@4QV9yQ^kGmW}qus6gU`dz{!iQ&pID4-29? zumR%E?9IccfS)n`#XK4zN|8bQ#qM0K#g+?Q{e`-tTf`2jVpwLmWj^504Jvq5e#Ul) z)#Qu)Oe7pdsSpJGH4hptmPaDd6ryl&nVDi?p z@x3r^+HL(_!byCG<9_Dpu6xymuAUULzcLpa$1haISw@S_2B}fg6>wRPhg$@}Czto5O?7!LL5}Rg3_*dWz;o!}&DGAmAmt>E zsn4NGa{T^^=f=VdqwqUaRV+J)~1AWjZmS|LeIgeGu;y#!zkhQ;eoa|rZ^J4X)BM4>u;B^(;zA&?q#yBdf0TFWV` zw9@DAJK1W&fa9?aF~=E!2l<90+kY0wbj-f;^T(1CtMB7p8Ofy{#EE2j!(?YJ_YT4U^@RA*@%prv;Ow);AF0LeczoV zS#{&&Hpp&a;0HL_&vzrigF@Neg5D+z@Fg@S4D6ut=}k7c-Tb2_*WLeCB#+M4vm-n; zTz|3nF4m$gx|7*Dx0jgSZ~++!WfdFFrx3X(IskoqN8MpT14R;~!&>=DiNL~qI`nvh z^$seO3&PX%8&rx>_=C{D+^E8F&aS7hWjj}4yE8*@C^337AwGr20*j7y)1WfO-rIsH z29O1+?vQ@aFt65;r(J(@WFEPXBmf!15G969{2@dQ-x2*HC8E`_Z9nO~J@9FjoP{RB zgEWO+7`KU#Wj^zYY>9+swoIg>ISDD<_^%_ogWylNBUYrzuC~~PjWQ=PhU%K258};p zEzB^x;{3e<6Z4|#wonP23iiNPH7l-ob)Piki_mvD1S`hU?|uD&Y1R!LZP~Dqn66)> zIS~fiRRpg_uI^^%y{-dK_EBd3xeetSv)X&9ew;|0azT#BkME(DyRCCcxcC^zpYY>( zoQxqgoBl)ojm>D?9`GqgM*v?5x4#N#S@U`o<)JLE~5dLu6YmL_KL zJA*~f0S!?XA9XtT)HSFYP)DI01L-&hbu&#sb)q+=+@0LV;}6`gE}Yi~JE%IZV3TY?>sScFO-6vF)m0oP&UY zZ+G)6U-b@5@Ym__kupQ@rn74FAi?{EV#W<(+q8C!1iB1@h+`q*W&c1!&Fr^?hk*h- zx7zq3A(3;4c45Ersqh|J?eM$W9w!0t;VG%Mf_Qm-geYY$(QvNiAv3@Vrv*t1i#^Am zJC#Jlo8wEZtLl<1m<9mSwABc)v1RMwx;R##{WvK<8|8gCom7MH>hr46zt{$j;ql-d zQMny@u{HPg%|qN4A4dpnZ%@Ce|M>Ko7KGgslB{21{1%hc08S*MpYQM2hL|#jlEaf+ zMW<-=L7w!rBY`0{-V?uP5PTq`jL-F49Y7bc_p_EO2k$d_le-aPPkeg4y%yX9%KU@b z89=?Seb;LvcFG;{fAVVMJEP5rr!Gw)ST{`IO7FvimEaS~ns)Qc4>Hh=XW{e34E4s3 zuti?n^J4J~Xs?CRBc}1I7THg(`kw46f$Hnr$}u8I>pu7&Mf3Ss>9Dl-sxx6AoTp_e~blFj8$3 
z3s%Stq}Efx5SDgcv{5M4VFYrveZSIO@S?m`fm&WneXoc>8|Uj@AV;7#%CcTHy8{*@ zv<8EH8j?odg{HLn(Y_4TQfL_%tcM%?QNdxsbg6PU5iqY7v4e+w;U6c>5`AA#B1&J} z6lqG*;5JL)xm4y{fW$K&;^fJGexBZKS9Jn>4H|?#SD;S*zbt?(43vauDT)))qTXz= z3Y$>ouuxc;8>PkZWn{83cF&SDUrRn!CPQX$vpIAz`KIL!sw#-H&dR(w&&|8{?fzeX z(W`Ol4Z^lH%h>1YGBeHFp@gkKAH0@Sak2-4xbGr(LY%7!9QLnCtwCTS;vP z#%9u4u*4$p*T>XZ^?@QoEiH(5Mj8hqcGx|k{uC8|`F(vkTb!H6g8AM~l6TvvY=P1g z)YUW1vN(YI4<*Q6fx@^=W7LtZ28!!s^uDU4}b0YdD38;?BcCYpy$yJPH@1h}@{Q z*rNPKgyae%-Mrr6?N6;?;CaCu-O(jv3Wc(Y4OWW2d;qyx!7ke#;#Ix2F=DC-W5yF| zW=FmE(_SX+M7uFib&(Y8Cdyc1=oq!QW+|dbqCEbE5ydS1Cefx(!z||GeWlTimK_uk8RDJ> zgILznJ&Eaqr-fyyf5D1idC9g3MMglZE7_jr{X*E^Iy4A9Q(hk0KudbK`zcm*i|T>) zFZSvTbUnXj+j(t4BIR-CkM-zS+F$~KZX+E~e_f7?6T9{yrbP{@Q%|ZZ{8G$r!H9HP zz1yv^#tH<3qioA}eirhYovzcXSnsu9#Ofew(iboExfI05&Zi7sZqHx&gdh}39RB6X z$@EWE76EyUv2yqEzg>4Uct&0&!ovzg(4D%o9rok`>);rAe!EIB+Pj@YN>TRmUC)$d zFc}h<^w&>7>>ZixD#K)zZT*?9j?Rl!9U1fSpd@t(iLW)N*T}x(IHMt7d5%!wr1?L@ zgADkU%j?L3I&CFpH`k|d^mIo>WB7I%jO6edklOd2^Se|AvDVnPDQ+i9O0cjENw*fAsr$^~HWFq4oYyRemxlsfD#nRT zg7{ASr4yl-|2Vjj#8*Ud@X98fZF&H>0`*mf-1~kg9F+t#t~*I#SFc9c7a@G&w*_y1 z(%90yhyol<+JNUr++Sn!P}APC}LR%mROH&xbhWNa;q(hZ46~ zX;VB3VL=Tj*a#*=8}%}U3uL9DG0qE0>XYH*Z1?X2b2*-|97`m)RWl0sF2A77`BD!L z0&QY`N4VO5R^2o$p5PnsTxu^cnK+Jk0oid9>zkX8-JkxwDV*DNEC?ARaLzpT(P{Kw zy`O9DKaxp{P)J;hjJf+P&Zw>q>bJ^jMtt%AMX}Sa*}+$IQ6kKv&4xnYp)(c%?~7`( z8x(l`Cs-&j_ED5xHjTUCqN}UK@1;~$J#UuX^gldI>zOGeHQsztC7MdvC8MC~yQu`N zUMG*rN^j)t(7C~j#mx^2)*UWdiyfu)bAT(*y60&s!VgCTi;|L406B`(5WEDJe^c3+ z$*Sepyo$@~*BZVwG^r7DXTpfctJSun8`>dV2`sDnG;ZF#-wAPrxCq$zYE)`^Kp48r zeBh5#Z(KQkfX)5+2KQYBI4qosayrH2zZR1efLXgil-}QO0bD2b_l;e*=Q9r>Vd6OK z9w*|%@Ngu?#kv?(pc4NX5*B-36xmsK3|P#->l0_lLeED1j~|bwSVd=+wPnmmCt2q0*OqNj zr3Rq;8q*qX^Woo4j*s6JTz~mE-WD>H9um)Rq0^F!)-PR#K<+N&7Fu={g6SW&r91>K@-GMIN>Hsud z4e==5V%6uhu4do&qJ{8Gpl52N2Pa%ZZ0?teQl}Bt<6oW>zJ{iCI~m_E&rqL3N8$`J z1RAGs?w^21Q?ZG%S1+?(#N{;=&Vszfqu&RmxobZk=f4zOdDUdYWTcjGx3XFGPRG*b zf>-$vV)*qmBuIbS5mDH4!8d>nVuS`<*E2O<&)mBlsCa67qjSa{SbJOiQnl_7hg2M& 
z>%P7n?yWZ-P36uqetTwF&w$N<-P(epCw8vJcB)EK7b>emxQ6ZQ8i^N0>VGvewL^dH|<)cD^aN@zJU zHP*JzFry1}*an@nv{Wqeez~cMIWwNOtmF;;FZa0&^jU!{FokkY4DG2FJ(ADEN}mD` zxLXAHvyH@36N$grU?P>VewF*7*TAo)YMG|Fwf?=oiv2{2=f0fz(yxk8XF&W^V$MCN z_zbv86o11URtTFa_WSdB%X|J%4^nmDS=5qT0|#XPtd0(X8wSD7L}tS(_{5b|pN&3# zq=^l^*5ku>t92cP4BdT%X1zxg9T09xnmDN8zOqqZahoLUrz(WOp5t_C`sb{vA?7ji!2iAKqbVV05Mh|3ySzkdzNk z_*F_Q^fFbjLy*i+edqbwXKuB8TZt%L6Ezo8-Wp*0L8d_H$bd|HU;K|tj_WxnbGpwm zBs&Fq@d7*jkZTCSwCCKh_OcL+@mpf55xY|t0>mif_1VmP;NJ)C^CY>#tHAkznuG&%4v7^<}$s!g)trxohfVYA`6Z`-BnaO@YhXDw(ROaNEGDVeo8Gy6}zzy zJ?87GBifSbmLNvZ9 z1Bch?NCrD_F;?U9*xM=3+R1SGkNv@pa=v%A*6#+l8C9y}Iq`2sp;ye*g0TqD_|vm4 zqLl%CIOap`A0SBVVynfCW)jUUkNfNAq}MRa@V@l_N7Fm7$JMs|+p(Q=;>K!h+g2Mp zX&T#_*tTukww;EJZ5!`g|ND7Az^rZ7+7`~^hyBzjIA}(LM=2GQ5`$RiC&fJ!D6Oom ztSf2>fP7n!fN}JqVd7J=`Vi)Ii@PZ0`6X9O?&p!zQpYdD`7~exG=@Fs4%a>t1jqzH z^0W%ctgnkRSg%BlUXub(38)Cn8&q-^nedjtSb-PIbcByGHn zSyUd+A zR@vUgZ!+&MokqhjrWyE==k=+RLDQQ;R2b<|Briy<9Jk)sn3LmHV@9-a*c;8Cf~=KI zBWl&o?79;pQWc z0wbGScB*VMmnvh6&c@5u^^>fvjN(l%4r={04j}$0|KT(dhe8%d@fH8V{4&i1+5hY= zK0AZ4c#4cwbl#w`Sjw-Yl~Z+WCZH&8AEk^S7wc}`^l@#uD6y5KWf>41ED`$z8Jd~bhp4zK7}Cy;f_*TZSAZKr{c-! 
zVrWSk^sY$m@2vT6x&mXQackm{KvLn;5jM%)2uiR2n3LMEv8!7pcPWhl01XE zq1g!Hi($K~)FJn&faVgDuKm#^eENOluz`or8kK@k{gG{bMjI?>ftG)F^ z0NN=nop_c(^>+ezWWUUO7@mi&h|i2-FHV}tv-0#TuRk=i%R9v<&Z}F$80q^>YjUth zehjE(38Jxr8TUElu5jhLeCke`Mpm!M7k?ry0zXjF?Z!oe|rCYW)CxG3;yO2Sf)@Br`%ID zWw%CM4%8oa6W87%(Y|$EI{V&QgAa14yFm2Utx&=G+}CN>_cjvW28mAgR!DAflM@Tr zUEa_)fh|YgM$64&o*eT^tzq7#D@bO%60K28SQ#UcSSmf2_S7ZS_9xM)BXlQ=MQ{zN z89$cx{;g_$&z?OPzPbj{{=wPn1`Y%d(v@mZ4sgTTiB`rfi7C_EDU})B7b}eDH&rPdI9Y zzj}A!D*F~Wk!9Y+kq$ZjslYp*c0%IiyfiUhNmQHsOV;T8?ZOd$<=Ls}ebw>D=I**V z#Hw>wd!RG5$Kc_-M9@N1=B3fAbtjkX7(<56hxaU$Fha=w!Pl^3_i0 zk{Itb>~9NbHB;dwRE;XzO^lU{TRsH%mDZrX0m`n*IV+S51>q2BzMY9%wj>0*SwX+XYBkGP7NBbBf7tv1!@ zb6p>FO)%&N)vo$qjZIvK8eztb3C)|1>k{s@_D}uea#fv4oX4X$)4{x{{9h0vO>q7Y zgY{&-engn5sKzu^lkA2sZ0#i2X+_jIKKmq`Z2|r{!ZvoHMDa55EGa!`-_p|yJ&;%W z8ML`?jWQ})5OkJbU18X=?(A;ar>dyv1n7X)U4Fk8_32HV`@Z{Dap0$E@t01Qd?Nbs z)&{t(q@Q(0#EFb31SLEg&^4uq!_NMlB?7g#acOn>R<4i4B}*wljCJ4sro=IzNoc(P zfyG82UjdHUFWy4?^x;CS#AS`2cP&0RW7{6aT98$^RRd-^odInXSPMnwj-~_2qQ57_&fh=LidKPn?%TXj71fraQ+2ILB|!vs z#MuFS*2#qXzN*TPXyfV_7TRWMkZ>NDPb~yR2Q6&;J@VL)-o-_F^8QlpE#%<~ zB`%RKGnC)J^I}}bK4dpHOOTBJ{O5he+R6tA*STs!vR#1Xz1b=R&Na_IaBj?!0bGHK zR6>gay9sN2K8CUjSUJ?+no%Q(cGu(CUFQ)%dP0cP#z?n^#IA zrCRZ&z+?J8e41}(lm#&C7&zR|217#3_Zl79N__KAH4tKBm5BRPkB=rBFJ=aa;2WSE zJ@1cisyzO%<+T66_P_&_=Nj=#c)KT(z~4sXS;9PO{q-uN6SP98UEUvHiA_=5$t6&Q3Beq8xp|uJ_=|E9G{!v)A zPLoq%lM{3QXT4+hIG)n>ZY(lr%%v*}RmG57i#^F*ACcoEQpeeb8 zW0S#UGlJ{l4iwz=a`?`Xx@05{Nje!kXj2dvWGcMwcjE_WttKQRqVGX`3;^zWYgdVy-@~9`&v@R4_Sh68Had+%41NR8 zO$m9&7aWyWWR_>i7C{xLr^(Xxn*sk6o5>Rq+$rMA!5@ofoWUmI#R*mBaur5n`@L|{tRB`XoZL6g1^|b*3WnZ{&H1VpWRP-_|I<8KmoaZI`bChp5l)avI`T+aOST_WOKlC<=NQYijO2qkuEUKDM48qqZ~=Tp{^o|H#`{-$sw}9)V&0EF2^SI z+?LLDg&u-g-(^j=axtv-)jdKUPb!PO>ol~9>Pft#RXGnKOqR!SMSe~YbmRA|6$yk? 
zWSq2r$cyv%j*QHjYsjKu*?GtHb=FG#*LlrwjIW#KQZ&H@nkbSrOG83AAByL|f)EaM z~gO`Hd7Nh3O}Y)D*Mksaj;L>P+!{eCHQ!`8BrGUy6bw-ApHyeqvSpM)IH5M$JSGreQTt5@_*#%p6sWotwU$Gz>X zK3r)ogjZ9coY#OKhpjEfK<4;Fjlzg*$i=0MvWiperK6tEZ;`77QXRvk6M5#;qeO;s zJUoll=6u)L%o%k{fJ@4Y@3*sIf}d{5wFWxtM6OK5ODEiA#O^7t-nf3l@D}QOIAbsU zd@0afrnz;`>o`$+OOl`As`I9n16Y@(zJk&0qoI|rBcdMQM6HX{CBeNA#JlM8-L+?RUU&|HzBH++k$$aOZeH7$#l(WyHx4VDR7I&HXE$ z35TBpOwRNQrC(HMqI>}EGI`hcOgB&ieD5^8mr{!5F?L>qawSI$53OS7BAEf>H&|BXFXoz^^hg}!mZQOi*d;brRKM)A>8PPSDt7Os$*aN7xjVlc3*tqR6~2dzgny~7 z!XX$bW~ANrJ!|P}9VB0w7{C1})(89Qcs0h`W=+L8Zeod#l~d=iu2nwqP>hlCP~ zK-nuvf@ezWK9jWw=6xW+BhriGPqpTH{>(_W>?_D>_1Wq#bnpkkh9q|jflL`ke4nqM zcy4zP2SHjd2s+FDM+R2Fbm(q zE&GK2 zg&$VA)6z83>5<%|zx&;xwXFM=a*1e`I-y{|`wa1yeJh1rJl!}ECdMP_)$8^a)mJ}g zU?Nr|m}S>#C=1b=wB||HkX_Mt|DwwX`W()z8ep051?a{!7!8FB?{jCfEESI;A#WO^ za@vepEqxttaSaEGPy0)Zbr)3<4^2j-oqhUOz%6BK)e^uV!t!9jU_@LoE-fz7X}3F>%O|*5Kax(IhTonj(-><-y*0vM zRdPx8DhG$kBw}$NPAGEJsX`(?w(WR*aUFNcY`#*vGFH z9p~YnvOk%y+TA*rlNbD(A2kW1&d!JR_U8Kb2cJumz2uyVCqK5wt4K;>fyzi&z9d5Z z9bAs|8*hNi^8tlyveh(9-sCzP3i6?~Rt)ukREhzFL@n9^^nw#ZnJZvz6r6cUB#(QW z+y@N7Jjn(jN^5lDN?m(r6DgY032vtSwrAdG>z{?zjOOv+kELg4Ma(DXnJA*2i?dLC zaZo4SLQFfb6(5+QS0fB|;C2JKaM85?DheNg4{qoeTdUl9BQ@n5vR9X|dE;8j8L`)T z=~uUyfy{4<{BMOsSgd54ztAug9m=$}tG(S%K|Ba*oxX)O`^hAjP7C?*KSffN?lVG- zYz^*&%kD9_tc9=ji+}4)vOR@k67%>FyZ80N(-x|;C7$p2%9xVrn_S@5TRzH__A^=h~ zeNg_>q6a9-W* z99PiZ5Dlz|s=~o#9AA%W?-&RZ&q7(3V=LPOZVE^GUr;W!W|TwLV^E2vJQ4_LC;O;S zAqZ^I3;h9r2e#-m3GHi*=5>hMDcoy$1&TYGAHKE?@mIMOIQejRiqn0^_k<5*WuTjE z$(R@ka6H@BGs-!Z`sf?!9UR;zu63W zTpT=3FbPehqWA+U2n#6xG zSjLa!IESryK8zrpND{eSWI@F3M-Tv(%-vH+ebKD$89)NdRn&kaegjJ~MKq$KkG-9Z z!>m%2n=kJ@5&-;hnsM`vdx!nH*<F|01{I3#%4Z@+&Q>-_c&Y5r&;HnX^_v5gICf$DVqpZkDQP`>p}g01?=2r?APzjBrg#s_itgiZ5)EJ73HR} z22d3frEBL%R&?gBv@pJ}=M8e8lL=picx^H-wk+0IaU1`?!cwd&N-h4QJ*5K1sL!{8 zA#C~QSHt|~QpE8zKj}r{O`E~{-?vWC3F{?k8v+qZ=RW-&MoLj$F;7d((?!knm^R2XrN;9S&9ko;Y>14k{^*MmdC`X%ptxC8OcNKVan z=9k7Aswk632pH`H4OYl&C(59!&GNqsS 
z4XX*c;Kd6pcISi9={@u7QWJvW=On(;p&G(l0JHtDNQvJB#r8dJ{|{z7{GY}g=o|sA zS~y6b;F)=WqDa-{_#bA7HvQeqiK!gI<8;F^(!H>NS`MKJ;=QQ&q6CITeKT-sUp%up zqjOu3VgJdwr7r4ldciZ^ou%sQ)9Ur&K(AA#XI58&)JbV*+Xofi$pt#+{Xj_GQrn0! zVA&q*2fknlu!DDt>L-I%Lt9iLs!FK8M~#X=n&t+ zXD_P5MJ6&ni^Wpm?Kk&;J%nrQoC1jUR|gI*OP!TQy{+&KjlC6r2z)0O-`+izH67ph z@uh7q?oYkc@2x={`&jc0aIX%TdBJO7yw_hCmzEH~QR}g>%e?MqN8^o&UiJFYQa`M1 zCuir=mBwhMF>0R%&!QpA*PAg`gp?t7?}UUgQAyzV~wlt zxoy3_ac!Y-SPa!o0JVrN1yfAJR#3A_J^acs6p9?vYNpraJ9w?h7Br9 z{xVG2b4sYjN{*Lbz!N+&9sYandBiahgK4e0c@>d1eeicFTFlQGPa>r3NhHs+heFx;bf1wn2%VGqbuo3UcgkNRKTFBTTX>R7YWfVYm}P$N)7c>nWX~A~G-smO!@3jN+-9tc2_;n-)u+uEu`G}V7+_!Q zU@N(;s9r@KCM^8u0Xxh=2YKM}wz+NgKMeq(D1(tQ!q3vxhTI5-pSW~zg7U2yx7BD0 z*nFSEJwP?mI4-#FTQC^w$fWI?jx09x%~ec>c}=~)yioSRmhHSp4D-Y}pqT;2O}@K1 zD_~#xy<+BbGKp4wlS9W_1peq!zF(GDJHblJf-x_$-AOfGbd#bbV15HcrDs(ATM_Um zCVE|VIbx}wOaCa_o9JNc$NZMZtc(+GK%x$BGLO41=r`#YkNt>@mH8h%QNTT^&w!a* zy^Ahlzd1LN65{D$G30h%yV;)4*f!giQgnkGdI1zA)R^FMTl{es!>1uO{VQg45mWkH z!UalB`e0bDv+;82Mj~UgneXE*x4(EkXS3$U@eb)JdvRVt@S_OH~_XB14+ zDh|8cw0d#;`4wu--QIRbk0{a#)IT&GD$|GkPYlO%EyhUKpQiH{_k2fJ(dupfXE0Rx z%Z0WH@XbS~yV~YrRhl&;rz-+ z6^zFdC8G>8BG}9o*Un?4;inyI-T%C0@e%NgRO5JN=zj9hxPHGt7A>(K9JBc=bfSsF zFoG1dGl_~5KHR8Zb0rWbe0ed zp)2lt+B(m=ouxASdG5=-S0tb&Kzyt1e3U4L_(l$}v4ube{-4PqmDLtxKWA|LMnahi zl;xHh*Hkt`B!|O3zV{_sb&;x`SIc+$Yzs=GIFheygrRp^Fh10j6S-^qASOeuZi78oP`*gf+%P3<{do0T zc;@^hBuo2U>I>Zasu{dT+-HKel9{IrSp^ z!O`f=0s9sR{zP&iqnkP^{mdPZFX`8?FXf-q+9Sm4!g3)&Y+Me+j4#>u+IA%h*AVRk z{xGXzEhP-3AA;N=jC7o;xYoLUL&-|TlR#>F>_F&bK)2bZ5r|3RJqN3H>{TxLL}6{U z$*Vpd9K=-mbo+7s3{u-YdEFEOP^`&6L;70qFK=)@8Vo}mu8|Uv-tIxy$6*Eikmy{5 zaE?GT*QaR1Rs1MVz@Gmw5i9Y_-xJIE#0!)|%z&L6dFP~RCqmw`g`O$A9;S!pI>G29 z=?VJFjb_Uq9u-H6!j%5uQGLr$=hczC#I1jUGZj{nsj+L0Rnt6ZoJ5p|%blO(_2@<| z?_Sp$D3R9Z_<1mt8Ne!ez13=?yE?nbm6==mFMO(RifL6sve|t<5p9GOlhD>!#*B;# zz0ms%@`$TA&DX69ZW*dVetW1BvujOD-t;|~n;rePImTa`>tgu|1M-(97pg}1dHd~A z5Y%HXKN&8E3X9F*b+2Yf`iy6d7P(oT%* zbFu#4>dR6Lvx{L;g|rET58(1RKTtt zGW{-~j@sUU=fM&KP94sCL(%^CEcL4&FgOj)Q7f~i5N^7~kCyaj4?p3|7yZF*(XAvD 
z+ep%G^E2?_zbK#>)Rl_F9pP!(@SRHi{II|1i;FQ+0^Bl=3B)0JB$inr8XJr!!PVl^ z*-xYo$0d+}2}F|ElX}pMf05b|`8q^gsbF@8tbXH*>K8CU&_Q6pyi;=dQPfOiT^2RPNkcJ!CAxs!PTEC;R4!#0R7+GP%*e>u91eG;};o{IjIlrwsBzTPrtl ztEs5oeDES4G1ay!n12>+kOY&rPC=+=u}~f`@9@Jyo73d+kpL5Yf%dqEPG+s&I+y;2 zWskkLu=Xp!x;>)UL2boAU)HW=YI!lYLf?I#3X4IZt(L!GdDyY*hfQ-mg%lk3I!;KX zprlJoR&YAn%7k%PN);Nex$(oYz&4_5?Dun{y}CFfu-^_q^dOSycn>1@t?#hvP#=%C z;}Wo!Tovhlkdy2AvtSO7oS(Bh^|JBzPQwUTRrN$!omNZ4*(>2?G!kyX;-ZE$;!rii z$=heP_wtg8&0=u`VrVTndBX7XPs#uGS33h1U7O&?JVY#$q(WmY%X@3 zArElVtl4ml-c5V;vBSGF5BGCdx=F*mtfM!oL~Y-a&-5K|S*JoKu$LyB{43fX8P z++5HR<|voEqM-#b^J7#w8%ZACNqvg=^AbVdmMPt&s|#G}@Dh6A8V!nqXoDnSG-_XI z$D7H7pTCgiy=zeiI<4OS))RpI44uz2P4aRM__J$$S=~3bWk+iZThpNMnu<3w)-e5jS{6wBGK2g>VFz>Q0MJ5dHa(qKX5=Veo4@68W z>REa^o`+LX8s-r6on$+ew+WmBuWrsWTjae#!{;WzgO;p0;tNW4#lMGDVgKx) zK`PzVa%`HNkdoGc|0Y&H=fv*#_N2TA9d3JmOx#WvHLUVEteIxWb;F zEwyAh;Y7~@B~wVVw||0+T}R^cANXGAO%8=tUq8UJDeprn1#6CO zF2t$linO(1UoySE<;)Oz_ib}}E2Hq*oc)N|rQm!UYUg&|G#PntqO*0BQe*r2%5>{F zM2ELG(u%Z!dC*dw@_&LJJPUHjK9a#n|IT*>C$=;L%lI$#O>Y~;3T1wm`rJh^bhx#N z{ear-Bf^WV)S+vQ0aVSz-(^1XccH3>XL0O}@bgs5#a%t zNT+_>Uv-mxqJ(SC#(s0rf>3oHw=q^J!oVmXc zfB9H5!rXvaA#VVaDJaE1h2^ek@jOkYmNvz29KZ8(KqL-uPLO9{E%At=jOffnYr zUUx9a zaajx?J!jh3#aC{}GVLrwVbkPG?WlyO`d2mLmn=kY48rp|KM2qHsPyFc56=jTN97gb z@s+%PHWlnd6VlDm5V)u0|nVpDpVcnj!N%N_n_J@eSSE9 zCZr;R4&#CMTkE0h33sxom>K3S^G2H%8MdAyS302udAt`KHSM0~vWfi<%J0zM0qYZf zCM=Kc^Np4*I$I+t*4j@m0Ti8bHP7Vad@cm;VT3Dv#KL(edDR3#)i5NTL?PM`p4?N$ zjMwoun0U~h-+AkIh5yVSf1~m$n^S;XECztl*CGf8pIt213bN>WadMLb6r}=MGuirt z^6cd}U-anW%u+b`J#imh&ny%V9v`6<^52tuP&ezk?2D`3ICMPkg!Odkjw`hk4U8Lu zAb%fZ-!wmYb4Ynho_AM8^M(rUBurY z0-K9z`KAdUC;fJo_^eeK7b{Sin~>+ko>?Oo@exI;^c-d{KOqFQs~&bTTe1MZ)-^^m zLuAE%;M{l5d>|0O58Zdjuou%Q-jzP!${lxgi!~V8k2Fj-V8&@D#5p}H6p$Imhoq!M zA9TXK4E3pD#Ssm+!M*}MgA5`Y3BO971&c$#Tc)>b?RNU<3v>F%@&)~hE$z>~W>=){ z6?y}4A7PIS&OMYs!etxrn^Gz|1Na8VwY*-D;9MuEGq^U@!aLz>jsfeEAU3_$y^pqA zo+WEo%K#3=+={OEFV^k-F#&2pSz*~_?D8nV?_@(Y24f~*HfdHOnUmcq@rEr}t~gnz 
zXIn6JxmTW9bz37iu-%vvCcOTb;mZ}rSI)l=$QGe#D(-s;Lfs)9u_%A$z9NWdO<-iF zAQST3yXaQoz>u4(eh~^JBVt=6r)L@W`>Mkbkdu{iTs``q{V82uU>%IIs!c>%^>RRI z6o5y!SdSj6>?iPtCo@C3gsjhq*?CU72_E;)*R~;4v6RUy9nm%JJ85Jh{K zc0FlT@74mEN4+)*WI0=?Z~VHzD+I}OosQ?m}apKGdUK=p?mhtU?$qTpw^7=p7#?`^Enhb(e4$Gd4N*Qp%HAB!p3?b=D8JMNK zq5d(yh(c29dxM+6;0GQWhUmuD3U;+$O!i)bo9=SLB{?U>4UM5KlUQ z8WS$u+a{bH{fEOnc|re#USvuvyh=gY6qUB|6V7Gfqbf?d2;DtqiyjeVFYKl!X%hI) z<%#%pkxlMg9}__Fzu&RmL?-RV z35B|1>-z&tC=RMBA?{)w$Cz*ej>*%F>$QYjSQC)syuCrZ6Qtj*V?%{r-)bXldm=b4 zD_GeW~lJ>})6nYF~QdU!LhLS1v6#Fe?gnl<8*3V%lGW&xh0 zo7qRtch_{LsRGWDZq&55YcQhj2&`HWSn6EFnp|Bka97CbJW&@S5yxjLb_GnU6jFL? zJ!WeCCDp+Mmv_P1Zc_-1*$T_wDI8+8(J&|)MZqv?`wKfcd>@fIf96tij4Am&4rZj8S%%H24m zuCl%c7hCQ%;uw9fI2Rl!tcBTpBwmbzX7;r6O#eyBaQZz~p&AiygRz+RFwf8*vz_hj z?KDFUQQ|z^83{@XN4mrteCf7@s{SIdjoQe@oPpEoAxM3;I9~TN!MZOprPIAqZNF+^ zhDOKkkr|gfBhKJ;Rlq))^2({24?07J4`mMuP$ema`$V+zsgqwoZO0d+2`?ZHdAns9o4c~8^p45NzH81 zX~>w1qFFW9L$k>3Qqr#eRLfl$R4k}b)tK}93s$=Biqqe|llKhFc3DaMD+W>+=B!HgpeO&CSA>pnff%-J1D3sC_-ODZ|BO=&t5|1Du& z(gCmJ$!NW64MCndJplV^i?3E7kQQ#imC43R(C9g!x6wQ+m~i6wmwQcdlrcnCAyp9* zywP|a`J@eup6&%21Us>fAlGL0Bjx{-$O~~7Hk~+pGPq`&>hp{LeaU7xG{^nljo1es zXxGAO2mEWo45tkt3c?okdUY!^J!;1IUW?4qrVnM>)vXz+o)YBBzoYX#XY2$F?=psD zQl+a(2}{pr4b5&D{7K}M@{Ke&O$kRTC7d(zM0FQIpzWhoLfdP0Khg%X zBtlar5i#3iCpxEe#Q}dWOl0}HUHtOilhpsMwcPT)twi|BGe7Hieng;`sV_AR`EC?eLioSnUt4=jG;()s>b z54jN&B}z=oZ}hNwCCGf>r-hM)kJk}&4j&C>WkF2zE`}eyv#mtUujll`L^2UBm97Xz zeh^kNvCfCFkawfnGU@~JxE8X&-m(TsrIbry^R9mm9tnyHl`nTkJv5z4v(IqQ_r@sp z)>H_uq~@?0_bNVcj=&nsKael)3~Ti<3(GZ3t=LsX@|d9dfdD?l*YD89;&5mZ@@F)= zc{e#G4Dor3tzW&c&SGCuX-c_K$QOKAd|S}sK)sl^Kkr+v@JB|l4J5}7$M?%tc^^z0 zc^0;+-F+5&m;4_0W;?ie+xqy3SN`s?Yh$^Lh{)0MXquB>ZuRgLkWVwD6B%3N3d-&$ zHMW7dR7lF{SIY)k+)%@V4Cu54#EoLdGrVbM(-nAY$!F)c(He1B$E`NCI4h>%TuUdv zOwSNotFNx~SnALYD)yB7d5Yz6-KH}q$JLSiEp*|Jp6CfY;Q?})rX37snVj7Az!J$d zqb~38pC_AFGT6rZ^GrD~&pSQ2P)Wm5k({qM*&WWw_6 zGD)n{6oTRza@-}+q@XzqlB$WnJj(okZYL*-8y!TBqIr5HqC$SoqDV!Mot@A)I!5$M 
zZ%Z2#_)dDSt@VG3bfu$^^)B>&IZ)9K;p^Ys!}P3xB6!0xk_)DCy@fgWfxzyAx=!HF zhFXaULX{OnJ&bINIn`_aK#K)$^+6()*gU-~GL~nQD??D8&kgwksKH>cy^T zb4Bp0PWKYl?So>7FZP~&Px^%ePg+hjsoTWh|9L=TEL&F4$-<4<9G2*ryEf;$|0{WAk0;QrqYt2H3-2a)o(ebIBZxhr+il% zQ}XGRa0cd@Q9G-(-PV+Vkfe^c%ceFD#86ui)30$?5`o*zmeB@CP!wif+n&r9vERS^ ziF40wtZ^ouf*pX-g$R6ZsfOPOs_!FFT2am7u2phtRJqi-Ca~PC{K3fBQf944*@i{>Us zWMNo+_WNpMY2VhA>WNE-ipz0JbTX_7YK(T$e} z?~in4wX9`&%fHo;u3xQo;Y*&(EnZ$col4yvQP+`QWageFvL#G2=PC+co3tg{Sw4K6 z?k3*%_sri;$-7>me~wJhMc!MWQd+P$GT?4bYA?uSF( z9ZqS>^12DUoDu!ZDCL&QEiBKi_2W=7Q~o(F~7WNxRoH@+Hm8&8Q2+7D?z0h z2en%pf+zFOoh_>!Ycz!nbbiJc0AoUlLpIr_uB?f+*1L^1g**eZP>TzBX} zTr!cS%Vr*X9>sqx%i$h0!taK;Vd8BAap74y0@0pztIvE9!Uzs59oKqr36c$4C?{yh z+v8hGKkl)ocMcR^4r9JycY0CEfQ!n5Catrs_ z_Czl(>MC83?>LG*rcbzrazm2{3i{b@0fE|T$!AI8N}#lLgYjWg&iA2EJ;)REWg>kU zj;uJH>B5a}6%0@IoEx+)x{pHBIsU~vyZxvQI444)$>tW7Rj0qrB7Z|1uG7lU&S-$( zO|Bbmv`sC{0YOhNL!9U`DvMMIv90=$9&~p+T5}f!p^NYfa-Y-uZPhXt&ukNv=S=|b zN19{2+G(?&_XxN7K%0?vx7Z(?kF$BAw0-i9;C&WuxE9>#To&@)KoUegIiROvSHp9T zN};&>6&@&#H!8}A)aagc8xA$7~1^mnTKA_eOefbZ7inak%>nR^M)n#3>&;|i;bYd#w&J7 zV%2t2&vqhZI`SzOY$N`-|Nq8uYVO`Zam0Vf5fQCL_YE|Oqsee8kz}SK7Ir*S7*K7T ziEt;$aua_3j$nT8Tf)1rFmc~kc$vH7%i$3-*aJC4FIscshwdfID=Lb}cOm^Pr{pi$ z_2A9fsSp*CS^DzoK{%9j6Q;GnGLxg@!ek}G6?+pl?2mBiZfqXWid2!TG~{eaIQ5Mg z7PZ#(ZC!_C^m74gH~YdJ`PApxh<2=VUw$~(Tq{j90{ItRxt*(TP+=T^@fNS>(h+JN zw;8_o>wy|uB?zs# z0|3YfLcV$;!E2@JU>Z5F0V}sM&$(2-H=HTAy=cUL$x09EhxBer0B^a65%oFMjSh+z zV-M2jUCk<2*iDX$@*8kwpfIl$QW$jo1sK^Cq+8RDEWEySMXgY~^Zxqv!<-?D*spC> zAOvlIlw-QK(i#J)k7QDtVfG=B9XLHl8qZJ~;CJg;%%J|F*aMbiU2)ydgg)WBWpR+? zD!Li~kIL~9BmrIlno}^hm$bPC1~}{b`D|u8tsCv20iX;4du+vGNMf%lmD61oga4wz zhu+=FldSK(nlzH*koC70GYPv&2sIl)ouSv;^4tJhRcUHzXk^i|8-4}vQd)f+Sv68G zSEmiy#sE*|+-qJ5q^1K(vjgwQK9z={Z1VsoqAg`2`w z>RDauHew6Q;r`zxW;46S;{{XSnI!9ID^iD|ee6z?e`R{Q|6(=dr~YbKU+iks+Z69n z{_!uN{BiLs+maDs=zGudD{YB%toi@F022wAA|e#=_EFgUQ=6aG^9p~|OP!3D+`zo? 
zQ>$i15p$xwN+wfBv>pg0a{12h0k4UsnYoAp>{#tW;*so@E?b0V-TI>md2Qg)#O*#T zH!z4T?M(n`sYhc(z6A=HkKpYLhd=9;Xzid{n};NDF9!w_lr)HgaKAd9yY2YNSMZIM z)&ej#zU_!Gey!L!tR^M4%TnNPdc^|iSW-rTj!d>6_V||*tRs^j_-mPW@-+P=JHTo0 zu?q&_2zILUpB+2v6yk;Fs|=>K#BWfYZlmOQzS@XP0GSNPt7}Cle0a+;%s&j8^(Kxq z^?_%rdO?@#;=Eq7hucXGAz0+7$w=yUOjKCE-Wc9SDPhU4^rPma`604!So{_+zm%@% zqo_)BFHu1NlcvURv~?l{O2nBeLM8m>x?}3uJKDM+rrh}j&6Vu*7`vLFuUSvd(Rvn= zA_T=}?X`$;12Rln#Hz+(5nAX4h@CIv>C+`lc{Br#;3p~j`}X1$`U)4QAJJj*yLIuv z6H>1?rOJ<2teN8x+97JTG3ENS(^vBP+P9ZbMEmS<1h@0_nTk=f*=ltYZ8yy^F|~zX zmPC*p`h+R1iJOrP0$KyQ@*P;ngqVCc6Gf1C?2(p~HvetJTp`S9M}D9cKzDq&FYMs|2Jn9SY>gn?K<;D)_?{yanhJ@Se+lmu7$@#{84 zg5-{w`|REl$>O7i$y8$@$-F>RCaT{Y;eqqWUq z)0iCgM*|T2a1iT5Zc$;WR;1y#dquYAhFO-;6^$91ZWdM|(VA`7Bv|S(tBQ+Fe{jFl z6Vi|F>Lq4nS~Do`gE<@UyTP(d+s+fs#E=rM_NS==E{SV|m~Y587=P1V5$vme+aH5C zaQ9nZ)7^u}68jfT8#{xOFt||_mMXS$%8iWYB=3|=8%WIrsbKuBD{Qw>;^+?yfX1lL z{6Y1#rygSvd(S6mHIJIP?5#{o=0r5#iEu`8l?x=yKbmqu>~gy?T>-<`SsL3s-42uL zVs4q<2Hv;?*gL%dA9GFWc^}MhY@G6#k!*D>0yBlGU%eL737J8a{GNg|3HLLa;2g#| zkkmg3|9w~EA`o1X@v=FLn7Ou~9dXkO*zQUq?{w(`himxq^I1k}Y1Tm;#wD)f9zeuA zKOcSHjEij&w=+^$4u;ba4W0jsj|>9bp&IJz-U?NdAT_=&4EcAjsp7D7#(w_3Q^J~J zQ?h1F{52R5CN*JBporHLz%-z_ggZhQhRMxsS)U+>0@VX>TXtwJy5&IMYRTyNY1ZuY zU!zAX0@9Rf`>4;OrB4UZtj||KYfubZ`{s=)kC6mHB=9X?HhgHqfq0ETD70*Ws4(@1 zK}>)vmrJT^3L%6KBcf(563LP+vtY&3aD3}hvn%r_lU9R}MWC>p?Wnlu4YO*&=F~b28nrg!%iz#VhGzR>QW(Qa?O?6Z@>byUcC40eso zI-7?2r<&U7Ryi8R^?86l>4L*fO@Kaej)t1Dvc2WRT*OxumO1x>!rr{loKi+Mn*xny zErN#o`@J3?Ggr(|l9h7i#$t7!Wc>kVt%Z@z3*SgpCXy(mpYFQ5cFHvf^JdPf;J>FZ zL5L5DjXcTfyPSnhPpGUt@+Og>HDf*7J{?;^#MgVXk$n3LRRbVmt&~m|6~z_QE)7uWTv{djojRu3c&q9rJa(H z&F>boXMqp`Mdmi2-d1v@_fZ>R4$l9^Q4Ysm9jaTzgk01|P>yZyh66kHBNWqIR*!Af zqWbSb>mPIhy0@X<^paB)lxZ6-wQYxp;%aGzNd-*Zf4tX z4wp7m5DEHqi18n#4YXATmxNr37xN~uiOj>J2t;vO#g`orprro@XBx=9Gt!%F()owW zuJA3n@{|fm3}?y4zX3vv=57Z)!{?rS9^1R(LCP=xrsyF%COaHU@Qu*#^ZKRuNAQ76 zA@uuf8`*w~^{q8pT1A7w@C8u0dwksFs#|;bd$@O@#HUSm$-u8iGlge*rzcEf4mq=* zKfLy#^F35DsH%KXDJ7e<*%;aN*`YpM4sdKj)_2MVyXfXX<}IBR82$Yr!g^bdhVWN6 
zhs*s)uZUmuTp5&mcLaBrye_^kc#?lRHNJt$an9tlRtlxiM>mRYR_Y4hk+pA{DoK6VJ+;gHi26%4G_?g*s;d5wIcrLI z5yH`$XqF&2kyqT12siSm3ELNfll}MkjCzu(H!+%V`O%{IrM;ld-+wzAhIDKL!&Rp zPf}C!Pcg|@X)BfxviIK<6JiJtQ0L!AYg0uNODd!%V0r^fo99(D%&rE+Vj2V?76gcX zC+&AEx!f_%37(sEYhzyc27p<%cYVsnvP~%cvIIUd_13^7p7|TcDzN|I75g#3uv6;i z;q_I2dni{sJg8uuXEm~HP=I&HF_&Z;;8^$`T2FIqrPqax&v)3PlD&GVFHm@2AQhJ@x#4*wl#0I^j-q4$bVUw3qJJywIpzkgibj zf@b7;(%w0;qf;yp?YPdU-+vdV$z1mE_oC@BFXJk67p#v$r+P4BN_TCZ&;XHR}$;= z@Ds8Aa^|Eyc=v87KKH?lR?AGlwR2y-vlDLf`&9@fn7uhPj^rB*ye<(CAsA4I6IN2? z-}Nx1u=F6}-=XI({mbp)_IBYG@|=L(&$5n_g~~w9C7%=Qb5ZYVjo1Q0unTO*3F;?z z)7qqQAt#+h*lIH;Xf%QCZe+Yr6BxF2hmSL{CCUT!rS`sQ z&G)c}Cte+w-V8p*`(scqc#4&9E(9j0d#|(bLeAg*5UzFrE0t)n>o>dudQa0d(KM?| zn^7s~!y9g4p~I(R8%6y_7`pRa>}lmpA_!y>Jo|CD&5nFdGhAki{yDCpB^)bhH>H*I zP+c+QTRy?i@F&OGD(_XdFY|XVnV?Oh5{KZeCH_@|Bi8I7ctFqHBTH)f;_hRhP-Ed4 zybKL{U%6tol=&xWWC?RG!d*LS6{V6NF{oZMyqEs5TlY32C9ueaxgVWo(02wc^=mq4 z&f{q!sCcc30Nlw!#vfcKcxMj^!MQNLaq~uQ+fQmb{2T>` zhO+j6AxsNa`u)pd|C}DQzp=E*G$URZZ6WJg?+53;vj^U3MOaK19CB>c=0z-_^5q_m zP7PyVtN6p+Xuc3F(3rMp%0BGY#@X=dDdF;EQ#v6L&QZ3|nX+$cS^ybU)EBI%Z)HEB z!`_!dNW0RxcO}VyuzdhS9+6YrghUlYy#VnI>jYd3Nb&sI7;QFYF6HCq>Jaa=i<uywLsKJFZ zn+%OZh(c^-rth5end0w+X%-Kt;h+DGXmM{xef;Y0yZy)L$z`NF_)VP^&3VBSw@dj3|7__ggT3 z(n>^Qm+DlUG=z=U3$qy}NASb_%A=TRVc2(R<(KuvY&iSIqm8rk2b7l@@CR4Ml2t&A z^H_%|_Nf)lTtKe6mRlT*BiFG_v+v21xlUui7ipT)Nvzt2C%j9StVhu{rBJM>Vc4P( zDPxezA{UzK>G4)b;_m?ewwh1Yy~jv-3%-H@HJ#hv@&{(f9_wbK+lEaKG);I3{{5WX zde|xG<%}B$xXnSuXEUL%ScO9fn#?;={~kfXwS#=C@|-m?ZG;B2Ike5UD!#E{Sr5%y zy1Snv*7{SgQ%64N`HmHf!HGUdh(KEV#n2e>N(Z{GYRl>~}lfY9Xf( z{olx6Uggoh*EW=TUz)WxZmvSf!*${IPX*<|3lOJIm@bom8>b(om&Z+h@3C3ENakk* zl7KY)yRkps(q#>{Rs9=K#sst8bR^jdVs2#t<2)4!yJ^Op8DTL80)mcxp1=DeBoX1^ zjy^~d(D>>qNLn6Tqd|(d#coC4%5@LGtDF<01op$)z?H&zm+m;3mYg)9){69vR0W#gyu9fI?zth6Kt@(n zc!~22`ZksCuPNWp(@_UBaZhTQVZzwz*DqS_LzF$AOhGR>-lqCma z8b`$sG*~ixX}QoWUf!=+A<=gf23Q@ukO&1ZYjk0y4>z2e*cug=immxm;}exofAd!m ziU}9t$5AMQ6PLSK?)3iU6jS2AuK5(xpaRtCB6D|7+q1**hlXFfvAcYEgZQgr);UCz 
zi7xpSFzZBVw+zhveOCv&ofyJNAiYHA4{hIwA6hsldQ0##%LMTxjCKO5N&&bhJcdt# z;hACbF@GRm4hqGWRw>)+t4e?>S9SRms=51zg8fkyh(20a{U?G20q0F0c&(Abl(Hx)3+9@DUs$wrLLvev>6efn0-H#)TrQy6_`RpcsLQP{C-&sR`jtVSgbo^Ig9&%?-FaA<2=k4%rRt}o6 z>l&9H(~qh%wA{rO-j`XHbLsZOMG?DfC-f8DOUYAe3si}5{hVws|4N=2){}>xqP#$O zrg#{fNWBf==n5BS6~^QP|S(9a-VA_A$#Z=S_MPuhFr&>y&u8Mc|9Fb8Q(Pmmvd zh1PqP5~Xu>^-vBLf-h(jNCfT?a(~1nwGh!UFT51z33CtcG19_x--n3!CcwQ5Fpk)q zqd)n*kIdZOY8K4Eb~f@mAOv*wn~e7rFPifjY$*{+Jn-WxNm7<6=L;jRy1yVm22}Fn=6W58|r9OFaQ>tZO42KO6_+UC;1ZCH+iML~52QJ=2 z>g-Rxqpj^zrN_GMAbM@;E9~mEaxis!#3h3toMMZ!1eO*LJGqH?mSh7+i`WJZ^Hx^O zFa^i3tpLNec+yzEqz!TxWtxeXft82ZPZeeJrvi^Ua+pz@h1) z;I?i^EgL@t&j#d{3gX%lrMn|}w8|G%Yb=zElt22y)D-97M-kMVz{6PQ9wGuT-TDmr z;`bHLk&P*$3j^D+5N7WQ`nYIBEAvO^>Yn%8P8PN+ZQ0zvLKQ)g5yaTQnb=f z?-g=*{SO7&1GhbxNI*hi&j+;^B^2!{(HgWbeKl$XNLIGMmU!$=d1p?#a-R&#FP#1B zcsjE*M$$i|0CvG8mP0PstoFBXwaC10RTn*!*8TXrxzoCHh1rUH=&3>F2CLcSYk!gf4>~hWVvwpbJ z7}qrhebw)pJ8&a_f|Q^NSSW`-~wzP&*E(J3AqH?9zT(VU7Wqr1X3hM~;*yN@~aFC;6aYhbFBv?Dq3cQa77z zh;}o&P#G-bdU>9=!TKJN)o~s?d3zx(!EvP_LmA;X!?Tx`zcENIYM-Q@Yw(6=zgJ_> zjjr-fT>k+*Jj|DmxaCs~vA`m_+>10}hx1_N^UEF#f)fcxNgHCrvm`Bjf4@|9s@rp` zT?)r_H=b_ax9Yj=%pyrOw4bo_PQVaulOYto@)1bJ&yokt)3AoFPJ-n!eQ;k{B{Rg; zRae8;r#!yqjD$T{@w+c;z`Ixx1j9-L2(xWmcbAlSXnOs}u70VLLY?p)@QYQvb^PYL z(-+nX?at+}M4U|9+6F6oe=0ZEQbDB>{SwhA_*yhQ-Xn~z;s}7z6h9Fz`Xda#$cy^9 z650E>G{a^>i}OFZaXX@#!2s|*SD zQONdcbWvf}%}#cX!=cKdX8uncOge!J(iwj}_HuU4-ZNg4X`|E7{9$)~0#9R-v&@S( z(Oiz+&cR5)$;I)b8d4f2{Uez~d9;}(fhKb7ElJeHYtnL-HN$av0D-Cw{S zrDe!9BOKI(vuAI|>XQIgtsa^SDfODGK(vLmddm7Njt6}8`g%kY*>MU*s7V_?q#M@}ab502H>A}b@swExc7 zj!)Bcy%F)d(M`Um?swd?3sFy2EinZV8z`y0{jsN7N zI;8%f7;|D0f=Qqvma~^}cqPB2q|HP?Zh4AHj#cRnCr?h=cs>9c@6A4coe`{TA@ue% zty8Nljon5RUuh7-KPXL`q`zYY_+jOXy0d(>4XEtTw#dlUb5>FlnxAKFZv|pl`J5eNQsG%J644NK^lB`!&Sq zwR#dM%75+}7^Hgirlb*#GP!;Pg)sJA{z;)#e)kJHB2I;rXD;y|+@IvjE}S|I#LZR% z8Wy0OVgya0j^MSO`qL1C7Fn>hapJUwM`+so)=#i4-;$e%%M`e&e0`+C1>#uqYTNA%So 
zb^8hJiA?s;o}KGQQW&F~9I}_0o()4`uba-Z<73^R^;KV#UYU`0rEoEGJ!n#%i=0ORzoNrX>I%Wqe^@G||D}roBB8bZx~GQ#M%XPwXwEg!O}`IX z3ZEm(k|zx3TKy`^u(!S1P$~bp5CqfLJ{e(y(C7tk%8cpr(*}LYDRT^F5GRZaz|ADD z^aV?ctCLGkR%^T-y?@mL-n+W2Qz&ijv?D~#A z({%IJ8MJXAo)x@d?ubC-M{?L%_-!%ly?joKWW(It&{gRT5I`)V_%i4JGP$cz42xF8gO6Tvp`87Uk zl7KWM!{>F?s~+lG;lRXkv2;SZph`K)03kcD#!G-bK=Uq#$I-8*-ZmA+suUSWr(d@? z@F@dftPZkq3X3ARmF>_f`mPdOmU69RqF3b9l7#xOe~BD-P=qWN@dLP;HIt`lO((5l zLRocmy>(UKrYW3%d#XP@Z8?8FTgS|Ys3T!AGai)}&kNQ_I0?dCQ7 zT^rgIsJwM<6Pw~_LjaPf_o1VCSMTv(IH{irzaQ@J-;MCP34_t3v)fKSWROm3rM3kS zNt<4li$_yyxxfvBKpl1*C3)cseSmx!b4w^fpHU0>S|piKLAV6{T;rwAtWJMtff7fxx} zDPxaCF=9Cnh4fUOkiP9!Y;+rl;iDb@y8T?!`+}P+*R|-HrX=j`Od{FXlFUklO2$h(zh{+qP=Rai!88_fCz@?|q{KQrw0@h|mZcmKRK zCSuHE$7-%eM{cl!Ee zAM?j}E&oSbKJDMk)>9Vk)ebde(=5ZZ{)K0u+lX0Sq{n4Qss7&@1_ZY9+i^d>5=Aa) z-Oh&YJTSkm!F$JMj+_iu}zEDee>Si8KNg&)o#SSK*jvNLj)I z9VZ-)vWO=$jzD+lk$rVVZ|bWQZu@+g-BwdM-t6#|>ZR?7IcI=`pU%$`R0Pf(6q}O_ zOfKohNZ&sZc?QG{dDvx8@XkZ*P45aAzq73hV>T~L1=Zz7(?9CIqyZ^|Vtn2OaES4> zzJb+}OtsNKHtR)UE}!;K+e7dWQ5O=2skqc;H_zSt8hzAS!)@}xA$8>qgJ6sWa z^x|xB{5^<>g75D=Jl~6Ll?)2m;7xMZ?BkGKC%xu;ZuI%Po3N-)8xGmfkh55`=>QzfmKy_8!{*7(>Uu7=f2Z2RzxK+G@ zDola`5n%7#C^mBy=DzXp@Nn50Y@1w}d3rubN9NvGwMmJ$Xc@vLmnz ziS$wgfjNFmq{$2zVTlWg9Lp*bu|D#bW(JeZ!tPryi2%_#3akG7n{;Tz!Fn6nOXR2A zp4b5I*=0++9cX^r{O1AVh=RbBGn=S=l0C)mR~e^`vQo_||46znjO;(ub+<27x8S-@ zM53p?dhM3G)d^2}W?!X}zMdCeCvI04bN}0o;S|Bf{Opafm78EbA~M#T1Ne?1`tcWb z>hPw1EOsJaBp*Z&$%nk$zstgz<#p-b;aAAKRe_+7G6cIwdvv+=4RyYe#k+QnTSF;? 
z{nYq~0^bnT5ENV}A{_8caT-Ob-=859hPV*g|>jbuqMUEeb208VrID>Sk0b} z0LT&4|MGVMIoLCpLou4hyy%O#R4$kV?C^}f94}jp**#{~@HPcOWohcBcjv4i-5NS8 zHy{cnIuIKBQ)p>;O&8cLx!YwpNLta92a>i(d$cZkPc%7F(>#(>NwSR(H%Wm49WIfl zna;Os=l_TWlflH+|=xU7X$PSDL2z$!f}L}9M@Pp6)T!WH)HQ@Ps5OTO+`*B7)fDPm z3{TNVJ890~AfEgKpv5&f#UVdHgo^D8O#;ly{?T;hv>6sU)PnOIa^b}IaLj4Ilw2=Tc|!zG&3oRR$nngkdz z<7sAL$>^g5TDJC+YxMJ3q|&2V08YZD_ZtIKM=<(jz?KAXD3PeFuUdD7KJ37FoL^2@3 z{XEfeg$|$NVjzH83GE4spMeTCxJZ=t+6BhQGAe-lc^^m6O0HVv6Hug-hRU|&j@SfD z>k;mk@hGBod+Wrhrw_N|FKNnkiEXn(m11`~#UEOXdA#(>&JR@uO6OJiSWj#={8_~*}n3!NKT`k+U?|4>~jAQ6wlG8cpEU`Wa?MO9 za@}Uc4V9hZH)%-441L+^JsyXgg+=|hF>as-xky}8>;Bpdh8d4D3BjeVL|Fz25H;%} z*EA-hBO(+gJ@GV#zBNv-Y4l{`EnCqBlz$G$%^_tg1`SkPyi>5+NvjD;Qxsy7VDP_6 zN^Q6IR=O19g0zvI8rL}TFy|)R3XRs_R<_kTCTOUDCFHV>CE-+kv$x_wjdzM}IzGrw zl&ru1;OIK=)Rik*b397vv0YR#iVR`GDcb;FqVWaQIuy;Zcn;A&S9kWFSdUHD#sp#Z z7O}ruJXA`Nrmo4L34GGL+qjAC>cg0+F?LBKDDbNj_aH^cqmG9Hfx@bXF(MwF%J|Or zy_5)*j#XqC?`%(q#PJ0imbe_Xg<3uaA0!;n=$*+>{Pq|`Hi9IlN2%rYa<8|47$DUK zkZ@#HXF$<#3MSH#zxvdxGZ|*J{H(WK__k;t6>S69I%iw*Pju3ZnXK7QDUAf!XR$vvVGNRSy!dzy3@*Gh)c|ooT^vj3T^_t>dNeH>G717dXBmy*#<3UREtxd{ zv|8o*>mj?Ns67bW_U;I8Y8V;(R%4R-d{J&v*;B5#QO23MTnmRQpBwvWxc}Aha(*uo z1rl3S#E`>lUyQRI<06zt@&>uk_ZSmfb8Cd7$Amcb>&vt1bo8_q3WOH4&Ik%L@-w=e<68bdUJ7d-OnYlPMRPNqpotQ zfeKPw9}vjN2EXBdqW)zwTjXJ|+YC5OLQ=`oE@aJw%>WaDmdXkYKJC%LlWg-T;$BUK z7@-;Xq=#?e`h?`yzBVrjUnzVBj#0tHj@-NN@A3Xd%eaTOoy=V4dQU;;e6oEPe*wGY zl0E%SMwgWo!yJ@m`zIuZ;&3ZciH9y*VkwJ#cIBg z`eT+oQwk;oEt*Z0fj$}~P>CM_vttfDkF{RRx!4gDRw3@Jq9gST zFK5KPm^Ln1_8?v&YuKa0CHP2=upR)^!Ix7JZXUobp{93`rDV3xk@-QrmElJo7tWD! 
zhJBnKpKN+bk)5oZ;cNWJlHoL6UdL1lToEGr7bbIJ7^6+f@#ce>v4;t!(kQ}KANsc`Z=`aIh$H-OicfEUA}BxFg(`|4$ND}t|vxc?^07p z-z_)Ln6%&GpF~KmsU4BQ;+M$+m4u^d7-+`klgj+)xeE>($*B-@4oLDb$5pzZ9Bq>Uzw?@Fi;{~zTI zfXXTOaFLfErvUrFtoA{C&1Yy>>;a1d4Khg0jfvKE{$xELScK}$5!8#@SDio|H#2Q% z2}{Xz#RiOmb{TUlen-+{D;yqg8~;J0QJ6DMn=Q95$+eWK5qQ@)m^+600Hj%45*NIG zARQi*?|^P?q^+7szQ$c{dt(yI-(4WHrGC`lQj{+n&)S%%>cg8snt{trb&v;DqAwL+ zldg^udargzSR(+xFq=!eFi23&?i81j*bnH{Kr-r>ar-QxHA96VGC$%%ms9E0^&nlu3!F}C55sZ8Ntk` zV5QY~IvM0XS=AlFdlhu9>UOc&^nhkkob&9PGkqE)7NQc=tq^X zz#Y?AQzh5aIvufx{zg?JI(IpTj)wD_*=bo`$PE8X2OYaOy7S><)B&aQ1TMHPI_59D zx?eJRjjRdFR)QEgL%>D)gsN~?Oz@jb>Wle)!%dB!lVUyF_6_e)UWRXa_qLbG#Mms- z(>bPu-V%Z>Ggf-j#OUGSZjW&J<=hi9dN*N|RXCw(iA&UBf$Q*73 zX%qr?*rI7RD9HEAX5YhP{}zv45Yv9R-7Ojm1b9FKrQkPu0oJVz^j5sXysRO=i3_ts zGB01g1W%e)c-jom18={B0A=h)m{F?it*k_vtVffi3E*UxagbzPhPR6C;L7Q*HMeB1 z;ptBJtBl-ehm=GX<}w!@@R3wT<+}o|c^V!B>e7b+QU#U9#(y)$%%T$Wo|7sXwu|1f z@-)5!fERbISFyNo8Xqdff5vXufs~Xs&GH&Om$$Z{Mw8%TD?EMb94q0bMZXkDl zho4ft$W&};j!8XI$+f-7iCh4iH3rF3`^)unP=5o6zQ}PVi1cv~+Uqh8#E>Oug?~T% z+nco%KXS#s2a5xptVfOT`qJA}2(2LK8>Wr$jyLvTHpUZiE#<$nFI0pIZLqc<880K{ z);7*Wii1s@bjm`wuJemlvzwd*{o052qDbGf8rLOjgV^~#|F0Lsg#yCB=R*{|yx+|g z{@Dn6U^i#H)2=&61mN*`mmr)ePI}#G^^a%%8i5Ci1C|nmCO3uQdBeCQWpD}4dV$*sj1?7xFSz*na0pS%PzdRuE2H+8 zHh5kA?Soipwn2+)_dvm5wm-A46Pd#K(j^BC>2>PEWPlEA^WAA2;PNIz1z^(^Qz#Or z4>!Dt$lyz{sbG&5NSoXt(PGG6l5s@ER_1!>XaimS#TxgAIaKwdeCw@vIr@#luNsto zovK}HMqPUGnTn#zWZsdCSHCq=x-NYb>PJ?@;wJUz`?g0xnd^2RN`)pcewY@TMC;iwpJdGAlbC#t)38c zfN`c$=|@|2>3*eyMa4Omz-;oCh4>?4f3_V!1!1b;g4_#F#-yH!J>d6$$aByz{O#?n z9}xQW4mD@x}-mfbf zg6+rG!~NjBE3cF__9zQO@3kg-WZ6;`)8HW<*9YXPf1u>hNbZLdMC%UMmPrGzf6HTR zMQ8)6_MTmHzts*~;f0{(cw6q0ZPyAF8?Q}P+)4u+!wKi-cG|_>wys5_$c#WLa2uf@ zd|x9R=aZ{)*x)L@84q`B2oxv0_on1>Negz$hWSvs6_MLM0lpf4cNyQFGpgbxes8d~ z0;wpkdlQ{}Pj6t@w$D_Ts)L+=Hnsd1XN>MFj`M?Zvk{7Zc3!QMX`F3Pqa}15pku|S z8dNcmmRSWaO0=3e_t#fg-jD4XFzx%DdLqvT)N`qkc#*7>OpB|lF|~c24ofNuZ;eKh zE-+}Am4@R{k|}mECkU}zjM<-SM&={#FIP@OQoqXO8s3$I#ASl0C 
zv)Ze%I<3cF84s-M&wa8-OX$LnKEzR92_Z1V3_kqr{e3rH&N;c$EN*gH=g4S2Kq7>M zE29C=EwM$p_)R}M$WJpH#`hPZ7cr3Bv$#O7h&UeuJ`^xad+tMZVquZ3S+H+7n&2=+ zOp{RhH%NLlmSTYL^2Md%1W%8oQc>OB+}*IQl7W)=$CzqOe)kwe?9(eO33C>|cJR|W ztMvpnd;)?UrMmp2ss59OQYv<^(C&UZwrQoR&2pO2s$F&Z{kgu6o)dU4=u-k0$td(b z=tb_v4G`D92p+C-A6<~TrT6S5*|SItqi^o_=syq2{qEY0*{b%&Px{Lg^h_D#<5uqj z?5RD)lKXZgHpJMmgVEbgOagyIfk^j_dl5ld&1!<2L{>HOb%7$PsUBfqb&VHg#NP(Kqi@O~R?9#ZXk&K5}yko(i5KS`*hP1$Dg$|5IV9+H17K=LHx@-GCpKK#fuZ`hZC$==NLyh$q zz5EmNs$pP4GSP%F29J94ksSqrQ6hmErE`vk;w4LP@--_G<5yxB_*XLr85rf9Hl2t& z_2{XHf|ux4W4S@;<*Z|oDN;mXV2``v;90ev56O2j(cwhCx}PT$g?>U~hZgTp#^gxQ zvi6-5o)>sec}fyiAAlNzlC~;}i*O5y_k0-WyCZz1AEE8}xb$sQe_wBtunv_FmcoRw zAbMfQB*Y1F`TKXty; zo+<|lpjIg-ttSEoPiFI`^p^Wq5PjQhhR_V8Fn(>m>Yv&6a0+U?R|#pi5pj0(my6l+ z?=0d#MmEfmT**fa20F@qpU=E7Y=&Cw5{^}9v0O(<#I+>~uk2vBtS&&QA4pmVt@@*k zXX+_5OzpuH(2D~1!^S+ZLC&B~_i|F-+BE!9SB+tW!JB3|npaFwcuVVUy&Muj38)E4{0sMf^ny%1 z>7I!K!s!fs7E(vCW1s?UNaDnTVgbY(>OJ&;pwbr8Pnam_HY`gyPns6H#0j-IJvR?EnbLRa<))!~az)!_3u z0LW<~;>NBZe{B$kjRuHDG3(ZIyK8s~QyvHFR|y#paK zRBCR^`oC)jFw@ak?OX`AiZm>{)=jNy)l*e`_3&>j5@dPRkVlr*e7g8fp;}3iF`~ai zS3BkGlz_tL;#8`W5`n|$AK@~|**uP`WCom%Eo9AD-cL*h?XywWl24~${y1lIeyvC* zXC8WJgUB)Uhp5ndU5I{dD?PueD&re=i z$2O>H)jqgs`oIl$ognsVmL^=p;hxOkqFG86#m~!!}7dQjfTz7_Za}7G?aqx>}$@OSx7JWG?+Z%{ZY7lbs}eSIc`P z9`NO|X#J14u&Rc??w5rh3dHaQ`CngX9{;>D#h0YyMQluM6V1AsrwrSlw>Ou+Df#@+ z7|Ykjs%e?7z^?nTS>?(c_p!nTbe9IaEy_kl&AMCD5JLkA>o&I*7n`9Sukb`eK&!uJ z^2})SlFHgpbrvIssPZL>M4q;5lK#Z+Xi|BYajDGcsr!S{-KktTn(|^U6K**w{jR#&Ho(WW5$>aNxh9E#qmm0%%nyTEY_NE^Vq2 zZyhy8l87C6P=KD+BShF?T!1mH(yX-35(MjD6x^dDjkJ)f=6HjZ|2wnsAQh!D zhwG5IGahyCPbHd7#$AD0F-i8a3eRgO;bo6?Ui8VOP6R;e;5q^+K=M6?!tb*2qh zf>q|UKt)|gy5`REa$S?c!syxrT|YFP=6{@!9j*rjMkyyXAix?cjd4nWlCoepm%!j{Q^l7F61YHZ)LLS%%T-iJQ(`6Ib4UwHc2U1AbOcekMm66!W4 zV7i)kWAcomP_1!H&>Oz2PEj&IS=9<+;#JaFHMEpmq$nGWr&zuxVMDub>ji_Q{}BnL80*5%1T1=hlppq&!ul|D)_@@nd@m(S}%!L1Q>(&-Oq!n z_IMpDEP%>%nq(>_%zzx@SB*6e=;{DRxbDw~ftErIj$_&Q|JH{oc+sOEF2aD}og|vT 
z#!Tt*PQuBY!992U%;xZ4J0HstC`5$1nbotG)&T^;5&sE>-*FvBv~yySgXJ9y?}>;q z>#8F)znvA24k+g4=Kh&FBDW!>$B<@~WjUn{O&0W2F7WX0_&DmH zi5T-Q)WRxUcw>J;$BTIxX_CE(zalkBY9i}U^7gTi|1FDb`}m>j%}8+$Q3Ct5%^&$y zkOu|YEa`LaTrRZl+7yKt(R&hMHJ6{1+jQ?vcH}(p{@?e<&UH3Un*_5g0u<; zQX-Fg9yZ490tyDa@ocKXX3;MqL2!gW|HY|`sFHc@DQPf9WO%$4PbtWlI@aNDB9K`+ zL3O^oMR@jGhEKft_I`G{EZ(?odfb&q!mlMKwjKSYQ<22l0s4ChKd+fU>izY29kzH# zkC1PJ12ju8Nb^rK0wrMRe|63feYw?jw}))Um@mp0@u97qO--^Wv)3WGYmnUaQ&?TM zJ`7*{804dh(Eu*#H=%}riQmGueR|~r*gB1_cx^i$46C<7?Ki}{fxSbE*rLbcY(pD+ z@!4of2@ru+kWVE~I-DuiC=TXr1j0_sDgW>n$Vra(5{BD8JLunCT*&pkQ*>)~!QFne zI$8)|Y-XbvqJ^0KO35M<;D%tYn{H=aaj~TOdDa)aOj30eVx0S^Vkz~zDxg8B`Y6M} z*SAB!NVH?eDgJbX{@a2AWRG)M_rzx6NZDODJ)o)G;}{;u`&b^TNNEBo zpiNSucQ1PJFPF?YFJP2`HQpt{j4$&eKB)rE@;)NxN=}%DMT8bC&Q|F`C|e=iX*xWU zT(!+uBaGqp*AImtF#N9^HW4^lhhm>q^dIB>W~||SQ_cqDC7wlk!dYCf65xOIh*Yti zYoloi7Cnxk=!>YIcGk%<|9rUYjiuGKq5*xF2K>Gy(k0RnS?hGoXp`nM$e1qLBwOpf zOhC2Cjs|v~{jy9fAo;(Ope!7ym9;frB95MMC=+rTs47DE*gu*0MyHn@c;0Qr!Cux{ z|G{pQAf=qoSG*=8!pG4b`!J79cyn<1=bg}w57Kxn21ke%wjjqY-76=pA%W=%&nZ5C zM~IL#jS)F0e7SD!A1fx?HsVJ6QJ#A@&3F|kr;*7L;Exuptb) z)s!yy+^f?;W)s-C`}ze*ts{l#Ope?XSxj`#J;_Rza3Fle4r%_29Ix&m&6%OFg;yq% z;jR43!fngIAPz+nDTPuYtX>&;ytvn~+SaigK&si(V8vL z*tXfRZM$RJwr!gob!@X^?<5^N-LY-kZ=dts@4k26@0~IB9$BkuRn3}n78+#gV#~4f zyMuUsO4+*{$tz@Va6cksBJEe&tqF{=fG&>_J84PsxVYGu*ih=o5Fl?4z^m5tpage` z8Iey%9Qx+~=U;XRI}Dg^s~u0wR3CnSoj~Qlz75N}E?&@wKqf|&Z3atXqeEdgJIL1L zKU!3)2oNmOjDm|dzDNe0sM2sVcQOq&hnYrjLF|6PLw?-&>e$O*VpTX!IMCGUE^Wvf zl~bOsIAH-);T|{amYai~$pdJntne{dkaH#`@261JJqGLa>7B z`S*&z^4F(Xnmge4zsXS>0w^2{00n!;(XbD42nkeKk;dy0=abIrG;q=K0msVtk|ck0 zGaA|edVp2Mxo^jMTRA|icSiZ(E?A@ikIDcZ?RFCW1$7~?2C@T;4_FPOR8N}p zX>&RFj#U&3^A+MDDOIQlJkRc`N4}zUwx8dCS#uYz6L1B`LB&#^Qyw8|6t#=>z*;2+ z8Dq9^nZx6HSf)}*C2~ANVDjzy7U$Io&(0w%Pk!AO?Pcp z%HwZ_%r=TnVoOkZ#r~ynU#}5~ddLe|8(|^SnAX`4MviSMz7!M{;8wUYMn5Z6Z1xO) zYOQy6SdI?#U?XQ5w^OqM?s=G19>kEKnY&b|I?{D?YK9oHrU`_fRHQX%h{fyP_xp|Y zkR@wO?KHo$-vnTurSM*m22WQ^v^3;7e#2T52A5S~vn;? 
zAwkIkLEz)y%(X_oUi0gW*UFYADBQ2pmgf3SG{8-LW(ckQl-D7Vu?@B)%taMU%R!rx zIlIvkHVe3b_;i`khojbKgtpOi2$mB8?6NQ9wSSbpBy5@!!!vj;ZF}Pwc%Y z)hZz#EI>>l!fepGMzz^2jGigX-HFbeqgSWOXXQ6UGt#MJLq|Q>#^SWzBKXDyloMP3 za0Xmzj^_PpGA4*-ZA@>yM^uM80dY_#{GHKXpt9XzoKNNr%`~c+ococ#-xz(5Ngcs; zLcX7jkf6Q4_KUixC~%ow(GQ+(T8ysm0mizvm2i0f2I_&X8}GYhjOlr#Hr(bGMk$rJ z1;nmw`WIqIZa^PtmHEjY`#FgNdtW06v@l^s#Ti2qzO@|sso=MYh!t{?y;3AYD(2vh zj;5xj2FJxDdy8n`U%H8bP??;}M&m(W4iUI^I`F^lAF+n%UWM8B^S^I~!((@rC;QJ! zn=u>hz;%*@GDJ^)v#i=d5!T(S%?Mr;#ba!ez+u$-%Y+s}{2~$5cps}K7AAWyDYs6c zbZvhe#gJ~NzxU9EyfO&m1%&q^`8)v6>1T1+1dmJTpq>P=9-7%NRKO|*PU#c52F*hs zUg(AkGabk&c7#yFuiJYrA?EZuuovR4di^nR(9CpI#d;h~pu;>Q7TRNu!rktNp$~g^ zWFDgS0k@~1vb4sY#z|sC-q~u{z-^Qh&|jNVf*4&FfE@cdri5FeP;3s z@5_LQs>jfl>c4~`V>O9#D=ZWe@HP4ld|vEDJ!W!Z$$EKR4irC~E(4vmc02?92=-$v zp%b;qB7Pgqk25shZb%`L8?uQ={A}YW0dA0;G$lpOz7Qx@z6_lRi@8cekwvb1F6bPl z?RDd!`1eTI#f;7tYs(uNo;VvyUwP2nRx41mr5Y>5Lb`0|zcPTItyQ81PQv{IPME@2_oE5GO*Iez#+iZw~*#FG%XkjOhMegmkm83_&#r-801w88SB-{3uZg$u`kzO_zGfdS0&hhUl1 zx2=L)8kF0Ig!1OSG~9V~&=%`rDo@z$@6D~<;43&v6+$nQPRZ)UBc8D_65*U1`|6ym$2N4FKcq_yg|xA$biIRd0zInL<=p3 zF7L4y(ZS^_7{aatb8;U zMs^q`%AiYiK;Tw!+oo#M9l~xl@Zqlss~`D9Z{=vMVG0zwLVPONYLZB-A=j5*8rPWr z%KAF!kJLl~bLNNP^^I{P^zNDUyP<|u9=Ek*0S?$BSEz^n$PMhx*))w)LHv>Z(0AtF zY|Rq#Y>SRraS;{JbKc`-Eeu1=-voK_TBI)Y|wy=fCqTfE16rdK@f%yySj zUqBaTXot)X_4T!l(H7|M?9gwwgi;6&1S@G^Br^~`qx4YL~=itxE8qODQEtDE>-Lr=h9SMI1A%#rtUhjElM zfn>ojOhtgyYVLGb06$IPV zv+JJ3Bvdu${ERr3U_;lsOJZ)QE6If2&YI_*_mu7Y7A(MPE}11DiYtqkPvr6S!6z#5 zD9+n`1bAa{)nUBrDUVb0!Cah7_coB)9cp7?K@)`(SAtn7_#-fvRV-hab*&08IOzjI z5=9igq+s0|kOSQh@lt)){o2@mueyqrmj7Ya&Q8J*05`=Ii$x;kA|o8ftasW*+{k^p zT=%&?oGwshcldE%{R6Zkfx>)akeYl_v7`5LD-`I!*enp!iK(mZH%2c;Zv|gmyyq|< zPYnmk#-9w_Lp7(2m4+`ZFkL3Ww5F8LU7jbKclES>SXqzsN3F<{fGj3=R zAP^2SMN^s`E^lfNLHd#f2{6lda`%?Nq5>udQ;*M!YxP4nH>!MR9#lnV!fX)cI4Wr4 z>W|hCo5Xws(CNp6)9CvMODd-pq%p+7#q`_GlLIA0Tr?p3L~q; znnQ^U7>6A{Oy+=BpPoT{71g<9O;D?s*gDLwuMH`k^R`I@ESRc3Q3brirUJ1LJ*Hgr}A-VZ8yo%7cFYBy>AsUo} 
zqYud14}I{3TPF)ul^AF!^8$_P&sF-#SDfR1OU=kDTAgQ`D*6pZUn9qT|TMC zv|}K1%X1N?!k90XQc^oeyg`vR<>d{SV8DnXYbn4shn9eitsN+Rvu!6Fmp50eH>pt3 zDsW^ewW3*gC_omGDQ5(s6ydP-cVpwC6)_mrIZPCy`ywvkqKGai+))#D7V9whiw(>G zLb^E7UdhtSQ8l>VVFOjzzj=-AD)XB{8AOYuSc5Udy*S8LOjer& z2XY*5@uqinf@wv?9X`B(BR*_k(VO#Pka{t#`?Y??)c8^D6E4|TLy|zf6l`PJbg?pRh z4rhteuruV{$`anE%p4q}L^XkgVQDl6JqsKWC2@ zl*AX^@s308NB z8FqcA;|ZRYYl68|OBV#(=8W>miNUPi`AoE$^O=v-e)??R4(-@a2OJk_Z%(1~w6#6i z>FRD72%N^XDcVPzyAp9of;=e0rb@DwA*QIq80sfp=X>rd>2~6CX8FrTQuD&`?)A1iMQ{Gz-<=h!3~iE}UHMzJcC!n!LIakaTp#8wYB2qsG6Bsn zsAK9ur6dpK7C{DI(_KE-kY1imfX4A*{x`>5+7c6y1Gj7dmiZk>dhSjb*PcclPuEpM z$CUzZWqz<4J2)4YT z+buC27O^?U$oRu0O69$C^H8!?9Gc`MnsrIBz88uG2`D|A{e@_N6vl+Beg2qO!nHaD zJ!I9eC166n?cXkp8^&nx6W2)=SS0_$1VxMm39Gj3W2s3m;Z$u zJDJ=HfJ~LyfAIwxZ{&0UjW;q*0?~0bb%oInel}GJfnADn_D)G-Z8jG8Vl^W)+b7Rf zgND^{;$8FBK;37jd>dc$brIYr(^eHNS1sFU(SgxAe*HKN%V09Nwod}##pQjjet!ff z445@I0u)cdz1xs$Krd!O%cHiRR#i`5E;kMyBDqJd`N~gJ;;xALPtN0D4D(S|@?a!* zoaK{{wqSRIeo}BKW}+KGR7nBhK6a{-#BxVrC4r3!>ULLklzneq@rvDoR9*t%Gglf8 zq`0VV4Kuak8cfxApoO0O523il0sYZo4*R9=EJjp1$h_XVuAu<|1Yzo&OZ-EPiM_bZBw^}3uyjM;5YN8f%NKHnhO^fVo2Mlf8YoC0`Me*AZt~eY zM{&jX91(R&b=sJYy<>}%?C-)oXMelx0F7uSn;Q;ubS-LdsjjwImlcNSM@F4`cXqtj zdLQ7nZT2J<)=3Mi$Kd12dtNs*DEw^r7lHprWB9MsaCRAHGEd;meYBq{S1;$cD71d? 
zB^)@r$K|+7ABea9K^RWn%u!&o-^*>;YJ*JqA7I1)1~zf2V{!V-Aa+Fn`q08Xab`{9 zO~J&lJr%~+hv?nAvy|D?C?ItP{VAZzvu1cVWI?QM8Im{l*OZ!W*K1o)%mS!{W5?8m z>vu7Du(Py6K)8zLpZS`M`|u)r*y|s)y%&|tYgYjjVLtpWs2Yh_td??>l5a(d0IaRw z#n(;OX??UJt|qJK6hm;4Mo;~h5gSNv1m{@G@_9HFx<9 zfe0QP19ic&&eWHx4dS-=^mCVC=HL)P*=<{z16sVsd$fN=v~Rl^ic&z+FS`)~7>>?S z%{kgvvn$XPXmbbK6n|RC?8%0|03q3hbesm=sUzSO~@Sz ztvJGDbXIA9Iwo}b3`-Ty`boeN`aBWBlQKU)evdX`XCD|nS}laNC46pHtk;ZDy%ZQ8 zbS2gE1wnkzblLIi`+-(U^(0bIyTm*c7IUMbaXeoq-Q@)`VRsgQ$8Dhc?mkyeY0;cN zoV^y)dl30A{P^#|{~M%0ie9~>vFRBRvmM$Wt@G88#s*)j3S(BZ6y^Pl;r^LH1JJMe zTt3fF!go5O>;W%qF6B$GuWYidoUsVuAWlIYWh#B+8rlwx)y*gaBu;&bo{Hiw_?J;O zrhTa}#s-25(xG-AfE5Ol!r+fX-faEUBWf?JyC3ooFw5C`fJe255*yLrRcrHZ{KivU z;5A@@sHkf$*sxN*kZcrwk3)^Z1*K5JTgYrBdt0%4%!XlKgMCJLogwiR!aU&;4l>=o zQP)B%D&mm<8Vc2VD!mE2f^??vgJ+C~J?@XGf2GOJ92@BNKnzh_EMivI|qp2FQ zF~sEX5|A|{C2V3*pxYB4XM%7J@UCmQ`${cU^jWRle2BeTno7HYg~MxNaTg3WvRATgVI<3ay3Z=@8}4!4)s3>%fDmLSxnFx$EYt5S8QGc!Gte)a=(LpZHCsovM;Usmq{QJSm(M)u_+R; zAumraIpUA=?dI79s#+8vND#^UeamVA%6?5m0wRESsxBM11P3he^28A6km3Y0E82_x zgHy;m^cz8(dwlp+zkoKJJ&}4%@*)R*CxyaC&xe552q-TLY;P(cB=`o_`oeM$=2-+l zv{bZFVWIe2k(VF>6(%GIjhdm8iy$m1zrpor+8=Rs)>BohUEitV>RVyaH@vJtp?JT# zkT-66>(rvPGHKJpv<7%1Ylp31W?5tGLjO!;SvacWk8jJdMpA(JUOhc-<=ldvnc{}= zYZ+>2*wi0wTjfCeH3W5pLdKhJ`C#Qzc>0CzcxnnfO`;MtZSC&4$Xw>$s7r^4C+$K* zJTR3}`(x_P49wox`#xsI$nT}(>7?lT!GlInB1JRjeeRQ^g!@AG! 
zOWF1QNJlNwAdb>IZ{NS1VZ}YQ5VbPpEvwF3B1IZbRFWB22y((nroRYUZY@34$0N6% zT!!H8W)rXjqrfWzEe;gB&9XutMVb~c1$WC!(hP0zG|Jg^kMTtx2k|;!?%uH*hQUFb zPZ7owp`4Nc#Ty8oQxzD>usz_|Yi)QLaHt1&avo{Y+QzH+)@Gec!tn1*$X{<$u~L5b zQdC7|vDW`0qyLlQ0WiNqL9i@^f>S^L%Ggusxqs&*g1VP6C(|l~T%%~9p?`nJS>l}3 zYqE$>i1kk{THM(1FR86{WCEd**KJtFrTM6cDwIPC;?fkRpzV~CvV}Mwl%al0fkzW* zAA$N5oc&c?z&O`dyS5|lNlN_QSfG?Nz<`cr?!dGU{%3I9kdnJ^9tAX2zxrs+9JB^z z+zQBiZDz42?uBk`ZLozfvy}wnzpTb}rp#f7Kq412HL6TFvsnyowC-db@mqiZMSCPg zAIoe_RJ++qNJzLJ`$5%Lw^{VD4*sS0AOw0Tvu{jCuYV?O`ZCtQw!jS5(CC`m_1JT}W|Iui3gUxET zz@df(g;3XcWFQ=VWZ$LZ<9Vh`2y8RJaHMAhVY!r-9mGB8$o^xZE-UamNNjI1;1|Zi zHv-S~EKewB%L;=N!glxZeBpv!)82XPN^>+(Em039iDy~}>G3j3Gun@u{_iu=A`YUHXGQu5uYlJK z5W#o4#MGL;vv5f_x;clXZ#IaPGn@x{dg-@2%J72dwmAquigcTf9x?9=8Q6D&0w5jT zL^C=60-XI-xgX4Icu43r2k|9`DZ|Rs9c0dgBO7>DKWnB$ZSG|#K$h_OJzDt z!MT;I?9DKk7F;x~o)sE?WfAxq!79OnCO$TQ$0{=0&$zaa#Rw@2aao9CsF-S_${s4v z$Od2VC>^#x=dN{Gpc7$*Dkpy{6G5OUw%f2i!(jmjB(GIRvrGu?w@?z46o?n4y)a#b zJBH%j8OF#)^v)n)%Fs^N$0QI1K(|Z^n(Nq3@}OU9kuIu!$2tZh{o5c~d2N*`Rh#Or z9K7`I#t-}Vr2sH3)YPfqHOgrK^;J2^fiERE1J6W>RUnwC7CZ^K+7B0$Uyw20($bmn zqQW|d#1eunIA3d>>50pxHT>_m4~%e6MTUY3^M^~sUUw;cN_B?8TU)!FKH0M;vTp`^ zvvV6!F9y~n`iPZMq~GqBOfUPcd@tXk`1RX1Ze@(_c7O_X=itfw3)Dk^CJBUxbDt;0 zXMwAM-TrMZS?(z{YRRrCDFaN7jGpqW;)U^$7X>&>Q&CU~O3JD96l8;(Dp4v!k0zNT zC{^KY)MSo7q+g0tHtSM8f!9gfHS|N)e@gxTUIt-ul7@e1@!Y}QOkPtceVHybLpWJT zpd2lrNPj>A-@kIX95*Am?3S}4-+i3P{M#Jr6}kSi6|vm7jY#(pd^SVH%H=B_J!qzgjR4!{dqd%E2 zvk}#-9^O?#=MiEcS=^LXt57~{B}kA2ds8k<^yl?KAn|)+%>bgH1!b?TE~y4s{!UZ9 z=ap~xppGk2d8$&`^Kg;O);6~-8CX2zb0{TKXYt3?(tHmD%jOcJmm_cPUTs8-D^y(e ztKz2CoQMe^VF!=Kmt`)>f374c&<4~+Mfn}!K@Fm*tEP$qk|7(u2mMYA1_VFp364!_ z_k}O*WH)>oo&Sd_-rlQh$KJO&sP=xwvxxY{3Sk z5Bm4nntL3b2B70hQ%YwoM(?{X>D>YNwmq+u%AXc4h90IO3wop!lt(TD4xO2KKrUzL zEAHQ>KJJ@R-8STxzXFD*Pa$9bTT}jf8?2&%a~@>aSw+&r2BQw7*fe=2zXJ>m!j!^z ze4Xpkv*%s}dE-REm;E2vxlc0g)H7^JhV%5I@HrejFd7tImt&e%mtG@4eBh-jf)>=i z_1Nlc{m0*SJn`MW1e96PPcJQ9_u#v!-Xe6v$=FAj*-VX}V%NviK?j$Rg?aS= 
z<3wo8K_Du)KoV4aZsjO{@$`xMnP79gK*Om2Ar(1T(boqRz=ZGiux0A?sJpziy%xQn z6u|}hXd4<-{eot0&$|a|pGhtWSb~kL-vPR(;EQJswU{*YXZAlVyQS?(o8Y$-LG~i1 zhZ>!C?uPdlIa`Tt1LV<1gr851y~Oe-gHdKVBIaJb;!y}8={_u4*Of6hu>Wupl>VR-SiF~LFTF5eV*8-9~9~!kwL8;q-D>@ zi?6!AhO=M{H$JCJ+a0cj0IFjem{(NNX9(`y-yrr!N^12S+VX=a2Df{UQm(*y0lbEl ztb$TB7vz&2vxRUgvaeJ+*&*1OD*={crpO zO$wmF+8r)E=&ja8*Nv1z72{_dSB6b`9|A5^WDm4n}Fl`>A;?J5{F#-?Gs{(@b85~G{f zB8c-D9FE8D8yL`p%Qfrl>FKE_vwU18G_Nl$;6B81%dN48Lcn1*cv(S=6b330F=;7R zL9ulo=&c#$9hSp{x38yEBeZDtxT1J@#P2_q#eMRaeG&_jZz>VivNyXog!?UlLXM~? znMD?QRO^-6_$52&O}Qp)N=^1>KY=|o**0G?R2P~YDeFZIkAEd44HBUl*e~&2FV4x1 ztOqtwQsptdz7Xe&WjXRZA4;gw8wX%y_TYa((+C#8UO*c0>wX}gf?1l?(%ykV{1N24 zJ?i_khG>pqbva(ttxuf~LD4x#z;HjYArJJ4PsZ~?j0!HjmBd+WsYvA`C17v#8%1+u z7$x{=o;|P!@{Gb8CYH%U2qned$Op+n+rEp@Q2a%8aYPn4n^Z|mhTJ@Bt5YToi^1zh z^OwzPZ6MG(N3tw$*w^oB7fg0^$4hccQ06N|c1uouppk5$3z=ULj4)7At7|g7>3%~) z=Urkkj+o%0v-=XHeUC=+u7H-r`4qGgHy?{ZPKLh+f*w?1xend)2 z!|(a(+;70}VPoB4zPPtvh0B+Sd_I!R&^ku$IAeD&h#~4_Nv%NPH4G0n^}kFYk#EEQ z!+7@;sgSX5vwgy(`4qk+Bebp%xov&{0KE$U*KiB1BHLyptDQ-y+&Vp|!Yl5D^i)BK zvW4a(;0)l=TbY#wV^4-pf!R7&r8r?n=B0PJypops2r^%+haWp zBi2hMvf1e08NqOl{BSy&sN5-MJ({k?{e#)Q+F$* za9)?R`w?pNU^YT}oiefPLNnbs!1f-z@15`+RsGF1y!ttNG zvlZ(%gS&NIRyUlSlxP~PUJrqkVJ1GtC43V(A*=}Uo@|kK-+-&;umhkcaCd7X_um_1CPI}UI7p8V4 zc&)^NEoH;HW_dy}-8~h0khzE}>gA_x+JQ*t-N*7(7d=ALOFe9Ffl932Dso(JJI^M# z_c`AFkK}Ry#E%4!TKn0BwC2r{I?FkRnM<7-a;tVpE-3tH;%3V_DEel6Q@m);!xka1 zav>uG&DDPizQKKYVmT{zUS;wLYjq<-LY)2-9F%98f&b`-LK=OwIet z+G1V7HU>@h@iyDV&QXxE2q}wXoN>bHFv3dVt2b)|K=-i6fhw3`?4N?O>J73+3pnlA z?}6x#bU^CU_ihkIhz2Oy_GHU>u(HozYg9q zG}h-S7mIbMhF`C+m~vd2HCiHDFUljm5KqyfL{GKtrhuSLP*p_h>G1b=y||UbLcH0h z6c)CXqa8-NIMrRD`^DUqh-VRq)~LT$R`;(DH}yT9W3!2Y845Lx|6~97Umyqs)J`h^ z)^5^SYX#wn7jb$1CnQ?;Dh!m2E^GJhG$cIMHppdXE-xEl1Q(3Y`A|p(A*KVCH=kzB?UD|B> zHd9ez;ShKFAa$&|Tc$>9?gdy8oyTT$09fG2Uo8lSU9Rnk46HuH5RiE5G~kK>W!t27 zmWUxx3sWxoi^y4^gTg=etsvJA`Lqjdzwl!swu%f#aUXsA#rt}5N9GdsStv?OQU1?} zn37vVRwOw5Nq^BvPX+N?(3l*Eh^STxa7X*57XpY+c$n9w2F3O>mh@eB(Qe&G1+3e}2OaD1?g%8ZJLNYbc# 
zK6-FZIBS80wY=3`7}iRXXlJSSV;I=q?{keFy#~GZ*HP(yHZ;8>NHCQ8iQ4U1+^bh~ zA24(qu)&PKAMA%s1zvA7fz{fda607rJHqOipkg9D`K6^~59(jT)1L0lh}E#R!h%;RlqN<^zDUM~A{hL`AxxO5$DU2^hF2r}vOsQ%%f_`K|2+Y-n{Hcx zxXty^HlL%Ojm!=FzG>#qHY6OHJD%r-fy{i2mFdn#x5=?pS}B4R@$YS zdCOrwm>S?+I%-MG^KziO#r*u^iGZFkZtMSB`2jJeU=W3&Uepu6Ntsx7;vY)N<1aG` z))0p#s8*O*pH_^582G$GC&$NH^D8SS2Iv~RHex+1XFFJ`VhmG|)Ulz|v2#w|P2VTT zY+%ZcmC%sdSHPgZc{vqY=ykstF&2XUg24T z=(X~4tJwvP&hJ1yDb#M3S=;myB~MwF5rp)Q)ySOpWWdWk)F_?xNfDjs5np#+DZlXb z=^G-zw}f8w`MWh0R}d)4oI5R%b?XOO@#mMKv5ZDI1iTE0A(fbYI|ELH%bvGx8b#=9 zJ^Rz#YAPR9T}n;soHodWN(emzrufCk@Hy8WMHPxrnPehL zvzfhin{(r2t^l^%Umy5GaJuT&W85+U&SS_5^AI;j2^^gcxV%z_2erpV4g*ar2e(N5 z`SKP9^t9Mzzq^!Ce0ywZ4&Qn~jN)w9ePxWUzbfcbsfXG#-Qkx>s z`OY)5xd|K&fo9^LG7)|~arxUGO9f)a&YGZn za?}5JNcGP%Iu!!Kxq1w7zLEDXh}@^Me$8*;$O6%j>A{fa7*0``A5Nu^^9;7$7c3tK zWV%jH%qB&U@*gHYw(R;?Ae!I6ag$F;WQd_K<_e2&3j`}-X_kDQ`=$zs=yQxi{?cJ_~~I2?{Lauz=9(Rw4aIW;GFKB z1*-})vMaXfil0wUPMp^+R!fG}(4#;uz>`20wc_tQ7I);gvr9g$Lx;OrZSlxKg2m93 zex5f-KUg&mpQG6>Pq4XI?NI?Jn!rV+0~5}m@-N@k^!fnC4W~bL$_K&irb)LizPhfp zxIpRgs=I@3y9m;c?2pD78XSQ}5gGd~Ji449T9=S@K!PyN5QOA$=f|lcu$44S;>ZHY zvz)aMT1ozk}AX{(M$9iGmj8i`^{L z11X~0-B0@5v@}_2eO((C*j@ke%%+v{Io^LzTo@C=1Ik;U{Hl8|km$_jg;?~^Q6zai$nsu4hnaQ5O|*iRRBz7ZkTm(U zhj44Xma5NrFt@|*)RF1A8d`b{>_19}|E>0)QGzHP?;@oWI7yF*dZZc)8rggI(UZ}p zXd(eA5xf0>>mR@s?LYwxB7Q(d)H*3T5RJST+Wm;VHb{7Np#f_6wZRRkf`o3;dE8I` zs%RS4y}VI$o8Tia>U&zx$@r`iVoVP7d-J~eqbMW~J-iGd z4w7{Uw4gXVP}?R3chMCpV@qomi>|Zw88$8L0w?BV#ZAIR58(!HbLE+wYqpSfT zO$CQRO-&L(LMBsx!EAeJ1WL zGv_5he2qWhfifvlyaqJIe5>{O_z0a*UDa5%G3RufqPwISn%BLax1}DoSoZ>dOyw>5?mgTQ=s4p4C1H^aKBD#3($pIU8+p{Q7S(b!PO|Msl&PnjsWHs!@CvKw?)LM;XbrGH z0GPV=#b0kv)-g^mFM?FZzqNkMJ7e=jx2M)0wW1z04Az8jOgNjuBk|3xf^Kz8#J*4= zbog&@ggOR%rY#mNn-a1y-{*m^MQi{E>ybI_mZ=%{OxNfvQ}2uCk$Ph&t?zFyXV`=v z3%SI2wYYN{I?Te=;~N2UU3Is6pG^+)9lx#ly)cwlIVtZyY=npcqquF7O39(4Uur+{ zJXZ>I18%;cw%*y^x=RLi=$xVSp7u6A`fl#F?k^0TA+cNx$zq@YtXK&3Sls6BO&tvf zTxL;MubvHfi^aZQPx_UJ6LEJ9*#G~7p9K5`8Z1wKY2c`?=~eh^V19DV>4t;dI->`} 
zqA!>2NjyiWtEsJ*4f1NjiN2^nR~xW4%Z-$6^U_h+g{B8L<7hQoB_nW$ET~o7Nz!`u zm!?&M_eW7@@YYbTIP4U6=c8h&RWp?I@^wnQ$o3>vf^(D_Eb^y1+GHML`PBwFX4!!$ z8Ier9F}uH!{&CXbk6@iGlE=4yu*6$U2AO}7FTk78T{p5q*YP=kjTK~0qk8Hs=#C(?OoBM-+-}A~aa1iM4;(Xj`GJOt^dxfUB2s%%oqF9lUhB!DVxG-=HH#0UpqNOwK;8(|0lS$pm@gsVX)}0O`=JwF3MeU!yF@? ztSf#7y-~j{!@#S~?tUs${n60n%2Wbdbfp4H{=*f@EGG}>-}0)eXBSZ@V^Zh);i#$k z$SHE8xfv)!akp3Ep@7CTO>^uO+kA|Ws_x&Hq5hZ!Ef7ES@Ug@ZA*`2pSH@d#f>(w* z&Q+aRkaHps(nZQFdAh+&{B4X>%XPl*<&2YoA2EYsKG%X3#AO?$E^X`AS_)rQ*zMvN zHOj%t0%}FJ0xcopAB)G_KOmi^5gHe7^rlZP=?o>F5QSI)tKZwb(Im%TE{PCULH}ME zqa;^5f9Sc;nid}Ws^(6Fb;;`NJTd811~MEV#a+wxyf1>#bq~@H?Ok@-oqInxys(8G zx$O?$fz@t@p9(CI#h08t3&}hSj39Kle+cR1GzGej0+QZ5HV-KX7=~WTr>XF(ZwM7N z4pTsJu>Gknxs0ORC)+xkNYeuk_HQq+bH5NZpmiaclm}@^WP}3di7&YpuSmQKUbc;c z_g`x478aYX(W2Fmdq3)siJT|&RMQ|M(<+vc?q6@G>hIrnFNj?}_UYqxdDXny9o~8B zP5ST%)U8unLbwh2l$4*&^YRV70`P9^G5$Aa{~O7{2slP-Ra+^wOWf=LLwC8>=_gMJjSFW!?L7gzzoS)u$xT#D;MC|K6_f}UZl9ZDv|}(k z5#a!wtuV$r4=;38ik1t^3-qBX#FW1@#y8|IBpD}VY6(77G9Ol+-NtIoMgfpCH}W5O zCBn`9?6^2G-BZTvu7kB1^WA?X(ZT(;wz38w1_=&fakBGQQlDD9g&D|9akYAUT80VO zqpEr9pCXdMT03EI-2EjnT6@h&fM3OR6ydC%p&)ZVWg zLH}1_;dm70OLOKiX=A!b=A=$c=f6<=-$aAItSoDe!h$_t9Zb_v1|qako}~LHc2#1VSe9Z}M?dYxzkbi0bsZ}aa-F?6 z6XH8hC~#h0xxaWIWM+PT;xi!RbsXvP&a5Y~Dd4`^P8n2qOXwPZn3xef58JyhfAYyi zXl&JYn{=8a(_^7YL z-jny~!3HGjk!|9}XSz`H=0QF`sz#Ap$>p9_8ROb`CT&sH#uBau{A9`KE}hMSD2i45 zRclgVz}MI0jg_Hlblax!=*_$gb^W-=_nr_`#~j^O)ss=Ziaf72gHh!JT$?L1sKMvP z>!g%=tq7(oisltC8`$F1I0`>dU*9RaL|HM8Fy zdJ!4e_V>sAOsimK@O~p{KNL5ehf1!Hxn~{OpuqY_b5)nk+-mmHCB84({XnWCpW2~E zyZd$a_T}g)WH~U)wtDCiKtR8&mkG!NG zW!U?F6ct~sd~O!c9QL|a+?&1SG+8>n0aiR0d=cv#;Fc2!+CIe+d@FX;s!J7y%Xy=R zA#3w&d6x&}jVcb#$j=tVHp?T{1D4*P)_tls@H+|TZ=Judp*_CM+vo2a=*hoMvCV~a zukTkkR{5_J1xgQlcTN9MIM}qQ!=H(<FKt$hpR+q-%TWcx7r>OCp*z++`B>-kTq z_df-IuqF#&k+nQ|33>2+mzw#n$6^h?s>DYyo2XOvH3y%D3*}`viP<*tc%L>Pg;m?d zPhVeOhB~E^NmiX?y!`Xh7A~98NGyN;Ti6!^obDED5+isGN=0onSRqmi2Mv9U3JS5F zx$-a$p^ek<_bh}_60FbrgmkOje5kG3fA7eqvNB9Qad>S$7C2E(F5&Qcl;bXKip(k* 
zRTxfwb!BCxY_%C@ig^Jf_}_;Dd3>UTxGde@w9i{VDz4_zbz0?huh*3IK*q<%?*%R8 zD37}&{H@U8=h(o)gD;L?Cb&1hqk|ubi(fM`-((Z?(`6zvO?_|*r(z>Wzu}gju5Hc- zsQGfH1)rYUJa}&vny#P8dyzYBh-UGtB5+4CE8}b%o3hqh$paU%_$%$w_*CwF& zs(u~mZN;||*)i4W2-~$B2FyO?AYZm0XJ#09zVp3nCF%kz2nYWzY&<`e2s1uGEdibr zFKnemro*pj2BI|9rD}ox#HZCT(#5F9m|ZG73z2!hCA$0#9e#~@-T@i*8hIbga^#*n zMtf_k(jbKUuX+1Euf2$gMIVLR03(9_p0xvwixmmNQ9pIat(7aa^E$HJLrHGqIJ^&6 z+#8-JfPp1(waEstYj<{acdhKk1ebQHEa+9w)at*j!GG?{9j+1V8_{G@H*`rd=^J`j zHKJrMqMb4y>zMFZq;jWKX-2lweKGT#XO3h}Weg`GN^i`MU-w)86chCa6B%3I+pQ=F zYF?$af-`}`8rQ`d|0o{&R64}7)~46;{B z9^CQ<0~vVLO8?%$G*t2)uriM;Y0;GtNWIy;$6(T(KTEkS&0ky3SYtL~wvV$HVY7ZO zYHW0NYZtIka8mNT7eEMmDy5vsq-Fx69ncoT_v?)&fsZ=57|(ud%YCES2uZ_2XfRo| z$h(UtAc9q_{3ym^fq#9%aH$_rcOjk`>T4~qilM&yczenRUk@PN&$yNf+evx)z6dhW zH+|#L!~cLG^z(14j;i;NHRsA$z$~S0pXsIdfu#w1QR3@wdfTA5dPYLGH@3xRT3Ikx z|CL!^gRRHcoc<%Hqp$C)7kEn@0p~0T4HlN6!S{(`AFj1)$2lLbzKXoCdnM)1Ul^FX zm0F=)Uf22gTN(15`p#3f#NNJ4ZG|_4_qx+tmwZQsPy~lioq$DGONyF81?>wsDrJiB zo;bVJbgx1BHvY$N!--i30Ts{F6TP|~C$oH4>rTh8Z}mzD2+zB<99@pjqiRdeSI%^v zviLdaEGDwu4%!#2BeKm;^bdtG`gu~;-Ftvq)BIx4rVy-?7zoD8)LH_km+=jT zKP_Jj|EFQqggQMt3rsDFQqOPM=#?+@77cEN>{bl}|B^&tz(Vm$QSaWI;CYzipz{+v zTD4_6y%ik;VO=DN)(D9YzOw`p9(-MHc87-R))!EfQ;l(Rq~_u_pg`lY`+3m{{Auav zI416iQLWBACqdTzL$%6~{<32Sz2z{Gv(z@+QuB8sbA=!n_7jG)$u!&iVdF;B>5*8Q zWVU+hlmU?+8W8xf3X6Y>Bbxpm&7#!rpJp8H7DdHM?THoY%F5j~4+@S`;>R2V7THtO zVBEXu8Vri{^lUuXpv^7u8{}e8T~2O}5eaYa+>zOhlAH^6_~uJ;6wgQ^tYqho@s0i} z#&kYP=a_oJ#oDiQKz7GLG?=-*o#hlI}!QNSX1k)9Yd#-<2F7RPh%Bq2L76+n_%yKK z^nuek%lQ6Wc`0to$V;A-#%3JeM_86%bcCGl?CTZ>CO%w??&ICJx2R*o{|{Af*%eom zt&Ktmg%uLC@Zb=f;7)M2;O_1a+%34fySux)yE_C64#BBgJ;r^<>HY=#%i3$s_2@9Y zHWP1GM7oj2L51nJNkm{;i#M3|iu)|9t~{j2PX=)_`NtAXMc$OpuL+E_x02?rJ?=V5 z?FE>F9;9o=Yl$^Rr+AP_mC?zbsHXhWM@08=l@t1ni*U&O#@`PQ4I|2DV?5WH%u^Wr z_-*PP*=XaY>I3z?h>!|_OsvxAJY0^=GQOuy4B8sokw4{0bQl;qj32Q%be>wYRLZDr zK1LUqJ)I*zRvy&7t2LUr>JA~du6qqzw`K8Qq#&4ayUyWpX2!Q9#u{I9q5 zY!#^2{M5TKsTvWf3qx=Cb9Ns&Z*+i(>F=wxtq#|TuOjSe4nkH^a{>z!_II2vjB%{fl8U=NZbD{@WfA;@vCcHX_K 
zPVMU;Jt;V_YHm`^|1Qb;#PrjzSB*@7_(xmOsIOjXvonf^86HKVFBK=Gr9)Bi#^)^u z7j{;V3FsCHrCQjcCF_>y&qQA+$I|OGM@A7w3oj6Ny93ZN6SV2)+M{X0Sn^?(Yi)UV zdqhzC^jPWuMW35*@Sp78CXhJ;>@_ZDRci<83Y=MbB&-6V9LDNS;48|s6dthlXrlpPEH7gKeN7p^-uJ+XJAs9B$JQ7YE?+ICdD;Wff~p>VWrGKa9}yetMxG!!Rdo1?`r zr|&Pe?;8?D!>)!)7dk@8SaM_`>_rUmM>LQRgtw|_FSNNxCfKf0FB;B3YuGpY}D`-hT44(H z$f*~_v`894sd=uip^C~9@*6lZOpvGlr?Rp#+#{HDyJt_X*Jg;BYlL&|48%f!C&l;j z6^1EiK`_ICdg}B21w3|0G$Za)wR-Vu_}#&cn`{u<3jg?asA9DE7=W?hB!-6HMGMAi z{s#V}=b#cAo4k?A#Ka^;v83R&t);8=pSI$6Cg^)D`vC_h)5+AEQwR?&fofcY3|L+N zYb}SoV6jbaqwyQcMq?E#T8Tp}Q}@fKT+#yWVYE!k0l{TIGm+n?bF(vHYLJzCxG-(b zz_1ukziu=L!{E)cPEKKb4B09U{@q6Q*GnwQ-J9LPbIN-F3%y;nJx>CmEB6`}kvmrS*}V zwriu7YV)f6V9_e}{I6g~Z=X>c2okOn59H+X+Fo9_!9jIL9e|4PccP1i_OBo5Bmn7^ zx%sdxgO+~2%PTvNeF23L`0rT5rr*DbyAf^YGs<7~8#+ zneYaCAzbfp{ePWwmf0rYq`}ZJGCupJY_$+E-q4UykeMrec%-9ps4?7lQg107*dwa8 zNycBe2U6*K+GxSr_&t;8*^G6mWUil+yeA2~u z@rR%QA=`H^?8&k1Xw-JK9QxCdH9uK_ZDN1Qf7`O8BBZ%E-tz!V9P6$|xkq;Ud;i-?ers(1h`JET186s2^6SJiF)+LlGSupBj0jSG=!6KG z?WOSw9Z)lMf2~CCZ>e1$RFnmWpQot!8*^gczqSgTRzK+7CHKj2<0x8B&RIF7DbG=mQIJNcucrIwf9cFbZ{iKbD3M4O@A|yO(?p~+8z6p+n!VR%pvY;jW zj@E><*zH?=aH2yTcqMZ-@N^16qjd;XdK-$L`a zj8|iZX2Qtusc;^Acet4F57%{99xkT=`DlRfIzXio;7l<0z%uzGo{X^NLxaIuKwziG zmjc!}_saL}sAY4_bKSp}TlrOuNB3atV$gzrNXq7%w}xS%Z{VUjBNA|z zzFk%fy%}-^HB|H!I*c$WJ<>zdV~E9ca-et`HMQV@vn)~ zuISTcx<$0%ZRT^N$WZhscihOnzG{aVqC^92l&P)4d|Vh5*|nM$fzVadRN2ny9_6@$ z!!|B``bu|qvQJkwm=tEAdwK4`SgmX64M9#BvzYN4W#oNNHwYM%_cJ!P6bX3utiGsh zsV>fAiIdoc^Z?_-x1VmepMlq;Fa^!~uEz&>fu|&&+~jloe`!%HZ`$j`z zXskB}Hlw{plz17W8N{+qZY6B(-DkODcCktQ(mOJ+vSg4*_+&|&M#+K}w%z1A!N$2h zHWk6!D{A~Z`P`a2iKsO{SoPQJv2T-dSkmkeK8I~*;(jI`wxqZ1hNs;CUTYSbE z;0)($(wz88gj$?HQJ$LW#gO3{Na8f>8yrTp)7)+bw^nINn?S6$%vdiw;OZ8{c9O7GwC!n@Y;a< zD%nu4pI%?MqBYgRx8?efXIl;L_Kn_4$Iiec5U)y8x+*>#A63~Jg9R$5p$uDTRN*@B zU+$<8dq+^r_LAJZd*?6PoFTZYi+BE=>hL8>vwuPVKWF@(weHzKumU&MvD`mq{l~}= zRZC`J`d2L6UU3+|9iUlJO9rRIr63muFcZyzJiKL;sdVcc(FNUW!G2Ck-OMRR2}?RS zAjzRSkt7|n;NQb{T&h_Y999txMfq;*;3{8gn8g9dr)OQa#p93SEjw=3qPGtiHP+Nd 
ztD$hLh}#VO36hi~$sLRnmEDoXB#iw13cWYyrxFg@#j)6aDI~zE?%fsv9tP2}tKrWG z>VI|?EcWC>@qJm5w2gRDa>Fc%x@DuO%trU!;GgQ*FrDX=AkF5LUKgp~7h#Z!za{+e zh5m0WfK;?EO%^78A0d5^ASbzNewgEwzvVs`+cZrUfz5PRA7%*n9Bwmwk&!h7>$Y4u zKLy)f3z;zQYm`4S&xo=|%1`jJjRyonFr(eIxq6HdfQ^P61At8V$T5)gRCa?@< zl`VEuOA1*I^B3bhX4x!m7j}?o5GQNiW#bQ{Zz3OAmyLfrE&ttT!8FrDzs=as(S`g- z49-?xV{QC)ew);GHEhaRSFfhARq-4c(6_cUVC&7Tlz2#Mm0?SS`#UAcJMvHqZ`mZN z;5^yP`g%@ROue0h<8VD<<)h8mboqE}P=sa5CrrG>r}=NA56Da1%E9uRNq5i3;kL9o7Of6U1GeO9&;Jardc$4pYVL0kD&$SV zKTgF^$A0eM@Cz`yz}X9Zpq#icgG%bW;y3=J{!CAfiZ3c2{QhEGdtOppd zBqu0na3&+DqD!=*vb2T@1`0!THQ#l-gw4%}bo7UzVJ+FxDx0Q5UTm!Ow$4B=^|pn4 zutwJ4<3AXk1e=>fB@K;dl35fC!&c3Z6?ub&&(?VI2*deq*~Co9IYY{wKqem+;(Je( zPiM<@fRw|N#K*O;%J{o0fDt{2l@cpc_76@l6npPl8Z6Ahui>c7@b&d`97f%@9~+7d zJib-TM=tX_4n25Ep|CTN7g>Q9pi^Fv2WHzMgY|sU3#)x-ymlB{+Wy_ z@e%X&Xy{>@%&j6&P|bdpiE&581^OXahBU5~odSo0VzVkh17H8S643uEaHlvxGLCr; zZJC!*RRb=$lg2@N1^*%`g;?C`l6VnPC9~E2!Y({+YhXOq8|TDflkd(@@gj$?fy#gJ zTBqNer;JbpxrEfSe7T;(+@#_yU=zl~bd+;c50^>ZAIv&z-@eEd@kV?whVWmnglb0E z#$chsbk#iHW$`j9C3A$WT$gIKJyxFBg?6ifF##SLlf!p7JktaC}#!Ivsg=0`+lsObDpxH8M_!qw_xi?5%_r)sEk`*<5U+Uo5lKpe>?mcZ-{bjr zwNBIot(W~vxRanpk#Z+9=5(G60QPSx$Dw*faN(zmNNUM&JD&p}AaF23K}JmMqw&!UpLRfr5H9Uj z5Oj7F31$-4O5Zzp8$xsCL+{x33vLWSh)xgGr{`ZFGD%MFE{%<`KS#g{OhR1B&ED`Y zcen9jvW;3SXtu6I$m{30tH$5H@D{Kn179S(X1Wd3`nd1qf@xpgdASkwco58rcVZZ( zYkEFe3+-b)~4pj%&74D|g{7Q{k_^R7=A18b209>(P8$g>S6@u%~B zGIqtep%%jDF$Cv=`nq{5=J+^}&4jM62{LEAy|hxFzdvb2!s2}PnEsx0`^{+WeXIfS z*Ue-(uo97UC?&(;lY3{c>$sdzQ90FZYOsfwif!c^ee{(ijcpMo$o||%hM9Xs;d-KA zEu70JD3rFs>>druhvmQ@YDF^p5BY&KiX_#50OV9)k5nw=4oay;Y32N#dny7Z)XpKB zd?mW$_g{AEQ2tdrT(#=@tDT9DgKOsj&$Jj{fGzucNu~JXiu!MRR2O&n!ai@-PHS?2 zt^9pgM}Bp)&rsj%i<}i>KA+itLlR%L03h@1-~Icqdk@G@>Bfl`8kDMU-!|YIgN%R`LAyU48CA27Iaf+Yil(w1e*Gi(1wl>Tp$Vvm^BI~G zj-nu0r07oyi@u!xDzyyXk@ZP#?oZ|pA`ThJu*XO*&lihxHF_FF(Z7&V5^doIJDnzo z=Oo_CEW%2Ne}R9bvFs5GZO@uHYcknv;GuTYrxRB~vx=`T3K%(Dn8^`{n9GUD^R2#x~1h#T9EZ zb&Dg5XI59dyjLQ)S*WGPgDb~w=|0_v{N&8vbnLpV!JkyW^u65w_KJsY-S|$a-q0Ja^@tAl`pJJd 
zBq%=*&urGOpEc%rPO}L7`ud(!k0P0hTWzZym3}{HVj{-D%-%IBTesTe%@s4|V4(}4 z#_n@Uvo;>%Sr0bM|7Uc~8wyr%=k{hwAC9IVXQx!l-MPpsG!JO+~HEMp`;AQuH{yC9U z3uJ+Eko7OA)_T4z{mt4s>Job9Mn`e=CD~t_S+MH`#^M5Dfd^TDsdHN8g-h<(d>#o< zT4a=jBEAxyE(fUKpt!Cv%Q-Nb{vzaf80Zeyl(~H5aJh@rU(h zlRWt)(!~>|62frlP4H!)D|Mt_Pj1d`b}2!)-m(Ml4?%UyF4`RSL}(kHC+pv`!>$6% zm}V*cXFEM$oKXb~s>LfG^cpSs%n1pDri=21VJf~y#mE@3a6#qZ@$HEy6-lm=7-fF) zFOOYUM`Q*Wrh6cX`$%y5WNF!1r-dxPg zzU;HeS)R;5Cu8=7?+#gvnMYQX9WRWcaH(+aVoy|b%hhMnVeUvTx6eBdDF680>@&pK zJ8a)u*=L#^`kl@Kg{Il%T7IRPE%ZF>@_8?o)*AlL9YY1IEf0)L`D8?ii7kDyF+DEX z|12#cRU*+=niigNP=6U}lvo$&=t~+Tt=5kl@LeOX;#c`gtV8CZ7%~wn>=an1-MOrJ zMjb?&71T5Kb8&a^n|j3$uabegrDDU2Z`ubVF#>qHjW0`qmTf7fI!H55 zgl+QO@0fe1(}|#|5Gb0FNcey|nWr_(fHYw~4%b^N9472;c>zMxCL~*y%X3#~nGDet zCZ<^H#Xqr$dp=uREtgW{=mG(u+oYf8r0^}jr%_wt0Bp40__{nFeP%^sv6UUZqv8G9 z!Amwo+d)cD`$Y)dZQgkV-!DLC`;RAvh^*1ktu-t0Tau<5IQq`A*aL#;wiF&RpVH$N zW@>li`yQTW$PhY}eLhzFBe6qCC<4A5^rt^J08Z|Xo?V~dLTa7BR|AI52{0fk-Jy>C zKm!N8;hBlSU4pqXPi$PJ&6*{8$HZ#cVHSIM7+#E?UBPQ_DgxXZ0`!qYf!&M%Bw+uI z04WTLaxE78h;Q80Ak$Tnz2Pl+p?TzhnaXzQOuEzh?$B!Yu%&4RLmmNtgje_Ccra0!|jwF=3S)<#@4 zua$c7pFW|g|B(;v%jxm8Q$0|mqF$=hiXmNwJtkBG`A=JCpuOhHMz#M6TiZ_+mG|gO zITR24~ z$G_9m*4`6>)wud5E648atRXaWx$j6qqmwzhy&m)dA6*lL78@l^kfSf8D8; z4Y%AMom-c`$i8wlc6WesN^NU4-4UIU@Seq-eU|ATcA(Ze*2)b%MHtvPe$+1Day(@v zL0cX5(-}B8@$G8=eSH`zswbfeW`)bvOQ{+5u1gH_VXU#6ulk(Pervel`_#lEo@#CX z@=coV3Jd?t8C#RntM>M7>$}~5kE2{NKw++s21kmUcYmwWuFT()_D?x_uz%7>LUgEg z1hS=hl zi1=snuqzS$faMm)nI1#x>CWE$jwcIrAK%ngV7&PL&zUOcY=6UJhtHUm2o|&BQ7v^Z zgQe38N(405*7pUIESV_V&p|k7L{>~5O%eC}3D(1crasEEADV#M0Vtcn3?-%QWOR0h z(YuO1OG*s5e20`*2=2vqkukUVy=f-E4r9*@o|ury@Q?i6D(LY_?Kw!|<#phmjeqqh z0cV~%1L*5YOQgPpda2s~%j$g%@kt2LI};HFPTKw~Lvsuo#=Z=JI&M^d$(RetULNV| z6;qRY`_d==lP>-rX##n-WIPV_qSPxnmkhe%pWT?{iQ_SN!p;7=ukZ~=sqkmedSxGo zeV1`eDwG{z1$c=E5?_qzHWO|hJlW!~(#3mkz{Fp2%}8d7Y?jU?Uz*VGKL>i32r(+q zoGiq(-@Y6&E`*sMU{b-rFx0HcKAvO`%)Lr8cbB_%4qAlfNMFHgz1rM&wmZ`oaq#GN zow=EYi*P)*o)0^%F?~Gyd%nm@)&#L9{}+&#>~9L_jtHQ!6y_aJq_LMthVe^KqL`)f 
zd$%+Nv(pwh!De0n!Qr21)m0*RjT0GdWs*sDw>}H%eeHY82*aQPAZomGXr#7P_-GZb z5*xFq)~fW{H1y|ySD4)H3+JbnFyavayUPhFL=?mw`)&H15Wms~YTM4~YGIa` z_!JpJNK~_5vHmG|euQ{(Fj}0Z=LIAFxJ|EQe-;u5F%E+0GkFmK~$Rt|d^+kCi18}QKr9-bg`eZBhHdRtL&C+iO4Bf`y3M*+owxq>*vMmb)8!-lDp250s#>RY+WJu9n(ATtrJR@Jrnmdm19kc@3;xFc z$p`%}$Dt4-oM>K;2X3Ve$cBnAIVmGvUKPc)Gf{G@U*!Xw(@FN$bB`jrNT_;W4pkgao=0LIwKJ?Csppbxj!ew)H|v z{Tky63tktsP8C=FDF1V&syc}eKvcJ*qlRLE8vqA^6f{`voHWhVZH^epB24xqyTmVt zI8U=|L>Yn)fU23YT^|@G2+q)(H6>31#hBx!6R}!=epfwK5J$BCtZzM*f*fVtA8ZR- zr2=}zp?{OUhwg4xsrl<->A%hE37|Fy(ROW{vEHM`^y!=D_4Og`@Ce(eJ&4i^5oVN3 zo$Rg7e7&o@bx?o7eJ$7cfP*Q7Mu&sp2T~H;i6|5dZ3NWf7K;_XCMCBvc>zToq`Kpw zz?gT%{-7w<-hj{*;Yh5bqa!EM&Z56YUM`pwzB46}7(eAMrzm-&_R8h7#^_;oV&V&` zo9d9{F$;D-=Dw=q=PL*(zl5P;5~vVC*M7SAc{w34ns1gGb+4arn`$ygu~iJc933;o z9qi)etK>{!2~$g4!JCDpl5KV1tQp$&@vo6|T|#uKcXy}f{+pAemh#^fZvKk0=epg4 zbpZX7TU>#n)+}HNm7?v&Ei9<-?GB?ra02D}-fGQ$w$V7tro5#rMi5t}(G_!)*OrC;5wHhB76*L-Sim^gZJyo3b zi129W9oLVwPtH{dmD=Y|COWo#r#w^1hxA`f{;jyglW9|RGc{DKhi7G;QXNEWHNy$) zC)J73OjkO?0X8)wF)$8zbbjI-$@~?>-S`wONkxCQbCA6PqRyQUBOY|Xt#z`hW1OBk za}51+zS64uE-@qNQ+(N7?}Wl<2G;m*%LHc-G>%a?4=0O=xJ8CU91cyM%c#g&AbQ!F zVB~dD1PZa7yGzxp;{ph{h?jLSeB?#VsAQ+$<;m1&MCgi!Y$p z$}=58LRSlOPGLW1%5PZ@x1Kqw zL}edmXp5G5wC`{mqlHU^IZ)^Zu1tPTUqPtvh}6lZYPCkMN8m;#6Ti46%ARoVz%kB; zXuzioK2#Yhhfm!EmxtE=Q`#ugSbmb z4|`_cBP`;EazVJ&FYsn7e)o|qfM67 zT5p6m$FEF`iba{0VVS+>$x>XeJEaAzt~Bh4VY>+Em6Y2y4X++UHXeTogsr3f_Aw5} zWQ%%(<}30g{lAT^a)#+S)}=ql1lIpYu?>M8!9#Ss_!t^vX1FG9q2ql{VD4?`P58`HQk|PPl2I)+2toB1A`j9ThQiS4 zmNlI#w^By9I;o#*n3V7)$OQ7DO9foFxS?(eK+pw+l}wZV5bfeemPeDiaT&y!J@QInArbF?wTVR5ov;8%PN~F~^`sVG?*CA1HQCKn;O z{*!&yeXw=`hor297*8waHo+5jV+%}4_SDTvH14!qh!-2F@ZEBGC$Si^jZQ3dh#h^{ zEuIP2iD|7Hb{eI`0^cBajdw3*HX;8Ha0HJl_?+Jz3=|>P91@%_82kQD4aVa84`;1w zfsG`Il}Zh=p~s`c)6nPa-*zv8;ZGi zZ(LbSU$@mh@7{lISA(&iHWF$X8s05)?2bzyvREanM`iLM^JNRemE$NDg&P!`>*WEL zH3ZJRbQTQbv%&ohn&nUpe99Prp<;sXEKOAt0m8N3BNJgITYR4x{zwGx8IbX9MT|a4 zg^&v{p%=lb%mZhOlky(GLBW5A?(VFaF+rIN4v@Ivm)~AwJ6dXLC68+~Lw-GMk$KP) zO`R!A(NWtL_|zFAU?a_r;aVf{q-4QOy7 
zjrvub`)|C>&)UY3qO(%nAo$lY4O$PDv( z-aZhec8m;!zz2qvQJ$BWFP_#clkYG&dEK zgmrF0r7{(Hs%%_>NY!-2HdihJSa<41Cav(2}mUcs7>a<*OTQOt^S?wJk~ zxPM%>!eU~I)hVS0(oV3@tsJUX*H5j^xiyktRBODBoxX!@tD{|tCvBKIRvZ4viG`&1O<1w5_N#m z6GKtS1`#@Y#;;t}gIO~*?E?!+%v%&Wei64m2?YE9vH*~uWbK!zp>S?FQiElmvaH?j z31&8n?8KM_SRru`pz$gBo6Ih#bz1vAKcok9eiJ3)QD-CBxJm@>1Wc$$^yZe#l3K|h zTdfGZET*x))lMa3aBy{|hR3K6a(hD_ykM`ABV^!m!ejd29;QrIJssS%UxG$xHt4(( z;BbXom%qpQ@9CdKL;(Cf@4U=K2Z?bO&X}#aI0Ni@-+7VncEeS<6Pd+)GrPxzz9K>4 zHsKTJPN@SgnhbZB3@0<8ai0JOzP!pWm)7IFVbwjKDi?$+=T?tsI%ia``Cr*-OGU1N ztKpJGqxJcTVX&j{-*Bo+;&bU(%<+PFq`=52HoFP_{>B^?rMf$qli;iIBwCIH=`P27 zxszAZdIE%29g^M&^ORq>^3PvkjS{vK_rwdUu4*Ey$U90qERB91@Mb1480HS04dGSn z%!_KcvS(PL$9CJ@R^9<<4=JYpzsiM;>zzk_s99e^xWB5Qz^Lrzn|48n6 zWrN!yM(h6;wApu%pv4WX6dwy+o2l|N-6w3_&jpEx@p^O`^@n-I`a(WEQq|}EnN9Jj zfM*%(sn!(egh-@9UN8~N7k`8BvRn=3U5(l;f_)yuHU0CZyVnUfG)x?lH z+*u+s50X-RGn>^~h!qG7ti-cUkyz_Nw(Hn`3FUj$F zL58x>S0&AwxDzUc_bElf6IssvTa0#DxA^W}eykK5Iudyg$>Zb*^6;SzM3slSX7w-R zt=T)}Q3mb^3pE=uz( z9a(fg9H;5RZ<~!Ae0-XQbBGfvei3Ht#JSIANOoOE(vJd;bWeOM?|9l#i6<{8-SL!B zGf6k$Kl^UGyWU!TOynE&3bvA^8)NDEq4OueQw}1%Z%Ev0@cVtuNToeeM-%*MIqMEt zjWIg%<(?rjQaQpO8`u8w&+}}D2uq@eGk$xR=w47_aRJ~X8Ulkr%>*{@Xs0VZKA>pJ zx))~DLX1YC)OH?flni7awJ6mzMGJhZ(Y5Pg54^jr%o=~o6qnQg;yT{NM^$Cjj!30I z3+RUYm?l&@L2r*1ASDKQ_4r_0RNTZG(WIMvb<)1P%+_8PD@V3oc}&IN;S8)oGQmu0 zzRvSqi(1MVg;mNgqVG=7pQSFDQH)oq0ezO7lz{Ejo~dXWi3S%t+!5M9u%pIzB2)i|cm^0||an zV3V}$WRmW!^niKOrwH0r|MJF*FnqH0uf_g|TiL*uSU;{V`{mb6I$|T1wh-TOD=^I~ zOO0=YzKCHf0KdYhc=WU+XB$UGkD8HjWp|*cHCG;Wn}QSB_Eg6f63K!DU|m*>@Kcn$dE$h$K(a{UR6>EuW*dpA$@j~KyT zw;K}ti&7hYhxB9e_Fm7X+EUwi@yD#WVup#wZ+ok&?JnQ-CG)nGNQ=+;k7qAT)`f7( zG@Ml+tcL}G>B0Ed2xDotl;cxAUNzc2Ut(J~iCa0ZQ(;#Bv(oF-2gF`Mh|0j0bfdJU zILyr*v@)&L$ExuMl(~K6?-Rg2|C6$iZr7VD<*`^X#G4n6&z*>WXxu10*O_e^DQ)gf zG4#7fzx?kdct%pWE|-;uEJl!jd4*6bXw6NffqAeTNp;XMBSjh*+dH*z?|fb9Oa<7t zeWt`1kBA%Zm1&-sglaf`b+ME=bSrx|wb};eu9k|<_-*(UgaU>cMj)(TdlPlsnW>55 zwpdN&9&+ROnO8A=XGe}x*NZc-XkIqKPAHRtM#bKE{_NQAAv);!cw!uA)GG@)0^7nGK5E{t_^){ zlZkxA?{4uGtuoHt7H=pE1?{hbt 
z>FFFBf@sKdoXkF08>p?j>-~?-+pYjZ=K@lAd?B)qzj7zHF*sg=+ShWw8^>I>?Qqsn z#;DB{d6-pJ+QXoRt?OnZD=Vv09Olf<|M;{=joiOKZw`Hs)+*{(iR|C4rPZ%25JiBm z$OC14HKK=dxfAHF{afqZpUU1+O?;>6I`ILq4{Am)^KrsV=~gv|H2ZevUSIw!b^8e4 zVS=o-lTkphEm;B1AjJrw_Qu0FDIk>^K z=Q<5Dr@5O`UG?;vP5-`dbvaSKjRwBB`*TnIM0RL;nZi-(DZ>q=rg6bZCq2u0ahnI{ z6ry`=--xT$Hs7{qe#W^FtY|Hgf@Fdz{_BS?b@NjoeYPg7D)VyGRyjv5fC7vcM|EGv z_l82)BX2So)iIfx7#`hO4$F^alQeKfO54o{&+4!1sn#iyN2LHIcCG*NH|FBA+Zn(L&10v@{_D%pt?Rrn2{kUMQ@p@AN?jZ?zP0*h* zM-B=*@xsAh61Bi1<7lqUU%GZfHKGmx%{DL8g3-faT2+*efa=skkzUJPoz?i+pYl~-mPD%wzb1__YW1oc$~biB zIlKrh>>E6P4uTx$ZQ#2b@%SJ4Pjz@D^f#;$>eevyLR}$Xsxsvff4!MX)xr6$)al-p z7@C8eCotMZ2Hzxmea)#?_veB51GnaTpKi3rUzw2`q>6CwG_~e`UYO^EAc+{agMtEQ z>41)V+ocZg$lJYD%xoNQU%Op=rj+A-JBYKz%NmlWfy|wQx`#`5jL(#1c3@Ljqhg?0 zO*9M7V8g+K&A~$qYX3U5b;vCBtPoOji#+={8E%rh3%suKjWxU0gW?K3mbs{T7JbUJ z*eK6S6rw#C^V5DO_SO@>0eg7i3Rd;zjI@cLpP#23pJxr1CktbQ4U*rc$!s5giC*_H z-l1c3@*zB4Dn?$Xm@*6h3iXewm#8xWVWtFS3sOYu^_^&MVA4b*Vm+8#<=|;J0)iHYhE~&+Bcd6`J$Gw+b`}<(&%Y z!hMI9xauRUG5hWzha9l>C2`ImXlR%C)5n)RU)-e+M8Zi_>Vq6EoQI9(pyRv!C}M#R z^Yy;tn`(C)_iVV3pZL&CvLSODfYvN6xPexAfJ?}K$>8S|4 zc3gBV(}LgiUV!(d73wav^+J4~W*pwTtY<#lVG%;u!ZSd^rdu6R zl}*XJ2MbIu(El=?Y(y^U%P~Ypt&I~(02D8p<)IQxnnybjvw}X@FQWqMoly5g^R(pG zIPxPm2mi4#TEW6bx6n+gnk0RM`xKV+0oo26654dd)Dl3Mc(cPp$eDWMF5X;>Fu8_F zqKRa9ep;wFJ9Nnbey`uJ<-u&JiIhc8KH67?eZ+f(1ZUqXvddAVcX$v=%+y@2SjJ0_ zH6FZ>!Nimwh?tj*5Cdogt!m}!bX}wgM9-J>{_?P(|F5i&@u2_Ce(sz3)TzUV%k)IA zqDyD{a}Y9S0Q6GWC8d8HX|S%G&f=L@dBy&oM1Rgf+Y#NxCxTkOdXR$LQ`O1NDIeixAt z8O5A)jqyA24{(6p-vZpC)v?hNltlnv%gK1wBoF#tJk+lKIIlH zIu15$POy-WPJovC8l^m@{2Vs2%QNa5FjXHB+nqv{aP{|30XxJ~CD#N|Gag;yQN%5REr}wZv#8--o#mIjupd6an(U#I(^@eHJd2(~kEjI{3+_S( zMSMkdKZfVjgWF7F8p)NmAn6nn_qMLd|6&G7O`s@7I{bZH;Pt{I$sAbVnKhd8{SXo# z{UR~Cjknre9*Ut&?d_Pu7q0@Ztn?G=f5LnbPYY>)%|>{u=~Fp)$(>D~5WGkuL=Hl0 z`HDWx@=lZH3*b-)7kFY|hpGH|Sy;GqMpbiARm6LCE{#i!OFn9Uc9G`;3}f7 ziuH5=>4dDL=dHX))Bjv{@#2H|c~u<=Fd8*e3pQ0@|J;vD7M)Z(#2kV*>0di%d0Z%6~29t=NSS8g*q z1A1bdUJuXLdLOS%6qYjVub2qhnac#nL-7KT?n*f+uf=Gz2Qp|B-F$L9`Yhr 
zpd3e?0bgWF%{m|mqMcZ$_d2hEXn61yEO(LnPurF`sz9PU`dr=>I11H>+f;ZGE03B= zW4=v{O%$2y-6(;Vj$_z1noTzUA5-5LU02t39jmcz+g4-SPGj44W1Ed_G*;urNnuTZ{}hsa@bCtq z?-kW;kt;QQY=Q6YK`3U|>vy^y#X>U=3?DYM=e`n6xuiOekoH#Mw19VESID`u*Awzc zC-^=KzJDS-(G<)g5l)r;6uf2+{llXLV{(l4r=!zCXnW+gVvvK!a?W&c4zFl14PSxQx3Efs>YVZUIk$_c9{=!S}B5{;#@`>b0vFrXVG1gI5an|cN6 zs)(0~7kR$wJHvi(5510_Zq>Uh7wu@5x2`80^Y#&0y*W6brk(Dw8}4R|%?ys0N|)n! zWj?o#FTph}5aHeAGhzCP?ZpnsHjR62$>?TkYE6^^rTz_e6@c@! zH_#ZTwdiT~TI%s@&l;HtG1yovJ_FVsZ;-V55%B+HTW*mHw@@5>X)$;?n=bX&&X1k# z@fnExhoB#=2h3{09?BHq*&B#8IEGXhl3OCIt}G&aTR=m2*_^zcTwVmljQq!=uRM|c#$M}37vjzROZL8zxL8ByvJvyu2Qw!D-Zj%sOEwttF6<& zAb~-FOdZu{xuOKBpA0I#f-Oh!DnY{Fv$lT2V+G-)aS31=5FPjxUafIO=CxV$AJIHQ zZQIhbQilw#w!DD-v_|5}qU&laP8Zq*QXMuz)^97!kUa{MyQn#!7j!!AwR- zh3%hFA$9-)=F!t^VSn%=5w=P7sycHz_}Yjb}&R*HEr zpwz<7L~xL@{}CJWZ;_I4g*Zg7d%CF$)Q!7j3h8Z`UOHM0?}FI*Ulq_0_5c$`+5$#) z?QZp*jj_RoXyj@RbO*?1alV2nhxeeAMlE}N4K1zqw)*<3c+ky}o`;@PU|sZ>?X>v! zAV!+yZJ~_DzUzYtX@>Swzm1&A_^zCL_Q*efZ0A25nd*fssUdfu|CI4=d9{fY4CRA< z{=9@DAN)b_47C9t;DPe2a{IWdxT{VUI@LXimkH_1qD}7ELRcvoIJ-cNXUu_?;HWJ? 
zmthG*DH~r(tBVZMzcHI?<0Z5Oa8A#nC6bpBim!_IB~lx)hpM@Lm=2Gd*YUu0Oc5%k zH0?ka>Vx9^x&c?)M>QKtPJ@!_Mp z<02)eDyP0-i>A;al9XZ|+lAQ+^#Tv9iiT0QjEwNb1ICW%h-zJ|k7x-M7BjjHvebMp zab};SO@HG~X_lO|4|VMw0Ol|0(rp5+(!l@B6D|hVPVOXIvtDr3`#>9W)VHlCYE1}) z07rzu7F5?RUlvS7cMe?WMpW~+heV&gzE>*M2+jW?b(}2w+L*kf{qxbI?CUC3tzF`< z4MC>=nfpZXc?7#nqu@ezuiUCm*j>HV*k5aMNP9yMt{Ty*t>VY*xQ2qE^hq!Ij~I;u zr8ZULf!@KEs>iC=QlXn-Py40vWm{c6fX*#|IJLn`hiPoda?UneZ(7FTZ;A6D_jiL5 z^)kv?m2!?@2(AT`XKQKwBrnN5i=~*51--k4jP|p0Y_m6wosH~W91Hfql)=8Z&*j1t zYxLiKywo_gbCaHb^-iTP6#?0rmWIC%fhJ^lFX@{UJ>rrp9pqg`H6(T;WQxe&wSL)C zRSTK(Ln=J-$@E_yY$~5ByALn-sTE3GSE!-32^~GmXi*tQEl&FTCQg-0U;CzOek^G* z2#%>s&d%8J)}3?}T9LB<5(>F#UHZrP^m5c?(PF9G!(%x}+r|5;!}1N#U%HQ-QLZwZ{xUReh%U zT!Sh9sJ0q}6{8@{g`II>Y#l5{!OG7CFZ9my@Hg22J8dfiTECEgaO5P%z(%!qpjy-3 zQjS&Z59<}@(=3Gpd442CW3tvtoEgQaCZ*j-eZkc(3hpNESpjdwxsuF-(OgiKLaQSs zk62UZUH*bJw;dVkBRKg>`%7qfn8F(NH>ziiivFm@f%vqbY69K1wayvyc{vEZdyso{ZH_K?!_3pNzk}jus5d2fC24bUHP054cpcZx& z?C@(%OwBUW;X~fkgpB;Fnb4c7uhc`mP>fv;EWg^h$Z@R>OiXf!>C%De{F^|T()kDV zfx&6dVXJLq-xj;k_ku4%7_UKu6$!BmyFqkQQ)%juYv?49@$%0w_D-*MK}?XA>;s;z z-KRVTlWwo5Z~#I(k7S2%3otcX@`DTvP1?y-$F+4|E6J5MkG?I5*mHC9B}{-rU+~@{ zeOSzmK;t`@_C&9D%p3a#seQIsw3fo+)4osMdV%zT)V`jU23`yz9FdXUf+%C1U>{L^ zT0z0({7<~CB4YL5S7Z+xpV5K)(rJ(Pyo#i2af`h(-h#Py3fMxB)%~taRA(Jk4tlZb z{fYqNmC68m5S@ABa4Psw<_&Q`3`cH_psq);Fd4u0hq0uXxY~^Y@$)R^4DUZ_+Q|%kUyk%!EI3-%e5iI;T$JBiRrq!6AhKQZ; zXF4j8fb~zt*I1BVz%3%HGZ0QnN8~R7kS{MzpkP>XbY%3P1I%p(tbx^dBiT-pdO~o$ z6@<;_+-|I{dc$5WUWb+w=HX9i2Q|gQAl#3FhW#^QC3ez0coJ~SDqeuHgj1h!RISLcw+!Ez#Nht-UF;52=34_~)t z-e4Rcjp;ahdqUTS?u3Sdo6OXt#Yn-8n%Ek6WIsQJZ!Y23CS}T~5%nAV*owZAOzV&kW4|dQ zYG5r3l+F)S#6JA5>cH`L-tz3~htA}5uc047O39}A7TAhc zMtODv*MMAVrCMCZGIn_VeHTH^&VDC6S6&OcLhp|ACclBJxfVR3?>zaKgNhjwiQ2r5 zYM`w!db`a(lI@Wa~{;&FGk%KhrW(I;RK@0q`d zQQd{HWy6^FV`ED*;MzujYx%YR-|!Y|BLdFp^y4D)&#b3%Ey|{Ec(nL1f9mwLQC7b= zv%tGs2bc8areBQ&YdyqkDQX2>6oV*wl_)lOtP|%O!7;Dx_%v|@unQ2A;otx!PYc(O zct2i95S1)2r0b zCJ&f&L-(1ouXJWs`}Q&-zV+W;Km38alw*mr$`q1sF7jceYVkw+joI{NJY9~4VF#}? 
zR@2ef7x%qgE3AF|WapeiUvvlU>TqgS5QXVh@5%d)ZQCRNDFh;y6hfz7{ti3x+Ru3*ouyyky--!Ln00RHhy=iMja`Rb1ymI-sG9LZdyfISlUe;8P(Q{jB1a$ z<961;;Tkvj{-GWXY<;)4I>H)rAe4brtf4wob#0c@Cca%O{m0z;cw`8OIG<2KbQ^Y~ zR|iq#sd{a{aJFxU9sKr-byiwO@~oVBMx!4UPD7GjGDXbhp zD!mJ&MkF}>ariDYAWRaEI23Db+tihU`WMw9(1d|b%kkT=s1JnEstOUVIR7=~lFc$ge3fJt#g>YFr6%y{v zO`jelp+apy%SO+vQ^mzp{y4)g>INDIe9q6QO3>IGCTc%L$y^9Qa5^9)s4os8l|4F{C?v>Jsu7M20jQ~bS$!V~iw-^6wmJ(D|5o`KAEJ;J!FMUEV z?F&(O<3cFZ@_RcH{eS%~4NWHb&Z?=gKeW7Qhgln0IG}S!l8GTVJ6<+Bti|Mgan^K* z*atXzVDmRSB{s{)Z)!EWNX~D>2nN}cq+ta0us-gy0_i9n4h#CrMD5wq$hNKjs72T( zdrJK9x&Qdp*(B30wTh+1 zr4}=p3r8*3woIxZi0K9@@Nu?iu){ogqq4tAZy;X{jXi+)d=;+h@9|SE^Hk@`zOFWrE@V=)f)VG>FEUZ>1 zWzrt@4@~>t^YuSsCy$op{LJ9$xxwST~$>VrjV6~C|`PU;5m;3v)d_`_A#5DVUMSWDuUI^K%XD)s!yI_ll%Lx|VN44)6 zh0{(_r{!1FOV8S49dkoxK7i!};4hr^*%cFr<)R-f!5 z-Po)L8oWR}Qb;!kaoR)>SM&giu8gzdc%PBGg&%ajFh4uwX~Pl0!|K*wfE+Me{rV5k zQ$=7sBzCkyvxL9r2fuy@bdHf z_pi6*drmeR%jYvx8keR+s@Xspon*~WfIIV@h8--ZMx`yyViBe7M9=WqgIK) zh;K%|Ub}zD<$R_%J16xTn_;qUh}GVAWxL*-)2+*{rLFTHyz?YCm~zEjZgh*8hN18^ zo7F8jZ4be!>jNNl&~Xg5`d@uwz}qM9a;@ ztq!Lhl#TCJzxHx+a=PyB?z|wVbu8kn#KOQnTxRjW=R!{qA#=*=LP7`Nl5P|QwI$o+ zM6D1CeVLB>61zTLZgAZI@9J>+AJX))mf>8q*#oEfPG#P>ykUM?hzy$8C?%cZT}gf% zObS;==Ik%XaT=b7%s#m(76Xhhl{>(%`H8PynymrUcRR@2xgNRAUrTOV`!=V3?2T}N zqz8_xNKzHQMf62_o*EB0>wiC>^~ZbCVao&u#3qhr3pPy}JhDrS-rp)Szvy`5HMn>o z?4@wsO(YSg7EfgXkqjD%YGnspBP3yT=!-xKmnPYT(++vfQQhE>=?Nv-I#SW+G@b;| zv{2%Zk4qBlXw$4bXvHIGRxmoUwjJlomv`ShDy(zAuE@vHOzF_HIHa*Mss}b-a}f@QSQEta%pS2Fntja}J9Jq1M+9?6gG{ASW>%&|cblkp$~y9Ymo-ca zuxd_DF(Xy#0;^m1;DSVovS+A2C?l^mxCd?sV6X~QvtMg(T`5_?Ha9}eqoQMrW21`& zgV`9sIK#XTkdgC$M-H2c`mz(pe&4zG^EV>Da7hAWC}|ji%qkS73sg=x7<{BXCncpR zjVShbfsxbnb&xuifNaI$R%eIPmLKr?zUN6}xsD%DU<@Lfp5=ASyujP|@G2$tePfA0 z#x%?oSK%9z73Q#6D0S;#DZlr++Uz9AMm|7%-X_tn6fBqeJ965(s^t+fK=_y)`gc3i z`sXL{)1pN!O^K)nSK!*+Lc;oUitr&solX#nYc&x-I|6tYx@ID=wzgh1xZ%Kggfb@e4FIFg{%dvOiAwh#5Mx&&OxsH$ zzeH?C#|pmw^E21)qvqD;!E{vBAd2RTzn=7t2LwnDaO_t}cc9GaWmRPFa+#IqwXT9y 
zCsCs>ir3l9QkocSEv@x(W=2)sC?iH&Z&FvRb(n9wC~m73v}xL`Dhd9Lwl5n(?isEo zL!vR9L__#jw=WbXNS9o^x6$mXqD|k}z%f|=y=XlkIJfm2`;B{!P6av5n1-qNqL6_raX=i4AGxCYRSZAR7>agMZ%-6|Vmc^$m>} zsc?*eSm74j`n4#+nq5)vmZ=$fi+Z5fJ?FZ@(Tw32cr}MmeDw_FT4cero9G53wF(-P zw{%x%*R%HiuWA&4#-B?{Jzs6(gwpH%HCIvl*R73qhqt0_eK{670eyvhcSwEV(7!NA za~x?$G@jd2pq9SqohQLAm0&MQ#eqeHxmgj*&G(FHhpb6^4E_oQD9gB!%}vB7YO#Fi zo~6diz^-s)4qH3(9QmXF;v`Nv8_45t877#r43G~1V@-mi$Zxr`e$IL2&ZhcrJe0O0A)-`{S#*437XwMZHSXpzBhS)x&Np0Jdu z?rvq;1Cx}05^59kMRH5H4Tg%wu^Z*{l>xY*gPDUyGS~g)CGmqh>%QbIV zeT>N3EiKuj_DMDi+61pdv`*-j&46r$MIA!isAHxDM=8vRK=?Ci_G?E7YLkDhDstAq z_h(2Og*RxaqdW{9N0XOE4ZO7`HUjHBATP3*~o0SS8J{WHsYRxk}Z0%&X&`#c) zl`Qx%p7Ibl6y$}&4JK%dX*e#6q+jHZ0nM7dqQS=>L8>JE-_?)u>i3W12@MWwtz~IK zG_%_B&dd9tqhIIy2H$ogKm$6R=UE$i)FMmEL0|RyF=#48ro?pv`xH{NVV5a6auPz8s5+5nvR;r>(I(Ruu~xo2`K6g9HNM%G zRF|I`HDXN*$<(3B9K!TQZ9Rnq+F?vuHL2atOLm=itzesK&g#kzI zWRJgiNUDx-$B7gII~KVuBYr7VX5udFP+MRBXJs&_`fD)@$&p~D=V@3-z&uma4`WmM zAvI@C74hR8vwM+Zho;L4)an*ni=Hb|Dkxe4;=#0%jdV1mGzayckrVDIbWNOwmIKFy zdk-BtZ6)g1k**pi^8zkiYTD?3IKLeUX3-GUUP;hJPyMM%p5P)JFd`OeQrGYS-qU>( zEZuT5VWu$7|3zCobstDAZ4E&s1vZv%?CO*kwMJmJ$hGCbDKlHX@XcWl@za0~9K z^e{NDTit?|QFs5v=fSR&McawSnB7mQk>r)o_S6u<*3LV{uCc^%@^0j_sLYE+Oxmgh z@fmGpfB1a`6WCy5fzz1R+T!k>)p3*jfgxn23$)OVH0&Ky#AtQG(w3aNXakH9WGpZUy>Ncw)M0td@%QGxsOuXnlGc$ ziL}?pK>Zb7FQH|Q(X9Ep(#08C#AbuBVoek{X#Q%<;lKL>P04DrK&fcvCFLJ;3{Qh| zR~Exj|x?+WG1$}i23QH-Gz2CY1Y%T&>qvznu?r>9rGpxRh_@+UnQ+_?)p zkmT2r!GX{eO$(zZy%S>u#SORW6dR?_><-+U8-f@A*rq(x)nEQkY5#iLA@#Xe@2#(< zaC5*cx1n_LiK1uBelmX)3gc5eQ&6G|2190kLRS!5Hf6yZ>Mija^yPe64VIKohC9ix zCl*2`3YGi&Bh{PaMclCtR1uVG_k5@PWLEmCa(C>Uv4t}4w_<1!whdc8TCq9A#hdE| z9WY=e3l3gCa%q6krg$PmUNplOA$ng04v6(0B+{`9`xZ+1eQrg6o; z4;mBFmCma<@E%ibznE>t)xT3V!BVY0kU*wLS=kH1{|)>Zu-s3jKsxo|&i6RAC-g$! 
zm(U#DP9eaHx!ct@8PI~;Cy?i?`w2fAa<8I{X8^XF(A)~|Y+Sq9Xc{IZ(#Nk9z21C2 z2lJo(V!@b;qTe@;vQ(K^j*pM8aGf^V?$i71S7?w)A(*8q^P+1@JBCH=f%GWM55`o= z@y@X2+>1W>{>(Ys;=rGf+t0FTL*%9m=d5UXzwL}LHhC*o3{Hj|eV!w`#qkssbSGwl z)Kp2BG2DE}v`)Drz2^V)xP13HP3R6Nxx4af=}BE1Ua}a5_4O`AVW5Cm&OS3Jql%El zS9%*xwP|8NSUj!Fz!pybCe2+d*VlR?IAN+Ept9_a@2Cwau+wA(v>e*KC0&#Vv#;Ol zwn{z(RzkldF0kbU!)N__Vde~gj3i;^W!%0TC%1@+n&tGB3#m;}kxn(a=-3}GD+|Av zk9esp{`901+(yU9P^O|ympX-CeNDNbMkx^TxjUS->BJ15AKNZf|6pl+le?H6<*wOt zf9&ElPYIxd_mY^nNEj2%uWr`gD7F(n9M z7VU`PCY_>%ke!O>@p8k9JVChAEA%q=J(n7yhevR6eVyKFeXYsxgO*3hOgF&GvpoLC zA(@UGVYM+a##mWMO(xHQb$D5edc^Ft8}`Im zcVX6&+CE~XupZCd-x0RI+BA4WK1qzqVq*Gawo-fWBA60kP1UywJXcpPO>w8AheoS@ zrJhr6tvug(`s033JGtf~&!aw*4@prFpjCFn}A8R;`X zkiuD)!uQ|a#jg%iE#^k!`(eK0*#e0|Gl?A>1G9N*CoVt~%yQAwB8i zj((Gl8OqPXa}M94A&X?(kV=X_BK|(JT zM;mZhm*}~!GH(l`Ge5%YCwIRbx5l?|8J#-NU>-%#Hflvb%9L%t%EmyuoVi*sXUCuh zU?KVBg!i*!JTnOCP5E~=hn0HI^jA4}M!nV(uKIK--ChWc)W>vL#;NS~Z-n6p?mj7R z;trKjBgbw_;x#zm#`3!Igs;kig4kK3ixC zY_vdcpOVJn+)+%*`}E0C$hImV-Gf~ZCN_f^eeMK;Wl*)`3+44o$I_oaeThBiXvtl0 zO_*Mb_o2;a8umd(kEyW8=7zIhw&e9&rx%zYe6i=^@)#1`i}G*|S*ukK3+7>B;j=_n z;8mpROytD)jDQR|=K~dS3*E(1zfxM{?*_qRGyG9^`Q}6gl?PwUhs@Kg6f1m@iQbM& ze2qI;Q^;QKoxpD;H7t51Jk)zpWSE2~MU7G^p1-*Cwki`&18l$W+Hj_F zoq%tBFShPpFnK_r*%9I1h2>as+G24*H{e9Z9g$u)F%Yv4XWfN$tmGnywg#I6 zy(-VM!68#;Oa2V=o)*+dxHnhQ@dIj?TTHBbcG$C&4vb#;tI^PzK<_3*imEL8tnA?6 zfdAw6(^GYP&BEmU_mj{cWCawQ%)vWZcg8WhG<;d?54OeL-dwAZ-cF{^wP>)THx$!H zV(vaPpbO?+Bd;-ag02A3LAnrnQpufILB8uBS*c#z(4!+~;jpLb;-~qmc~{^(9r!Yr zOL_iK+Xt~gwSnqeQeNY0qfVlu7*jsRcN6Yd!w1#;Nt+35+`yQ(1N-$L`a6o!I9kF$ zvjB=hk$1VN48oB1%C<}D7S@pPft&29jJ-`Vl(z@jC0{}*2f6~&d4l2X^_@iz>}!b9 ztKT2O&9Bj<2pMX|*+k*-3VUm`W1zuu&NQo4wY&zYT_OFs`v+bmQ)jEhM8N+qrFxZ2 zK^)uXb01c4BAn}DRkWbE`$renWsPub4Au}qRGpvBd#-2NBt=)g$n!^1mlTAkBKIqe zhBGTyzpjPPj|j;t9w|(#d>xc_UnsxOsM9?6RQSdIxF91nt|k&!vYUX_{YYn-Kc07g z^tvKrl3ah6dsp&BJYE^YivDd*=F_myvaB`)b*LcUdomSg?#Vylttz|ssUl)VvMK)EiWTDEWFn&T^5Jk+R0a3|tl zxtW%qism&uvOIM9;s(icS8J7Re*0hOCWm_5aO$sN<+3qRmu{5Or-h!!M0B-IZf*8Y 
z#!qc;c=ghT;B4L+Y}aok(z+(qDLO(=`oRg;7n#pTgKyt&IhjTW-Qa~2It9vE8z+J< zXz}^d=snPK$E(*DEO|sq$rESs7*3@lKI$H|Trsi16duad_#*TE@!SOGBtQgA`^9J)_j7t7kfS}AF8Mac&aHw>Wd=f8b z$UAyb%Zrh4pbt+|Akr58m~bfnfIHKeh30-55%97I5<+?S!lm7=^hXu#j?u9>I3lPt zhflMpe$wj_1=DIk!RM?*F4t~+<0gU_qnI~a<&IrmUbkF}9+1EN@I{S#P|7*{%Ga6? zw}@9+(2>DTJ|OMe0&4CwRy6SX$duMm?}GENS6b@NulsH;O$x%I^Ipc#&$bj7;yx*! zMs4~;LP=A_hh~%P&I+l$d375OSrJOk>G?jC{KlXnwV6Jk6$hvIK{bMY$=^N6X407T z>L9IBx3^Y0>bZuxzqY>_ybrW7CBCp{my5$imEw-gzRj-wjoimHCe5|{?}lH(E97vV z+W!%0aNz9fRSYiiQYizZ#S&`&S$O`z0-)R}bMgJ$zZlK~10(+;EAdt1Q4)9+4OSv* zFd8)R9lxU4fd2QPbp?#ugbAUbW4@q|>-6LD!rSt9bIt|Bq1Ny-^G*~+!lq4|<(DJ> z^#1eC>lL(^t)HIVA_I?A&L0dKN=zB#XeQ=y0NzCZvaYUIIE81n>QS2s(-Y1Ip9fV= zm$`3E#pcqG!c|J;*yzDg&OiAQtk#rulqR7ZIy7iQeD43&l?dj3y%UL{zGVEMJug?c zIv;NLZ6&YZBilOL$POb;ur)^kFY~%+#=66v+roE16Bc-G>D+MV< zywc_vfNAB`vil>4sZO$az{O2-=)Iksbo}>R-d;viLlSg_awTV{P$v=9hYbQG&E%=pynR9CTAV|Kh#x z;Roe<7OSQSys~n*=m}<7wBb~J(n;6K)j9PI^pe7^vEA^6u+prLzGAL^5D>h(prLy5 znnbH$hVQ89tf6xw_If@#(dUM!@1TV^Th3vJ&-3uVHau?>msG`{$q#O)v;I6N>b^YP z>YkH7u-_ZoULR*zc=}@NfIB_-f{^GQ50R;7lMs4#UHXTg;QtN*bt6Uy$j60HZg_Au zHJ(vq?2c^0H%A;_;4m&VZoMoyZ|TNq?F+b6i1!!I%=QfjsHw7rEKh{?>S0asL*=jX zecKbQ;mQlvUfi+^C*Xc(t-hhH!lS_Sv)bYYN3O03e~p*61VMjPQ*xBSV@%!xS?Sn5 zxwg{q0nl~H&S957anwW!>uLXd`)ccaco)ba;8{0MpZ=PJ>-V{CSx=8TWM6x7#S!D3 z5uxJ)VSCnq64W>W4@XYaRc^oZ%8Bx%5MR;Ck4b&N34?X#RBq`T8af?~()ySya=vU( z;(GLK#Iib=(@@Q~ zLSDzgc2!YdD;RHCf@KaaDR1sdjaR*k8ZJO9G6Fyd&V~abekJ*br!}rj7L>YWUPBv&IV_9UOoXOO8EF6zX0h)%6%H%!o> zpVIU^4Jb#SEHAAMqKZu0vlx0kTxFf zOxQw@;U65%fF+~C`YgwwDXXhS76Oj7S61u#jZ9kKqgB?QKXzU=jIPq%UMP>0ii9KJ zHH6fVq_8o@1;FtGjCP?NEw(APqu`ogTiTV08d2uc7JgLg{Hgf8o4;-R#Y8`HUN%s+ zM;wc1tJpzQ zoyFiy*x))ZpAiUo`b-gj79`WV=f{@B$!L)&KbA-WTm~VgQ#zlB)o73!-1#zk@U9dH zzb!hEU=DP(kNrAh`o^`6MKMl=avlnP%8T(d2XbsQ9BpfVwv}dqQGDzD?t{azzu(Vx zw$ag9e%xG7-~*P!`>l7p`N3Ml*dQ~uM@Fnvx_Iozr3D&epIo-T@msu*j_`N0Ge2ho zBb2>BMA_Mg9;vSD?g-7B+$awg-VuH1+7S zW7hPp^{=C{H2Dt=)6>&9E&hD`FzE`UDD+Fc^j5VnKb- 
zKJl&fq(CC6p#;r+9i@fJ2a(@E9#9vwtvg`jcO?4h>$>nQqErAF;58zr06rt%9bP|g zRiTi57a)bH8T1TojaB_7Hq#37=QujCeju{$9Iv4?hvrAvYO6JxY;PDo_8bbJt3Ww? z_T@xU%T%WDv>TX-92BuA-u=jgfbv`-xIQbdcW!1nrvX?)KETNxGKdno~^36CV2G3u&F-ywu0Ff zpR|8Lc=}MBOH59kRNV>9N&YW zzynRZoZHNVYo^^N)cH!&P90}pb?PrQF{l$Ae{44q_f40vD#*KUbmtVR6XQC|s(giZ zJ=wrzGCxW^TZi^D^JO>L8MYj1H_COR0f{Bi7buP5I~U~Y8TO+={MIycq!(GD zkviNDM^2wdDjVxVV7JRjPYC~RnEh|+Al{2SFa_l+e-xk{#0wd!fQx~my~t7TJ)u~y zU*(qgth>#AT$!sc+tY*ZUB%>S@<|^~hJ>1gPpq3ACqoKmH6nrX&lLBBAxdoCmfR zvLU<8d1BY&TN=_193Bnkm;o*y%q~QZGrROt|{B^+nCWX@;_(w7^q0xYNap2cg zZkF%Wc*@%G-4P zD?o}3BiU#^?&1GpqY6bB5S*bk^>^_~o^;Xu*1JA&kxSQ^;Z1ZL=#B)q@qf{`?*j|j z7rX3czSh;%@wm)oSK@s_ z%4W=pA2Ak7y-nCtj8FImW+qGHuVqZp)LuWky)jDyHKos8*JO@|$-=vUo&ky42cHh5 zds2i_e=)bd5vbfjWiaZJOZNk3IrcXWYfzG}nJquuHJ+q{-0|txLaQ${hzVoPvem-M zWz#Z_Wx}LDUhc3&@hWVEAC5%_?-{%wHgaJOAY9@kOElwkW9!|6=P11(*>}5ewzvMc zl(0#|^3qvlKPVm6bwFdSjilPDntybW-WTW%@29Pb;zUL`)Rk9X-qcewoh!Q&V83Lo zje?Oe-91$guXykKUH};G>gpyVdphcdii47?iIU#|@ON{Fe>9nEl5)p= z2RyOE6vO4=J!VS*;`m5s#a{C9aK5BduJrO(LD+7^$FoEy@q+zl@DySY@>Woue)~4Z z%Y}6WSvC#_Q>8bC`9uavKuJ^Qjhn)mS7*Mm2^%`X5DyX}p`Q-=%VIN0)pAmp8bTzI z0a94ZM=rljZ_**1jGwnc3&&_^zd*!yAQ2N;v}nwTb9nb-g}Dg*82JtgV-(?nP&k9hl}wETndFgDM+YMi7TDVH5YZdGft;Nk2_N zT~tHa_8c*8r7AD4y(kLu<)!$Y-jzZY>z)dK^m38mf>_A}eR;c{@j^6h^DPFA5~298 z`a1u&ZpDb1RjfFKh0RMy`HNr-5Y8ntd3ZnZ78HM)-^;*x*H0+6z5hD-LlUXf_4k5| zZg)W5c)M5(Ka^I9T@zKTqAM6L#AU1}rEPBj!UxmQQhf7R=(Wv$`avuljdTGE&j^9we+Lq+9#fQ_X@!RmM=O~YS|q_eiPTpY9tjhAMMwaHP7O8%lH$1--^ z65NX_g})6wvR~M2T8G!f#a@_+b&`K!g;W9g#caV4)8LzBiozC;XN_r7(?CqEC zMV7@1bQEEa*EI-i4(_Ca(d+?eBR&$9{Fm+;AjTl&REsAxV25Xd0uwrUdkgS;<-UVw zvhI=F!SV>{Le;N`&=3ZrTB*iaxj987aEk58VWa&rS5C<23O+jIl zhdBeVcknT@^U61n?wSsz596{DdHipWd_`$^z#-`x11tBzLppMr}ySQfy= zn~A*q)MfAKfI_cCyR150`a)!q(=*73gk<0(fMYxiCl%+Yr?rAVZ~d%5++$Xw#MenZ zptR#Ie5N0P0BiDVJ4Xd8k}hxV+u>+=d3pBj_ZGqol0CyG>-}wviub(Is3vD7p83Cb zawsS}E~^CyMJ8JnXESld7aszTjMotz8_ir}gbyvSQ{TRS-t}pm%5D*m;B#`LoXx z_;3|e<~o%@n#xyyZOp@@T;{<&`F~;c-`tbQq+osoYf8b`(tOPGac0VvbhGUuG&Q2U 
zGl>OQOy98vOFyoSqq*YbmeMmF84U`Ii9tmrmW6U}!;Ph^wV|f<^5>6IrcZ6tJ!Jrd-@*Wy|Lo)J_!R`DbEsr&Bf35<^(R?9t)H zvvD4tk$Mgb0>3QyLs;gsjsi$L);#%FC_2voV6QX%NuRi(^T$o}Nl8g}<)g$Z^onRp z{MGnr8e8Kjxd42k_N7;Fq$ zeF)cuqP%DM2Xk~E^y=f!ZT@Oa$2uu4i&LqDX`we=?d&ps(YIy>y8aY?QG&OsceS;$ z1J5)=!rUbr&Hm{*)9u99@xWgd{0`c}CTjj}D$6!OQ?%k!I_+u5A;146u#Ug-rl24U znIC*L-w^!4Y4KQ7S&*MYtgWP0?ML5my4G>toqkuWF)MwPUfNfs@}fGcaikb;yD9Zq zvO%?&=pEH`NmNxU5#tUjxRf+76dPjh%1`36Wmrw(x|&FNY!l<)saJmhjhe2tf1 zu@!L7Jw+en^gqf_Jn(@5h~_T^&2-k z5#O}p=+rK_wzmSV@P#O`@T;R4&F&Rm2*$8K^B}O`du~zm(}tz-!uKke06L`&zs|oj z^!l&QDC1}?Hfp-r9t!}Tcj`1;%ZdG0zV%+{D-=yZ;kt0xzXJx|s_gRSkHT8baC`A# zrt88fx;+fgy#6{or37;5J-oO62ipAK6EiwkO3U8PP9S=8IMv!;<9PYD5}#?2+V;di zH0;EGy8~E?>XYDb*4UUe;0VPj2jlbR{wah1D1F_oe#M*if2m>|JYCxD#ZN?$RtJI~LC@9E|abe+}vi+e|0v=MYnE|NvZTY$8a z@f4|DwPKILgqo}A$yKX$(#3bD9y4`S$F2aqWoT#ATxGtl)fp|JZ+gwJ@gN;P6X5@A z?_9&#%(ggQn!1##5|e7kn6`>^TsnfHl*TCI(rHPFju4ktOQK3cqG()Gt?1~u-#VC7 zDKS)>YD?USpe|8F8;rXWZ4gD`?j1o4_IECQ5LAv8op=8%tje%}O50}}V&usW z3s;vY1Pwlu%r}ySpwtRE2VakFW}QkMo)ujLDDibg6M{%mTtvyxYficwr34A4s{(&U z4aua}K5w?4xU@(4M|@z|FJ>ES(Tu3jQ)8(-PA`>)8RG^unv~spMR-pgX^YW2w0C98 z7P$3>ZVD2*3lE0ar`mWfXba?I^jX30r^oJg_k9`Po1zwAerFS!v6A2~Ny|}+ zR5P;Y<*mKNedr?pYa)q21ycR2c~1Nb__4qpNk3bjp6&ZK&-&&JQ@6%{F*`%#`DX@i5#RiosrxqwTU|81H zdF|XNqn47~8}fxQi}37`Ruakw+Mr~wUi{Ekgl;YY8bxPjcV_z^^!P2s>?_U_dlIgJ z#MYG0p=tB4Z*eobc2|?J(~wc7pypQhIEc@Q+6Ik|BbJbDbNa-IeHq7tCWI$SrvN-h zMxJG?Kr1T6B%s_8oQ}2?oOdc(DsDT{sglmNR;J})sPZq5ZZ6hv{B`ORLos$!%(n@; z33o5awLEd{>4Ep|Y_~fAJLM#`=8NPyxxE}1%$jiHo<&o}-q5@bWbBo_XEItb25Z{6;lxge}SFH8rwj@B1ET)$Pd>la7Q&azDh24EOj1lNsxY76w) zc1Ez?VWA*`^w&z69v9fx2qgEk0}jB<21GX8>LF%?80TH9_hfjhC^3O$>BImUO*;m zT6(0!@kqpRZR=Gu;vS`I;2B5#D>QJ_c@x(u*_gZwY}e=szWnF)$%`|0up*G5b1S?c zpAF;Fac@cz$(?l$DD_ChK-`ZORQkqVHAVW&j5tR~lWx zB|(}MWj8I)lWD)q>i zTO6Fx$^{+wmtE=(!n2Luv^^#@yudc|p@kba2gXNHU3p~;=JP6~^1+;G5h}c}5 zb8EGHWD-`BOYOUkfOce!hw41lX!jwvi7g1dicDn-(rd$Bf1#p8Wvul;Ff*k(3Op=c z-9mVKquWnlnX$)U&v?Nz>%9DSn%9KU6-km0AtOsqSxRg2^UdnEFQ0k|V{Ki3EP`5Z(*YK$${hSGD$(%Ino 
zy?fQ;Dma%TB^x6ZrqjBMJ}0+*#h+WN)oRc7t2l3;lxW%bM#Jp>OUKc-x59D&MyFY< z?V)Wr8?_h)3N-k05=GLBeZ=uWbOm~iP6Q`%xZ?d&OW+TTE7y4XqbT!yQRi%DwUh@+xMPEudA+sS;KYs_qxohX9gTcwaYbK!9LLoC2nq1$VT=9 zkKx6rKs_Q*p0u=kEihn__lsU)-A&`2gyF+^U-iCH&eMvx_?XF^3f>I)Z@K$vLaSSe zikGWx)=T$ue6l7J)mm(Ft$9A_M?1rQ+enxF7b8Xg;7i^*a5 ziz&4m3f9dVU|ExryVgO}NBtxh%Z&H79A$^j-FTgk)a?IO|2+ioZs}&KC;qoUp_c_> zo05F7vRATDuh=_$d$RsSZ0Xg5_`}&9qih9?u#4nB6W9gMAvXdNPkFNNZO=-fiQPT1 z4a#Tba})Pf<6%YWyZy_1tPGAWzpcfu=kh#yyhq6l03p|chcVF5gQcqc+I=dyJy~pg z0li-!CfO3cVb#Tt(~q9sYF=>tTxt2EA4;@nP^lPc3Ll@St&8y=q39Dj=I-!<5BCGf z(^1!RxLsOyxnsKVXDh@t%9Ubok5pzNB~c#9`nOsDjAVLAl?I@A&h}I_!H#j8?BZt4s&~bJK2d7flGf_ zAU@|=A<@fK2h0+SG$hCM6tK0Av3~w9M(WF;^PuX%6`8`p5P(4jX(Ls=iRA3Y zqjQjdAdY|qa}&bVfgI%l$hQ}q@(0V{hJnYoT0vlTAiq-D`5WTw_J9N!Ne#153|M1w$R3W&k&>G_2%6c6Q6yueWZpx& z?zA3qlo)lCFuD%emK4IWqjUDcd%|N^upO)RuQmU5^qGkshpT1;v(s5PVU^m~*L|+k zB_|k^KJrEqjCMnnYS&}VW+48QZ1l9u(Ww3|XYo%~X_pQrMHY;Y$(Eof1N=xN#}B|y zKeWBT|EnKAU0^bDn+g8zlcx2lCxuEN)LX=}U`2eK24DKH-F>sZeU z(6<`MwDMqVB6NfW>;}C?FHaKIrU2fV?kgVRcEX}9AO$Ce?XifRMcf%;XFBW7Uvzp! zEyow?MgG?%YMu;=XVliDLQ}*C9pN(sF%hjOmIJxB5ylrOcx{;7s zEAcE@y_JR7KP~^IC$iYl_!Hr|LaElsfQ2Rs^EdEtItgt2$)klIE>vBW)B937%jsOk z=%!i}k*3H;cak2~@0xzSG=)0F)J3eiUD$h2!Jmj=_oba$`4y3((N8suQeVhRA4q_u z@VkQT2H63rjA72b20XOTtLGA_-+zvX%-&!Xb(HaAQLTFOZ}jzw zgiOy_Q*G?gO=NX1FJJQ3u&0g*LRI?IU(dZSjgkkATOezXGSqniTrFR?B#(a8FuD%H zmGaVkvS5P_N9qAp5R+VPZD8Z2GxpuySA!L)ryU*SO4e^;RkB{@dufE`lH5aS%jLJJ zD!x}cO1BBD(pql1aHuQIo_iu%cd2hyw(>6%qeadN9(KK+12HGEDb$`o0NMzA+*cUa@1Sz= zZ;9zeDh>xyQ5V%q(Y~;S|I?|?#a w{bSIksD4HJ__9z1UXH|stboLk~^m(z={`Y z8fa5Fe@+Nc=@Sf5qaoJ9`XF5LEz^V*k~k6r9*Gn2l{|TzZxlY2@Rq~C*!5a%n3?F6 zrsK;~3;2bWL9r290$TzZ*I-+k6c{whhof5nCwFU?^u3hvN)W`cXv^T@>$ZE8-! 
zYDG|cqx1PIj*?Zh{-RfI+qUG;n{HWcs-UF+U2-<4sL7aH9i<;L3wZ=7LW zopR1I%Mmof7^6-S9u3_m8gSa?iwZkuS7XC1rGfhNGi*f*sI0?>S*TT4=V6_WP8NyP z2La)@q7ozntXB#Jyyl`x%d%(A%5a~gl%QmT*ufHhVKEm$``sde~q z#GGEOnQCKq)##kXZ`w55)n&%;*iFn;8tQ7<|LH>8kE8))%PZ;jswLS%Y~8>iTq+X~ zc?cYUWrU}?gfxir46d*EIEgM;=B&bGtvW|4C1}wcw#s{q6v!F4i=y2k=Jht#wt;wc zO+s+MFj;Cltqd4E3o6mTlSnE&3^G+o0GpXgWb5~g4V8iA0zb-8F4rGsgBB2$WragdJLq>KA6&A*;w{Ln&fe^GMwa1!7^(Fr}V&Y(IC<{ z%n=Ls%hAXyLR^F*+dcTyr`8?(W-keVdp)R3#7Y6Aa7I!nL^&^=P+O$2xIV`(WimUJcG+?Ahs^c&Q4gmjjZ1Xu6D8 zCRjAVkj?eoU)%T)bke3ACDjlALLf5MVWq)rB`XPtrrY)@*X)9TZ@r_{>Y>o=aV|hr zZG7|T(Qgj4eIgJh5CWIcvdp~EvWI6IRYt!0gQ8lHD)l{~=~A;iu51b53gb$}w-jhU zVA57@<#F%n!2)(-r!(JERwM-$Rx&s|H8Y|Z;zT-_h5f40Fr=n8bAvp{J=erJLMp%_!iR=1uT*fleMbusiQCY8V#|W+=rEueJu+#Sjx> zWVY)q!A576@(bACD&O9$4`F;r(yx+F3!w)~b2JM@DrZ16w0bAZ@Wl#>sF^QfVIuG4 zgD5tR2eIUU^y&}FSELA9&xQtg+8Cl#c;;WvY17Or`T($S1%x*L(CAmC`A4N*hHe1UMr zvXyv`5Krlbrr_>ea1qYy26}2L5vg4%rPVa`;pz9VtK;OLCSU?>Nv4c>DDBq9GcD{6 z^nzK#a?a|;*vrbug1iuyVu(iF*B(A+X`x`jHrw^XZMK#?>T0c!A1GINU9` z0*HJ*el)`*5f}4?=RDOsMBo$~{}#C9a4Q0S9ATnS6~rWY)h?d_?T8kr-Pu_M2@W{z zsEc@FY~}XXY=^{=t`*#a6|Ivs?tJDBewgX8R3?t5y^hN97;*%7Tkdm<-H=O{&PoIJ7547S5q1KzwFZleJ%F~^ z_<+u5{=EX~l>L3^(o-^vJy&uYcU%GVn_U-)aND&it*cE%bPsvHK+A=@6DL*0BfTEh z{klIFPKu5eQ}91Y;=t`IO6W?Uqvyp^2V(XHwZAQnZ?g2AVdyi0=mTkTNGb;M_r}5cGf-_}SRH5r%1!w1^^NCZvy6KZ#_=#+*143mqp~UMe#F^L-QFQ|zl+_Tshg zXsc4HOseoE1T?@{uA@NK0C@QZNFk|?*~7YThMn+$e9gir=F%EZy>5UTXb58X4M65j z)h8W;iQQ)&gu`)Uj#w5#lz7wOTv0+WLJBnWB3ZtNUWy>**cf=hHO{pSIclIT$hrf$EK$mgcb7#{gI`rvXvVV6krJvEIT>(A*> zQ)tchn~&J2r6CL{P9itz(N64st3O+LU)CP5B-&TjCXaXZ_ zC5c+d1t7VpLN6#>&RIAy2S~SI6$;*Ebu?PH+B@kD8k6E0BzeCVwE-ftK?yrL_FerF z6ST7)yqh&^1{eR}!uT4o5VnobU#XbwSlmpouKj{g?Fp`%Nrpi_7i@Y)73U{ZZH=&;DpxP1i6a2gLI^LYj?HIw# z5CTG9CQ3p*%_+a^F%b18~ErgUJAJxT4JG%($8EjLVQD4cm}r2NjP0=rY0= zqHP%NG@{{;)&s*Si#>Fk7pf`s@7vS{@>+CY%Agf3dfe3CwofelE@J(miqOH1;ShTM zc&!h(>@x?4e}FB(kb*Iu>R_@~-DXz#)H`4^1A2}Zk-eUC>dCC-t@CKU9kX`O%s5Nv znOLdGyj19o9wZgbIokpt{Bm825I)jjAL9H2Lt+SwGwFs>V}QZ4mA 
zL{m75!LriYe;EhXXs7e{`9u_gl1~(I9MK@yyOY?q2c}Xg@kP-r>T;CsXiy+1C$C&H zubCmRR&gc^&f~Iwuqfp6a8wM2X-;$(lzJE=f8{y1JUFND^feH@8K=(SuiMfHj%_aCE1IZ=#vMOF zB~)>hBE^65S}5hPFv7pOXX2EHnB`15{z48z3gEX!%-4w2GKjX&i<5a{9#pcgVb_I^ zN_($E;7YPNnr7BxG6yy)1a9-Jl-_gt5hE;rBqR0F%^{3!mWRfgq zH8o@cI4b=U7<-7p@xO`U=*yK9VuSk&o^n~FA@oR%#>Wv$STEsSYZ_j>Sdo>P5-@!h z*Pn7`4DZl;0m73NkY6bQ?dgTFbfa6roPA;`4ATkFD-j`zq&|@z>={2#r4n1amlI`%7GNG6m*H{3dQu(ox3%DQQdOxS7=-{2Yt#p_6U)5|J&yG?cYVM4-xR-AioTVvGGN{f&{_c5_BxLpK&LIV| z+~dBy4>XwVG@LY>UTPP3jbOH`kHO?^t~}yJWe4nkwvNG1&WA}Be7;{k^D+8Ej<7Dx zL3+(C#XD!?D*A#%ZYBp{;2rZYc=!$MZ@f>Uh8N)>KnR zV}^kcD5z9bzSXvLOROGyAF4q_ei@chYD^As?YNZnFF{~kP)Zsc&m%neigN$Jnjl7F zUuqAu1qi4k;q!Ka@`WcHWnpoBeA8C*%(nPIVv-GYD_{YxYD1BD^z)^p8fxj#JMhzj zksKMF!cZ&lj_K{vmbs_Jr`*vKPq;eSjm&fjuZI$L3}J&Ut$Q>qrm_+gX10MdJ4Hga zA$;Jh4O1IB+7otTQfUZ!YLZ?rYxKcM06kr8NG$ZM%t$uDks{`!v#kGRuhxjiaESe) zT^tKk@N9P^E%Dtir~wdE%V^(SbmRznOg9Ug`t67?UX9*?>K-KCgE7NT{c&Ad@5DNG z0w!EHqBII33wK_|4Q#i?TL_aLL0K2+EvA*TaaxR z+G71{K#P$0pptgpzmmmyU?ms+2x({0{4F&7C`AMj)WFS1!FPXBKCA|F&@F~a+&hq0 zxyeoM^tZAqV!HnTi01A5P5=5zw6-_-6ecM8neitqY2KYw)zM}+kGKHQ@*Q!?6=z$X zrPos$n8^92_owP%m_nk|g`SK3(8oD1tstK5kW$FTG#h%5>}$*}^$9mdHB8=w_E;Sq zR=FFRk$Kq#&$Lk)#^v!c>#5JyZ8CGDmNC+l@#{L(H|wK+wLM**EiuK z-Jc#p8v*tB^WaU9%vO$!>Vm=l{kEcbnM)LIolX=%=JyR1S`#qLhu1|~D6P%oE#${P z;)C3@v;H^;-4X>nLHTi%kqXeWc}o)Nvn!knKK9r(oy)bekGE-IwboRI?;t1L+spI6 zIK#fehoeTgdWTnQVxH0ovANhn!JpL(ijRMq5RCZOw1F?2@(Svh8Xttil*k$RBD9}$ z?*4p)3>Sd9(<(VIt@|!S*gJR;Q%aOY0ocdQji$k(efeSzxj}=i;zD_xmXVlQemSi} z!mZsx1|uEvlU#*`H9waR`03b(7W=`}8vw~?ysR8mXc0V0CpSKhygxG!0ozL)p^!Dl z{7HFdR659(cI84Q!0XCX)h&7xpt%}8HxibTtdttFUN zNs_I<0awMfuH%O$I8ao^@Y4ZvO%W_dGy4hDQ|&Wl0+i(jZ21*YT*>99P4_;> za4sc^06J5rG!C=(c#M0YgL^}6bgCTu1O-7|kSU+HCFq!Q1;j@U%78d#ZT5uiR1~AC zhND756>f2%j+i=GD6XmN)Q?rfgSby-^lK>Qrh{Ll&@oY%ZsoypkbMVgM(vIgg$Q4@ zw*9=rel3%dFB6}ZvBu|}JS>F?w31-1hcG16+tm~h0A7NbA#M?LufJH0{I0MqbQk>| z6l~g(WL%;M`F|6O#c)#`t;}z?UiOs8Pn-~Tf;x2++<#c{&CA$FlFXe_NiOiK(`>8N zFZ^p(>1%$`+b^o*%M%D_HpY>7#~>fpf*8&5KVxmvlhUF|6KD2mxAYXEgx~imd{8pv 
zKe~!IObU!h-|oHCA5oYt*JxEI4`j5R!0Nx;eWX{VE9)rw?uA;`r@2Wx6H_NyK3Z}9 zK0z&sELpW0X3)!*3XiiJvR`+86V(+&N38d3zFzN&a4v+q^r0n=Nl~X~eI|`u)1NJnT%J~4No`?uMH!-xm< z*_&codr+d4Y(83KYWAJ{)lLj^I1Q;Vx&d7MZ?i7Ep9iwglV$yIL3Oz!wihV@zrWGfm!IK%J#3x07H^;o zJX+kZZIX9BGg7qP~_3Exrk^&YW~jTL(E4v9diFBq*q|{V%>+% z6^dcbb>n@b2{@Vxv_vWhgKnu?cl(WotBbSb4{*JB!9VwcGgq+AXUfxW(^GwlormIC zk*}pYd@}*mz>)$`xr}^}t@$a=rmhpv$ z+Giv(s#Lpbe1(Z&1BfQql{-pk?6b4!z`TL$1HEgu%7htGi`fcAI~x#1I3q~&D1nwb|h)qSp~IckbK03p0}@kQ8B0`3iPM#S-rZmI>xV14oW zTSNpXc6JsIQZKxFlKZs2Ula~OcT%Phj>bCMMA-j|wc6?J;zPA>ADdBqa2}#pJryS58Y2m?ZVh;gTY>-S#L0yO&95)h*L^ zCp~d?EK2)Etc2D96ari&)5{cv4IUBQVIip#e9f+GdsmJfERSk7B^`|M0C)d)LnG`F&A}TH+TlMUNKX%P+VuWVvQjuS~oZo18&SDl; zJxBoZh78^4Ift-hh`*-p217pEeh|g8!Auz7f}K6+s5fJn0)DnWJ`{F6o1<`}gCHoSSJ1XZS%Suud#EM(TLS zxJmK46hZ|-XdZkmyLP|iJ)`s~sg$Ik9>3mO&WOq}7v{W#O!?K^eI6YsqqA*(lSsXk6;p2{t=NO0VAmH&eug@oZIl&WBFJu{i#xdzq+!^qw? zMIJQDLj$Gw8*CK^$kodGWvzuSpNSdpGI+TY4gJ?t;n5=@A&GdLWz&f}M#d?Ipb&dx zLehvllGB{%h{J*h?{LI63&E+Fk0W@FJ48E-FNNeKKepZ%9-M-@s3c(t+3y*S^5c(c z4*__k)ek1#?1VoL^av@ZzgRyU$1^8)O7>dp!Ed(?Ms?sXU2p#<(_GX5j!6$g=LbvR z#5FiKenLKc=n7%siUsAdC*Q4BoBpK4b|5g~(4F(B5b0fd{>>0LVCuKvGVg*Bcur5V zsABxz#|{uin2hR!dH4Qd4A%XQ9j02~Xum!58vnTU-l*5tF?|&?QNmq$5C7A8?!N$` z!U2&YJZhOvSf~5$A3&MhZI(u8tesUK|F|+0_AF3_AVtK3Xxp99?<%&FU9%+T?mU0d z+G`qJN1FxpP03ELoCeCN;kAkjOV&h#V$l@V0R_&tsv|BYk1Q7Idc0P+zO z`gm#d6)28ZM)EgIZO9FDHaUOn6r=f-lzW}yjTq*g+h9QUOIN=2{!5u=uXqffDWBxm za;j#lb+%#PS-h}CMZYp-%Jn=*#+7j20B7;yR0C$g=4JOne|3+*P&JR0eYi@G>+RLm zb0;{1dJVY}1}gBaP!nQ5YJ5OC|88J>9-9lPnLV=*rFrioTxbc54p|1_ms_G2>JzU) zrFY`BA0C+57KyywesTAl*XR}(+^DpZ%%<$aUH_3i4wLtGh`6B@2P#SAiw%AM&Jk!6|rD<7d6&$B3g4M8BvNSue{=%Uaj&G^+xc6II5G$oWISo+l^Zfyz zVFvi^ZY^+Dw!jP1*phmGxnGturaqy0Xm&1VQ;4?9Sg{aVSf6NX_-Je^yEh{H%5<0> z`s*A>izwUVkPqNaszi0fEeLU!d~L{Qk+xNn0-(+oU7ifLO@$*JBOb%4j5q!wiY^0~ z#l$)|zuF_42Wi73CJXu`VIQq}Dd^dv+_uGj*z9XAbN%^!Yu;mrGrdeLUr-`Twpo1x zf9=Yca!#YNTWERIJ(_Fx*F8J4EsFh>?vv2r^W@3HvyeI<^FPAI@n-b{Qqq3vIFt2L 
zKy;sVyt_%w%st;Eb<RF^frehea&f=`}tF0p2?@dl8MmkHgk^<$L4i6RTKxML@O)B&f{%`TpWgG&vyk&Lp{UPTZa)cMr<@k5cKnW0L*Qv_oC@xIWEG z^&_YrPKCU#BP~E_P7HOK>h)⩾!yGZE4c`i(M)g0d#49`e0ztyfP1Ot+<6qyC}b zO!LpuleD~Ttlv(;A-=L--MdHh`mVq41=gnEH!-ZTmGHZu4_UEMM;{= zf^4&?+sK3I%Vr`1>F;o-wuD0KOCSeQ?8~*_bYc8@*bb+=zvwB^I%{ExVjoUMXTvvf z3u#_S75hFeGs=VM#U)2wE5-`c=4H@9g`MK#+AboSn&q%ZDf5AnTYt1axZm3bl*X#Y zqv}`&)fJh}NZ+woNm0o2j|URn^fPz#OM$$h$b6*&zydmvBCR~`XX2O);UWV^d`R0z zt7|{@S6*C)9gjA1pOmv#At|=4vn3%e(X-#B1pAO^O$ZH9q!1^#W7K3)(RthVlV{PD zJ|2GV+OlI2z8D(Zl$8?A?jKR+Le+cg>;b6EM$Hp z9Wk8mk5+%I+&g`_9xY(xi<*+#UU&&*JZc+;X%tsP*6GtbKv_l2+bYkN?(r9LSf{Bt_Q!L#~f zZzLRO{gm~`Z+r_ApH zN3tj{ONyUW-{CRPBO+~PUf2#f)Z;d%ftwsW+`NHx4GY`}6-0BBNyXNqJ%&%aHwrX| z%??kW{9|*WA*yYo4@9R)BOyWa3#9l^$OhH&jABTm7l**<#^91?Ok6x(AoqL|3f4%B z@c^r*mAefUkXcvdsbm}U)X+t}Z9vdB(`dS2 z26ARm&{xk$3;XCA@L^E@w3f!Iw^FSz@F~FlJFRc{{!jY5%xCV&{$0K8Rz7N_4{+Yt zLAbuqeZO|)@G+NO8G3a9vJl$+&7)ev?_17k-NdEJkrAz# zuDY!DdeJk7OYx>0RsGwLTm@1Lx-8++u6U`5%9{TyTFG`~sQwz}Jq3;Q=8x7?L4TIi zN|TS|^vP7JWxF|1k%HW27gk=UR>YkWMJLTvPcQGQkBOX&ncVMf-!iGHyCYMVrN({gDfI zD(r}_?(-jv`JuR*0rG$817w!8vx<{Lq)nqgDHLCOU(z zeBlhRWB>*So;gy`4{&z2c;;Zp=mZ^Ga_#jbZ9xNu%*pL$-^F>5Xe47T<#Bf!;>~XZ z9}Ly_FvM;7((EAOQ}?+2v46;1X9|F#TCDU?)UB0K$!A>WbWxiTdoX;A{2(r*Qu4Ku zRBhf4yFULiT~6Vtdh|It8;3D!MzU$Tsr!y&3*6*jarwb2*Te@Lj;jy3Gi&gmS|h)~ zr2u2(7^MEjTz^4{x;rfTwdJ5JR`bvrKAU3ku#nLEQdCd<8h5YljW`pZ3$w@mky(Y7 z(U>&B`%RQt%s|N?X(t={y&TgZ?1q1;nW5lR5>Esv3hzY-;mwRE=;G)W&#|gfIL!%0 z62xoOz{3e4kRJc>%Ne53dU8A9ffnJ%+VXwiN+WrFGuVkm2+}nNmg$!EU|v4NYv620 z>HkL&X`gY&(qT3lg@Vj)>40a}Icoh0U0NwGW&SO9Nh4n=rcYpxHhHdhm)*p-&92-% z>5#GMBA-q7ug?TA2JMck+Am47MB$-Xr)iS8roIkIn73bhys;jA-a>q!;jQ$f-gr1m z295MacH%Ddz=SZXRpESdJqr%7`n@IgC21tcv+j%woF`n*jTGhdJB9<_i;yfwA2LfY znh3t$+fV zn^9CLEbT)OXwh0{NROs-pqc4#(v1Gl*fc{nsqa$*b%K-{zSI+f@5Z(lcJLD z9w|n>v=al{KEri*SSt|xVmIePUOhYd*%o8L&}OZB(>|jw7c^__LX**!8S;#9DES2_|C2%U zA2ns7(h?Q$7ZYE?#nUWMut1_sdfro;qCwxwCxL>V17_7^(w` z>DO#nX~?h9-hgODEDqoV%F65C!D>=7a6LVtr272Lp+T7J@ 
zcCc&&;tmfB9sLmm`lTABIjBh+txkEIKk%@u4h5x4MTqBXSg0uzrjSbS0dE4`qoZLO?@_4)*lq97LsVV)$j)MB-e#oSvtYQb>s{?7V;n}#^H6k z(h*1Yrym+H1&!u@fjK*S88ue#M6-TzXj4{Iqh046`kMMbZ>;b1AU`itcY%xaHLRtFHGdy5S6olYyst3m{7r8)9Q_FwX$Usg+n7ca4Iv=YU({C zNPeHIk$~~QHJU#)DPIi8FwW@09}B+}sQ)GfKHc-8dN)(+FVM-W_2*LRE`Arv(~!A( zEnJeb+3>RCHM7XZzj%>PbTnPz0kNWk$FaBkW)nD8TX(699`8*(l~o*bdlG1S?yaOZ zGn8zL%%juY5dSO!Cz_r(XFVZ9Ybc50Qw*iyTwvZ-wx3N(6yfgqKJg+X2mf5`*=UbR z-{i!g3(AMrr^S|r1}S5BB!5CQ@^g)sZ7Df6#R8$~UtP(@x`9c~3pXm}QrA!F%r-`- zYW5U;*}87%yRfP`+C_O$Fa*!^_X6*ehMP#(2W{>DLK#81qjAHgS;pZXg!Cod=X4Rn zGDZ@F;!ffGFEpAKIu2*60EtUR)4g?|x;qBpFxY+aBVVAmBU}noN-F=cQL!*eS8R*v z4;xg?S1&zt?cvC!XR4ER)TeCQ;K;L>88tjNXoO$PY(`0~5i_jUV188EiN8pWjnpVw zIs4NTviP0Y2+LC=Q+*vX;$#HzxRu4Kvi#ilRUkSl1-k4A!8>Ag!Ms}{=&PFo+UF)&3E0$M7MR!jgR8vY! z`isN}gjWTq&>>yKtT*j*Ds^$48elRVkj{{emy0sY?azJK`g^H_9EEA;kHuVqAF?=@ zh9BvwBy@^<$&$I=B*&3FnOg>xfcDRTuScoQL5zA;6am61*new|1m!Q4m+x!KBP{Y8 zP&>tM+gN+$fW0FRL*G9TMJ~q}g2yRDzM(&L_RpIQ*+1{J8u{u*!nNCWoj(KPWGVG> z*+d4r;M!o)8Cy_J6yfN);)YwXhm(Jr(eP0hEd%c$9Za^eSDC6u%kqIG&Dq+>6xBqD z+8J(4O*oOfcIin`ELFrOVSjiV%vHHUqO7iNrW)+5DI(!c<@Ct%WiJsF#?0DuJn4*} zL#oR&^7r@IEQX$meEWEUjz4z4MD~s2Kq^V(!LCEz6=!-G@K$Nlmnt15DE`|gLGhB} zw|}WdUZ7Z^k}gOd@LWW6sl#9Y7s?7#>dxMmPw54@dcr>eV=KmKLp9$hejE(HLJO(& zPOT5VW7Q?;fCvU> z2$fiC)V`+{9?aOy1dR%Z2}aN?EXmbhxW4dCA5hp$7qTArFl4h4S*FI$_QWx2Mr`+7 zT3wb!rwI$E)wX*~E|tNd6BoIEZO07UsHY@#WmDM5+D9@aJlaIjbTU&T?d7z!E6J%h zwzbd3%73c%X+i#bV-fKjuW&5Rk-LhvvZSTw3bjqpp{36#)f2pF35iTA+kIcBD zBqT@XSB|{tcN$d1TzW$35tbNc9FjvKJ&0Xi#N!2jOB5^wG%#A}yp^jv{DcmdMP5?s zHNh&((5D1tM{|;f!%3VSKjRks!S^?eM91K3UXHPPv^)Q|!}})M2&j=qo3_Kuo%;#j zPW+=yNXr+N9xii)dIjJ=y!@{W1np3$FX&a|;1hpfEaAObZ?=ryF+FYf5nc~2>L&-h zj`af}6x<_vlcf+*oIpbw;!{nucLp94*fIwWq(PW&8%I(G9HaRO?hNCQmr{oLg{X1I zR~W>_QCb2lobBHhiPBrCdap({BNT0#ix&2z@7g_rXy~6@Pq)*hS$-_j?&LWb{9Sq5U4gW546PqU2b?2LqedeXcACsQZ+|!fW+N_NB zidA1d$GgTW27pTb0h%Da*+jPd<%33NT7jvBM-zm#KLT)9Or#Y8uuE~R#X-bQvGL1#hKNxC^!`cCAwZ(76I%#HZrah*_r1cl@k3cWTt(M 
z1Msbhtnx>&!%!_i>JhgKD%WN@0mO1!S;HwzZo;+dyQmew)Hxi#LuY>8lh#qoCHL9B z0a?5q-!+LOdUN?#>tGXTRqr_03{Px+fO+7ZyALbzu6zzd2vgkvg*m`(QO0SE1pL6Q zGZrtqm z<`u7lyJZ+7p01EE7^w9dsnCfNC2>}p(Zo1oKl|GLOgdZPZ$H*G!HiQyw58py0BvVy z5ms=!HK9@EY8k-EWYE$#h!#jH^9{O5VV7f@9CQrDJ9i%GQUFekZ3zs>h zJSy_QCb29(QH549em5#uNFsvA=;_)Eb5=-5{d%cph{Krv{BUNljd#XRZ!9N>C8@Dq zX4NZ0uxCq>=D{zBKpei@rb4|4iu6ETj#R{+8#VG>tE)z}qKeXD2S9vL(;B^-sH#;n zQbAO+^pK0F`CuAiGi&$1@K4EsQIJz$)Z8xjm8+Hipumgth|Vv zaBTKSLvm3hE3fRK-3r3_vZd5b!CV+jA*U=jDI6I~c}6z2AQhZIh%4nKNX8uKRCChR zI6OldVpsIpMRmfC&=ceTKxH3+{8cQY)v*2C!Qf<E=QXV{;rPQ= zlg)q-1AAcSS<-=gYk=u503T>hP}q~QLd z(V|z&Qx4blCHrjDpAQ9V`KodGx6?pa7&nL-_O{Eg`cbkptEP2&<7ObP!Bx^iUdxm5 zV}H$nhbjPHuplDdvffGo6RQgvE2#S8nc!7*aSOFC=F+gM09~AgBEM&0IHi`%p5x9H zUEToUJ>mi@HVZ!mkTOV#hp!dv6AHZF&-b~YUr|F33hj!DR!U19ar?vsnNOv?p>jgq z*{uK^a9C%W7PA+pLwidw#Sv%C?AgVcDl*s{6$$Sf-}%NpAYU6~U;`lAuY(Vji6eV1PZnMFR58 zAoN}oTNbg6WRjO~-5gyD^I)pk@Vtgh9_Lc@i5s65t7)T>BTFt z)92pj&;qZUJ~Pb1kAc5^z3D*@wa_%J3KhNJ1nF^k7o$1aHa#{aLV$N}YSAN^yKd3PyQwojeLwp^sBv0hq~W^ zF(2*w$?OeGuMJgUw$v29rNDX{RTQ4bm0RQCNrrY#2i0TYw#uK4LF0aoKzXWhTK(wh z(O6gwIho=etTN2(LegX1?+rQRKzuPHx z#&$A$LoOs;^cQMkhQ0jx+aar0JbpvxI6%Q4zR{i%_{aCAU7!Ol?u3z4 z`$LTA(09gfG1q@@5%1=Qj7b?%5ssu2tpt&Q=i#bWsxUzX9v23H%CMXLA-zUQdyL@7 zl`((TNeG33AVL?{*6;$u&kf4Rj-*0+>Jb_bR>Tt>Q)7k1d@8zD>58;i9uG$7sJu<< zquC;cE!6rCNoys!6;Gyuxu?jB;11*z+%yI$1tr~onNS_Xaf&gq(t&AmJ;59G!vf`O zNEn2Yr)CooePBv0q0A|aLN|2JmHM9j!YS8nVqDYh2c0NflDUjqFXsm>!TEw<=lRk1 zXAl=$h5{ZB`1j`Q)aARvqKB7QCdK=*nqhrv!Bu9*lkJ6y+FV4B z)F(@0_iobNj&1Bq?)Nld+s0V2#85_DWaI0=Y!L%B;vlV(AU6tZfm_iWK7Kw8a~Gof z7kcwX#>6xfCyK(_Yqg4lVE#B!e-FJ2q)UDU=xgq*u@S?5NTL>zP4dRrhGjdES2m0% zMdnj|s8jFaL}>m_K>#r;)X4to46^R}Q@dG<$bFgS1*!7AL6s9xy=MHh;9~(T-;+d{ z2f%UwO?5EyuL%$14cJR~w#yHj0joy@@-m~aR|N+J^V`MRq*JV@lCkhRK*?C>#3tQp z$S&CGW9`Imbtp_E8(;8N0F6S#2zH|$ZrhY2L}L=RL|{E4CsfVQmq3$wWZ zQ)?M{c3ivEc6rJ=3M0)p&LmYU(rKQagz2(|Nm(s+ZbV{sa9!Whw=#38&g>mY165w)o0x4b;R4?*2y<$*=kg6og8lm ztsXpO83*bi!#n(6(r-A`5!dYiCo@5#z#)nhw7iPMquB5?&Ebj9Gb77wg)`(r`WwK< 
zr0)z6Ttz9Y@zq{fiK91F?THe(#ICdmW1?~iXvshWf)g#TQAr;QdOuR1jS>FkPo*@k z!H|_hz=w-%!D8XG)|Fqf`3E+-Qh~ijW?ZOLJ;mp%IH0%QY@y_a5g@@E~8lC@SXCw}9 zgSe37mj;uuhtC+RE|I@;?BeKwrPe1r5vW08B2D^mD!-TEK8phb)O_ zn)n>18AZU>T4opsJ}X3r!&Ix}1F~qU5>{)DO;uV}8=-z? zhMz)GjoENHp9AHaf8;F5Q<2iwUhRes1>ns z5f*PpnvGR(ZNoCMp0@>ZtF)|BnHby)OY^x5uuO%V(VTvZEm46cQt8-%?!w^pB>eeV z8xKUd8i4Wra*R)?2L7v=M`Zz*^67ClE;$7+Wxu1ZuRt$ut6v|FK&L{c-}APwu?zvQ$^N z7pc-JV50Q3axd;`q#X)4(9%GS47(zRp%icl9|M9~>|cZ>xncgD^fUD zIl*Xq5@A#RT-cnihkw~ME&vH9Jd9gtdMmDg2ON18@wMEnX2e*fx$2?FF?mQYSl2y- ztid*?1qhg9ix{M@KA~eB(9x4k!K6pq1Z~tcFv~5`g|*^ zUBr(cl#06x!DlOBv)fwj$i;jb<^kKX@N=lp5#i<+4Lo~qgYrwv2ONUZ3i3Vh(N$%E z1Wb@pS1t&LFO=QY%6ql(9YHv=U!I8y;DV;m zvslkP$Zwx;9KV9U7B6_X2eF07t8jUg(rk?oMCDLmP1-7FoL77pP`dEf;eZ;i#!-gz zVU2HegYnbWXY>bo=eTF$^3!1u7)p1^2SHV^N&>k^Ajpp)7y1e(MFQB@s1$IDJc@Z~ zhC;qAU_DO@7!#{?jEBw$G9D;vsq{Px;5?Tt;DU}%HiV#qq10{Rv6gJ>UPh?Z6e!^r zaG1f+k%i728QdI{O5B#dGg^8J8S#T4(kmVAVA&&CMkx!Qi~Yinp~tr{Vd=!`yR^XZ zcvVpnD2@b_0xphRZK>i7s1&fRWC~vyvMk>Um<*AR3wnT2z1-jGpy2GsM%?g~eMoT4 zHApK?eRvHI*77XmK7PWkM<`%tj{u4HVhuTat5OCbD5N7PIw)P%xfYrd3hnWWjG#qV ztK2W00C333A{OAt!x^p7J^ti)!3Zq~LW%nsbQ|8RTD zIh#jXfV|Huv+MoT@ICzIn|)i*I7+a@7I5|$;5U$-ia?XAY$~coNx)tLN&(x;r6{!_ zp_O_@UzBlF15qtTA-{a$$h({@V7!&rVB66Gzx^cPG)*2{;3x0}mKS-Z2?WqcCL`iA z;cd>gQi)tGX>29-zdcA;XTVkaOmaN_F9Io41eOJY_s@oZNO%=IBh~#=t1L)-Zw7ft zJgPNBbDU6B)_AIs8>N6t_|zKj*el&{S=LDjSr|24L8r!W!sb7;R&V+ZGO!#o##&XoPa60W0V%aksV!YFZ3LNBqv@og`=TdmYX zE=PaF#mKs-2LlGT-u3`VHe(XCowGVvy&Cc*2QQq3WM3fTufPR5%@dOIw6TJCwgo&y z#7&_Y=qu=6-zj`BDgz6BrEtWHey!(NQUJnwU7(3k3TnYF;pxaZKA>PsRE1MShADQWMj=z7HlegWP+NWdhh$1*eQM@ zqhmS+Q@}=jPQh`$`K~cQu8SA>n87F6*ntTw9BadjNG>b*Dwgm%mCF|y1r$&MRX4i} zIKGMl*NiccIc)}G-N?YS9~%$r5rliZaNDO@aigewhBU4PQowNABB+WTZ$C1=4$EXy zq3k>(lI`*1wd-)$kaYj~F0kV9{qQW(P1~3SCD=AN8q;3DNOf$IoBrqGka%iJ&QHmr z=T#xBHZHWsNz>=sf^xQX>TiA%X!)hLkD24soa?rfi(4tgOxX6T@!b$|;Q%h_2CU(q zh)#Vxn&PsSppH0!H}r|b65>35V7Au%?{h!I%z5GFTMtCE_kr<;mk=~(FJT||(fN}u00qN(Nw13?1m$~3Uet7@fb)`Hj>yUd 
zoulA#N6X%iiI-C=*Fy6(t^Qd|tbIq7=FY=tz|?|lt6{MaJqW2JU7m>8#j~ixae%7>GGz&DJNW}|q z9p;t^cXV_GaMIk}==(1L=)^%AqjiD27(wah3XkXMJDEtXefYvN#5!I-cnFklwgg_M9&z=f2iKU#8q&c1MdqNdf=K^Z@851?xON&t$+Q+cJ*I`IEq z>xNVDaaf2Ok=YSeZ~X~Snn$I=PJy6ETe2Jz&cT7xfy_Ikq7%s{>~Mufn~w8>3>d;qNXuwoM4a3cStBmQaB!F`%-W zK1(xXOC8rc{>7Tv3V&J?H$J9w{7+IFMXAn;5*{_-&-Q{dI z3@h?0#_JEjI%yl4Vdxa5V0|qxEzb1K=-t!u24&k*jwI*U+dCZrS!q@lsc77u2EfIRw0oNyPS$C#^wRNc;JmBo zBqZ#}NttVAOin;t$LHODF(A0J7poZEjnFiC!@y%aY%WBV=j3bz&HD-yHKl;P#Z+(z zrGN`AQfGn|H3U@(b%>i;q#Jde2-sTVOj)&LOMqr%*SXg;J+$r}K&)_w3fu}krq2qB z#Wmfh;^FRa*tiTOyd|xFn8s=MHa7qWGcw<>X$o3}&}-|DL6PF{BxaB`u#;PZD;UN8 z39DRM?w-8o)R%vx7#Jvr0^W}-R%^LV3;eSh0Fsk;!j}N#6sSK5CdKfNgReVT*PZ&RLh7Y(j5U$t#0Tq2|mHFIgIb|5KpjHp{(=D{` z$TlNq1P&{>C(#UrHZF1P@d>ZICA|W<7tZd<6&ZZa$;8EY5#284Gb?ryvj*M-Od+iF z%x>K7b(|GE8*cTjWLd@kEpoO;8`D3CEM%FYM{*D0fX`tx$*tCl5n>lb#!yO##RHke) z!D|p8DFfUJx(;i@1wr}ZyAY3Nw`neD)r7svzElp^dO-(KZN!uUF6k4aMFr?i=R6`` zy5(@c6@dK`k`UbM1#Zg>au$ED0P+>Y>n7TE;8n>r-+RH4K*XOr8IgRjg5wGps~A(@ zvlq(_?Z0Tg|CW6YZLZa{bZG=*^D|+c$gS99EYrO0O0XD>RNQkKw7S8S zm@UbyTO}!Gj2~W-j#MW}2`B}e6h(0_h9jUIQXK~Fdkz9?p9qXK$`BAqJTI3ax7`2y zCRks;%>_4!!!p-vO!gNM@j*g z=mA+;cmcZd{bKcEOVbRvHu+vJxKZAolLD49Gr9I%d^qm0^^hy_@`in_;o8w5u1&YA zx2xlv{93>=d~X~k7V#}X$O}DSN6(?I4odhlC1=q+bN*9{gt~HpmK)bZ>XTjt|OO+BzptKLTgafOzuJD35yP!Pb z#2c6P(CV~*sQIK$LF-JHSK}t-p0^=nal7v*Abx`Jn~ugr7^x6%IhHC{USQY zH!J{&cwA|~a-IBVVjv==8D?S-s!`rhWn4=+TFN?sg{I%K7QOYq2PnS8IZ3kuy8_6VPb3^r0xsA4Yx z9tyE@?3=u6g+<>6RIn{HobMnWKE<25Jp`U-AWM^JJeZYk(J|=RgylRF>NrqeI4oo# z)`LBjFqG%_oA0`7yZs2gYdb%EwcKuVfH!0|1o&HkHQ^Q6Ek&pyHnyNtIqkryMxBt~ z+=9~ePi#*2wO{t_eeq9LK{`o5DPSkb6tKSg&1q+W+<^ronIIL`cwNpfx&)D0{=_8Z zyj;gO^`RF!;ko!iFnmKQHGkR8YzgbZf|guzLEDmNY%E^zugf;Lp@UA|;NgueLTX8= zgs~&YuF&S)`}X|UuElSi#(5z3b0%G47WNGfzJ#3AMfDJ2Z`UqZsv8jv9c+@8$*{cF zX@fpy0ADxCDQ>`yWG;1hz#q&7PvPVy0i}R*lUpDx%*vMpLI?niy}_qqtIOH|D5DFM z=-jvmBF9m;JowylXsYGD&^cevxN*8KSVth;X9A8P0#K%=BPoXl1n+jqCU~QLZ%0Zc zg}uRx@b?7jGW&-9;GaJ9p@-~GXi6pGat$KE-Oriyh>m6yJ&uWi*O1ZcF~}*Hry!G_ 
z%{?-6wK3B?l+0K&4xU(H1LCg=M+$2715*(+m{X!2f1UV)!gjR}`-4a z3yqG&&C*Tv5f_lWn{a|s7(^T_lDx@RJAtOoO%vzmGYDalw1RzDIbrz>@UG9f><9J{ zL9nq^CwL$b(U9wu*pY~b_PymPjW4q1LkYit;r5Nx^B+xKe&;_&?t@=-f8xt>e{G?5 z3&>x^;Y^qdgf*}m*|Ul^Q6&poyK*Uqauz;mfFL;4)BSg_6 z#Y|A3iIW^0wh?c}->C&3XmElt7K;6o)%EYwjM)2xFuxI;gu=*00!jhrA~l~l?Ux>K zEI?2837#)l1NNSdM-**$zJzKJKg>)$?}<>$2`_vN>vRvM$_EzcRw!TTKFrI@fiQWg zyaHPuNZJux0iz>e4eysFnf*xMFXtkNT|42v`|?(r-EX&l2JsS$g%xeV`s&J@8FE3y zeC1%5=A4@_4T=Rt<`MI4-~^s@1?|Tb-Y}ow_5JT~!#oRc2S`|0!sqS&-sg{V#Z!$1 zk$_Ub1reh?5ksR(>|2Uci-4?Yh>Cs-)bo0w7Kgj?QpFKApP|M1B|_oL5Le5%+)nn7 zq0#W2LJPwMoWBc^nDz?jNJSp7;B}cpK_J%DVYG>yiJWtbV*+wr>wWkAF$WgXP`WQ! zIv(!fZw*(DWtO>!iP&n~X9jwu;pK2M73;z~a=>#(;S}8Z*RAR9M2pzUsgp$pp5>v~ z>F6bEQo;p&vxvPb(!&x^3b;sLg^rIOdYnb|pC%r87+sW1Awmhy!_kHE#idG$1Yjmk z?+8!GYkP|%hXfsc(fY{0z)gA&*4?pZQ=6I6P41Um@5u?7Fek{R5FG}Y$Tepa;UmCo z2r~VD{@~=Jj{5iXy4>z49A7`iNk=0EYxideGb(w^O*SK>(yxTMDZL{9JIsQDZ68DM z6>Q&G`S+Xy1unMmx`6>?@^VW(`Qh&GeBsC(s4J8J5>N^_fUx|6;>Cug9r|A=Ls%Ea zIm8>D0&cHg{Cwe%rc(Z}b9*u8+j%A93;LFPHulzPh9+YqwT+&0U7q1)CLC4GbMnr=6M*jUN3T_fm3fN6LIkt5!Ipiw5 zO{HrmUf`xX9ZGmJUM9oqZi$xSd}2LnIucL}TQ|k&qbjL#c5KH*;|sDZAT8mp$Zly- z|Dh1F$W@CW6D6`H(aX8;04p8=RBScAmvS9Az}{WDt9eq>`={J`Ye6QJf~+5)f5#vu z@HdNFW&2k)Q3g_AqMUzs{bDBf zP3K*XT)sZq5}9@ZPUE>HUt$`?toP~TzLv6j32XWORa;_Y$D4%7<@jHTA%AR(rTW7fUn&4K-+bSB-@w>ZHGL!*fnR> z=>qU2EkdgsSc^`BxRo`Fv1?1p7|t7Hdc0f> z9y_wOby%C6V%wl9XEucU_|3Ug5Iuio#~_i%dyl<)~mSgU-(yMXbrTto}D7K{nP zAuMK2Z&cfs!e{P;{S@~S;I2`B}eo7`-%CiF14 z(&EYPZQErfG8f@TP*?mQDbD6#YfDL7-j0K<&;}@yZ|uCRHLq<$nL}%R;PL;~7)h6O zP60!32M=V@^m`AhCSQww0uFL`tWU>n@fb%(*eu& zb)N)u64pX4Vk-a-y_Zj+(~!+;Lp~G|xOjfka9uCtlDc<6(0gu&O?Z>Gfe6P|ay_TB zg^7g+p5%sjp!m_Tp8Z1#KbIa-Adf&`2tB& zw7cM>8`#>SN~Q!z@U?uv`Wb`|-Bq~KeE$;jf)ac_6BZN3f0Aw;-Msw1n={+;x$k^)!}Y-bprirM&>>{?wjeIbw?!%hwJ|I9 z1kn8`JlTz1!JNs;X~JYjOx7V6ws&xsM(^O4Kh=pwLF*0k8LpdW1f_`y*C3DdJ;k}z zCchiTpuU`zwdLb!3ChB#fL98*0FSwS$-&29pXuEo<@<%snN-5{PXfjJ2b&|0IIG<^ zKRI?$b!a5Cr?_Rz2jiD@E^hj6p$9PSfydr%z5nrZ@LWFw+WsSB5i*}MFT@M;JGP>! 
z6T;n45}#_l_ujKx@BZT(IrCn^Mwx0c?~#KYvwtY6JQa( z02<3F6r0;}-+i~_NiPe%W6325xT62cY8a}RBE~;MLziH zeQ@%8*32+V6CfFiZp1_|&{{L+Vh6A5+lxtp^yH)!e9x}vx$d&BrB$k%ydRE0@H-uVkgWGvC34IFe%(NF0aw-2}=i;Wb ziZSli`=4&d+VE#BxNu=j$ZSzg?$TZy19wODV2k;%H+0{F1!Z2@CA_9`;lU?7fPZJxpMn3(}iAw z+X`<(c(3IDm=Mc*Y_-6Q0WTQ?7h%$P81)fUh2!@K*%+#$I?!kYvP|$GJp=MHgugNs z-oaz~Ao!f_z`$N6w1utrb*yt}FGzLA2Tq$UxOO#U_6ZOin;;5L$P37-b$sGJ_^0*~ z;;^zTmH>1%U}EyE$^ZA@-FRjNG^j^7=(_B(xx~fesF^rY>wE>7JjwJirvd^jd6t-_ zhu(p5IM@@?4uJS7W<(a+H4WG8g;2V%O3;OLnKFZwTw2% z1Uw{@U1&m+`7E$Rpb4C`fltDH!(cDhtvP7N@10K+oF?HrA%FW)Hu6*cX7VzQwD7=V zq?7ax_$x9yR=B?oWAHR4Ad7yC@65)p-ukpZ;tI-10)D>SPPn7CzGundO@o^86D|fm zj4xyIMVgmD$XbC(e_I|j`hmLzf7`GnaU0a+cKq7636sCUcz++)taiiD+b8~Z&z?N` z$QgC}mD3vZ{k>B(;?of+I-P5Lx=a`Xf-~_a-NFaquNcK-v5*W#JH~_fc8gi3OAjgk782rZUkUG%DC~~+zIETfGzy^|16kxQ_CM8cm&r)QM__Y z?VrA=Rf}r!Wj!Ov7`YbuC*yVgyc3-No$H}@1)#qh)5$H^fBlx609i&@Xo7b^2wl*Z zT{<(nknOCi+1%S*s6Eeqd6N<1dj|#@nPE+`$Z7>&hv4@-3x9_|fzN`%KLo!WM@5O* z^G@vfm5MyD>joGkwBKunxJcX~I`;-5dZ%fo4bmzdKceNTBnR$4^6|cp2q- z1ZKl4p)_Tng5NK?b5|T$vZ%KXufYaqpt$P_>pJuzUfeHXM!p$l>1IodZf^N)=S~;A zif`(^WYYK;Gml_`9|e2+Sj@1cMIx=byuPDk87p9cwD}akTbjnsX}-H_gHQOp!8$)M zf1Iuv3rsT3!?#RELE8Ge_#CpZty&up-{AJFvhb|}?Z$u?p;;ktPZ-ksOrws{?j^-Z1F z7OEFpnr^(YCka=D7u%Oz+0YPe!1>172}tR20+i>8cz4L}K$u6OE0Jkykmh zL0IR)SV5L=<(nYgDHy$f>hkV_WL!!R_4~Q`<85^fyR~_kcN_<0c`PPa$HLM$3i!_Z zmWi)pJd(8V@x9pvcjsF;<>)Oa=Pi)kTAk>bNq>6iRmc#{r70a3Um(|f|AgM;5=1-G zja?Yi+WzX5RjplsKe5o=A$O*d8bq|{@)SlDP-aYQCw-zpv+;8VK00;T0;zt7AmmhTw zX3}>7Hfs$FQJW8A+uS*R=(xe)&A66RFV>RiVQ#U%HXXY2Ilip;0UK^SRZ?5$rH%8* zqRX(>cMj^g$zIS*L%y^lZg385QM|maX}P!S$E8$Q4!lT>$-6%Co+*|UJqSyR zGw^-GC$EPx!@G~~q!4?B%({J=rFBl8vaOTf{9Me5R;lVcF1p|XcxZl){zl}_X%p5* z!8aWch!4VK!f2?&9M|rdZ-v!+i$>b7n|^Z>6tyY|3G{sDGpAt<`ciyHK8CucM1A5D z%Tg~Q(T1Gd<+Z<;hkJiVeDJ2mtN&bU>|FI0_O0Uieap3q{-|Ec}yt3~lM$)z8~s+;nH6iW~nQwo z@t9An;hyahNV_nTo7_07?M;{Xl`WgLYrj3hFpOU_rDil#uW+n0PgqhRiXyhi_{|Bd za+Q~;QnLn*#8VrlJ}loCE11&cx8MJhAykP@r2g))@XWq34Qa+A<_o<`)Uf#8#V^0@ 
z!e)to9B|ib&t+sF2~tO>5|e`nwB2+D%$)oY+@+YD(_o|%5?zd72AQ2kIC-9_{GaQe z{ZyLG(g13=h~H1x?sbE%Ft~aZwiMlNx1K#Zc*^qN6H1`!k^1L1m9+DYz5Y6*>M@aj z|EekYg_Ve7`x$K~{_1vcImU_;Xy2r&DrQa?Gi6NOu*3gXr{a5H)U;{gjMS1KBLZSp z+;JfM6Z{|k6~vBra$wfG5VK$5RA*(rZh`N{orCUreO*RjDwx2|!(UPGh%H7@>JxE- zCuZ4kn3JPxpoB+T)c+hC-tTCf_UT_O%4K$r2<)0ve-pN1{T6JT44$l7)fo3Lg*&eJ z*tA2r{8pj9hw7R?+iz8&lJeUoe)~kMNk4@?kB#p)wwZlFde&`8nh0O)b)qt15>1z0 zVv7E=`e$F11J!gWWm(qsU$>0VnD!v5UvHa*5?jvvohO4{EC-}ZQ561WKs5MEdQWpG z9bu)GNd#*s%4py2y~FG%GjD!@IIYXY|K|fArcLX62QfSMds1oZ(y7J@`gEc=zBWF^wDA ziFhC!u1%#rLICy)oG_yu2>TVn5{Ee-<&+n=*Eosc@bOU|W?X zh9Rs6FU3~7zNk&WbdHbtZmpd*O*->r1lT;J zizj9YHM-wwA3}e5{2w;KG%Zsx5ZG}|{dtHcGzV=T;h=q(m;+sKC=mmwv~LTfJwj<<96qDjl+m04n*J=+GrmNRQ-^0Rjm6uN_e3V{DRNmO4YPs zFKznNQDqMi0|2`F+NRM6RrxJE$8bm_CnSr9_3)iss|jJobaZlA%Dw8JhtQJaQ}PdG z0J8<0`F}j@x#w0|(a8M5A<+Kd%}tCLFQdhJOi=dHVEYa{+ow8~fFovhVBZexIX>Q^ zu85)ivO?I@Ap7zoI(bXjg03vH%T@skuh_lY&O4bK212ya^YG#QJN%>L*JQ2#p&XxT zJ-QkMXIm4cEs@GMEj-03+%>Ub06z3$9V+eMigINhwia6|3tQ3~!G*B8o(Sc%7`kjR z9*D(Clz!Oo+WHP#^@~9C)s0`Tq0!jon2CN9bNDMEf(>5?WjWDq8dlrou7Gz9iNc;hGxvRiCA|n%AdP zkQqfsVBe$PtW^vqd{^vEUg9NY+5+m|e@T3n#q|NIcESwJhgp6o`T!qtbf`}Pig`+1C<`M4<@-Tqx8317Y zbqo6Sb&wx}KfGgUlSWuTZ1)+Jmdd1%>cJLHlqQp+q%0bPXY@i$jhfJpJ*vW+>SnyO zCW%PidI*8g@2@&iQ;1N)!u#1)!m`C0Qngo@orS+Z`b4#NG5h_y^hEKcdGjNYQ#-r=FkBj3-aUh7%?q@i9aZ;O@TopaJtFTPE zBTT`2wv89c@jOC}b8+<7IWPkriF+tc_YyTii?JWk&ibCSM!E4J*R^v|iDjyYe%;;p z$2G!vK_-tQaBxl75V32;v#3EywYs8w!Hjd?yOjGKd{C^x;w<Dmt3qtehM7AY`Slh8L4GcE7hpxlM7Ne&p&hP;hX z+#g}k`yov2i-JGc`I#k`JQd_c0rx1~9b+fpXu0oW_INd9w=ylb)-9a;vYP855i8d9 z#0`BOF)|+SH48J}Yg}(!GRzwSJFjUx6I;*b!sNUhrsu4cT(W?aSAE7g6EjX1Qc#?R zm4!k))HGFA;G`t69AhhCmhd@celaM?;@iMriK?nc-AprWERB$ZfFM4mw4 zz{6MFhKJ&*48n9%|ADR8YGlu3&9u3cg(hU+nOMxaKBUw*KQYtljxVN4*zUw^BNXxL zIIsNWstvn7panPxCySTNs-uL6qtpfj^qVna|3^?AXHA)>61^Lyte&IW+;70pTHw&t zWWjg?QKIJ}o^yvqxy<$`0=p&-yM}Pzi*AVr1-6+Q)uY_v2j%$|j4zjG&yRKzRwaBP z?7!>G{}ITA)yH`arPUNoimp;ij6?WAFi5-|Lk=Za&V${etpZ#Z$`Rb%Cz?gjX4`!Q~eD>Bct&y&%rs#FH}wH 
zSdgxbT*_E3m3!+7C#c<1#{KVseq$???_yz&*oAU;~JOs@YjwTUUpLhwzFV%4ov z@_ng%T}Qki3>%wwz64Ey^?&W>-%D6`z@hTRI-s!vb!{nMAtPqW zL&dL?L`e+JiDWoly&vI>r}?252Y%T7(tz&?%dQhXc4%{ExaaoqWA@bUT!O|P#$SV- zM)O8P)${SS+LSN;IECe6gwET6ZB2b1n;3BXX!x|%tMk>SBQYkp_HNtw(l z0R#r!{`ziw^M0BlDvd3mYDZH_zIg`LeM4KZ?mN=F*fQ7PtlR$Nj>9>N7^hfrGb^C{ zQ|uBrBdO>-^!eq;NduO2zc|R&>^`Z!MHXX#X4@~JpCA7a&I6xPHZ6YE-MAR@6JnVcW)o}XN(H|)k@gJW(s$QMTpZdQ(chH7BR~X zQpI;E_DxDtT0Vg;BySr29hwuKk=ZBi>(0Kic};u?vU*1oxN)O4;oim#oElFfh9)dq zStrgSml76U3I;1+HTu-3irtqrGg;A72xtns1MdE9MD1>&{*DQDg~K)=L_~{@wM!pQ zM_D)!f1l|Z@uzDt(Ik~)u&5oMl$U%fl=H3OP-J=ctU)#%!ID|@-#dP}_ue-4jF`@r zLJDz~r#BM~jE1didp?Cj|Nl62`pRwjY(w%c90HBaZJpS=`0K1&#GntSCJE;wR}(jg zFK33-I2JtOW}X%^K@jScKrgT(4k0@Pd`joC)hR^t$D`tMvc4THj5vaZM)h#5@ z{xDTTO|~0mWGPp=?9Y@F81p5VHQTqeQdnlG?r2vGqVO!kYjRXVv(`7qQiCrDy*TKH z)+Cur94X+Pw_kFm!RTBlpHZ1v7mos)uxqFi6zg7?tmY-@8fQ71Eb}6nWSTVsdbRc> zD(`p70A~fSKp+a&m9LNkXGiC*fH|*$>FhJ7{pz9T+BnQ~a z%LvT2D;%D`&4hJ`*=05keu!Y&opEbhoGe=@jU=z*#ub}iG7_=v+;xZp-c3HO`-Oq` znsdyYYg+;Dxqa*@3TJPFWnE^r9-yGgX#Ce&^CJnLIc(~Z>^^Au5lC`%0fFH&cH$go zGR+YyPBv1bY7|C#xhqPOhOuAP=|{nYix`RWuS^RvXX4u#9RpvF#hFRXDX@0--%!H& z8Vs8^Ds%IWz`!XBvBpZUWi3hz#Xm8 zB8bxx1e$ZKw9!81d*s3JO9LN-)fHWImTW2Dw!gpR1cbp8TQwS-u`ez)WmOje9`TU4 znQ(|W{gs;xB`@zFK$#)7(b-DSC|eO7RqRO=e|bsp;taaN*wC{uLHD(+Fdjn$J7ZN} zj@=7Rb~G6E&)X%Jbftv=24>STNF0Vj{1i&`=nQn`#T>g6SV%0P*h`2%jj^6HM@UY( zu`Zp{#1hK)-q1?}es536eyWTLxb2pU2g37LM64O!U#(=Wd7tn2tAL?e-k=^31uM#W2G*Z0Q~Z_E-qV zs|lI8CQ@(~U5**HeI=h*z3&e1>Rs@Vp4^vV<07L1W(s=(d`_^z%bO$=C%q_vPuvm? 
zLGK-pa@=ZL@+lAkoV=K!r2{ZK?}uma5gF)QC>Dp5n2d3T4jD=H=uQacY@Rt$sj5{*} zQ)521%Ux{FGOOH2yIvT4uHBkx6>!__I7gD0Mx**@Cx@SIG=mHX}jX&7{^yk z!S^X7_Vf5Wt7}UIOc;L%mE3Uvv1+}b`80szg~b8s`?GBpQ2}RVTUXXAEd=0H@m7+= zAz}p9BCt-@>sd+sY~RAUa+hu5G=vDQaWo}b#`WzqEaHlcvE0ybj2mM5nkOxw#DwpN zOqGBD4PkJqpP6%4DgOxj##NYwZ8=im`J2yJ?CXx&rFdp(2=hyKtV@Y)^A^e?qY$(D z?(i)2vc$5~J5vR`d&=1V1ylOhQprm@2NAN4B1!<}LlSe^6U-boH%$F#z0-Pf*XdRx3G`GT{sSod|Zw;1Eu$YFlq=h~VVi_)S{sDS2p&$ zLV`iqvK;!d4~lCU9InQL2R^m7QVdR4yWsika2JBnN{DXTNy?Tac<@EsbE?F|j5l8X zW1}_ded&0lDBuBp{Vs4Am9A3Bm%%9uXQXb1nQ^<_z98=dfj)Do(;U>{porv7P!_Kof{(aQ4jK!7pdD~N+p2ZiOj zqlP1d4Bb&wAA7v@Gbq>R+oQEl#kqbS)gwpa$ic1lYe~vdM}XeGR4nVQancVgVJw8n z`AEVY9!p3Om5oN7E|G@e*d=^`?08k%?FN)@Y%ylPQw_9H+KXC2|5P#G86E*5^cr zpMtCJk%3w7B1-j(Gd51hon5oZVP@REy5 zz-Bf6+xU^Gk`U0SZ;KcDi;q;DljRZzGBeJ@BSA0B(Jm&s!9`}0RjDA*bDEH~uuz`x zJ~rOR{@%|B36Ha;v@D4+HQJ7Cd3BZ)CI$F*ufrO%9YtwU+cXQhKtt3m&F;xgennSr zFyqvxfcM-sZUS_0&f&Ps8Y!JTTGulpX4N&61r}f}3yvj3$3Jj&>GVNL>J|bDqo2FQ zD_1^@@-K4bVOpQ_1{}8VCCbE_w@V2p-^f&P2*8?RwoKQBwOVWy3aw#kXDwx27ZIkN zlm$3C!b~nfd>p5dwKS~zissZT+BC0%HDAiKvF3$Vx%4Kpl0F4Y7@cfJP?>o&faqD5 zP*yXVE7jX6Ro|;j94foljm;l-VyK&74JjTXh9j?iM{;tUv&uyfdv_8BuAUjVxYHxa z#SQ{hVZVs9X7tcfegH}edty7I*w;H2!7_y6#9St;B$V$5P~2HKQ0WV??JgUIZz?FZ z1kTH_=4)CHnWsi)`BI$5dK9oYZ~+$9?^)HX)B z)1bV{!K35x?Of%~@yn9Hx-pfuIq-5aEH~tdd(Y3=ZctnJN(hs&rHnCDa6mqTC0J2($q& z6ru zMP0#WCe}y2kZQ2TwlU9wD4d6|A5&}1r5&B9S%-VgrCnLK_z2*1`ixsT?MMm7!<9Hk z7`{0AadzmFX`p!L!}HmVnC(#v0+ztz*{;J?vM6!){IJN&{xjk-$iW#f;opeQl-G#} zFsXnO0jZ!n*quBw0~T)h+?ANn8UB3=0&2iR728Czbj%uF+kG&CScZmVarq$tZ+)w+ zoVGhlnbFFW$SF>}oqHR=d$IusLS!>*?q$*Yt0T_z3T!pHus0*;Uv4DmTGoC?)6rmB zfOAcA7QJW;bc5#40j6lZk`3EIfOEq`>zTTHo}8IFZ^W=iIJyj4zatZAQhA6z*%Oo@ zDO=uF1_Cq(1Y`Djxal87VU;2?_&#V^W}Xv zxL~>)Cs@RDEW>L#11`gVs?9E82J9JgDh2}u9Vp|pqw@$4x8Y;9#U@;WOwN8CfK_g! 
zSSHEN1kHIDXR(#=c82oJI_<`~Zv@JFc-9;aRTSRNFgxcWikCL3%#2*{)Q;Hi0TfvaC$dl!hLk0N&0H_xYybmRRJE`F*GKW-){DQ+sdZuLS~C zp#p#OEaZ^c1xEl%&r8b>_|A~e3JwC{gE{X$2YZF**=XA{76TiyN)ne5z_CD1P0*Vu z4a3oiXTMh$%Op6K@GB_a<8$o}X7FpV{(L(4^!P{R%FnDmXY`ofd{|e>(B2#QYl;A` ztN%dGfK8z(|4gberyrC&gQ6E}Fvt)wYdq)CMKJNDF*8+G@Q45a5eZ2|K~z#X1J3gk z;AQqWh(67;ktjKqHkL6ZJOs)&`=u2qU(BAjU=RDriQKcg+YXOsgni}JS=F{(sehJ^ zmx|=LQQCH^rApXE?>`jXrBr($<<1%b=F_}11IBj-+xd%_aNOV+%AYuc zkW3XSyxgf5XobU7UftJX1Wp~^NidPs7h4)c)ie+`(%VhC}hm6G4XJ9s(;B~gF|JX$ETcf639v8j5u#jPps3^!GNdlda1vQVy+#= z$*Fkuk=ywSOa70(th-ks1N~u<$*7Y)xjvocv{57Wbq4TQj)Z zBw~A$_+(@M(k>C@Wwu5O@T&9FA0iUXZugIc{(W&)!cJ(dPP*VWq=&>@oAK0Q8iqST zKv3L8Ctqil%6+N%#{1c}cwQCfQx8kY!OZ|uN;_x1ESNUPnjMYQ2^O$XBr2~+P5srk zCf<`!RK7=c9i>vwIB|>gDwq1HSmj1ntFMCOkt@Xo=ilnt(u$)ou%i)ibvNhj|9l~ zNDj&6S1r1-&_e5nMt#;b0?I*46MFvX5dph8ezi*alU|}PLU~&Z&Z?IX#kI6J4z_4< zJ7`+p(``L$4kbO^cX-;T9>x8FB>kBRaJDZucH$S8L*r2ZsZI@5k5CZ9~F( z8#F%?y|^JB&$dY-zd&KU31hU_sPdz(Gu~Om=ki5TJA`^cWM30m7TT28X;i-u)Puh{ zPTTITur(=eyvV3I5H;BgTUI8Qh-}_0h=B=(xe0RdzoOy=dc2ZGp?Z`4c8Z^f<6;}j zH9@%0!o>~b1>4@WzSPN_yM2n&E{6xswOgz^T)cqZB@Hzs^5)^+Wl>W=VGIm0%1 z=DQ8sN0Z5XF^*cBW^<*_Cwk=f{yt|(TuM4#HY-mF5f{_6;QY#DFcZ33QX7lAE^Z^r z#cxPutG}Z2Wk_qK^oTRPvIzW}z*KouSdCGqf$}Eesn-Gt+==DpLs~}CN%c|QdN1mc z!D`x)jxHE0KM#IQPGtJP=$%ej+EaE+_!qtJM+?YVl8a@mF}xR$ME0hs+FZj&Es4^m z4K6s@46;~`V*cKHemPBXceS78SEbX8Skpp51&Vp4Ks?Z4^thy%`x#Gw??c=h*F{?* zIh?B8r3Rk=0`P*XzT9v~za_#>udH2U=9K|2)cZ^kQ`EPxRXat8;>lo&6w}4s2%Ch@&+lyrFv_{L2=E0JSd9~)Vkaw zc;qNF);4#f$lR?jHshMA>;0q8GLHtji&~ZRtr{&jvwZFeq$MBKE?9|WLD2Z%5h6(v zE3A9mwV&Db@@WrTd1Y$Ywm5e=SDOALKu!y>+vCg9_pL$Hq|7~gu5hKoT!QwsGMjNc zBhMR$VH+Zzl_?%T>?Lu!lBKp&l+6vvyytZAC6>Cat=^c$7`ek+&dbx@gK6L_+Yq`@ zsL6PL9w8zz7MKocSh3coM4Q5so38`QTde zg1q2%P(o4EmG9I0W=r`eKM}^MCl1ZwCwD+n3yuOe__rXkhief0&;;_pS27FOAG8!}r?bIlvFjzt@?(&}E1Rtf^*m z-?f^-Ma6DX%WtRk)gTRtH-(7PhJ}?C|KD}KI$bPvzp^oY0W03O8qoMsG(29LCU8-x zU$|O9(w}Gr8827G2}5YKnB~-MAYceNat5ilX7a0lM_w$?6rsoUym;(^0Ta_-0-s%+ zClBslqJl1QH5i}Q%~yQ4B{e!;OQtb*OP(Uf1QWI%98-aho!o^L+19M;)=1a4 
z%6?B7z9w|ja9Au4Q^6*sze3tYjJ20DVB{s`dtuclf*)kXUWSRWu)o;p7Ubaxp_eCM zZ|%z-;mVTTpD)d)>VD7z)G{f+7Xz^zc5h9SHvbau1U_qe`b8m6OILgD;YBLHBDT1U zB%B|s>UT43YwZKRoL!yc2rKxN$|DvLV={7XH^AZgm(z#V!CmKW1E!lINf90i0WRz(Llrnm9^_Em)=Y2jDEUGM zr{?;`%u5y|Is+GH565u0;(?)+V$wkuLuA&bH{I*0R^#O&hF{&OF*6*QLme16*T+mS z$UJb9jGDG<$5?f&JF3xu1F*HdRiQLIv-mk|>2R(0P)*E%xkbOH$fkn*!uB>+bjwSd zj4arBJ<;o+VBhSX+!0}ahXbNLs4gX{$qy&9kYMPH!C%hpw=zk`(D=!1XUHTV!$6bZ zx!Y96IJaS0&IYY0<)Fp;cRt5~)~^90w|es>iz0p%v)r2>yDgsRQQz=W?*xl9)4f?M z3+JkC4gua&t^~1lk11kJks~>CDGQ^_I%i$vsJ9-)#}eKrXLXsGuP?RbOIb@}!WN%T zpf9vs->rRw`neKVB*}KYENp#m|Cn!hy}B8;sO7fxN*qAj}62Ow(P0VGmY`}Rpl2Zgi>%V(Ej5jXcOGT?_vd1ZCgFv;`k&W529g+&Cxk}EZo~dzNCKot7=Er3k)AC zI-dEzOd5az#&@E8g8IGtIYUtYH9(c6Z&p%AX)Tz8JSpk9#Hjy(zzU&pSUC0QYZg)+ zuIxfNg}Nkm{~28Z*=06%=l+~#=^$0*eOOTARgsG%X~wmyrLFn$(Q<%V@xLFJX6icJ zARxPMHG7*EczduQ{i_a}7k^D;DYLxxWH!Ac(gF%+#BcaWLxr{LG?jKtSvZoX^6lAf za{>&9F1Vp4c*>&b`lq8O>xXJX3czhhpI^1qafr-2n`}M9Lz4r<_rFniYIeP9Q7gBD z)0vqM6cM`>1C5RrhLsY)i>YV&PYbqoH)RkiaXxZUU;nLd4lEXc7TUPss7?G|;5#)|H2XPxQH1F^%7Zg?Y#Hu4qa^|ERZ!{AB z96F*J`^?O$Xnhn?GS~r#*N!D7gEddMZ8!p#fSnY_)wN`r@gXTZGs&9M$4T?DvY`e& zyV7JLZ@_i^#RNFO^c?2&TD1pR2%}+t&Ur0(XfhgbuOZY%)SWct*7Cex;T*eoSpv#! zl^qKeHUjsY4o9XoJrv?W92^DmBo;`(pHp_4n5x*q3x1`H)wo4ax6AB-4`x91vHr?eYZaE$vX%IW>ga1d5qSAl zz#=n7n+ggfjKRu(w8Jf0kAxlvJ2tgglVpnneU%)%4DQPR97cD&;27n{KyZ0KtxNB? 
zM3OwagBT{3J?b8`b5@-TMR=mFi`)k~mYA^+?yGigabY(g1?<^1qkIn%Dw7AwRitNk zG;4C&R_^0I%?u3GtHgaQPeWZ8-#rHMCMIyX&u5| z-5TKK{>Xn~_6eESUKSP@z2a>)3(k2aKU87L`9mquTnc-JC#`|z{Pjd&yAkO%5BNwJ z@JOPwTFINN64LB|9FAcaT^7a_Y5qBRm^R}IwCP+(ojpijp|m!h{>kCHJaHs4-hjaWH#zqgnD6&U?{7o9wb^9EzqW0SVY|4j% zYs5XPgKHDoPgg9YCw5x=C1=4$6))O(MCGna4M>Sxjy= z0vV@C5q|R0`o^HR3KBZ3f+Eci*S*@iV$6xLq$i#Am9;eNHbCmqt6a-T#5HZnnneaw zS;zg;L6Qbp;eD@D()yJt1)%eORcv*M+`>bHGD=`?NFXZ*16C~eDd%P&e;ceex_@@k z@fZnb;b)j*tBR@g=*S(&Rh%}PKr!eGI^FiC>U2&P)==h%6vD>IioI)jI&n`uCW6~UH z?3Ror16b{?Dv@XvA0)9PhXOk;Oj&`M5&Qx43djohe`+})z None: + """ + Validate provider credentials + if validate failed, raise exception + + :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. + """ + try: + model_instance = self.get_model_instance(ModelType.TEXT_EMBEDDING) + + # Use `mxbai-embed-large-v1` model for validate, + model_instance.validate_credentials(model="mxbai-embed-large-v1", credentials=credentials) + except CredentialsValidateFailedError as ex: + raise ex + except Exception as ex: + logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") + raise ex diff --git a/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml b/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml new file mode 100644 index 0000000000..2f43aea6ad --- /dev/null +++ b/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml @@ -0,0 +1,31 @@ +provider: mixedbread +label: + en_US: MixedBread +description: + en_US: Embedding and Rerank Model Supported +icon_small: + en_US: icon_s_en.png +icon_large: + en_US: icon_l_en.png +background: "#EFFDFD" +help: + title: + en_US: Get your API key from MixedBread AI + zh_Hans: 从 MixedBread 获取 API Key + url: + en_US: https://www.mixedbread.ai/ +supported_model_types: + - text-embedding + - rerank +configurate_methods: + - predefined-model +provider_credential_schema: + credential_form_schemas: + - variable: api_key + label: + 
en_US: API Key + type: secret-input + required: true + placeholder: + zh_Hans: 在此输入您的 API Key + en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/__init__.py b/api/core/model_runtime/model_providers/mixedbread/rerank/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml new file mode 100644 index 0000000000..beda219953 --- /dev/null +++ b/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml @@ -0,0 +1,4 @@ +model: mxbai-rerank-large-v1 +model_type: rerank +model_properties: + context_size: 512 diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py b/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py new file mode 100644 index 0000000000..bf3c12fd86 --- /dev/null +++ b/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py @@ -0,0 +1,125 @@ +from typing import Optional + +import httpx + +from core.model_runtime.entities.common_entities import I18nObject +from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType +from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.rerank_model import RerankModel + + +class MixedBreadRerankModel(RerankModel): + """ + Model class for MixedBread rerank model. 
+ """ + + def _invoke( + self, + model: str, + credentials: dict, + query: str, + docs: list[str], + score_threshold: Optional[float] = None, + top_n: Optional[int] = None, + user: Optional[str] = None, + ) -> RerankResult: + """ + Invoke rerank model + + :param model: model name + :param credentials: model credentials + :param query: search query + :param docs: docs for reranking + :param score_threshold: score threshold + :param top_n: top n documents to return + :param user: unique user id + :return: rerank result + """ + if len(docs) == 0: + return RerankResult(model=model, docs=[]) + + base_url = credentials.get("base_url", "https://api.mixedbread.ai/v1") + base_url = base_url.removesuffix("/") + + try: + response = httpx.post( + base_url + "/reranking", + json={"model": model, "query": query, "input": docs, "top_k": top_n, "return_input": True}, + headers={"Authorization": f"Bearer {credentials.get('api_key')}", "Content-Type": "application/json"}, + ) + response.raise_for_status() + results = response.json() + + rerank_documents = [] + for result in results["data"]: + rerank_document = RerankDocument( + index=result["index"], + text=result["input"], + score=result["score"], + ) + if score_threshold is None or result["score"] >= score_threshold: + rerank_documents.append(rerank_document) + + return RerankResult(model=model, docs=rerank_documents) + except httpx.HTTPStatusError as e: + raise InvokeServerUnavailableError(str(e)) + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + self._invoke( + model=model, + credentials=credentials, + query="What is the capital of the United States?", + docs=[ + "Carson City is the capital city of the American state of Nevada. 
At the 2010 United States " + "Census, Carson City had a population of 55,274.", + "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " + "are a political division controlled by the United States. Its capital is Saipan.", + ], + score_threshold=0.8, + ) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + """ + Map model invoke error to unified error + """ + return { + InvokeConnectionError: [httpx.ConnectError], + InvokeServerUnavailableError: [httpx.RemoteProtocolError], + InvokeRateLimitError: [], + InvokeAuthorizationError: [httpx.HTTPStatusError], + InvokeBadRequestError: [httpx.RequestError], + } + + def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: + """ + generate custom model entities from credentials + """ + entity = AIModelEntity( + model=model, + label=I18nObject(en_US=model), + model_type=ModelType.RERANK, + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "512"))}, + ) + + return entity diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/__init__.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml new file mode 100644 index 0000000000..0c3c863d06 --- /dev/null +++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml @@ -0,0 +1,8 @@ +model: mxbai-embed-2d-large-v1 +model_type: text-embedding +model_properties: + context_size: 512 +pricing: + input: '0.0001' + unit: '0.001' + currency: USD diff --git 
a/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml new file mode 100644 index 0000000000..0c5cda2a72 --- /dev/null +++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml @@ -0,0 +1,8 @@ +model: mxbai-embed-large-v1 +model_type: text-embedding +model_properties: + context_size: 512 +pricing: + input: '0.0001' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py new file mode 100644 index 0000000000..05d9a9a0c6 --- /dev/null +++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py @@ -0,0 +1,163 @@ +import time +from json import JSONDecodeError, dumps +from typing import Optional + +import requests + +from core.model_runtime.entities.common_entities import I18nObject +from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType +from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel + + +class MixedBreadTextEmbeddingModel(TextEmbeddingModel): + """ + Model class for MixedBread text embedding model. 
+ """ + + api_base: str = "https://api.mixedbread.ai/v1" + + def _invoke( + self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + ) -> TextEmbeddingResult: + """ + Invoke text embedding model + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :param user: unique user id + :return: embeddings result + """ + api_key = credentials["api_key"] + if not api_key: + raise CredentialsValidateFailedError("api_key is required") + + base_url = credentials.get("base_url", self.api_base) + base_url = base_url.removesuffix("/") + + url = base_url + "/embeddings" + headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} + + data = {"model": model, "input": texts} + + try: + response = requests.post(url, headers=headers, data=dumps(data)) + except Exception as e: + raise InvokeConnectionError(str(e)) + + if response.status_code != 200: + try: + resp = response.json() + msg = resp["detail"] + if response.status_code == 401: + raise InvokeAuthorizationError(msg) + elif response.status_code == 429: + raise InvokeRateLimitError(msg) + elif response.status_code == 500: + raise InvokeServerUnavailableError(msg) + else: + raise InvokeBadRequestError(msg) + except JSONDecodeError as e: + raise InvokeServerUnavailableError( + f"Failed to convert response to json: {e} with text: {response.text}" + ) + + try: + resp = response.json() + embeddings = resp["data"] + usage = resp["usage"] + except Exception as e: + raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}") + + usage = self._calc_response_usage(model=model, credentials=credentials, tokens=usage["total_tokens"]) + + result = TextEmbeddingResult( + model=model, embeddings=[[float(data) for data in x["embedding"]] for x in embeddings], usage=usage + ) + + return result + + def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: + """ + Get number of 
tokens for given prompt messages + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :return: + """ + return sum(self._get_num_tokens_by_gpt2(text) for text in texts) + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + self._invoke(model=model, credentials=credentials, texts=["ping"]) + except Exception as e: + raise CredentialsValidateFailedError(f"Credentials validation failed: {e}") + + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + return { + InvokeConnectionError: [InvokeConnectionError], + InvokeServerUnavailableError: [InvokeServerUnavailableError], + InvokeRateLimitError: [InvokeRateLimitError], + InvokeAuthorizationError: [InvokeAuthorizationError], + InvokeBadRequestError: [KeyError, InvokeBadRequestError], + } + + def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: + """ + Calculate response usage + + :param model: model name + :param credentials: model credentials + :param tokens: input tokens + :return: usage + """ + # get input price info + input_price_info = self.get_price( + model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens + ) + + # transform usage + usage = EmbeddingUsage( + tokens=tokens, + total_tokens=tokens, + unit_price=input_price_info.unit_price, + price_unit=input_price_info.unit, + total_price=input_price_info.total_amount, + currency=input_price_info.currency, + latency=time.perf_counter() - self.started_at, + ) + + return usage + + def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: + """ + generate custom model entities from credentials + """ + entity = AIModelEntity( + model=model, + label=I18nObject(en_US=model), + model_type=ModelType.TEXT_EMBEDDING, + 
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "512"))}, + ) + + return entity diff --git a/api/pyproject.toml b/api/pyproject.toml index 41244f516c..9e38c09456 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -122,6 +122,7 @@ CODE_EXECUTION_API_KEY = "dify-sandbox" FIRECRAWL_API_KEY = "fc-" TEI_EMBEDDING_SERVER_URL = "http://a.abc.com:11451" TEI_RERANK_SERVER_URL = "http://a.abc.com:11451" +MIXEDBREAD_API_KEY = "mk-aaaaaaaaaaaaaaaaaaaa" [tool.poetry] name = "dify-api" diff --git a/api/tests/integration_tests/model_runtime/mixedbread/__init__.py b/api/tests/integration_tests/model_runtime/mixedbread/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py b/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py new file mode 100644 index 0000000000..25c9f3ce8d --- /dev/null +++ b/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py @@ -0,0 +1,28 @@ +import os +from unittest.mock import Mock, patch + +import pytest + +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.mixedbread.mixedbread import MixedBreadProvider + + +def test_validate_provider_credentials(): + provider = MixedBreadProvider() + + with pytest.raises(CredentialsValidateFailedError): + provider.validate_provider_credentials(credentials={"api_key": "hahahaha"}) + with patch("requests.post") as mock_post: + mock_response = Mock() + mock_response.json.return_value = { + "usage": {"prompt_tokens": 3, "total_tokens": 3}, + "model": "mixedbread-ai/mxbai-embed-large-v1", + "data": [{"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"}], + "object": "list", + "normalized": "true", + "encoding_format": "float", + "dimensions": 1024, + } + mock_response.status_code = 200 + mock_post.return_value = 
mock_response + provider.validate_provider_credentials(credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py b/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py new file mode 100644 index 0000000000..b65aab74aa --- /dev/null +++ b/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py @@ -0,0 +1,100 @@ +import os +from unittest.mock import Mock, patch + +import pytest + +from core.model_runtime.entities.rerank_entities import RerankResult +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.mixedbread.rerank.rerank import MixedBreadRerankModel + + +def test_validate_credentials(): + model = MixedBreadRerankModel() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials( + model="mxbai-rerank-large-v1", + credentials={"api_key": "invalid_key"}, + ) + with patch("httpx.post") as mock_post: + mock_response = Mock() + mock_response.json.return_value = { + "usage": {"prompt_tokens": 86, "total_tokens": 86}, + "model": "mixedbread-ai/mxbai-rerank-large-v1", + "data": [ + { + "index": 0, + "score": 0.06762695, + "input": "Carson City is the capital city of the American state of Nevada. At the 2010 United " + "States Census, Carson City had a population of 55,274.", + "object": "text_document", + }, + { + "index": 1, + "score": 0.057403564, + "input": "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific " + "Ocean that are a political division controlled by the United States. 
Its capital is " + "Saipan.", + "object": "text_document", + }, + ], + "object": "list", + "top_k": 2, + "return_input": True, + } + mock_response.status_code = 200 + mock_post.return_value = mock_response + model.validate_credentials( + model="mxbai-rerank-large-v1", + credentials={ + "api_key": os.environ.get("MIXEDBREAD_API_KEY"), + }, + ) + + +def test_invoke_model(): + model = MixedBreadRerankModel() + with patch("httpx.post") as mock_post: + mock_response = Mock() + mock_response.json.return_value = { + "usage": {"prompt_tokens": 56, "total_tokens": 56}, + "model": "mixedbread-ai/mxbai-rerank-large-v1", + "data": [ + { + "index": 0, + "score": 0.6044922, + "input": "Kasumi is a girl name of Japanese origin meaning mist.", + "object": "text_document", + }, + { + "index": 1, + "score": 0.0703125, + "input": "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a " + "team named PopiParty.", + "object": "text_document", + }, + ], + "object": "list", + "top_k": 2, + "return_input": "true", + } + mock_response.status_code = 200 + mock_post.return_value = mock_response + result = model.invoke( + model="mxbai-rerank-large-v1", + credentials={ + "api_key": os.environ.get("MIXEDBREAD_API_KEY"), + }, + query="Who is Kasumi?", + docs=[ + "Kasumi is a girl name of Japanese origin meaning mist.", + "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a team named " + "PopiParty.", + ], + score_threshold=0.5, + ) + + assert isinstance(result, RerankResult) + assert len(result.docs) == 1 + assert result.docs[0].index == 0 + assert result.docs[0].score >= 0.5 diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py b/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py new file mode 100644 index 0000000000..ca97a18951 --- /dev/null +++ b/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py @@ -0,0 +1,78 @@ +import os +from 
unittest.mock import Mock, patch + +import pytest + +from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.mixedbread.text_embedding.text_embedding import MixedBreadTextEmbeddingModel + + +def test_validate_credentials(): + model = MixedBreadTextEmbeddingModel() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials(model="mxbai-embed-large-v1", credentials={"api_key": "invalid_key"}) + with patch("requests.post") as mock_post: + mock_response = Mock() + mock_response.json.return_value = { + "usage": {"prompt_tokens": 3, "total_tokens": 3}, + "model": "mixedbread-ai/mxbai-embed-large-v1", + "data": [{"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"}], + "object": "list", + "normalized": "true", + "encoding_format": "float", + "dimensions": 1024, + } + mock_response.status_code = 200 + mock_post.return_value = mock_response + model.validate_credentials( + model="mxbai-embed-large-v1", credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")} + ) + + +def test_invoke_model(): + model = MixedBreadTextEmbeddingModel() + + with patch("requests.post") as mock_post: + mock_response = Mock() + mock_response.json.return_value = { + "usage": {"prompt_tokens": 6, "total_tokens": 6}, + "model": "mixedbread-ai/mxbai-embed-large-v1", + "data": [ + {"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"}, + {"embedding": [0.23333 for _ in range(1024)], "index": 1, "object": "embedding"}, + ], + "object": "list", + "normalized": "true", + "encoding_format": "float", + "dimensions": 1024, + } + mock_response.status_code = 200 + mock_post.return_value = mock_response + result = model.invoke( + model="mxbai-embed-large-v1", + credentials={ + "api_key": os.environ.get("MIXEDBREAD_API_KEY"), + }, + texts=["hello", "world"], + user="abc-123", + ) + + 
assert isinstance(result, TextEmbeddingResult) + assert len(result.embeddings) == 2 + assert result.usage.total_tokens == 6 + + +def test_get_num_tokens(): + model = MixedBreadTextEmbeddingModel() + + num_tokens = model.get_num_tokens( + model="mxbai-embed-large-v1", + credentials={ + "api_key": os.environ.get("MIXEDBREAD_API_KEY"), + }, + texts=["ping"], + ) + + assert num_tokens == 1 diff --git a/dev/pytest/pytest_model_runtime.sh b/dev/pytest/pytest_model_runtime.sh index 4c0083a2de..b60ff64fdc 100755 --- a/dev/pytest/pytest_model_runtime.sh +++ b/dev/pytest/pytest_model_runtime.sh @@ -8,4 +8,5 @@ pytest api/tests/integration_tests/model_runtime/anthropic \ api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py \ api/tests/integration_tests/model_runtime/upstage \ api/tests/integration_tests/model_runtime/fireworks \ - api/tests/integration_tests/model_runtime/nomic + api/tests/integration_tests/model_runtime/nomic \ + api/tests/integration_tests/model_runtime/mixedbread From aebe5fc68ceeb30c8a8958c644197cc8690c0429 Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Tue, 24 Sep 2024 13:06:21 +0800 Subject: [PATCH 21/64] fix: Remove unsupported parameters in qwen model (#8699) --- .../model_providers/tongyi/llm/farui-plus.yaml | 9 --------- .../tongyi/llm/qwen-coder-turbo-0919.yaml | 9 --------- .../tongyi/llm/qwen-coder-turbo-latest.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-coder-turbo.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-long.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-math-plus-0816.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-math-plus-0919.yaml | 9 --------- .../tongyi/llm/qwen-math-plus-latest.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-math-plus.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-math-turbo-0919.yaml | 9 --------- .../tongyi/llm/qwen-math-turbo-latest.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-math-turbo.yaml | 9 
--------- .../model_providers/tongyi/llm/qwen-max-0107.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-max-0403.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-max-0428.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-max-0919.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-max-1201.yaml | 6 ------ .../model_providers/tongyi/llm/qwen-max-latest.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-max-longcontext.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-plus-0206.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-plus-0624.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-plus-0723.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-plus-0806.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-plus-0919.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-plus-chat.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-plus-latest.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-turbo-0206.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-turbo-0624.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-turbo-0919.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-turbo-chat.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-turbo-latest.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-vl-max-0809.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-vl-max.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-vl-plus-0201.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-vl-plus-0809.yaml | 9 --------- .../model_providers/tongyi/llm/qwen-vl-plus.yaml | 9 --------- .../tongyi/llm/qwen2-math-1.5b-instruct.yaml | 9 --------- .../tongyi/llm/qwen2-math-72b-instruct.yaml | 9 --------- .../tongyi/llm/qwen2-math-7b-instruct.yaml | 9 --------- .../tongyi/llm/qwen2.5-0.5b-instruct.yaml | 9 --------- .../tongyi/llm/qwen2.5-1.5b-instruct.yaml | 9 --------- .../model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml | 9 --------- .../model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml | 9 
--------- .../model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml | 9 --------- .../model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml | 9 --------- .../model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml | 9 --------- .../tongyi/llm/qwen2.5-coder-7b-instruct.yaml | 9 --------- 47 files changed, 420 deletions(-) diff --git a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml index e5de586c1c..d0ff443827 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml index 6ab39cde2d..d9792e71ee 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml index be6d9a0e07..0b03505c45 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. 
Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml index d2aca4f514..2a6c040853 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml index a59a3350f6..bad7f4f472 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml @@ -68,15 +68,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml index cab7233c98..c14aee1e1e 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml index f82fba0c01..9d74eeca3e 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml index e2fb6e0e55..b8601a969a 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml index 8803e747e5..4a948be597 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml index 0dc5a066f0..bffe324a96 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml index 2ac0e4692a..0747e96614 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml index 9a7f1312e9..dffb5557ff 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 
1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml index c0eef37557..8ae159f1bf 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml index c12444bd7b..93fb37254e 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml index 173c55b6b9..a5c9d49609 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml index 692a38140d..e4a6dae637 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml index dc234783cd..6fae8a7d38 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml @@ -66,12 +66,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml index afd7fb4b77..8e20968859 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml index d02ba7af18..9bc50c73fc 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml index 1111298c37..430599300b 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml index ef8dd083ad..906995d2b9 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml index 87a4417df5..b33e725dd0 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml index 967f258fa9..bb394fad81 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml index 9d44852ac9..118e304a97 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml index df9448ae04..761312bc38 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml index 32ccb8d615..430872fb31 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml index bf976b518a..2628d824fe 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml index 060e7fb4c9..8097459bf0 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml index 97cd34929b..e43beeb195 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml index 8d77ba7a2a..c30cb7ca10 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml @@ -67,15 +67,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml index 4458c706aa..e443d6888b 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml index 12e9e0dd56..fd20377002 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml @@ -69,15 +69,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml index b811fdece4..31a9fb51bb 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml @@ -69,15 +69,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml index 188dea389a..5f90cf48bc 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml @@ -69,15 +69,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml index bc623e2f03..97820c0f3a 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml @@ -69,15 +69,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml index 8977e12e4f..6af36cd6f3 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml @@ -69,15 +69,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml index de237842af..158e2c7ee1 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml index 1fda35abaf..e26a6923d1 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml index 06fd33c5f4..589119b26e 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. 
Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml index ebf8099553..dd608fbf76 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. 
When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml index e9bc99339d..08237b3958 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml index 3ed85dade8..640b019703 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml index 328519c168..3a90ca7532 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 
1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml index d1ed3c2a73..b79755eb9b 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml index 0e88c24aa8..e9dd51a341 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml index 35313cd1f7..04f26cf5fe 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 
1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml index 35313cd1f7..04f26cf5fe 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml @@ -65,15 +65,6 @@ parameter_rules: help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: From 4638f99aaabdfef5ad751d725ef5ca26027bd7fe Mon Sep 17 00:00:00 2001 From: Benjamin Date: Tue, 24 Sep 2024 13:26:58 +0800 Subject: [PATCH 22/64] fix: change model provider name issue Ref #8691 (#8710) --- api/core/model_runtime/model_providers/jina/jina.yaml | 2 +- api/core/tools/provider/builtin/jina/jina.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/model_runtime/model_providers/jina/jina.yaml b/api/core/model_runtime/model_providers/jina/jina.yaml index 2f2d6e6daa..4ff6ba0f22 100644 --- a/api/core/model_runtime/model_providers/jina/jina.yaml +++ b/api/core/model_runtime/model_providers/jina/jina.yaml @@ -1,4 +1,4 @@ -provider: Jina AI +provider: jina label: en_US: Jina AI description: diff --git a/api/core/tools/provider/builtin/jina/jina.yaml b/api/core/tools/provider/builtin/jina/jina.yaml index 9ce5cbd6d1..346175c41f 100644 --- a/api/core/tools/provider/builtin/jina/jina.yaml +++ b/api/core/tools/provider/builtin/jina/jina.yaml @@ -1,6 +1,6 @@ identity: author: Dify - name: Jina AI + name: jina label: en_US: Jina AI zh_Hans: Jina AI From 64baedb48429bf7332feab61d7a83cf1b7c91cda Mon Sep 17 00:00:00 2001 From: ice yao Date: Tue, 24 Sep 2024 14:04:07 +0800 Subject: [PATCH 23/64] fix: update nomic model provider token calculation (#8705) --- api/core/model_runtime/model_providers/_position.yaml | 1 + .../nomic/text_embedding/text_embedding.py | 10 +--------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml index 79ebd00764..80db22ea84 100644 --- a/api/core/model_runtime/model_providers/_position.yaml +++ b/api/core/model_runtime/model_providers/_position.yaml @@ -39,3 +39,4 @@ - zhinao - fireworks - mixedbread +- nomic diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py 
b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py index 6cccff6d46..ccbfd196a9 100644 --- a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py @@ -77,15 +77,7 @@ class NomicTextEmbeddingModel(_CommonNomic, TextEmbeddingModel): :param texts: texts to embed :return: """ - if len(texts) == 0: - return 0 - - _, prompt_tokens, _ = self.embed_text( - model=model, - credentials=credentials, - texts=texts, - ) - return prompt_tokens + return sum(self._get_num_tokens_by_gpt2(text) for text in texts) def validate_credentials(self, model: str, credentials: dict) -> None: """ From f42ef0624dbb8a1286a4fd00b15eaf90ceb70921 Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Tue, 24 Sep 2024 17:23:11 +0800 Subject: [PATCH 24/64] fix: embedded chat on ios (#8718) --- web/public/embed.js | 56 ++++++++++++++++++++-------------------- web/public/embed.min.js | 57 +++++++++++++++++++---------------------- 2 files changed, 54 insertions(+), 59 deletions(-) diff --git a/web/public/embed.js b/web/public/embed.js index 8ed7a67dc8..3c2735b6fc 100644 --- a/web/public/embed.js +++ b/web/public/embed.js @@ -69,38 +69,47 @@ iframe.id = iframeId; iframe.src = iframeUrl; iframe.style.cssText = ` - border: none; position: fixed; flex-direction: column; justify-content: space-between; + border: none; position: absolute; flex-direction: column; justify-content: space-between; box-shadow: rgba(150, 150, 150, 0.2) 0px 10px 30px 0px, rgba(150, 150, 150, 0.2) 0px 0px 0px 1px; - bottom: 5rem; right: 1rem; width: 24rem; max-width: calc(100vw - 2rem); height: 40rem; + bottom: 55px; right: 0; width: 24rem; max-width: calc(100vw - 2rem); height: 40rem; max-height: calc(100vh - 6rem); border-radius: 0.75rem; display: flex; z-index: 2147483647; overflow: hidden; left: unset; background-color: #F3F4F6;user-select: none; `; - document.body.appendChild(iframe); + return iframe; } 
// Function to reset the iframe position function resetIframePosition() { + if (window.innerWidth <= 640) + return + const targetIframe = document.getElementById(iframeId); const targetButton = document.getElementById(buttonId); if (targetIframe && targetButton) { const buttonRect = targetButton.getBoundingClientRect(); - const buttonBottom = window.innerHeight - buttonRect.bottom; - const buttonRight = window.innerWidth - buttonRect.right; - const buttonLeft = buttonRect.left; - // Adjust iframe position to stay within viewport - targetIframe.style.bottom = `${ - buttonBottom + buttonRect.height + 5 + targetIframe.clientHeight > window.innerHeight - ? buttonBottom - targetIframe.clientHeight - 5 - : buttonBottom + buttonRect.height + 5 - }px`; + const buttonInBottom = buttonRect.top - 5 > targetIframe.clientHeight - targetIframe.style.right = `${ - buttonRight + targetIframe.clientWidth > window.innerWidth - ? window.innerWidth - buttonLeft - targetIframe.clientWidth - : buttonRight - }px`; + if (buttonInBottom) { + targetIframe.style.bottom = `${buttonRect.height + 5}px`; + targetIframe.style.top = 'unset'; + } + else { + targetIframe.style.bottom = 'unset'; + targetIframe.style.top = `${buttonRect.height + 5}px`; + } + + const buttonInRight = buttonRect.right > targetIframe.clientWidth; + + if (buttonInRight) { + targetIframe.style.right = '0'; + targetIframe.style.left = 'unset'; + } + else { + targetIframe.style.right = 'unset'; + targetIframe.style.left = 0; + } } } @@ -146,12 +155,6 @@ box-shadow: var(--${containerDiv.id}-box-shadow, rgba(0, 0, 0, 0.2) 0px 4px 8px 0px); cursor: pointer; z-index: 2147483647; - transition: all 0.2s ease-in-out 0s; - } - `); - styleSheet.sheet.insertRule(` - #${containerDiv.id}:hover { - transform: var(--${containerDiv.id}-hover-transform, scale(1.1)); } `); @@ -167,7 +170,7 @@ containerDiv.addEventListener("click", function () { const targetIframe = document.getElementById(iframeId); if (!targetIframe) { - createIframe(); + 
containerDiv.appendChild(createIframe()); resetIframePosition(); this.title = "Exit (ESC)"; displayDiv.innerHTML = svgIcons.close; @@ -255,9 +258,6 @@ if (!document.getElementById(buttonId)) { createButton(); } - - createIframe(); - document.getElementById(iframeId).style.display = 'none'; } // Add esc Exit keyboard event triggered @@ -279,4 +279,4 @@ } else { document.body.onload = embedChatbot; } -})(); +})(); \ No newline at end of file diff --git a/web/public/embed.min.js b/web/public/embed.min.js index 0e023cb5d1..eb20858148 100644 --- a/web/public/embed.min.js +++ b/web/public/embed.min.js @@ -1,31 +1,26 @@ -(()=>{let t="difyChatbotConfig",a="dify-chatbot-bubble-button",c="dify-chatbot-bubble-window",h=window[t],p={open:` - - `,close:` - - `};async function e(){if(h&&h.token){var e=new URLSearchParams(await(async()=>{var e=h?.inputs||{};let n={};return await Promise.all(Object.entries(e).map(async([e,t])=>{n[e]=(e=t,e=(new TextEncoder).encode(e),e=new Response(new Blob([e]).stream().pipeThrough(new CompressionStream("gzip"))).arrayBuffer(),e=new Uint8Array(await e),await btoa(String.fromCharCode(...e)))})),n})());let t=`${h.baseUrl||`https://${h.isDev?"dev.":""}udify.app`}/chatbot/${h.token}?`+e;function o(){var e=document.createElement("iframe");e.allow="fullscreen;microphone",e.title="dify chatbot bubble window",e.id=c,e.src=t,e.style.cssText=` - border: none; position: fixed; flex-direction: column; justify-content: space-between; - box-shadow: rgba(150, 150, 150, 0.2) 0px 10px 30px 0px, rgba(150, 150, 150, 0.2) 0px 0px 0px 1px; - bottom: 5rem; right: 1rem; width: 24rem; max-width: calc(100vw - 2rem); height: 40rem; - max-height: calc(100vh - 6rem); border-radius: 0.75rem; display: flex; z-index: 2147483647; - overflow: hidden; left: unset; background-color: #F3F4F6;user-select: none; - `,document.body.appendChild(e)}function i(){var 
e,t,n,o=document.getElementById(c),i=document.getElementById(a);o&&i&&(i=i.getBoundingClientRect(),e=window.innerHeight-i.bottom,t=window.innerWidth-i.right,n=i.left,o.style.bottom=`${e+i.height+5+o.clientHeight>window.innerHeight?e-o.clientHeight-5:e+i.height+5}px`,o.style.right=`${t+o.clientWidth>window.innerWidth?window.innerWidth-n-o.clientWidth:t}px`)}function n(){let n=document.createElement("div");Object.entries(h.containerProps||{}).forEach(([e,t])=>{"className"===e?n.classList.add(...t.split(" ")):"style"===e?"object"==typeof t?Object.assign(n.style,t):n.style.cssText=t:"function"==typeof t?n.addEventListener(e.replace(/^on/,"").toLowerCase(),t):n[e]=t}),n.id=a;var e=document.createElement("style");document.head.appendChild(e),e.sheet.insertRule(` - #${n.id} { - position: fixed; - bottom: var(--${n.id}-bottom, 1rem); - right: var(--${n.id}-right, 1rem); - left: var(--${n.id}-left, unset); - top: var(--${n.id}-top, unset); - width: var(--${n.id}-width, 50px); - height: var(--${n.id}-height, 50px); - border-radius: var(--${n.id}-border-radius, 25px); - background-color: var(--${n.id}-bg-color, #155EEF); - box-shadow: var(--${n.id}-box-shadow, rgba(0, 0, 0, 0.2) 0px 4px 8px 0px); - cursor: pointer; - z-index: 2147483647; - transition: all 0.2s ease-in-out 0s; - } - `),e.sheet.insertRule(` - #${n.id}:hover { - transform: var(--${n.id}-hover-transform, scale(1.1)); - } - `);let t=document.createElement("div");if(t.style.cssText="display: flex; align-items: center; justify-content: center; width: 100%; height: 100%; z-index: 2147483647;",t.innerHTML=p.open,n.appendChild(t),document.body.appendChild(n),n.addEventListener("click",function(){var e=document.getElementById(c);e?(e.style.display="none"===e.style.display?"block":"none",t.innerHTML="none"===e.style.display?p.open:p.close,"none"===e.style.display?document.removeEventListener("keydown",d):document.addEventListener("keydown",d),i()):(o(),i(),this.title="Exit 
(ESC)",t.innerHTML=p.close,document.addEventListener("keydown",d))}),h.draggable){var s=n;var l=h.dragAxis||"both";let i=!1,d,r;s.addEventListener("mousedown",function(e){i=!0,d=e.clientX-s.offsetLeft,r=e.clientY-s.offsetTop}),document.addEventListener("mousemove",function(e){var t,n,o;i&&(s.style.transition="none",s.style.cursor="grabbing",(t=document.getElementById(c))&&(t.style.display="none",s.querySelector("div").innerHTML=p.open),t=e.clientX-d,e=window.innerHeight-e.clientY-r,o=s.getBoundingClientRect(),n=window.innerWidth-o.width,o=window.innerHeight-o.height,"x"!==l&&"both"!==l||s.style.setProperty(`--${a}-left`,Math.max(0,Math.min(t,n))+"px"),"y"!==l&&"both"!==l||s.style.setProperty(`--${a}-bottom`,Math.max(0,Math.min(e,o))+"px"))}),document.addEventListener("mouseup",function(){i=!1,s.style.transition="",s.style.cursor="pointer"})}}2048 + + `,close:` + + `};async function e(){if(p&&p.token){var e=new URLSearchParams(await async function(){var e=p?.inputs||{};const n={};return await Promise.all(Object.entries(e).map(async([e,t])=>{n[e]=(e=t,e=(new TextEncoder).encode(e),e=new Response(new Blob([e]).stream().pipeThrough(new CompressionStream("gzip"))).arrayBuffer(),e=new Uint8Array(await e),await btoa(String.fromCharCode(...e)))})),n}());const i=`${p.baseUrl||`https://${p.isDev?"dev.":""}udify.app`}/chatbot/${p.token}?`+e;function o(){var e,t;window.innerWidth<=640||(e=document.getElementById(c),t=document.getElementById(a),e&&t&&((t=t.getBoundingClientRect()).top-5>e.clientHeight?(e.style.bottom=t.height+5+"px",e.style.top="unset"):(e.style.bottom="unset",e.style.top=t.height+5+"px"),t.right>e.clientWidth?(e.style.right="0",e.style.left="unset"):(e.style.right="unset",e.style.left=0)))}function t(){const n=document.createElement("div");Object.entries(p.containerProps||{}).forEach(([e,t])=>{"className"===e?n.classList.add(...t.split(" ")):"style"===e?"object"==typeof t?Object.assign(n.style,t):n.style.cssText=t:"function"==typeof 
t?n.addEventListener(e.replace(/^on/,"").toLowerCase(),t):n[e]=t}),n.id=a;var e=document.createElement("style");document.head.appendChild(e),e.sheet.insertRule(` + #${n.id} { + position: fixed; + bottom: var(--${n.id}-bottom, 1rem); + right: var(--${n.id}-right, 1rem); + left: var(--${n.id}-left, unset); + top: var(--${n.id}-top, unset); + width: var(--${n.id}-width, 50px); + height: var(--${n.id}-height, 50px); + border-radius: var(--${n.id}-border-radius, 25px); + background-color: var(--${n.id}-bg-color, #155EEF); + box-shadow: var(--${n.id}-box-shadow, rgba(0, 0, 0, 0.2) 0px 4px 8px 0px); + cursor: pointer; + z-index: 2147483647; + } + `);const t=document.createElement("div");if(t.style.cssText="display: flex; align-items: center; justify-content: center; width: 100%; height: 100%; z-index: 2147483647;",t.innerHTML=h.open,n.appendChild(t),document.body.appendChild(n),n.addEventListener("click",function(){var e=document.getElementById(c);e?(e.style.display="none"===e.style.display?"block":"none",t.innerHTML="none"===e.style.display?h.open:h.close,"none"===e.style.display?document.removeEventListener("keydown",d):document.addEventListener("keydown",d),o()):(n.appendChild(((e=document.createElement("iframe")).allow="fullscreen;microphone",e.title="dify chatbot bubble window",e.id=c,e.src=i,e.style.cssText=` + border: none; position: absolute; flex-direction: column; justify-content: space-between; + box-shadow: rgba(150, 150, 150, 0.2) 0px 10px 30px 0px, rgba(150, 150, 150, 0.2) 0px 0px 0px 1px; + bottom: 55px; right: 0; width: 24rem; max-width: calc(100vw - 2rem); height: 40rem; + max-height: calc(100vh - 6rem); border-radius: 0.75rem; display: flex; z-index: 2147483647; + overflow: hidden; left: unset; background-color: #F3F4F6;user-select: none; + `,e)),o(),this.title="Exit (ESC)",t.innerHTML=h.close,document.addEventListener("keydown",d))}),p.draggable){var s=n;var l=p.dragAxis||"both";let 
i=!1,d,r;s.addEventListener("mousedown",function(e){i=!0,d=e.clientX-s.offsetLeft,r=e.clientY-s.offsetTop}),document.addEventListener("mousemove",function(e){var t,n,o;i&&(s.style.transition="none",s.style.cursor="grabbing",(t=document.getElementById(c))&&(t.style.display="none",s.querySelector("div").innerHTML=h.open),t=e.clientX-d,e=window.innerHeight-e.clientY-r,o=s.getBoundingClientRect(),n=window.innerWidth-o.width,o=window.innerHeight-o.height,"x"!==l&&"both"!==l||s.style.setProperty(`--${a}-left`,Math.max(0,Math.min(t,n))+"px"),"y"!==l&&"both"!==l||s.style.setProperty(`--${a}-bottom`,Math.max(0,Math.min(e,o))+"px"))}),document.addEventListener("mouseup",function(){i=!1,s.style.transition="",s.style.cursor="pointer"})}}2048 Date: Tue, 24 Sep 2024 17:33:29 +0800 Subject: [PATCH 25/64] chore: remove windows platform timezone set (#8712) --- api/app.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/api/app.py b/api/app.py index 91a49337fc..1b58beee15 100644 --- a/api/app.py +++ b/api/app.py @@ -53,11 +53,9 @@ from services.account_service import AccountService warnings.simplefilter("ignore", ResourceWarning) -# fix windows platform -if os.name == "nt": - os.system('tzutil /s "UTC"') -else: - os.environ["TZ"] = "UTC" +os.environ["TZ"] = "UTC" +# windows platform not support tzset +if hasattr(time, "tzset"): time.tzset() From 1c7877b048d2131c8c133c26c60c8ccd342f0b0c Mon Sep 17 00:00:00 2001 From: Shota Totsuka <153569547+totsukash@users.noreply.github.com> Date: Tue, 24 Sep 2024 21:53:26 +0900 Subject: [PATCH 26/64] fix: remove harm category setting from vertex ai (#8721) --- .../model_providers/vertex_ai/llm/llm.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py index da69b7cdf3..1dd785d545 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py +++ 
b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py @@ -2,6 +2,7 @@ import base64 import io import json import logging +import time from collections.abc import Generator from typing import Optional, Union, cast @@ -20,7 +21,6 @@ from google.api_core import exceptions from google.cloud import aiplatform from google.oauth2 import service_account from PIL import Image -from vertexai.generative_models import HarmBlockThreshold, HarmCategory from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage from core.model_runtime.entities.message_entities import ( @@ -34,6 +34,7 @@ from core.model_runtime.entities.message_entities import ( ToolPromptMessage, UserPromptMessage, ) +from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.errors.invoke import ( InvokeAuthorizationError, InvokeBadRequestError, @@ -503,20 +504,12 @@ class VertexAiLargeLanguageModel(LargeLanguageModel): else: history.append(content) - safety_settings = { - HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE, - HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE, - HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE, - HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE, - } - google_model = glm.GenerativeModel(model_name=model, system_instruction=system_instruction) response = google_model.generate_content( contents=history, generation_config=glm.GenerationConfig(**config_kwargs), stream=stream, - safety_settings=safety_settings, tools=self._convert_tools_to_glm_tool(tools) if tools else None, ) From debe5953a814f075aec5f878bd338829cd63ca9e Mon Sep 17 00:00:00 2001 From: Sa Zhang <55871322+Nick17t@users.noreply.github.com> Date: Tue, 24 Sep 2024 21:19:49 +0800 Subject: [PATCH 27/64] Fix/update jina ai products labels and descriptions (#8730) Co-authored-by: sa zhang --- .../builtin/jina/tools/jina_reader.yaml | 42 
+++++++++---------- .../builtin/jina/tools/jina_search.yaml | 31 +++++++------- .../builtin/jina/tools/jina_tokenizer.yaml | 16 +++++-- 3 files changed, 50 insertions(+), 39 deletions(-) diff --git a/api/core/tools/provider/builtin/jina/tools/jina_reader.yaml b/api/core/tools/provider/builtin/jina/tools/jina_reader.yaml index 58ad6d8694..589bc3433d 100644 --- a/api/core/tools/provider/builtin/jina/tools/jina_reader.yaml +++ b/api/core/tools/provider/builtin/jina/tools/jina_reader.yaml @@ -2,14 +2,14 @@ identity: name: jina_reader author: Dify label: - en_US: JinaReader - zh_Hans: JinaReader - pt_BR: JinaReader + en_US: Fetch Single Page + zh_Hans: 获取单页面 + pt_BR: Fetch Single Page description: human: - en_US: Convert any URL to an LLM-friendly input. Experience improved output for your agent and RAG systems at no cost. - zh_Hans: 将任何 URL 转换为 LLM 友好的输入。无需付费即可体验为您的 Agent 和 RAG 系统提供的改进输出。 - pt_BR: Converta qualquer URL em uma entrada amigável ao LLM. Experimente uma saída aprimorada para seus sistemas de agente e RAG sem custo. + en_US: Fetch the target URL (can be a PDF) and convert it into a LLM-friendly markdown. + zh_Hans: 获取目标网址(可以是 PDF),并将其转换为适合大模型处理的 Markdown 格式。 + pt_BR: Busque a URL de destino (que pode ser um PDF) e converta em um Markdown LLM-friendly. llm: A tool for scraping webpages. Input should be a URL. 
parameters: - name: url @@ -17,13 +17,13 @@ parameters: required: true label: en_US: URL - zh_Hans: 网页链接 + zh_Hans: 网址 pt_BR: URL human_description: - en_US: used for linking to webpages - zh_Hans: 用于链接到网页 - pt_BR: used for linking to webpages - llm_description: url for scraping + en_US: Web link + zh_Hans: 网页链接 + pt_BR: URL da web + llm_description: url para scraping form: llm - name: request_params type: string @@ -31,14 +31,14 @@ parameters: label: en_US: Request params zh_Hans: 请求参数 - pt_BR: Request params + pt_BR: Parâmetros de solicitação human_description: en_US: | request parameters, format: {"key1": "value1", "key2": "value2"} zh_Hans: | 请求参数,格式:{"key1": "value1", "key2": "value2"} pt_BR: | - request parameters, format: {"key1": "value1", "key2": "value2"} + parâmetros de solicitação, formato: {"key1": "value1", "key2": "value2"} llm_description: request parameters form: llm - name: target_selector @@ -51,7 +51,7 @@ parameters: human_description: en_US: css selector for scraping specific elements zh_Hans: css 选择器用于抓取特定元素 - pt_BR: css selector for scraping specific elements + pt_BR: css selector para scraping de elementos específicos llm_description: css selector of the target element to scrape form: form - name: wait_for_selector @@ -64,7 +64,7 @@ parameters: human_description: en_US: css selector for waiting for specific elements zh_Hans: css 选择器用于等待特定元素 - pt_BR: css selector for waiting for specific elements + pt_BR: css selector para aguardar elementos específicos llm_description: css selector of the target element to wait for form: form - name: image_caption @@ -77,8 +77,8 @@ parameters: pt_BR: Legenda da imagem human_description: en_US: "Captions all images at the specified URL, adding 'Image [idx]: [caption]' as an alt tag for those without one. This allows downstream LLMs to interact with the images in activities such as reasoning and summarizing." 
- zh_Hans: "为指定 URL 上的所有图像添加标题,为没有标题的图像添加“Image [idx]: [caption]”作为 alt 标签。这允许下游 LLM 在推理和总结等活动中与图像进行交互。" - pt_BR: "Captions all images at the specified URL, adding 'Image [idx]: [caption]' as an alt tag for those without one. This allows downstream LLMs to interact with the images in activities such as reasoning and summarizing." + zh_Hans: "为指定 URL 上的所有图像添加标题,为没有标题的图像添加“Image [idx]: [caption]”作为 alt 标签,以支持下游模型的图像交互。" + pt_BR: "Adiciona legendas a todas as imagens na URL especificada, adicionando 'Imagem [idx]: [legenda]' como uma tag alt para aquelas que não têm uma. Isso permite que os modelos LLM inferiores interajam com as imagens em atividades como raciocínio e resumo." llm_description: Captions all images at the specified URL form: form - name: gather_all_links_at_the_end @@ -91,8 +91,8 @@ parameters: pt_BR: Coletar todos os links ao final human_description: en_US: A "Buttons & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions. - zh_Hans: 最后会创建一个“按钮和链接”部分。这可以帮助下游 LLM 或 Web 代理浏览页面或采取进一步的行动。 - pt_BR: A "Buttons & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions. + zh_Hans: 末尾将添加“按钮和链接”部分,方便下游模型或网络代理做页面导航或执行进一步操作。 + pt_BR: Um "Botões & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions. llm_description: Gather all links at the end form: form - name: gather_all_images_at_the_end @@ -105,8 +105,8 @@ parameters: pt_BR: Coletar todas as imagens ao final human_description: en_US: An "Images" section will be created at the end. This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning. - zh_Hans: 最后会创建一个“图像”部分。这可以让下游的 LLM 概览页面上的所有视觉效果,从而提高推理能力。 - pt_BR: An "Images" section will be created at the end. 
This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning. + zh_Hans: 末尾会新增“图片”部分,方便下游模型全面了解页面的视觉内容,提升推理效果。 + pt_BR: Um "Imagens" section will be created at the end. This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning. llm_description: Gather all images at the end form: form - name: proxy_server diff --git a/api/core/tools/provider/builtin/jina/tools/jina_search.yaml b/api/core/tools/provider/builtin/jina/tools/jina_search.yaml index 2bc70e1be1..e58c639e56 100644 --- a/api/core/tools/provider/builtin/jina/tools/jina_search.yaml +++ b/api/core/tools/provider/builtin/jina/tools/jina_search.yaml @@ -2,13 +2,14 @@ identity: name: jina_search author: Dify label: - en_US: JinaSearch - zh_Hans: JinaSearch - pt_BR: JinaSearch + en_US: Search the web + zh_Hans: 联网搜索 + pt_BR: Search the web description: human: - en_US: Search on the web and get the top 5 results. Useful for grounding using information from the web. - zh_Hans: 在网络上搜索返回前 5 个结果。 + en_US: Search on the public web of a given query and return the top results as LLM-friendly markdown. + zh_Hans: 针对给定的查询在互联网上进行搜索,并以适合大模型处理的 Markdown 格式返回最相关的结果。 + pt_BR: Procurar na web pública de uma consulta fornecida e retornar os melhores resultados como markdown para LLMs. llm: A tool for searching results on the web for grounding. Input should be a simple question. 
parameters: - name: query @@ -16,11 +17,13 @@ parameters: required: true label: en_US: Question (Query) - zh_Hans: 信息查询 + zh_Hans: 查询 + pt_BR: Pergunta (Consulta) human_description: en_US: used to find information on the web zh_Hans: 在网络上搜索信息 - llm_description: simple question to ask on the web + pt_BR: Usado para encontrar informações na web + llm_description: Pergunta simples para fazer na web form: llm - name: image_caption type: boolean @@ -32,7 +35,7 @@ parameters: pt_BR: Legenda da imagem human_description: en_US: "Captions all images at the specified URL, adding 'Image [idx]: [caption]' as an alt tag for those without one. This allows downstream LLMs to interact with the images in activities such as reasoning and summarizing." - zh_Hans: "为指定 URL 上的所有图像添加标题,为没有标题的图像添加“Image [idx]: [caption]”作为 alt 标签。这允许下游 LLM 在推理和总结等活动中与图像进行交互。" + zh_Hans: "为指定 URL 上的所有图像添加标题,为没有标题的图像添加“Image [idx]: [caption]”作为 alt 标签,以支持下游模型的图像交互。" pt_BR: "Captions all images at the specified URL, adding 'Image [idx]: [caption]' as an alt tag for those without one. This allows downstream LLMs to interact with the images in activities such as reasoning and summarizing." llm_description: Captions all images at the specified URL form: form @@ -46,8 +49,8 @@ parameters: pt_BR: Coletar todos os links ao final human_description: en_US: A "Buttons & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions. - zh_Hans: 最后会创建一个“按钮和链接”部分。这可以帮助下游 LLM 或 Web 代理浏览页面或采取进一步的行动。 - pt_BR: A "Buttons & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions. + zh_Hans: 末尾将添加“按钮和链接”部分,汇总页面上的所有链接。方便下游模型或网络代理做页面导航或执行进一步操作。 + pt_BR: Um "Botão & Links" seção será criada no final. Isso ajuda os LLMs ou agentes da web navegando pela página ou executar ações adicionais. 
llm_description: Gather all links at the end form: form - name: gather_all_images_at_the_end @@ -60,8 +63,8 @@ parameters: pt_BR: Coletar todas as imagens ao final human_description: en_US: An "Images" section will be created at the end. This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning. - zh_Hans: 最后会创建一个“图像”部分。这可以让下游的 LLM 概览页面上的所有视觉效果,从而提高推理能力。 - pt_BR: An "Images" section will be created at the end. This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning. + zh_Hans: 末尾会新增“图片”部分,汇总页面上的所有图片。方便下游模型概览页面的视觉内容,提升推理效果。 + pt_BR: Um "Imagens" seção será criada no final. Isso fornece uma visão geral de todas as imagens na página para os LLMs, que pode melhorar a razão. llm_description: Gather all images at the end form: form - name: proxy_server @@ -74,7 +77,7 @@ parameters: human_description: en_US: Use proxy to access URLs zh_Hans: 利用代理访问 URL - pt_BR: Use proxy to access URLs + pt_BR: Usar proxy para acessar URLs llm_description: Use proxy to access URLs form: form - name: no_cache @@ -83,7 +86,7 @@ parameters: default: false label: en_US: Bypass the Cache - zh_Hans: 绕过缓存 + zh_Hans: 是否绕过缓存 pt_BR: Ignorar o cache human_description: en_US: Bypass the Cache diff --git a/api/core/tools/provider/builtin/jina/tools/jina_tokenizer.yaml b/api/core/tools/provider/builtin/jina/tools/jina_tokenizer.yaml index 62a5c7e7ba..74885cdf9a 100644 --- a/api/core/tools/provider/builtin/jina/tools/jina_tokenizer.yaml +++ b/api/core/tools/provider/builtin/jina/tools/jina_tokenizer.yaml @@ -2,11 +2,14 @@ identity: name: jina_tokenizer author: hjlarry label: - en_US: JinaTokenizer + en_US: Segment + zh_Hans: 切分器 + pt_BR: Segment description: human: - en_US: Free API to tokenize text and segment long text into chunks. - zh_Hans: 免费的API可以将文本tokenize,也可以将长文本分割成多个部分。 + en_US: Split long text into chunks and do tokenization. 
+ zh_Hans: 将长文本拆分成小段落,并做分词处理。 + pt_BR: Dividir o texto longo em pedaços e fazer tokenização. llm: Free API to tokenize text and segment long text into chunks. parameters: - name: content @@ -15,6 +18,7 @@ parameters: label: en_US: Content zh_Hans: 内容 + pt_BR: Conteúdo llm_description: the content which need to tokenize or segment form: llm - name: return_tokens @@ -23,18 +27,22 @@ parameters: label: en_US: Return the tokens zh_Hans: 是否返回tokens + pt_BR: Retornar os tokens human_description: en_US: Return the tokens and their corresponding ids in the response. zh_Hans: 返回tokens及其对应的ids。 + pt_BR: Retornar os tokens e seus respectivos ids na resposta. form: form - name: return_chunks type: boolean label: en_US: Return the chunks zh_Hans: 是否分块 + pt_BR: Retornar os chunks human_description: en_US: Chunking the input into semantically meaningful segments while handling a wide variety of text types and edge cases based on common structural cues. - zh_Hans: 将输入分块为具有语义意义的片段,同时根据常见的结构线索处理各种文本类型和边缘情况。 + zh_Hans: 将输入文本分块为语义有意义的片段,同时基于常见的结构线索处理各种文本类型和特殊情况。 + pt_BR: Dividir o texto de entrada em segmentos semanticamente significativos, enquanto lida com uma ampla variedade de tipos de texto e casos de borda com base em pistas estruturais comuns. 
form: form - name: tokenizer type: select From 4669eb24befe840789de1dc9ae32f631a31a765d Mon Sep 17 00:00:00 2001 From: Jyong <76649700+JohnJyong@users.noreply.github.com> Date: Tue, 24 Sep 2024 21:53:50 +0800 Subject: [PATCH 28/64] add embedding input type parameter (#8724) --- api/core/embedding/cached_embedding.py | 9 ++++++-- api/core/embedding/embedding_constant.py | 10 ++++++++ api/core/model_manager.py | 7 +++++- .../__base/text_embedding_model.py | 23 +++++++++++++++---- .../text_embedding/text_embedding.py | 8 ++++++- .../baichuan/text_embedding/text_embedding.py | 8 ++++++- .../bedrock/text_embedding/text_embedding.py | 8 ++++++- .../cohere/text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../hunyuan/text_embedding/text_embedding.py | 8 ++++++- .../jina/text_embedding/text_embedding.py | 8 ++++++- .../localai/text_embedding/text_embedding.py | 8 ++++++- .../minimax/text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../nomic/text_embedding/text_embedding.py | 2 ++ .../nvidia/text_embedding/text_embedding.py | 8 ++++++- .../oci/text_embedding/text_embedding.py | 9 +++++++- .../ollama/text_embedding/text_embedding.py | 8 ++++++- .../openai/text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../openllm/text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 9 +++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../tongyi/text_embedding/text_embedding.py | 2 ++ .../upstage/text_embedding/text_embedding.py | 10 +++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 8 ++++++- .../wenxin/text_embedding/text_embedding.py | 8 ++++++- .../text_embedding/text_embedding.py | 9 +++++++- 
.../zhipuai/text_embedding/text_embedding.py | 8 ++++++- 33 files changed, 239 insertions(+), 35 deletions(-) create mode 100644 api/core/embedding/embedding_constant.py diff --git a/api/core/embedding/cached_embedding.py b/api/core/embedding/cached_embedding.py index 8ce12fd59f..75219051cd 100644 --- a/api/core/embedding/cached_embedding.py +++ b/api/core/embedding/cached_embedding.py @@ -5,6 +5,7 @@ from typing import Optional, cast import numpy as np from sqlalchemy.exc import IntegrityError +from core.embedding.embedding_constant import EmbeddingInputType from core.model_manager import ModelInstance from core.model_runtime.entities.model_entities import ModelPropertyKey from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel @@ -56,7 +57,9 @@ class CacheEmbedding(Embeddings): for i in range(0, len(embedding_queue_texts), max_chunks): batch_texts = embedding_queue_texts[i : i + max_chunks] - embedding_result = self._model_instance.invoke_text_embedding(texts=batch_texts, user=self._user) + embedding_result = self._model_instance.invoke_text_embedding( + texts=batch_texts, user=self._user, input_type=EmbeddingInputType.DOCUMENT + ) for vector in embedding_result.embeddings: try: @@ -100,7 +103,9 @@ class CacheEmbedding(Embeddings): redis_client.expire(embedding_cache_key, 600) return list(np.frombuffer(base64.b64decode(embedding), dtype="float")) try: - embedding_result = self._model_instance.invoke_text_embedding(texts=[text], user=self._user) + embedding_result = self._model_instance.invoke_text_embedding( + texts=[text], user=self._user, input_type=EmbeddingInputType.QUERY + ) embedding_results = embedding_result.embeddings[0] embedding_results = (embedding_results / np.linalg.norm(embedding_results)).tolist() diff --git a/api/core/embedding/embedding_constant.py b/api/core/embedding/embedding_constant.py new file mode 100644 index 0000000000..9b4934646b --- /dev/null +++ b/api/core/embedding/embedding_constant.py @@ -0,0 
+1,10 @@ +from enum import Enum + + +class EmbeddingInputType(Enum): + """ + Enum for embedding input type. + """ + + DOCUMENT = "document" + QUERY = "query" diff --git a/api/core/model_manager.py b/api/core/model_manager.py index 990efd36c6..74b4452362 100644 --- a/api/core/model_manager.py +++ b/api/core/model_manager.py @@ -3,6 +3,7 @@ import os from collections.abc import Callable, Generator, Sequence from typing import IO, Optional, Union, cast +from core.embedding.embedding_constant import EmbeddingInputType from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle from core.entities.provider_entities import ModelLoadBalancingConfiguration from core.errors.error import ProviderTokenNotInitError @@ -158,12 +159,15 @@ class ModelInstance: tools=tools, ) - def invoke_text_embedding(self, texts: list[str], user: Optional[str] = None) -> TextEmbeddingResult: + def invoke_text_embedding( + self, texts: list[str], user: Optional[str] = None, input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT + ) -> TextEmbeddingResult: """ Invoke large language model :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ if not isinstance(self.model_type_instance, TextEmbeddingModel): @@ -176,6 +180,7 @@ class ModelInstance: credentials=self.credentials, texts=texts, user=user, + input_type=input_type, ) def get_text_embedding_num_tokens(self, texts: list[str]) -> int: diff --git a/api/core/model_runtime/model_providers/__base/text_embedding_model.py b/api/core/model_runtime/model_providers/__base/text_embedding_model.py index 54a4486023..a948dca20d 100644 --- a/api/core/model_runtime/model_providers/__base/text_embedding_model.py +++ b/api/core/model_runtime/model_providers/__base/text_embedding_model.py @@ -4,6 +4,7 @@ from typing import Optional from pydantic import ConfigDict +from core.embedding.embedding_constant import EmbeddingInputType from 
core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult from core.model_runtime.model_providers.__base.ai_model import AIModel @@ -20,35 +21,47 @@ class TextEmbeddingModel(AIModel): model_config = ConfigDict(protected_namespaces=()) def invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ - Invoke large language model + Invoke text embedding model :param model: model name :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ self.started_at = time.perf_counter() try: - return self._invoke(model, credentials, texts, user) + return self._invoke(model, credentials, texts, user, input_type) except Exception as e: raise self._transform_invoke_error(e) @abstractmethod def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ - Invoke large language model + Invoke text embedding model :param model: model name :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ raise NotImplementedError diff --git a/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py index d9cff8ecbb..6b270b65ff 100644 --- a/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py +++ 
b/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py @@ -7,6 +7,7 @@ import numpy as np import tiktoken from openai import AzureOpenAI +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import AIModelEntity, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -17,7 +18,12 @@ from core.model_runtime.model_providers.azure_openai._constant import EMBEDDING_ class AzureOpenAITextEmbeddingModel(_CommonAzureOpenAI, TextEmbeddingModel): def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: base_model_name = credentials["base_model_name"] credentials_kwargs = self._to_credential_kwargs(credentials) diff --git a/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py index 779dfbb608..210c274bdf 100644 --- a/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py @@ -4,6 +4,7 @@ from typing import Optional from requests import post +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( @@ -35,7 +36,12 @@ class BaichuanTextEmbeddingModel(TextEmbeddingModel): api_base: str = "http://api.baichuan-ai.com/v1/embeddings" def _invoke( - self, model: str, credentials: dict, texts: list[str], 
user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py index 251170d1ae..8c4c50b269 100644 --- a/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py @@ -13,6 +13,7 @@ from botocore.exceptions import ( UnknownServiceError, ) +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( @@ -30,7 +31,12 @@ logger = logging.getLogger(__name__) class BedrockTextEmbeddingModel(TextEmbeddingModel): def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py index a1c5e98118..1f93068a8c 100644 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py @@ -5,6 +5,7 @@ import cohere import numpy as np from cohere.core import RequestOptions +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType 
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( @@ -25,7 +26,12 @@ class CohereTextEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py index 4ad96c4233..cf18f84ac8 100644 --- a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py @@ -6,6 +6,7 @@ import numpy as np import requests from huggingface_hub import HfApi, InferenceClient +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult @@ -18,7 +19,12 @@ HUGGINGFACE_ENDPOINT_API = "https://api.endpoints.huggingface.cloud/v2/endpoint/ class HuggingfaceHubTextEmbeddingModel(_CommonHuggingfaceHub, TextEmbeddingModel): def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: client = InferenceClient(token=credentials["huggingfacehub_api_token"]) diff --git 
a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py index 55f3c25804..58baf4933c 100644 --- a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py @@ -1,6 +1,7 @@ import time from typing import Optional +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult @@ -23,7 +24,12 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py index 1396e59e18..3e14371f89 100644 --- a/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py @@ -9,6 +9,7 @@ from tencentcloud.common.profile.client_profile import ClientProfile from tencentcloud.common.profile.http_profile import HttpProfile from tencentcloud.hunyuan.v20230901 import hunyuan_client, models +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities 
import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( @@ -26,7 +27,12 @@ class HunyuanTextEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py index 6c96699ea2..9120f26b8d 100644 --- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py @@ -4,6 +4,7 @@ from typing import Optional from requests import post +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult @@ -60,7 +61,12 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): return data def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py index 7d258be81e..d8878c7be8 100644 --- a/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py +++ 
b/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py @@ -5,6 +5,7 @@ from typing import Optional from requests import post from yarl import URL +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult @@ -26,7 +27,12 @@ class LocalAITextEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py index 76fd1342bd..d0d1d2aea1 100644 --- a/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py @@ -4,6 +4,7 @@ from typing import Optional from requests import post +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( @@ -34,7 +35,12 @@ class MinimaxTextEmbeddingModel(TextEmbeddingModel): api_base: str = "https://api.minimax.chat/v1/embeddings" def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = 
EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py index 05d9a9a0c6..cdc2d58d0c 100644 --- a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py @@ -4,6 +4,7 @@ from typing import Optional import requests +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult @@ -27,7 +28,12 @@ class MixedBreadTextEmbeddingModel(TextEmbeddingModel): api_base: str = "https://api.mixedbread.ai/v1" def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py index ccbfd196a9..a797521576 100644 --- a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py @@ -5,6 +5,7 @@ from typing import Optional from nomic import embed from nomic import login as nomic_login +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities 
import ( EmbeddingUsage, @@ -46,6 +47,7 @@ class NomicTextEmbeddingModel(_CommonNomic, TextEmbeddingModel): credentials: dict, texts: list[str], user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py index 00cec265d5..a4ea28bd10 100644 --- a/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py @@ -4,6 +4,7 @@ from typing import Optional from requests import post +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( @@ -27,7 +28,12 @@ class NvidiaTextEmbeddingModel(TextEmbeddingModel): models: list[str] = ["NV-Embed-QA"] def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py index 80ad2be9f5..4de9296cca 100644 --- a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py @@ -6,6 +6,7 @@ from typing import Optional import numpy as np import oci +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities 
import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( @@ -41,7 +42,12 @@ class OCITextEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model @@ -50,6 +56,7 @@ class OCITextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ # get model properties diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py index b4c61d8a6d..0501c8b841 100644 --- a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py @@ -8,6 +8,7 @@ from urllib.parse import urljoin import numpy as np import requests +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import ( AIModelEntity, @@ -38,7 +39,12 @@ class OllamaEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py 
b/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py index 535d8388bc..7945723636 100644 --- a/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py @@ -6,6 +6,7 @@ import numpy as np import tiktoken from openai import OpenAI +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -19,7 +20,12 @@ class OpenAITextEmbeddingModel(_CommonOpenAI, TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py index e83cfdf873..68b5773e16 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py @@ -7,6 +7,7 @@ from urllib.parse import urljoin import numpy as np import requests +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import ( AIModelEntity, @@ -28,7 +29,12 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + 
self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py index 00e583cc79..c0a3efbb00 100644 --- a/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py @@ -5,6 +5,7 @@ from typing import Optional from requests import post from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import ( @@ -25,7 +26,12 @@ class OpenLLMTextEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py index b62a2d2aaf..1e86f351c8 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py @@ -7,6 +7,7 @@ from urllib.parse import urljoin import numpy as np import requests +from core.embedding.embedding_constant import EmbeddingInputType from 
core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import ( AIModelEntity, @@ -28,7 +29,12 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model @@ -37,6 +43,7 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ diff --git a/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py index 71b6fb99c4..b6cf89bcd9 100644 --- a/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py @@ -4,6 +4,7 @@ from typing import Optional from replicate import Client as ReplicateClient +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult @@ -14,7 +15,12 @@ from core.model_runtime.model_providers.replicate._common import _CommonReplicat class ReplicateEmbeddingModel(_CommonReplicate, TextEmbeddingModel): def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType 
= EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: client = ReplicateClient(api_token=credentials["replicate_api_token"], timeout=30) diff --git a/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py index d55144f8a7..957f2e5d0f 100644 --- a/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py @@ -6,6 +6,7 @@ from typing import Any, Optional import boto3 +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult @@ -53,7 +54,12 @@ class SageMakerEmbeddingModel(TextEmbeddingModel): return embeddings def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py index 6cdf4933b4..c6c681c15d 100644 --- a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py @@ -1,5 +1,6 @@ from typing import Optional +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult from 
core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import ( OAICompatEmbeddingModel, @@ -16,7 +17,12 @@ class SiliconflowTextEmbeddingModel(OAICompatEmbeddingModel): super().validate_credentials(model, credentials) def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: self._add_custom_parameters(credentials) return super()._invoke(model, credentials, texts, user) diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py index 5783d2e383..0eef0db3e7 100644 --- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py @@ -4,6 +4,7 @@ from typing import Optional import dashscope import numpy as np +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import ( EmbeddingUsage, @@ -27,6 +28,7 @@ class TongyiTextEmbeddingModel(_CommonTongyi, TextEmbeddingModel): credentials: dict, texts: list[str], user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py index edd4a36d98..812bf92eea 100644 --- a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py @@ -7,6 +7,7 @@ import numpy as np from 
openai import OpenAI from tokenizers import Tokenizer +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -22,7 +23,14 @@ class UpstageTextEmbeddingModel(_CommonUpstage, TextEmbeddingModel): def _get_tokenizer(self) -> Tokenizer: return Tokenizer.from_pretrained("upstage/solar-1-mini-tokenizer") - def _invoke(self, model: str, credentials: dict, texts: list[str], user: str | None = None) -> TextEmbeddingResult: + def _invoke( + self, + model: str, + credentials: dict, + texts: list[str], + user: str | None = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, + ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py index 519373a7f3..509b41d951 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py @@ -9,6 +9,7 @@ from google.cloud import aiplatform from google.oauth2 import service_account from vertexai.language_models import TextEmbeddingModel as VertexTextEmbeddingModel +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import ( AIModelEntity, @@ -30,7 +31,12 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = 
EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py index 9cba2cb879..9d800af5f4 100644 --- a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py @@ -2,6 +2,7 @@ import time from decimal import Decimal from typing import Optional +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import ( AIModelEntity, @@ -41,7 +42,12 @@ class VolcengineMaaSTextEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py index 4d6f6dccd0..1b5a0904db 100644 --- a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py @@ -7,6 +7,7 @@ from typing import Any, Optional import numpy as np from requests import Response, post +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.invoke import InvokeError @@ -70,7 +71,12 @@ class 
WenxinTextEmbeddingModel(TextEmbeddingModel): return WenxinTextEmbedding(api_key, secret_key) def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model diff --git a/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py index 8043af1d6c..1627239132 100644 --- a/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py @@ -3,6 +3,7 @@ from typing import Optional from xinference_client.client.restful.restful_client import Client, RESTfulEmbeddingModelHandle +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.common_entities import I18nObject from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult @@ -25,7 +26,12 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model @@ -40,6 +46,7 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ server_url = credentials["server_url"] diff --git 
a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py index ee20954381..707c08ef1b 100644 --- a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py @@ -1,6 +1,7 @@ import time from typing import Optional +from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult from core.model_runtime.errors.validate import CredentialsValidateFailedError @@ -15,7 +16,12 @@ class ZhipuAITextEmbeddingModel(_CommonZhipuaiAI, TextEmbeddingModel): """ def _invoke( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: """ Invoke text embedding model From 91f70d0bd9d96e07c61c5a37cc36d456526758e2 Mon Sep 17 00:00:00 2001 From: ice yao Date: Wed, 25 Sep 2024 08:47:11 +0800 Subject: [PATCH 29/64] Add embedding models in fireworks provider (#8728) --- .../model_providers/fireworks/fireworks.yaml | 1 + .../text_embedding/UAE-Large-V1.yaml | 12 ++ .../fireworks/text_embedding/__init__.py | 0 .../fireworks/text_embedding/gte-base.yaml | 12 ++ .../fireworks/text_embedding/gte-large.yaml | 12 ++ .../text_embedding/nomic-embed-text-v1.5.yaml | 12 ++ .../text_embedding/nomic-embed-text-v1.yaml | 12 ++ .../text_embedding/text_embedding.py | 151 ++++++++++++++++++ .../fireworks/test_text_embedding.py | 54 +++++++ 9 files changed, 266 insertions(+) create mode 100644 api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml create mode 100644 
api/core/model_runtime/model_providers/fireworks/text_embedding/__init__.py create mode 100644 api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py create mode 100644 api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py diff --git a/api/core/model_runtime/model_providers/fireworks/fireworks.yaml b/api/core/model_runtime/model_providers/fireworks/fireworks.yaml index f886fa23b5..cdb87a55e9 100644 --- a/api/core/model_runtime/model_providers/fireworks/fireworks.yaml +++ b/api/core/model_runtime/model_providers/fireworks/fireworks.yaml @@ -15,6 +15,7 @@ help: en_US: https://fireworks.ai/account/api-keys supported_model_types: - llm + - text-embedding configurate_methods: - predefined-model provider_credential_schema: diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml new file mode 100644 index 0000000000..d7c11691cf --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml @@ -0,0 +1,12 @@ +model: WhereIsAI/UAE-Large-V1 +label: + zh_Hans: UAE-Large-V1 + en_US: UAE-Large-V1 +model_type: text-embedding +model_properties: + context_size: 512 + max_chunks: 1 +pricing: + input: '0.008' + unit: '0.000001' + currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/__init__.py b/api/core/model_runtime/model_providers/fireworks/text_embedding/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml new file mode 100644 index 0000000000..d09bafb4d3 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml @@ -0,0 +1,12 @@ +model: thenlper/gte-base +label: + zh_Hans: GTE-base + en_US: GTE-base +model_type: text-embedding +model_properties: + context_size: 512 + max_chunks: 1 +pricing: + input: '0.008' + unit: '0.000001' + currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml new file mode 100644 index 0000000000..c41fa2f9d3 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml @@ -0,0 +1,12 @@ +model: thenlper/gte-large +label: + zh_Hans: GTE-large + en_US: GTE-large +model_type: text-embedding +model_properties: + context_size: 512 + max_chunks: 1 +pricing: + input: '0.008' + unit: '0.000001' + currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml new file mode 100644 index 0000000000..c9098503d9 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml @@ -0,0 +1,12 @@ +model: nomic-ai/nomic-embed-text-v1.5 +label: + zh_Hans: nomic-embed-text-v1.5 + en_US: nomic-embed-text-v1.5 +model_type: text-embedding +model_properties: + context_size: 8192 + max_chunks: 16 +pricing: + input: '0.008' + unit: '0.000001' + currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml new file mode 100644 index 0000000000..89078d3ff6 --- /dev/null 
+++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml @@ -0,0 +1,12 @@ +model: nomic-ai/nomic-embed-text-v1 +label: + zh_Hans: nomic-embed-text-v1 + en_US: nomic-embed-text-v1 +model_type: text-embedding +model_properties: + context_size: 8192 + max_chunks: 16 +pricing: + input: '0.008' + unit: '0.000001' + currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py new file mode 100644 index 0000000000..cdce69ff38 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py @@ -0,0 +1,151 @@ +import time +from collections.abc import Mapping +from typing import Optional, Union + +import numpy as np +from openai import OpenAI + +from core.embedding.embedding_constant import EmbeddingInputType +from core.model_runtime.entities.model_entities import PriceType +from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel +from core.model_runtime.model_providers.fireworks._common import _CommonFireworks + + +class FireworksTextEmbeddingModel(_CommonFireworks, TextEmbeddingModel): + """ + Model class for Fireworks text embedding model. 
+ """ + + def _invoke( + self, + model: str, + credentials: dict, + texts: list[str], + user: Optional[str] = None, + input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, + ) -> TextEmbeddingResult: + """ + Invoke text embedding model + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :param user: unique user id + :param input_type: input type + :return: embeddings result + """ + + credentials_kwargs = self._to_credential_kwargs(credentials) + client = OpenAI(**credentials_kwargs) + + extra_model_kwargs = {} + if user: + extra_model_kwargs["user"] = user + + extra_model_kwargs["encoding_format"] = "float" + + context_size = self._get_context_size(model, credentials) + max_chunks = self._get_max_chunks(model, credentials) + + inputs = [] + indices = [] + used_tokens = 0 + + for i, text in enumerate(texts): + # Here token count is only an approximation based on the GPT2 tokenizer + # TODO: Optimize for better token estimation and chunking + num_tokens = self._get_num_tokens_by_gpt2(text) + + if num_tokens >= context_size: + cutoff = int(np.floor(len(text) * (context_size / num_tokens))) + # if num tokens is larger than context length, only use the start + inputs.append(text[0:cutoff]) + else: + inputs.append(text) + indices += [i] + + batched_embeddings = [] + _iter = range(0, len(inputs), max_chunks) + + for i in _iter: + embeddings_batch, embedding_used_tokens = self._embedding_invoke( + model=model, + client=client, + texts=inputs[i : i + max_chunks], + extra_model_kwargs=extra_model_kwargs, + ) + used_tokens += embedding_used_tokens + batched_embeddings += embeddings_batch + + usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) + return TextEmbeddingResult(embeddings=batched_embeddings, usage=usage, model=model) + + def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: + """ + Get number of tokens for given prompt messages + + :param 
model: model name + :param credentials: model credentials + :param texts: texts to embed + :return: + """ + return sum(self._get_num_tokens_by_gpt2(text) for text in texts) + + def validate_credentials(self, model: str, credentials: Mapping) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + # transform credentials to kwargs for model instance + credentials_kwargs = self._to_credential_kwargs(credentials) + client = OpenAI(**credentials_kwargs) + + # call embedding model + self._embedding_invoke(model=model, client=client, texts=["ping"], extra_model_kwargs={}) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + def _embedding_invoke( + self, model: str, client: OpenAI, texts: Union[list[str], str], extra_model_kwargs: dict + ) -> tuple[list[list[float]], int]: + """ + Invoke embedding model + :param model: model name + :param client: model client + :param texts: texts to embed + :param extra_model_kwargs: extra model kwargs + :return: embeddings and used tokens + """ + response = client.embeddings.create(model=model, input=texts, **extra_model_kwargs) + return [data.embedding for data in response.data], response.usage.total_tokens + + def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: + """ + Calculate response usage + + :param model: model name + :param credentials: model credentials + :param tokens: input tokens + :return: usage + """ + input_price_info = self.get_price( + model=model, credentials=credentials, tokens=tokens, price_type=PriceType.INPUT + ) + + usage = EmbeddingUsage( + tokens=tokens, + total_tokens=tokens, + unit_price=input_price_info.unit_price, + price_unit=input_price_info.unit, + total_price=input_price_info.total_amount, + currency=input_price_info.currency, + latency=time.perf_counter() - self.started_at, + ) + + return usage diff --git 
a/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py b/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py new file mode 100644 index 0000000000..7bf723b3a9 --- /dev/null +++ b/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py @@ -0,0 +1,54 @@ +import os + +import pytest + +from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.fireworks.text_embedding.text_embedding import FireworksTextEmbeddingModel +from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock + + +@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) +def test_validate_credentials(setup_openai_mock): + model = FireworksTextEmbeddingModel() + + with pytest.raises(CredentialsValidateFailedError): + model.validate_credentials( + model="nomic-ai/nomic-embed-text-v1.5", credentials={"fireworks_api_key": "invalid_key"} + ) + + model.validate_credentials( + model="nomic-ai/nomic-embed-text-v1.5", credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")} + ) + + +@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) +def test_invoke_model(setup_openai_mock): + model = FireworksTextEmbeddingModel() + + result = model.invoke( + model="nomic-ai/nomic-embed-text-v1.5", + credentials={ + "fireworks_api_key": os.environ.get("FIREWORKS_API_KEY"), + }, + texts=["hello", "world", " ".join(["long_text"] * 100), " ".join(["another_long_text"] * 100)], + user="foo", + ) + + assert isinstance(result, TextEmbeddingResult) + assert len(result.embeddings) == 4 + assert result.usage.total_tokens == 2 + + +def test_get_num_tokens(): + model = FireworksTextEmbeddingModel() + + num_tokens = model.get_num_tokens( + model="nomic-ai/nomic-embed-text-v1.5", + credentials={ + "fireworks_api_key": 
os.environ.get("FIREWORKS_API_KEY"), + }, + texts=["hello", "world"], + ) + + assert num_tokens == 2 From 68c7e68a8a0db9efe5c18c85c64be8346d3b2574 Mon Sep 17 00:00:00 2001 From: ybalbert001 <120714773+ybalbert001@users.noreply.github.com> Date: Wed, 25 Sep 2024 09:12:35 +0800 Subject: [PATCH 30/64] Fix Issue: switch LLM of SageMaker endpoint doesn't take effect (#8737) Co-authored-by: Yuanbo Li --- .../model_providers/sagemaker/llm/llm.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/api/core/model_runtime/model_providers/sagemaker/llm/llm.py b/api/core/model_runtime/model_providers/sagemaker/llm/llm.py index 04789197ee..97b7692044 100644 --- a/api/core/model_runtime/model_providers/sagemaker/llm/llm.py +++ b/api/core/model_runtime/model_providers/sagemaker/llm/llm.py @@ -84,8 +84,9 @@ class SageMakerLargeLanguageModel(LargeLanguageModel): Model class for Cohere large language model. """ - sagemaker_client: Any = None + sagemaker_session: Any = None predictor: Any = None + sagemaker_endpoint: str = None def _handle_chat_generate_response( self, @@ -211,7 +212,7 @@ class SageMakerLargeLanguageModel(LargeLanguageModel): :param user: unique user id :return: full response or stream response chunk generator result """ - if not self.sagemaker_client: + if not self.sagemaker_session: access_key = credentials.get("aws_access_key_id") secret_key = credentials.get("aws_secret_access_key") aws_region = credentials.get("aws_region") @@ -226,11 +227,14 @@ class SageMakerLargeLanguageModel(LargeLanguageModel): else: boto_session = boto3.Session() - self.sagemaker_client = boto_session.client("sagemaker") - sagemaker_session = Session(boto_session=boto_session, sagemaker_client=self.sagemaker_client) + sagemaker_client = boto_session.client("sagemaker") + self.sagemaker_session = Session(boto_session=boto_session, sagemaker_client=sagemaker_client) + + if self.sagemaker_endpoint != credentials.get("sagemaker_endpoint"): + 
self.sagemaker_endpoint = credentials.get("sagemaker_endpoint") self.predictor = Predictor( - endpoint_name=credentials.get("sagemaker_endpoint"), - sagemaker_session=sagemaker_session, + endpoint_name=self.sagemaker_endpoint, + sagemaker_session=self.sagemaker_session, serializer=serializers.JSONSerializer(), ) From bf64ff215be9e9094afddbd9523188c78942b039 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Wed, 25 Sep 2024 10:09:20 +0800 Subject: [PATCH 31/64] fix: . is missing in file_extension (#8736) --- api/core/rag/extractor/extract_processor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py index fe7eaa32e6..0ffc89b214 100644 --- a/api/core/rag/extractor/extract_processor.py +++ b/api/core/rag/extractor/extract_processor.py @@ -124,7 +124,7 @@ class ExtractProcessor: extractor = UnstructuredPPTXExtractor(file_path, unstructured_api_url) elif file_extension == ".xml": extractor = UnstructuredXmlExtractor(file_path, unstructured_api_url) - elif file_extension == "epub": + elif file_extension == ".epub": extractor = UnstructuredEpubExtractor(file_path, unstructured_api_url) else: # txt @@ -146,7 +146,7 @@ class ExtractProcessor: extractor = WordExtractor(file_path, upload_file.tenant_id, upload_file.created_by) elif file_extension == ".csv": extractor = CSVExtractor(file_path, autodetect_encoding=True) - elif file_extension == "epub": + elif file_extension == ".epub": extractor = UnstructuredEpubExtractor(file_path) else: # txt From cb1942c242b17d930111cc97947e78f2efb9d1cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Wed, 25 Sep 2024 11:27:17 +0800 Subject: [PATCH 32/64] chore: make url display in the middle of http node (#8741) --- web/app/components/workflow/nodes/http/node.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/web/app/components/workflow/nodes/http/node.tsx b/web/app/components/workflow/nodes/http/node.tsx index 5bbb10fc3a..4b7dbea257 100644 --- a/web/app/components/workflow/nodes/http/node.tsx +++ b/web/app/components/workflow/nodes/http/node.tsx @@ -15,7 +15,7 @@ const Node: FC> = ({
{method}
-
+
Date: Wed, 25 Sep 2024 14:48:06 +0800 Subject: [PATCH 33/64] chore: apply ruff reformat for python-client sdk (#8752) --- sdks/python-client/dify_client/client.py | 137 ++++++++--------------- 1 file changed, 47 insertions(+), 90 deletions(-) diff --git a/sdks/python-client/dify_client/client.py b/sdks/python-client/dify_client/client.py index 2be079bdf3..5e42507a42 100644 --- a/sdks/python-client/dify_client/client.py +++ b/sdks/python-client/dify_client/client.py @@ -1,103 +1,80 @@ import json + import requests class DifyClient: - def __init__(self, api_key, base_url: str = 'https://api.dify.ai/v1'): + def __init__(self, api_key, base_url: str = "https://api.dify.ai/v1"): self.api_key = api_key self.base_url = base_url def _send_request(self, method, endpoint, json=None, params=None, stream=False): - headers = { - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json" - } + headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"} url = f"{self.base_url}{endpoint}" response = requests.request(method, url, json=json, params=params, headers=headers, stream=stream) return response - def _send_request_with_files(self, method, endpoint, data, files): - headers = { - "Authorization": f"Bearer {self.api_key}" - } + headers = {"Authorization": f"Bearer {self.api_key}"} url = f"{self.base_url}{endpoint}" response = requests.request(method, url, data=data, headers=headers, files=files) return response - + def message_feedback(self, message_id, rating, user): - data = { - "rating": rating, - "user": user - } + data = {"rating": rating, "user": user} return self._send_request("POST", f"/messages/{message_id}/feedbacks", data) - + def get_application_parameters(self, user): params = {"user": user} return self._send_request("GET", "/parameters", params=params) - + def file_upload(self, user, files): - data = { - "user": user - } + data = {"user": user} return self._send_request_with_files("POST", "/files/upload", 
data=data, files=files) - def text_to_audio(self, text:str, user:str, streaming:bool=False): - data = { - "text": text, - "user": user, - "streaming": streaming - } + def text_to_audio(self, text: str, user: str, streaming: bool = False): + data = {"text": text, "user": user, "streaming": streaming} return self._send_request("POST", "/text-to-audio", data=data) - - def get_meta(self,user): - params = { "user": user} - return self._send_request("GET", f"/meta", params=params) + + def get_meta(self, user): + params = {"user": user} + return self._send_request("GET", "/meta", params=params) class CompletionClient(DifyClient): def create_completion_message(self, inputs, response_mode, user, files=None): - data = { - "inputs": inputs, - "response_mode": response_mode, - "user": user, - "files": files - } - return self._send_request("POST", "/completion-messages", data, - stream=True if response_mode == "streaming" else False) + data = {"inputs": inputs, "response_mode": response_mode, "user": user, "files": files} + return self._send_request( + "POST", "/completion-messages", data, stream=True if response_mode == "streaming" else False + ) class ChatClient(DifyClient): def create_chat_message(self, inputs, query, user, response_mode="blocking", conversation_id=None, files=None): - data = { - "inputs": inputs, - "query": query, - "user": user, - "response_mode": response_mode, - "files": files - } + data = {"inputs": inputs, "query": query, "user": user, "response_mode": response_mode, "files": files} if conversation_id: data["conversation_id"] = conversation_id - return self._send_request("POST", "/chat-messages", data, - stream=True if response_mode == "streaming" else False) - - def get_suggested(self, message_id, user:str): + return self._send_request( + "POST", "/chat-messages", data, stream=True if response_mode == "streaming" else False + ) + + def get_suggested(self, message_id, user: str): params = {"user": user} return self._send_request("GET", 
f"/messages/{message_id}/suggested", params=params) - + def stop_message(self, task_id, user): data = {"user": user} - return self._send_request("POST", f"/chat-messages/{task_id}/stop", data) + return self._send_request("POST", f"/chat-messages/{task_id}/stop", data) def get_conversations(self, user, last_id=None, limit=None, pinned=None): params = {"user": user, "last_id": last_id, "limit": limit, "pinned": pinned} return self._send_request("GET", "/conversations", params=params) - + def get_conversation_messages(self, user, conversation_id=None, first_id=None, limit=None): params = {"user": user} @@ -109,15 +86,15 @@ class ChatClient(DifyClient): params["limit"] = limit return self._send_request("GET", "/messages", params=params) - - def rename_conversation(self, conversation_id, name,auto_generate:bool, user:str): - data = {"name": name, "auto_generate": auto_generate,"user": user} + + def rename_conversation(self, conversation_id, name, auto_generate: bool, user: str): + data = {"name": name, "auto_generate": auto_generate, "user": user} return self._send_request("POST", f"/conversations/{conversation_id}/name", data) def delete_conversation(self, conversation_id, user): data = {"user": user} return self._send_request("DELETE", f"/conversations/{conversation_id}", data) - + def audio_to_text(self, audio_file, user): data = {"user": user} files = {"audio_file": audio_file} @@ -125,10 +102,10 @@ class ChatClient(DifyClient): class WorkflowClient(DifyClient): - def run(self, inputs:dict, response_mode:str="streaming", user:str="abc-123"): + def run(self, inputs: dict, response_mode: str = "streaming", user: str = "abc-123"): data = {"inputs": inputs, "response_mode": response_mode, "user": user} return self._send_request("POST", "/workflows/run", data) - + def stop(self, task_id, user): data = {"user": user} return self._send_request("POST", f"/workflows/tasks/{task_id}/stop", data) @@ -137,10 +114,8 @@ class WorkflowClient(DifyClient): return 
self._send_request("GET", f"/workflows/run/{workflow_run_id}") - class KnowledgeBaseClient(DifyClient): - - def __init__(self, api_key, base_url: str = 'https://api.dify.ai/v1', dataset_id: str = None): + def __init__(self, api_key, base_url: str = "https://api.dify.ai/v1", dataset_id: str = None): """ Construct a KnowledgeBaseClient object. @@ -150,10 +125,7 @@ class KnowledgeBaseClient(DifyClient): dataset_id (str, optional): ID of the dataset. Defaults to None. You don't need this if you just want to create a new dataset. or list datasets. otherwise you need to set this. """ - super().__init__( - api_key=api_key, - base_url=base_url - ) + super().__init__(api_key=api_key, base_url=base_url) self.dataset_id = dataset_id def _get_dataset_id(self): @@ -162,10 +134,10 @@ class KnowledgeBaseClient(DifyClient): return self.dataset_id def create_dataset(self, name: str, **kwargs): - return self._send_request('POST', '/datasets', {'name': name}, **kwargs) + return self._send_request("POST", "/datasets", {"name": name}, **kwargs) def list_datasets(self, page: int = 1, page_size: int = 20, **kwargs): - return self._send_request('GET', f'/datasets?page={page}&limit={page_size}', **kwargs) + return self._send_request("GET", f"/datasets?page={page}&limit={page_size}", **kwargs) def create_document_by_text(self, name, text, extra_params: dict = None, **kwargs): """ @@ -193,14 +165,7 @@ class KnowledgeBaseClient(DifyClient): } :return: Response from the API """ - data = { - 'indexing_technique': 'high_quality', - 'process_rule': { - 'mode': 'automatic' - }, - 'name': name, - 'text': text - } + data = {"indexing_technique": "high_quality", "process_rule": {"mode": "automatic"}, "name": name, "text": text} if extra_params is not None and isinstance(extra_params, dict): data.update(extra_params) url = f"/datasets/{self._get_dataset_id()}/document/create_by_text" @@ -233,10 +198,7 @@ class KnowledgeBaseClient(DifyClient): } :return: Response from the API """ - data = { - 'name': 
name, - 'text': text - } + data = {"name": name, "text": text} if extra_params is not None and isinstance(extra_params, dict): data.update(extra_params) url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/update_by_text" @@ -269,16 +231,11 @@ class KnowledgeBaseClient(DifyClient): :return: Response from the API """ files = {"file": open(file_path, "rb")} - data = { - 'process_rule': { - 'mode': 'automatic' - }, - 'indexing_technique': 'high_quality' - } + data = {"process_rule": {"mode": "automatic"}, "indexing_technique": "high_quality"} if extra_params is not None and isinstance(extra_params, dict): data.update(extra_params) if original_document_id is not None: - data['original_document_id'] = original_document_id + data["original_document_id"] = original_document_id url = f"/datasets/{self._get_dataset_id()}/document/create_by_file" return self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files) @@ -352,11 +309,11 @@ class KnowledgeBaseClient(DifyClient): """ params = {} if page is not None: - params['page'] = page + params["page"] = page if page_size is not None: - params['limit'] = page_size + params["limit"] = page_size if keyword is not None: - params['keyword'] = keyword + params["keyword"] = keyword url = f"/datasets/{self._get_dataset_id()}/documents" return self._send_request("GET", url, params=params, **kwargs) @@ -383,9 +340,9 @@ class KnowledgeBaseClient(DifyClient): url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments" params = {} if keyword is not None: - params['keyword'] = keyword + params["keyword"] = keyword if status is not None: - params['status'] = status + params["status"] = status if "params" in kwargs: params.update(kwargs["params"]) return self._send_request("GET", url, params=params, **kwargs) From d0e0111f88da0fc972b07b1f97893df18cbc0522 Mon Sep 17 00:00:00 2001 From: cherryhuahua <68722306+cherryhuahua@users.noreply.github.com> Date: Wed, 25 Sep 2024 14:51:42 +0800 Subject: 
[PATCH 34/64] fix:Spark's large language model token calculation error #7911 (#8755) --- api/core/app/apps/base_app_runner.py | 2 +- api/core/model_runtime/model_providers/spark/llm/llm.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py index 1b412b8639..203aca3384 100644 --- a/api/core/app/apps/base_app_runner.py +++ b/api/core/app/apps/base_app_runner.py @@ -309,7 +309,7 @@ class AppRunner: if not prompt_messages: prompt_messages = result.prompt_messages - if not usage and result.delta.usage: + if result.delta.usage: usage = result.delta.usage if not usage: diff --git a/api/core/model_runtime/model_providers/spark/llm/llm.py b/api/core/model_runtime/model_providers/spark/llm/llm.py index 57193dc031..1181ba699a 100644 --- a/api/core/model_runtime/model_providers/spark/llm/llm.py +++ b/api/core/model_runtime/model_providers/spark/llm/llm.py @@ -213,18 +213,21 @@ class SparkLargeLanguageModel(LargeLanguageModel): :param prompt_messages: prompt messages :return: llm response chunk generator result """ + completion = "" for index, content in enumerate(client.subscribe()): if isinstance(content, dict): delta = content["data"] else: delta = content - + completion += delta assistant_prompt_message = AssistantPromptMessage( content=delta or "", ) - + temp_assistant_prompt_message = AssistantPromptMessage( + content=completion, + ) prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) + completion_tokens = self.get_num_tokens(model, credentials, [temp_assistant_prompt_message]) # transform usage usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) From b0927c39fb589b9df5c8278dc8bdac115497fe2e Mon Sep 17 00:00:00 2001 From: zhuiyue132 Date: Wed, 25 Sep 2024 15:06:54 +0800 Subject: [PATCH 35/64] fix: expose the configuration of 
HTTP request node to Docker (#8716) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- docker/.env.example | 4 ++++ docker/docker-compose.yaml | 2 ++ 2 files changed, 6 insertions(+) diff --git a/docker/.env.example b/docker/.env.example index 7eaaceb928..f7479791ce 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -568,6 +568,10 @@ WORKFLOW_MAX_EXECUTION_STEPS=500 WORKFLOW_MAX_EXECUTION_TIME=1200 WORKFLOW_CALL_MAX_DEPTH=5 +# HTTP request node in workflow configuration +HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 +HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 + # SSRF Proxy server HTTP URL SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 # SSRF Proxy server HTTPS URL diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 16bef279bc..414919063a 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -207,6 +207,8 @@ x-shared-env: &shared-api-worker-env WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} + HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} + HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} services: # API service From 2ef8b187fad67c248fc9ce20e440503ba24c0a42 Mon Sep 17 00:00:00 2001 From: Hash Brown Date: Wed, 25 Sep 2024 15:50:51 +0800 Subject: [PATCH 36/64] Add GitHub Actions Workflow for Web Tests (#8753) --- .github/workflows/web-tests.yml | 46 +++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 .github/workflows/web-tests.yml diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml new file mode 100644 index 0000000000..5aee64b8e6 --- /dev/null +++ b/.github/workflows/web-tests.yml @@ -0,0 +1,46 @@ +name: Web Tests + +on: + pull_request: + branches: + - main + paths: + - web/** + +concurrency: + group: web-tests-${{ 
github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + test: + name: Web Tests + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./web + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Check changed files + id: changed-files + uses: tj-actions/changed-files@v45 + with: + files: web/** + + - name: Setup Node.js + uses: actions/setup-node@v4 + if: steps.changed-files.outputs.any_changed == 'true' + with: + node-version: 20 + cache: yarn + cache-dependency-path: ./web/package.json + + - name: Install dependencies + if: steps.changed-files.outputs.any_changed == 'true' + run: yarn install --frozen-lockfile + + - name: Run tests + if: steps.changed-files.outputs.any_changed == 'true' + run: yarn test From ef47f68e4ad5713b4694d9e0d0c646fd47efc55d Mon Sep 17 00:00:00 2001 From: NFish Date: Wed, 25 Sep 2024 18:25:06 +0800 Subject: [PATCH 37/64] fix: the translation result may cause a different meaning (#8763) --- web/i18n/es-ES/common.ts | 2 +- web/i18n/zh-Hans/common.ts | 2 +- web/i18n/zh-Hant/common.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/web/i18n/es-ES/common.ts b/web/i18n/es-ES/common.ts index 2ba907361f..59a05f63d8 100644 --- a/web/i18n/es-ES/common.ts +++ b/web/i18n/es-ES/common.ts @@ -202,7 +202,7 @@ const translation = { invitationLink: 'Enlace de invitación', failedInvitationEmails: 'Los siguientes usuarios no fueron invitados exitosamente', ok: 'OK', - removeFromTeam: 'Eliminar del equipo', + removeFromTeam: 'Eliminar del espacio de trabajo', removeFromTeamTip: 'Se eliminará el acceso al equipo', setAdmin: 'Establecer como administrador', setMember: 'Establecer como miembro ordinario', diff --git a/web/i18n/zh-Hans/common.ts b/web/i18n/zh-Hans/common.ts index 52ab7d6f02..7947d32f25 100644 --- a/web/i18n/zh-Hans/common.ts +++ b/web/i18n/zh-Hans/common.ts @@ -200,7 +200,7 @@ const translation = { invitationLink: '邀请链接', failedInvitationEmails: '邀请以下邮箱失败', ok: '好的', - 
removeFromTeam: '移除团队', + removeFromTeam: '移出团队', removeFromTeamTip: '将取消团队访问', setAdmin: '设为管理员', setMember: '设为普通成员', diff --git a/web/i18n/zh-Hant/common.ts b/web/i18n/zh-Hant/common.ts index c1f3ed2b2b..8cd51b1991 100644 --- a/web/i18n/zh-Hant/common.ts +++ b/web/i18n/zh-Hant/common.ts @@ -194,7 +194,7 @@ const translation = { invitationLink: '邀請連結', failedInvitationEmails: '邀請以下郵箱失敗', ok: '好的', - removeFromTeam: '移除團隊', + removeFromTeam: '移出團隊', removeFromTeamTip: '將取消團隊訪問', setAdmin: '設為管理員', setMember: '設為普通成員', From 02ff6cca70421b6f2d482989a581a98117b64e7d Mon Sep 17 00:00:00 2001 From: "Pan, Wen-Ming" Date: Wed, 25 Sep 2024 21:27:26 +0800 Subject: [PATCH 38/64] feat: add support for Vertex AI Gemini 1.5 002 and experimental models (#8767) --- ...5-flash.yaml => gemini-1.5-flash-001.yaml} | 2 +- .../vertex_ai/llm/gemini-1.5-flash-002.yaml | 37 +++++++++++++++++++ ...i-1.5-pro.yaml => gemini-1.5-pro-001.yaml} | 2 +- .../vertex_ai/llm/gemini-1.5-pro-002.yaml | 37 +++++++++++++++++++ .../llm/gemini-flash-experimental.yaml | 37 +++++++++++++++++++ .../llm/gemini-pro-experimental.yaml | 37 +++++++++++++++++++ 6 files changed, 150 insertions(+), 2 deletions(-) rename api/core/model_runtime/model_providers/vertex_ai/llm/{gemini-1.5-flash.yaml => gemini-1.5-flash-001.yaml} (96%) create mode 100644 api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml rename api/core/model_runtime/model_providers/vertex_ai/llm/{gemini-1.5-pro.yaml => gemini-1.5-pro-001.yaml} (96%) create mode 100644 api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml create mode 100644 api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml create mode 100644 api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml similarity index 
96% rename from api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash.yaml rename to api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml index c308f0a322..f5386be06d 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash.yaml +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml @@ -1,6 +1,6 @@ model: gemini-1.5-flash-001 label: - en_US: Gemini 1.5 Flash + en_US: Gemini 1.5 Flash 001 model_type: llm features: - agent-thought diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml new file mode 100644 index 0000000000..97bd44f06b --- /dev/null +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml @@ -0,0 +1,37 @@ +model: gemini-1.5-flash-002 +label: + en_US: Gemini 1.5 Flash 002 +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + en_US: Top k + type: int + help: + en_US: Only sample from the top K options for each subsequent token. 
+ required: false + - name: presence_penalty + use_template: presence_penalty + - name: frequency_penalty + use_template: frequency_penalty + - name: max_output_tokens + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml similarity index 96% rename from api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro.yaml rename to api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml index 744863e773..5e08f2294e 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro.yaml +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml @@ -1,6 +1,6 @@ model: gemini-1.5-pro-001 label: - en_US: Gemini 1.5 Pro + en_US: Gemini 1.5 Pro 001 model_type: llm features: - agent-thought diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml new file mode 100644 index 0000000000..8f327ea2f3 --- /dev/null +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml @@ -0,0 +1,37 @@ +model: gemini-1.5-pro-002 +label: + en_US: Gemini 1.5 Pro 002 +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + en_US: Top k + type: int + help: + en_US: Only sample from the top K options for each subsequent token. 
+ required: false + - name: presence_penalty + use_template: presence_penalty + - name: frequency_penalty + use_template: frequency_penalty + - name: max_output_tokens + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml new file mode 100644 index 0000000000..0f5eb34c0c --- /dev/null +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml @@ -0,0 +1,37 @@ +model: gemini-flash-experimental +label: + en_US: Gemini Flash Experimental +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + en_US: Top k + type: int + help: + en_US: Only sample from the top K options for each subsequent token. 
+ required: false + - name: presence_penalty + use_template: presence_penalty + - name: frequency_penalty + use_template: frequency_penalty + - name: max_output_tokens + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml new file mode 100644 index 0000000000..fa31cabb85 --- /dev/null +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml @@ -0,0 +1,37 @@ +model: gemini-pro-experimental +label: + en_US: Gemini Pro Experimental +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + en_US: Top k + type: int + help: + en_US: Only sample from the top K options for each subsequent token. 
+ required: false + - name: presence_penalty + use_template: presence_penalty + - name: frequency_penalty + use_template: frequency_penalty + - name: max_output_tokens + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD From a8b837c4a9143385718f87fdd4863844607daae3 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Wed, 25 Sep 2024 22:55:24 +0800 Subject: [PATCH 39/64] dep: bump ElasticSearch from 8.14.x to 8.15.x (#8197) --- api/poetry.lock | 13 ++++++++----- api/pyproject.toml | 2 +- docker/docker-compose.yaml | 4 ++-- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/api/poetry.lock b/api/poetry.lock index 184cdb9e81..bce21fb547 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -2333,13 +2333,13 @@ develop = ["aiohttp", "furo", "httpx", "opentelemetry-api", "opentelemetry-sdk", [[package]] name = "elasticsearch" -version = "8.14.0" +version = "8.15.1" description = "Python client for Elasticsearch" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "elasticsearch-8.14.0-py3-none-any.whl", hash = "sha256:cef8ef70a81af027f3da74a4f7d9296b390c636903088439087b8262a468c130"}, - {file = "elasticsearch-8.14.0.tar.gz", hash = "sha256:aa2490029dd96f4015b333c1827aa21fd6c0a4d223b00dfb0fe933b8d09a511b"}, + {file = "elasticsearch-8.15.1-py3-none-any.whl", hash = "sha256:02a0476e98768a30d7926335fc0d305c04fdb928eea1354c6e6040d8c2814569"}, + {file = "elasticsearch-8.15.1.tar.gz", hash = "sha256:40c0d312f8adf8bdc81795bc16a0b546ddf544cb1f90e829a244e4780c4dbfd8"}, ] [package.dependencies] @@ -2347,7 +2347,10 @@ elastic-transport = ">=8.13,<9" [package.extras] async = ["aiohttp (>=3,<4)"] +dev = ["aiohttp", "black", "build", "coverage", "isort", "jinja2", "mapbox-vector-tile", "nox", "numpy", "orjson", "pandas", "pyarrow", "pytest", "pytest-asyncio", "pytest-cov", "python-dateutil", "pyyaml (>=5.4)", "requests (>=2,<3)", 
"simsimd", "twine", "unasync"] +docs = ["sphinx", "sphinx-autodoc-typehints", "sphinx-rtd-theme (>=2.0)"] orjson = ["orjson (>=3)"] +pyarrow = ["pyarrow (>=1)"] requests = ["requests (>=2.4.0,!=2.32.2,<3.0.0)"] vectorstore-mmr = ["numpy (>=1)", "simsimd (>=3)"] @@ -10498,4 +10501,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "17c4108d92c415d987f8b437ea3e0484c5601a05bfe175339a8546c93c159bc5" +content-hash = "69b42bb1ff033f14e199fee8335356275099421d72bbd7037b7a991ea65cae08" diff --git a/api/pyproject.toml b/api/pyproject.toml index 9e38c09456..f004865d5f 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -253,7 +253,7 @@ alibabacloud_gpdb20160503 = "~3.8.0" alibabacloud_tea_openapi = "~0.3.9" chromadb = "0.5.1" clickhouse-connect = "~0.7.16" -elasticsearch = "8.14.0" +elasticsearch = "~8.15.1" oracledb = "~2.2.1" pgvecto-rs = { version = "~0.2.1", extras = ['sqlalchemy'] } pgvector = "0.2.5" diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 414919063a..95e271a0e9 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -630,7 +630,7 @@ services: # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 + image: docker.elastic.co/elasticsearch/elasticsearch:8.15.1 container_name: elasticsearch profiles: - elasticsearch @@ -657,7 +657,7 @@ services: # https://www.elastic.co/guide/en/kibana/current/docker.html # https://www.elastic.co/guide/en/kibana/current/settings.html kibana: - image: docker.elastic.co/kibana/kibana:8.14.3 + image: docker.elastic.co/kibana/kibana:8.15.1 container_name: kibana profiles: - elasticsearch From fefbc43fb035fca2171d949ac5b4d261c343d8b1 Mon Sep 17 00:00:00 2001 From: Qun <51054082+QunBB@users.noreply.github.com> Date: Thu, 
26 Sep 2024 08:18:13 +0800 Subject: [PATCH 40/64] chore: fix comfyui tool doc url (#8775) --- api/core/tools/provider/builtin/comfyui/comfyui.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/tools/provider/builtin/comfyui/comfyui.yaml b/api/core/tools/provider/builtin/comfyui/comfyui.yaml index 066fd85308..3891eebf3a 100644 --- a/api/core/tools/provider/builtin/comfyui/comfyui.yaml +++ b/api/core/tools/provider/builtin/comfyui/comfyui.yaml @@ -39,4 +39,4 @@ credentials_for_provider: en_US: The checkpoint name of the ComfyUI server, e.g. xxx.safetensors zh_Hans: ComfyUI服务器的模型名称, 比如 xxx.safetensors pt_BR: The checkpoint name of the ComfyUI server, e.g. xxx.safetensors - url: https://docs.dify.ai/tutorials/tool-configuration/comfyui + url: https://github.com/comfyanonymous/ComfyUI#installing From 5ba19d64e90a9d5c94b8d4e6cd93cd4043fc8943 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Thu, 26 Sep 2024 11:22:18 +0800 Subject: [PATCH 41/64] fix: TavilySearch tool get api link (#8780) --- api/core/tools/provider/builtin/tavily/tavily.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/tools/provider/builtin/tavily/tavily.yaml b/api/core/tools/provider/builtin/tavily/tavily.yaml index 7b25a81848..95820f4d18 100644 --- a/api/core/tools/provider/builtin/tavily/tavily.yaml +++ b/api/core/tools/provider/builtin/tavily/tavily.yaml @@ -28,4 +28,4 @@ credentials_for_provider: en_US: Get your Tavily API key from Tavily zh_Hans: 从 TavilyApi 获取您的 Tavily API key pt_BR: Get your Tavily API key from Tavily - url: https://docs.tavily.com/docs/tavily-api/introduction + url: https://docs.tavily.com/docs/welcome From ac737637264f4caf10866ab3953cd5c203961939 Mon Sep 17 00:00:00 2001 From: zhuhao <37029601+hwzhuhao@users.noreply.github.com> Date: Thu, 26 Sep 2024 11:23:09 +0800 Subject: [PATCH 42/64] chore: add input_type param desc for the _invoke method of text_embedding (#8778) --- 
.../azure_openai/text_embedding/text_embedding.py | 10 ++++++++++ .../baichuan/text_embedding/text_embedding.py | 1 + .../bedrock/text_embedding/text_embedding.py | 1 + .../cohere/text_embedding/text_embedding.py | 1 + .../huggingface_hub/text_embedding/text_embedding.py | 10 ++++++++++ .../huggingface_tei/text_embedding/text_embedding.py | 1 + .../hunyuan/text_embedding/text_embedding.py | 1 + .../jina/text_embedding/text_embedding.py | 1 + .../localai/text_embedding/text_embedding.py | 3 ++- .../minimax/text_embedding/text_embedding.py | 1 + .../mixedbread/text_embedding/text_embedding.py | 1 + .../nomic/text_embedding/text_embedding.py | 1 + .../nvidia/text_embedding/text_embedding.py | 1 + .../ollama/text_embedding/text_embedding.py | 1 + .../openai/text_embedding/text_embedding.py | 1 + .../text_embedding/text_embedding.py | 1 + .../openllm/text_embedding/text_embedding.py | 1 + .../replicate/text_embedding/text_embedding.py | 10 ++++++++++ .../sagemaker/text_embedding/text_embedding.py | 1 + .../siliconflow/text_embedding/text_embedding.py | 10 ++++++++++ .../tongyi/text_embedding/text_embedding.py | 1 + .../upstage/text_embedding/text_embedding.py | 1 + .../vertex_ai/text_embedding/text_embedding.py | 2 ++ .../volcengine_maas/text_embedding/text_embedding.py | 1 + .../wenxin/text_embedding/text_embedding.py | 1 + .../zhipuai/text_embedding/text_embedding.py | 1 + 26 files changed, 64 insertions(+), 1 deletion(-) diff --git a/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py index 6b270b65ff..8701a38050 100644 --- a/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py @@ -25,6 +25,16 @@ class AzureOpenAITextEmbeddingModel(_CommonAzureOpenAI, TextEmbeddingModel): user: Optional[str] = None, input_type: 
EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: + """ + Invoke text embedding model + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :param user: unique user id + :param input_type: input type + :return: embeddings result + """ base_model_name = credentials["base_model_name"] credentials_kwargs = self._to_credential_kwargs(credentials) client = AzureOpenAI(**credentials_kwargs) diff --git a/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py index 210c274bdf..56b9be1c36 100644 --- a/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py @@ -50,6 +50,7 @@ class BaichuanTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ api_key = credentials["api_key"] diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py index 8c4c50b269..d9c5726592 100644 --- a/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py @@ -45,6 +45,7 @@ class BedrockTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ client_config = Config(region_name=credentials["aws_region"]) diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py index 1f93068a8c..4da2080690 100644 --- 
a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py @@ -40,6 +40,7 @@ class CohereTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ # get model properties diff --git a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py index cf18f84ac8..b2e6d1b652 100644 --- a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py @@ -26,6 +26,16 @@ class HuggingfaceHubTextEmbeddingModel(_CommonHuggingfaceHub, TextEmbeddingModel user: Optional[str] = None, input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: + """ + Invoke text embedding model + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :param user: unique user id + :param input_type: input type + :return: embeddings result + """ client = InferenceClient(token=credentials["huggingfacehub_api_token"]) execute_model = model diff --git a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py index 58baf4933c..b8ff3ca549 100644 --- a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py @@ -44,6 +44,7 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type 
:return: embeddings result """ server_url = credentials["server_url"] diff --git a/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py index 3e14371f89..75701ebc54 100644 --- a/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py @@ -41,6 +41,7 @@ class HunyuanTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py index 9120f26b8d..c7b729c14e 100644 --- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py @@ -75,6 +75,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ api_key = credentials["api_key"] diff --git a/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py index d8878c7be8..ab8ca76c2f 100644 --- a/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py @@ -23,7 +23,7 @@ from core.model_runtime.model_providers.__base.text_embedding_model import TextE class LocalAITextEmbeddingModel(TextEmbeddingModel): """ - Model class for Jina text embedding model. + Model class for LocalAI text embedding model. 
""" def _invoke( @@ -41,6 +41,7 @@ class LocalAITextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ if len(texts) != 1: diff --git a/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py index d0d1d2aea1..74d2a221d1 100644 --- a/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py @@ -49,6 +49,7 @@ class MinimaxTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ api_key = credentials["minimax_api_key"] diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py index cdc2d58d0c..68b7b448bf 100644 --- a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py @@ -42,6 +42,7 @@ class MixedBreadTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ api_key = credentials["api_key"] diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py index a797521576..857dfb5f41 100644 --- a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py @@ -56,6 +56,7 @@ class 
NomicTextEmbeddingModel(_CommonNomic, TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ embeddings, prompt_tokens, total_tokens = self.embed_text( diff --git a/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py index a4ea28bd10..936ceb8dd2 100644 --- a/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py @@ -42,6 +42,7 @@ class NvidiaTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ api_key = credentials["api_key"] diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py index 0501c8b841..5cf3f1c6fa 100644 --- a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py @@ -53,6 +53,7 @@ class OllamaEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ diff --git a/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py index 7945723636..16f1a0cfa1 100644 --- a/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py @@ -34,6 +34,7 @@ class OpenAITextEmbeddingModel(_CommonOpenAI, TextEmbeddingModel): :param 
credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ # transform credentials to kwargs for model instance diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py index 68b5773e16..64fa6aaa3c 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py @@ -43,6 +43,7 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ diff --git a/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py index c0a3efbb00..c5d4330912 100644 --- a/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py @@ -40,6 +40,7 @@ class OpenLLMTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ server_url = credentials["server_url"] diff --git a/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py index b6cf89bcd9..9f724a77ac 100644 --- a/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py @@ -22,6 +22,16 @@ class ReplicateEmbeddingModel(_CommonReplicate, 
TextEmbeddingModel): user: Optional[str] = None, input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: + """ + Invoke text embedding model + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :param user: unique user id + :param input_type: input type + :return: embeddings result + """ client = ReplicateClient(api_token=credentials["replicate_api_token"], timeout=30) if "model_version" in credentials: diff --git a/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py index 957f2e5d0f..8f993ce672 100644 --- a/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py @@ -68,6 +68,7 @@ class SageMakerEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ # get model properties diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py index c6c681c15d..c5dcc12610 100644 --- a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py @@ -24,6 +24,16 @@ class SiliconflowTextEmbeddingModel(OAICompatEmbeddingModel): user: Optional[str] = None, input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, ) -> TextEmbeddingResult: + """ + Invoke text embedding model + + :param model: model name + :param credentials: model credentials + :param texts: texts to embed + :param user: unique user id + :param input_type: input type + :return: embeddings result + """ self._add_custom_parameters(credentials) 
return super()._invoke(model, credentials, texts, user) diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py index 0eef0db3e7..736cd44df8 100644 --- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py @@ -37,6 +37,7 @@ class TongyiTextEmbeddingModel(_CommonTongyi, TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ credentials_kwargs = self._to_credential_kwargs(credentials) diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py index 812bf92eea..b6509cd26c 100644 --- a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py @@ -38,6 +38,7 @@ class UpstageTextEmbeddingModel(_CommonUpstage, TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py index 509b41d951..fce9544df0 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py @@ -44,6 +44,8 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel): :param model: model name :param credentials: model credentials :param texts: texts to embed + :param user: unique user id + :param input_type: input 
type :return: embeddings result """ service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"])) diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py index 9d800af5f4..0dd4037c95 100644 --- a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py @@ -56,6 +56,7 @@ class VolcengineMaaSTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ if ArkClientV3.is_legacy(credentials): diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py index 1b5a0904db..c21d0c0552 100644 --- a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py @@ -85,6 +85,7 @@ class WenxinTextEmbeddingModel(TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + :param input_type: input type :return: embeddings result """ diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py index 707c08ef1b..14a529dddf 100644 --- a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py @@ -30,6 +30,7 @@ class ZhipuAITextEmbeddingModel(_CommonZhipuaiAI, TextEmbeddingModel): :param credentials: model credentials :param texts: texts to embed :param user: unique user id + 
:param input_type: input type :return: embeddings result """ credentials_kwargs = self._to_credential_kwargs(credentials) From 0c96f0aa51e65d970d26f043be1f592f89dba507 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Thu, 26 Sep 2024 11:24:03 +0800 Subject: [PATCH 43/64] fix: credential *** should be string (#8785) --- api/core/tools/provider/tool_provider.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/api/core/tools/provider/tool_provider.py b/api/core/tools/provider/tool_provider.py index 05c88b904e..321b212014 100644 --- a/api/core/tools/provider/tool_provider.py +++ b/api/core/tools/provider/tool_provider.py @@ -153,6 +153,9 @@ class ToolProviderController(BaseModel, ABC): # check type credential_schema = credentials_need_to_validate[credential_name] + if not credential_schema.required and credentials[credential_name] is None: + continue + if credential_schema.type in { ToolProviderCredentials.CredentialsType.SECRET_INPUT, ToolProviderCredentials.CredentialsType.TEXT_INPUT, From 4c9ef6e8301b2986653f11ffad837cf9e45f3284 Mon Sep 17 00:00:00 2001 From: Aaron Ji <127167174+DresAaron@users.noreply.github.com> Date: Thu, 26 Sep 2024 11:29:35 +0800 Subject: [PATCH 44/64] fix: update usage for Jina Embeddings v3 (#8771) --- .../model_providers/jina/jina.yaml | 43 ------------------- .../jina/text_embedding/text_embedding.py | 20 +++------ 2 files changed, 6 insertions(+), 57 deletions(-) diff --git a/api/core/model_runtime/model_providers/jina/jina.yaml b/api/core/model_runtime/model_providers/jina/jina.yaml index 4ff6ba0f22..970b22965b 100644 --- a/api/core/model_runtime/model_providers/jina/jina.yaml +++ b/api/core/model_runtime/model_providers/jina/jina.yaml @@ -67,46 +67,3 @@ model_credential_schema: required: false type: text-input default: '8192' - - variable: task - label: - zh_Hans: 下游任务 - en_US: Downstream task - placeholder: - zh_Hans: 选择将使用向量模型的下游任务。模型将返回针对该任务优化的向量。 - en_US: Select the downstream task for which 
the embeddings will be used. The model will return the optimized embeddings for that task. - required: false - type: select - options: - - value: retrieval.query - label: - en_US: retrieval.query - - value: retrieval.passage - label: - en_US: retrieval.passage - - value: separation - label: - en_US: separation - - value: classification - label: - en_US: classification - - value: text-matching - label: - en_US: text-matching - - variable: dimensions - label: - zh_Hans: 输出维度 - en_US: Output dimensions - placeholder: - zh_Hans: 输入您的输出维度 - en_US: Enter output dimensions - required: false - type: text-input - - variable: late_chunking - label: - zh_Hans: 后期分块 - en_US: Late chunking - placeholder: - zh_Hans: 应用后期分块技术来利用模型的长上下文功能来生成上下文块向量化。 - en_US: Apply the late chunking technique to leverage the model's long-context capabilities for generating contextual chunk embeddings. - required: false - type: switch diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py index c7b729c14e..b397129512 100644 --- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py @@ -28,7 +28,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): api_base: str = "https://api.jina.ai/v1" - def _to_payload(self, model: str, texts: list[str], credentials: dict) -> dict: + def _to_payload(self, model: str, texts: list[str], credentials: dict, input_type: EmbeddingInputType) -> dict: """ Parse model credentials @@ -45,18 +45,10 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): data = {"model": model, "input": [transform_jina_input_text(model, text) for text in texts]} - task = credentials.get("task") - dimensions = credentials.get("dimensions") - late_chunking = credentials.get("late_chunking") - - if task is not None: - data["task"] = task - - if dimensions is not None: - 
data["dimensions"] = int(dimensions) - - if late_chunking is not None: - data["late_chunking"] = late_chunking + # model specific parameters + if model == "jina-embeddings-v3": + # set `task` type according to input type for the best performance + data["task"] = "retrieval.query" if input_type == EmbeddingInputType.QUERY else "retrieval.passage" return data @@ -88,7 +80,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel): url = base_url + "/embeddings" headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - data = self._to_payload(model=model, texts=texts, credentials=credentials) + data = self._to_payload(model=model, texts=texts, credentials=credentials, input_type=input_type) try: response = post(url, headers=headers, data=dumps(data)) From a0b0809b1c1598db7e8762d78c6d9f6d34070168 Mon Sep 17 00:00:00 2001 From: Shenghang Tsai Date: Thu, 26 Sep 2024 11:29:53 +0800 Subject: [PATCH 45/64] Add more models for SiliconFlow (#8779) --- .../siliconflow/llm/_position.yaml | 39 ++++++++++++------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml index 43db4aed11..a3e5d0981f 100644 --- a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml @@ -1,25 +1,38 @@ -- Qwen/Qwen2.5-7B-Instruct -- Qwen/Qwen2.5-14B-Instruct -- Qwen/Qwen2.5-32B-Instruct - Qwen/Qwen2.5-72B-Instruct +- Qwen/Qwen2.5-Math-72B-Instruct +- Qwen/Qwen2.5-32B-Instruct +- Qwen/Qwen2.5-14B-Instruct +- Qwen/Qwen2.5-7B-Instruct +- Qwen/Qwen2.5-Coder-7B-Instruct +- deepseek-ai/DeepSeek-V2.5 - Qwen/Qwen2-72B-Instruct - Qwen/Qwen2-57B-A14B-Instruct - Qwen/Qwen2-7B-Instruct - Qwen/Qwen2-1.5B-Instruct -- 01-ai/Yi-1.5-34B-Chat -- 01-ai/Yi-1.5-9B-Chat-16K -- 01-ai/Yi-1.5-6B-Chat -- THUDM/glm-4-9b-chat -- deepseek-ai/DeepSeek-V2.5 - 
deepseek-ai/DeepSeek-V2-Chat - deepseek-ai/DeepSeek-Coder-V2-Instruct +- THUDM/glm-4-9b-chat +- THUDM/chatglm3-6b +- 01-ai/Yi-1.5-34B-Chat-16K +- 01-ai/Yi-1.5-9B-Chat-16K +- 01-ai/Yi-1.5-6B-Chat +- internlm/internlm2_5-20b-chat - internlm/internlm2_5-7b-chat -- google/gemma-2-27b-it -- google/gemma-2-9b-it -- meta-llama/Meta-Llama-3-70B-Instruct -- meta-llama/Meta-Llama-3-8B-Instruct - meta-llama/Meta-Llama-3.1-405B-Instruct - meta-llama/Meta-Llama-3.1-70B-Instruct - meta-llama/Meta-Llama-3.1-8B-Instruct -- mistralai/Mixtral-8x7B-Instruct-v0.1 +- meta-llama/Meta-Llama-3-70B-Instruct +- meta-llama/Meta-Llama-3-8B-Instruct +- google/gemma-2-27b-it +- google/gemma-2-9b-it - mistralai/Mistral-7B-Instruct-v0.2 +- Pro/Qwen/Qwen2-7B-Instruct +- Pro/Qwen/Qwen2-1.5B-Instruct +- Pro/THUDM/glm-4-9b-chat +- Pro/THUDM/chatglm3-6b +- Pro/01-ai/Yi-1.5-9B-Chat-16K +- Pro/01-ai/Yi-1.5-6B-Chat +- Pro/internlm/internlm2_5-7b-chat +- Pro/meta-llama/Meta-Llama-3.1-8B-Instruct +- Pro/meta-llama/Meta-Llama-3-8B-Instruct +- Pro/google/gemma-2-9b-it From d1173a69f8ecf6680b78d194f8c266cada2042ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Thu, 26 Sep 2024 13:48:06 +0800 Subject: [PATCH 46/64] fix: the Image-1X tool (#8787) --- .../tools/provider/builtin/stepfun/stepfun.py | 2 +- .../provider/builtin/stepfun/stepfun.yaml | 17 ++--------- .../provider/builtin/stepfun/tools/image.py | 13 ++------- .../provider/builtin/stepfun/tools/image.yaml | 29 ------------------- 4 files changed, 5 insertions(+), 56 deletions(-) diff --git a/api/core/tools/provider/builtin/stepfun/stepfun.py b/api/core/tools/provider/builtin/stepfun/stepfun.py index b24f730c95..239db85b11 100644 --- a/api/core/tools/provider/builtin/stepfun/stepfun.py +++ b/api/core/tools/provider/builtin/stepfun/stepfun.py @@ -16,7 +16,7 @@ class StepfunProvider(BuiltinToolProviderController): user_id="", tool_parameters={ "prompt": "cute girl, blue eyes, white hair, anime style", - "size": 
"1024x1024", + "size": "256x256", "n": 1, }, ) diff --git a/api/core/tools/provider/builtin/stepfun/stepfun.yaml b/api/core/tools/provider/builtin/stepfun/stepfun.yaml index 1f841ec369..e8139a4d7d 100644 --- a/api/core/tools/provider/builtin/stepfun/stepfun.yaml +++ b/api/core/tools/provider/builtin/stepfun/stepfun.yaml @@ -4,11 +4,9 @@ identity: label: en_US: Image-1X zh_Hans: 阶跃星辰绘画 - pt_BR: Image-1X description: en_US: Image-1X zh_Hans: 阶跃星辰绘画 - pt_BR: Image-1X icon: icon.png tags: - image @@ -20,27 +18,16 @@ credentials_for_provider: label: en_US: Stepfun API key zh_Hans: 阶跃星辰API key - pt_BR: Stepfun API key - help: - en_US: Please input your stepfun API key - zh_Hans: 请输入你的阶跃星辰 API key - pt_BR: Please input your stepfun API key placeholder: - en_US: Please input your stepfun API key + en_US: Please input your Stepfun API key zh_Hans: 请输入你的阶跃星辰 API key - pt_BR: Please input your stepfun API key + url: https://platform.stepfun.com/interface-key stepfun_base_url: type: text-input required: false label: en_US: Stepfun base URL zh_Hans: 阶跃星辰 base URL - pt_BR: Stepfun base URL - help: - en_US: Please input your Stepfun base URL - zh_Hans: 请输入你的阶跃星辰 base URL - pt_BR: Please input your Stepfun base URL placeholder: en_US: Please input your Stepfun base URL zh_Hans: 请输入你的阶跃星辰 base URL - pt_BR: Please input your Stepfun base URL diff --git a/api/core/tools/provider/builtin/stepfun/tools/image.py b/api/core/tools/provider/builtin/stepfun/tools/image.py index 0b92b122bf..eb55dae518 100644 --- a/api/core/tools/provider/builtin/stepfun/tools/image.py +++ b/api/core/tools/provider/builtin/stepfun/tools/image.py @@ -1,4 +1,3 @@ -import random from typing import Any, Union from openai import OpenAI @@ -19,7 +18,7 @@ class StepfunTool(BuiltinTool): """ invoke tools """ - base_url = self.runtime.credentials.get("stepfun_base_url", "https://api.stepfun.com") + base_url = self.runtime.credentials.get("stepfun_base_url") or "https://api.stepfun.com" base_url = str(URL(base_url) / 
"v1") client = OpenAI( @@ -28,9 +27,7 @@ class StepfunTool(BuiltinTool): ) extra_body = {} - model = tool_parameters.get("model", "step-1x-medium") - if not model: - return self.create_text_message("Please input model name") + model = "step-1x-medium" # prompt prompt = tool_parameters.get("prompt", "") if not prompt: @@ -67,9 +64,3 @@ class StepfunTool(BuiltinTool): ) ) return result - - @staticmethod - def _generate_random_id(length=8): - characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - random_id = "".join(random.choices(characters, k=length)) - return random_id diff --git a/api/core/tools/provider/builtin/stepfun/tools/image.yaml b/api/core/tools/provider/builtin/stepfun/tools/image.yaml index dcc5bd2db2..8d7c9b6586 100644 --- a/api/core/tools/provider/builtin/stepfun/tools/image.yaml +++ b/api/core/tools/provider/builtin/stepfun/tools/image.yaml @@ -29,35 +29,6 @@ parameters: pt_BR: Image prompt, you can check the official documentation of step-1x llm_description: Image prompt of step-1x you should describe the image you want to generate as a list of words as possible as detailed form: llm - - name: model - type: select - required: false - human_description: - en_US: used for selecting the model name - zh_Hans: 用于选择模型的名字 - pt_BR: used for selecting the model name - label: - en_US: Model Name - zh_Hans: 模型名字 - pt_BR: Model Name - form: form - options: - - value: step-1x-turbo - label: - en_US: turbo - zh_Hans: turbo - pt_BR: turbo - - value: step-1x-medium - label: - en_US: medium - zh_Hans: medium - pt_BR: medium - - value: step-1x-large - label: - en_US: large - zh_Hans: large - pt_BR: large - default: step-1x-medium - name: size type: select required: false From 62406991df10d6eaa721df1b56e8df957802759b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Thu, 26 Sep 2024 16:28:20 +0800 Subject: [PATCH 47/64] fix: start node input config modal raise 'variable name is required' (#8793) --- 
.../configuration/config-var/config-modal/index.tsx | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/web/app/components/app/configuration/config-var/config-modal/index.tsx b/web/app/components/app/configuration/config-var/config-modal/index.tsx index 606280653e..ac1d86c1a2 100644 --- a/web/app/components/app/configuration/config-var/config-modal/index.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/index.tsx @@ -1,6 +1,6 @@ 'use client' import type { FC } from 'react' -import React, { useCallback, useState } from 'react' +import React, { useCallback, useEffect, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import { useContext } from 'use-context-selector' import ModalFoot from '../modal-foot' @@ -40,6 +40,12 @@ const ConfigModal: FC = ({ const { t } = useTranslation() const [tempPayload, setTempPayload] = useState(payload || getNewVarInWorkflow('') as any) const { type, label, variable, options, max_length } = tempPayload + const modalRef = useRef(null) + useEffect(() => { + // To fix the first input element auto focus, then directly close modal will raise error + if (isShow) + modalRef.current?.focus() + }, [isShow]) const isStringInput = type === InputVarType.textInput || type === InputVarType.paragraph const checkVariableName = useCallback((value: string) => { @@ -135,7 +141,7 @@ const ConfigModal: FC = ({ isShow={isShow} onClose={onClose} > -
+
From 128a66f7fe8383cb30dec5e5e3896d75a225c77e Mon Sep 17 00:00:00 2001 From: cx <88480957+free-cx@users.noreply.github.com> Date: Thu, 26 Sep 2024 16:34:40 +0800 Subject: [PATCH 48/64] =?UTF-8?q?fix:=20Ollama=20modelfeature=20set=20visi?= =?UTF-8?q?on,=20and=20an=20exception=20occurred=20at=20the=E2=80=A6=20(#8?= =?UTF-8?q?783)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../model_providers/ollama/llm/llm.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py index ff732e6925..a7ea53e0e9 100644 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py @@ -364,14 +364,21 @@ class OllamaLargeLanguageModel(LargeLanguageModel): if chunk_json["done"]: # calculate num tokens - if "prompt_eval_count" in chunk_json and "eval_count" in chunk_json: - # transform usage + if "prompt_eval_count" in chunk_json: prompt_tokens = chunk_json["prompt_eval_count"] - completion_tokens = chunk_json["eval_count"] else: - # calculate num tokens - prompt_tokens = self._get_num_tokens_by_gpt2(prompt_messages[0].content) - completion_tokens = self._get_num_tokens_by_gpt2(full_text) + prompt_message_content = prompt_messages[0].content + if isinstance(prompt_message_content, str): + prompt_tokens = self._get_num_tokens_by_gpt2(prompt_message_content) + else: + content_text = "" + for message_content in prompt_message_content: + if message_content.type == PromptMessageContentType.TEXT: + message_content = cast(TextPromptMessageContent, message_content) + content_text += message_content.data + prompt_tokens = self._get_num_tokens_by_gpt2(content_text) + + completion_tokens = chunk_json.get("eval_count", self._get_num_tokens_by_gpt2(full_text)) # transform usage usage = self._calc_response_usage(model, credentials, prompt_tokens, 
completion_tokens) From 008e0efeb00faecd44877245ec48f08fda6a048f Mon Sep 17 00:00:00 2001 From: zhuhao <37029601+hwzhuhao@users.noreply.github.com> Date: Thu, 26 Sep 2024 16:36:21 +0800 Subject: [PATCH 49/64] refactor: update delete method as an abstract method (#8794) --- api/core/rag/datasource/vdb/vector_base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/core/rag/datasource/vdb/vector_base.py b/api/core/rag/datasource/vdb/vector_base.py index 1a0dc7f48b..22e191340d 100644 --- a/api/core/rag/datasource/vdb/vector_base.py +++ b/api/core/rag/datasource/vdb/vector_base.py @@ -45,6 +45,7 @@ class BaseVector(ABC): def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: raise NotImplementedError + @abstractmethod def delete(self) -> None: raise NotImplementedError From 6df14e50b2c71e2fc9b0601943d37a78f47e69c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Thu, 26 Sep 2024 17:50:36 +0800 Subject: [PATCH 50/64] fix: workflow as tool always outdated (#8798) --- web/app/components/tools/workflow-tool/configure-button.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/tools/workflow-tool/configure-button.tsx b/web/app/components/tools/workflow-tool/configure-button.tsx index d2c5142f53..6521410dae 100644 --- a/web/app/components/tools/workflow-tool/configure-button.tsx +++ b/web/app/components/tools/workflow-tool/configure-button.tsx @@ -65,7 +65,7 @@ const WorkflowToolConfigureButton = ({ else { if (item.type === 'paragraph' && param.type !== 'string') return true - if (param.type !== item.type && !(param.type === 'string' && item.type === 'paragraph')) + if (item.type === 'text-input' && param.type !== 'string') return true } } From 3d2cb25a6704ecd53b132deb4d7ea77b34a43d0d Mon Sep 17 00:00:00 2001 From: Joel Date: Thu, 26 Sep 2024 17:53:11 +0800 Subject: [PATCH 51/64] fix: change wrong company name (#8801) --- web/app/activate/page.tsx | 2 +- 
web/app/forgot-password/page.tsx | 2 +- web/app/install/page.tsx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/web/app/activate/page.tsx b/web/app/activate/page.tsx index 90874f50ce..0f18544335 100644 --- a/web/app/activate/page.tsx +++ b/web/app/activate/page.tsx @@ -22,7 +22,7 @@ const Activate = () => {
- © {new Date().getFullYear()} Dify, Inc. All rights reserved. + © {new Date().getFullYear()} LangGenius, Inc. All rights reserved.
diff --git a/web/app/forgot-password/page.tsx b/web/app/forgot-password/page.tsx index fa44d1a20c..bb46011c06 100644 --- a/web/app/forgot-password/page.tsx +++ b/web/app/forgot-password/page.tsx @@ -28,7 +28,7 @@ const ForgotPassword = () => {
{token ? : }
- © {new Date().getFullYear()} Dify, Inc. All rights reserved. + © {new Date().getFullYear()} LangGenius, Inc. All rights reserved.
diff --git a/web/app/install/page.tsx b/web/app/install/page.tsx index 9fa38dd15e..395fae34ec 100644 --- a/web/app/install/page.tsx +++ b/web/app/install/page.tsx @@ -22,7 +22,7 @@ const Install = () => {
- © {new Date().getFullYear()} Dify, Inc. All rights reserved. + © {new Date().getFullYear()} LangGenius, Inc. All rights reserved.
From 03edfbe6f530edfc4fb7781adda3852d6ab643a2 Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Thu, 26 Sep 2024 19:04:25 +0800 Subject: [PATCH 52/64] feat: add qwen to add custom model parameters (#8759) --- .../model_providers/tongyi/llm/llm.py | 102 +++++++++--------- .../model_providers/tongyi/tongyi.yaml | 43 +++++++- 2 files changed, 92 insertions(+), 53 deletions(-) diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py index f90c7f075f..3e3585b30a 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py +++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py @@ -18,7 +18,7 @@ from dashscope.common.error import ( UnsupportedModel, ) -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta +from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, ImagePromptMessageContent, @@ -35,6 +35,7 @@ from core.model_runtime.entities.model_entities import ( FetchFrom, I18nObject, ModelFeature, + ModelPropertyKey, ModelType, ParameterRule, ParameterType, @@ -97,6 +98,11 @@ class TongyiLargeLanguageModel(LargeLanguageModel): :param tools: tools for tool calling :return: """ + # Check if the model was added via get_customizable_model_schema + if self.get_customizable_model_schema(model, credentials) is not None: + # For custom models, tokens are not calculated. 
+ return 0 + if model in {"qwen-turbo-chat", "qwen-plus-chat"}: model = model.replace("-chat", "") if model == "farui-plus": @@ -537,55 +543,51 @@ class TongyiLargeLanguageModel(LargeLanguageModel): :param credentials: model credentials :return: AIModelEntity or None """ - rules = [ - ParameterRule( - name="temperature", - type=ParameterType.FLOAT, - use_template="temperature", - label=I18nObject(zh_Hans="温度", en_US="Temperature"), - ), - ParameterRule( - name="top_p", - type=ParameterType.FLOAT, - use_template="top_p", - label=I18nObject(zh_Hans="Top P", en_US="Top P"), - ), - ParameterRule( - name="top_k", - type=ParameterType.INT, - min=0, - max=99, - label=I18nObject(zh_Hans="top_k", en_US="top_k"), - ), - ParameterRule( - name="max_tokens", - type=ParameterType.INT, - min=1, - max=128000, - default=1024, - label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"), - ), - ParameterRule( - name="seed", - type=ParameterType.INT, - default=1234, - label=I18nObject(zh_Hans="随机种子", en_US="Random Seed"), - ), - ParameterRule( - name="repetition_penalty", - type=ParameterType.FLOAT, - default=1.1, - label=I18nObject(zh_Hans="重复惩罚", en_US="Repetition Penalty"), - ), - ] - - entity = AIModelEntity( + return AIModelEntity( model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + label=I18nObject(en_US=model, zh_Hans=model), model_type=ModelType.LLM, - model_properties={}, - parameter_rules=rules, + features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL] + if credentials.get("function_calling_type") == "tool_call" + else [], + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000)), + ModelPropertyKey.MODE: LLMMode.CHAT.value, + }, + parameter_rules=[ + ParameterRule( + name="temperature", + use_template="temperature", + label=I18nObject(en_US="Temperature", zh_Hans="温度"), + type=ParameterType.FLOAT, + ), + ParameterRule( 
+ name="max_tokens", + use_template="max_tokens", + default=512, + min=1, + max=int(credentials.get("max_tokens", 1024)), + label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"), + type=ParameterType.INT, + ), + ParameterRule( + name="top_p", + use_template="top_p", + label=I18nObject(en_US="Top P", zh_Hans="Top P"), + type=ParameterType.FLOAT, + ), + ParameterRule( + name="top_k", + use_template="top_k", + label=I18nObject(en_US="Top K", zh_Hans="Top K"), + type=ParameterType.FLOAT, + ), + ParameterRule( + name="frequency_penalty", + use_template="frequency_penalty", + label=I18nObject(en_US="Frequency Penalty", zh_Hans="重复惩罚"), + type=ParameterType.FLOAT, + ), + ], ) - - return entity diff --git a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml index fabe6d90e6..1a09c20fd9 100644 --- a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml +++ b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml @@ -37,14 +37,51 @@ model_credential_schema: en_US: Model Name zh_Hans: 模型名称 placeholder: - en_US: Enter full model name - zh_Hans: 输入模型全称 + en_US: Enter your model name + zh_Hans: 输入模型名称 credential_form_schemas: - variable: dashscope_api_key - required: true label: en_US: API Key type: secret-input + required: true placeholder: zh_Hans: 在此输入您的 API Key en_US: Enter your API Key + - variable: context_size + label: + zh_Hans: 模型上下文长度 + en_US: Model context size + required: true + type: text-input + default: '4096' + placeholder: + zh_Hans: 在此输入您的模型上下文长度 + en_US: Enter your Model context size + - variable: max_tokens + label: + zh_Hans: 最大 token 上限 + en_US: Upper bound for max tokens + default: '4096' + type: text-input + show_on: + - variable: __model_type + value: llm + - variable: function_calling_type + label: + en_US: Function calling + type: select + required: false + default: no_call + options: + - value: no_call + label: + en_US: Not Support + zh_Hans: 不支持 + - value: function_call + label: 
+ en_US: Support + zh_Hans: 支持 + show_on: + - variable: __model_type + value: llm From 9a4b53a212054901f9c597ac64c0eb97e461e641 Mon Sep 17 00:00:00 2001 From: AAEE86 <33052466+AAEE86@users.noreply.github.com> Date: Thu, 26 Sep 2024 19:08:59 +0800 Subject: [PATCH 53/64] feat: add stream for Gemini (#8678) --- .../google/llm/gemini-1.5-flash-8b-exp-0827.yaml | 9 +++++++++ .../google/llm/gemini-1.5-flash-exp-0827.yaml | 9 +++++++++ .../google/llm/gemini-1.5-flash-latest.yaml | 9 +++++++++ .../google/llm/gemini-1.5-pro-exp-0801.yaml | 9 +++++++++ .../google/llm/gemini-1.5-pro-exp-0827.yaml | 9 +++++++++ .../google/llm/gemini-1.5-pro-latest.yaml | 9 +++++++++ .../model_providers/google/llm/gemini-pro-vision.yaml | 9 +++++++++ .../model_providers/google/llm/gemini-pro.yaml | 9 +++++++++ 8 files changed, 72 insertions(+) diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml index bbc697e934..4e0209890a 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml @@ -32,6 +32,15 @@ parameter_rules: max: 8192 - name: response_format use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false pricing: input: '0.00' output: '0.00' diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml index c5695e5dda..faabc5e4d1 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml @@ -32,6 +32,15 @@ parameter_rules: max: 8192 - name: response_format use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. + default: false pricing: input: '0.00' output: '0.00' diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml index 24b1c5af8a..6a0344699a 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml @@ -32,6 +32,15 @@ parameter_rules: max: 8192 - name: response_format use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false pricing: input: '0.00' output: '0.00' diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml index 0a918e0d7b..97c68f7a18 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml @@ -32,6 +32,15 @@ parameter_rules: max: 8192 - name: response_format use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. + default: false pricing: input: '0.00' output: '0.00' diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml index 7452ce46e7..860e4816a1 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml @@ -32,6 +32,15 @@ parameter_rules: max: 8192 - name: response_format use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false pricing: input: '0.00' output: '0.00' diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml index b3e1ecf3af..92cd6b310d 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml @@ -32,6 +32,15 @@ parameter_rules: max: 8192 - name: response_format use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. + default: false pricing: input: '0.00' output: '0.00' diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml index 075e484e46..2d213d56ad 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml @@ -27,6 +27,15 @@ parameter_rules: default: 4096 min: 1 max: 4096 + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false pricing: input: '0.00' output: '0.00' diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml index 4e9f59e7da..e2f487c1ee 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml @@ -31,6 +31,15 @@ parameter_rules: max: 2048 - name: response_format use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. + default: false pricing: input: '0.00' output: '0.00' From 3dfbc348e3f53b26a3d385e83dea10bf0ef73736 Mon Sep 17 00:00:00 2001 From: Hash Brown Date: Thu, 26 Sep 2024 19:41:59 +0800 Subject: [PATCH 54/64] feat: improved SVG output UX (#8765) --- .../base/chat/chat/answer/index.tsx | 13 ++ web/app/components/base/markdown.tsx | 119 ++++++++++-------- web/app/components/base/svg-gallery/index.tsx | 18 ++- .../workflow/panel/chat-record/index.tsx | 6 +- 4 files changed, 95 insertions(+), 61 deletions(-) diff --git a/web/app/components/base/chat/chat/answer/index.tsx b/web/app/components/base/chat/chat/answer/index.tsx index 705cd73ddf..8184967edc 100644 --- a/web/app/components/base/chat/chat/answer/index.tsx +++ b/web/app/components/base/chat/chat/answer/index.tsx @@ -85,6 +85,19 @@ const Answer: FC = ({ getContentWidth() }, [responding]) + // Recalculate contentWidth when content changes (e.g., SVG preview/source toggle) + useEffect(() => { + if (!containerRef.current) + return + const resizeObserver = new ResizeObserver(() => { + getContentWidth() + }) + resizeObserver.observe(containerRef.current) + return () => { + resizeObserver.disconnect() + } + }, []) + return (
diff --git a/web/app/components/base/markdown.tsx b/web/app/components/base/markdown.tsx index 443ee3410c..39a399cc9f 100644 --- a/web/app/components/base/markdown.tsx +++ b/web/app/components/base/markdown.tsx @@ -116,59 +116,80 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props } const match = /language-(\w+)/.exec(className || '') const language = match?.[1] const languageShowName = getCorrectCapitalizationLanguageName(language || '') - let chartData = JSON.parse(String('{"title":{"text":"ECharts error - Wrong JSON format."}}').replace(/\n$/, '')) - if (language === 'echarts') { - try { - chartData = JSON.parse(String(children).replace(/\n$/, '')) + const chartData = useMemo(() => { + if (language === 'echarts') { + try { + return JSON.parse(String(children).replace(/\n$/, '')) + } + catch (error) {} } - catch (error) { - } - } + return JSON.parse('{"title":{"text":"ECharts error - Wrong JSON format."}}') + }, [language, children]) - // Use `useMemo` to ensure that `SyntaxHighlighter` only re-renders when necessary - return useMemo(() => { - return (!inline && match) - ? ( -
-
-
{languageShowName}
-
- {language === 'mermaid' && } - -
-
- {(language === 'mermaid' && isSVG) - ? () - : (language === 'echarts' - ? (
) - : (language === 'svg' - ? () - : ( - {String(children).replace(/\n$/, '')} - )))} + const renderCodeContent = useMemo(() => { + const content = String(children).replace(/\n$/, '') + if (language === 'mermaid' && isSVG) { + return + } + else if (language === 'echarts') { + return ( +
+ + +
) - : ({children}) - }, [chartData, children, className, inline, isSVG, language, languageShowName, match, props]) + } + else if (language === 'svg' && isSVG) { + return ( + + + + ) + } + else { + return ( + + {content} + + ) + } + }, [language, match, props, children, chartData, isSVG]) + + if (inline || !match) + return {children} + + return ( +
+
+
{languageShowName}
+
+ {(['mermaid', 'svg']).includes(language!) && } + +
+
+ {renderCodeContent} +
+ ) }) CodeBlock.displayName = 'CodeBlock' diff --git a/web/app/components/base/svg-gallery/index.tsx b/web/app/components/base/svg-gallery/index.tsx index 81e8e87655..4368df00e9 100644 --- a/web/app/components/base/svg-gallery/index.tsx +++ b/web/app/components/base/svg-gallery/index.tsx @@ -29,7 +29,7 @@ export const SVGRenderer = ({ content }: { content: string }) => { if (svgRef.current) { try { svgRef.current.innerHTML = '' - const draw = SVG().addTo(svgRef.current).size('100%', '100%') + const draw = SVG().addTo(svgRef.current) const parser = new DOMParser() const svgDoc = parser.parseFromString(content, 'image/svg+xml') @@ -40,13 +40,11 @@ export const SVGRenderer = ({ content }: { content: string }) => { const originalWidth = parseInt(svgElement.getAttribute('width') || '400', 10) const originalHeight = parseInt(svgElement.getAttribute('height') || '600', 10) - const scale = Math.min(windowSize.width / originalWidth, windowSize.height / originalHeight, 1) - const scaledWidth = originalWidth * scale - const scaledHeight = originalHeight * scale - draw.size(scaledWidth, scaledHeight) + draw.viewbox(0, 0, originalWidth, originalHeight) + + svgRef.current.style.width = `${Math.min(originalWidth, 298)}px` const rootElement = draw.svg(content) - rootElement.scale(scale) rootElement.click(() => { setImagePreview(svgToDataURL(svgElement as Element)) @@ -54,7 +52,7 @@ export const SVGRenderer = ({ content }: { content: string }) => { } catch (error) { if (svgRef.current) - svgRef.current.innerHTML = 'Error rendering SVG. Wait for the image content to complete.' + svgRef.current.innerHTML = 'Error rendering SVG. Wait for the image content to complete.' } } }, [content, windowSize]) @@ -62,14 +60,14 @@ export const SVGRenderer = ({ content }: { content: string }) => { return ( <>
{imagePreview && ( setImagePreview('')} />)} diff --git a/web/app/components/workflow/panel/chat-record/index.tsx b/web/app/components/workflow/panel/chat-record/index.tsx index 1bcfd6474d..16d2c304a7 100644 --- a/web/app/components/workflow/panel/chat-record/index.tsx +++ b/web/app/components/workflow/panel/chat-record/index.tsx @@ -90,7 +90,7 @@ const ChatRecord = () => { return (
{ supportCitationHitInfo: true, } as any} chatList={chatList} - chatContainerClassName='px-4' + chatContainerClassName='px-3' chatContainerInnerClassName='pt-6 w-full max-w-full mx-auto' chatFooterClassName='px-4 rounded-b-2xl' chatFooterInnerClassName='pb-4 w-full max-w-full mx-auto' @@ -129,6 +129,8 @@ const ChatRecord = () => { noChatInput allToolIcons={{}} showPromptLog + noSpacing + chatAnswerContainerInner='!pr-2' />
From 063474f4083c6e73402c8dd8b9862ec8dabd14b8 Mon Sep 17 00:00:00 2001 From: ice yao Date: Thu, 26 Sep 2024 22:21:01 +0800 Subject: [PATCH 55/64] Add llama3.2 model in fireworks provider (#8809) --- .../llm/llama-v3p2-11b-vision-instruct.yaml | 46 +++++++++++++++++++ .../fireworks/llm/llama-v3p2-1b-instruct.yaml | 46 +++++++++++++++++++ .../fireworks/llm/llama-v3p2-3b-instruct.yaml | 46 +++++++++++++++++++ .../llm/llama-v3p2-90b-vision-instruct.yaml | 46 +++++++++++++++++++ 4 files changed, 184 insertions(+) create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml create mode 100644 api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml new file mode 100644 index 0000000000..31415a24fa --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3p2-11b-vision-instruct +label: + zh_Hans: Llama 3.2 11B Vision Instruct + en_US: Llama 3.2 11B Vision Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.2' + output: '0.2' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml new file mode 100644 index 0000000000..c2fd77d256 --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3p2-1b-instruct +label: + zh_Hans: Llama 3.2 1B Instruct + en_US: Llama 3.2 1B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.1' + output: '0.1' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml new file mode 100644 index 0000000000..4b3c459c7b --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3p2-3b-instruct +label: + zh_Hans: Llama 3.2 3B Instruct + en_US: Llama 3.2 3B Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.1' + output: '0.1' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml new file mode 100644 index 0000000000..0aece7455d --- /dev/null +++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml @@ -0,0 +1,46 @@ +model: accounts/fireworks/models/llama-v3p2-90b-vision-instruct +label: + zh_Hans: Llama 3.2 90B Vision Instruct + en_US: Llama 3.2 90B Vision Instruct +model_type: llm +features: + - agent-thought + - tool-call +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. 
+ - name: max_tokens + use_template: max_tokens + - name: context_length_exceeded_behavior + default: None + label: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + help: + zh_Hans: 上下文长度超出行为 + en_US: Context Length Exceeded Behavior + type: string + options: + - None + - truncate + - error + - name: response_format + use_template: response_format +pricing: + input: '0.9' + output: '0.9' + unit: '0.000001' + currency: USD From ecc951609d4988424bfba58094d6e256ae86144e Mon Sep 17 00:00:00 2001 From: wenmeng zhou Date: Thu, 26 Sep 2024 22:32:33 +0800 Subject: [PATCH 56/64] add more detailed doc for models of qwen series (#8799) Co-authored-by: crazywoola <427733928@qq.com> --- .../model_runtime/model_providers/tongyi/llm/farui-plus.yaml | 1 + .../model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml | 1 + .../model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml | 1 + .../model_providers/tongyi/llm/qwen-coder-turbo.yaml | 1 + .../model_runtime/model_providers/tongyi/llm/qwen-long.yaml | 2 +- .../model_providers/tongyi/llm/qwen-math-plus-0816.yaml | 1 + .../model_providers/tongyi/llm/qwen-math-plus-0919.yaml | 1 + .../model_providers/tongyi/llm/qwen-math-plus-latest.yaml | 1 + .../model_providers/tongyi/llm/qwen-math-plus.yaml | 1 + .../model_providers/tongyi/llm/qwen-math-turbo-0919.yaml | 1 + .../model_providers/tongyi/llm/qwen-math-turbo-latest.yaml | 1 + .../model_providers/tongyi/llm/qwen-math-turbo.yaml | 1 + .../model_providers/tongyi/llm/qwen-max-0107.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-max-0403.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-max-0428.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-max-0919.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-max-1201.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-max-latest.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-max-longcontext.yaml | 2 ++ .../model_runtime/model_providers/tongyi/llm/qwen-max.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-plus-0206.yaml | 2 ++ 
.../model_providers/tongyi/llm/qwen-plus-0624.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-plus-0723.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-plus-0806.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-plus-0919.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-plus-chat.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-plus-latest.yaml | 2 ++ .../model_runtime/model_providers/tongyi/llm/qwen-plus.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-turbo-0206.yaml | 3 +++ .../model_providers/tongyi/llm/qwen-turbo-0624.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-turbo-0919.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-turbo-chat.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-turbo-latest.yaml | 2 ++ .../model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml | 2 ++ .../model_providers/tongyi/llm/qwen-vl-max-0201.yaml | 1 + .../model_providers/tongyi/llm/qwen-vl-max-0809.yaml | 1 + .../model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml | 1 + .../model_providers/tongyi/llm/qwen-vl-plus-0201.yaml | 1 + .../model_providers/tongyi/llm/qwen-vl-plus-0809.yaml | 1 + .../model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml | 1 + .../model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml | 1 + .../model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml | 1 + .../tongyi/text_embedding/text-embedding-v1.yaml | 1 + .../tongyi/text_embedding/text-embedding-v2.yaml | 1 + 
.../tongyi/text_embedding/text-embedding-v3.yaml | 1 + 54 files changed, 77 insertions(+), 1 deletion(-) diff --git a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml index d0ff443827..34a57d1fc0 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: farui-plus label: en_US: farui-plus diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml index d9792e71ee..64a3f33133 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-coder-turbo-0919 label: en_US: qwen-coder-turbo-0919 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml index 0b03505c45..a4c93f7047 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-coder-turbo-latest label: en_US: qwen-coder-turbo-latest diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml index 2a6c040853..ff68faed80 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml +++ 
b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-coder-turbo label: en_US: qwen-coder-turbo diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml index bad7f4f472..c3dbb3616f 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml @@ -1,4 +1,4 @@ -# model docs: https://help.aliyun.com/zh/model-studio/getting-started/models#27b2b3a15d5c6 +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-long label: en_US: qwen-long diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml index c14aee1e1e..42fe1f6862 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-math-plus-0816 label: en_US: qwen-math-plus-0816 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml index 9d74eeca3e..9b6567b8cd 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-math-plus-0919 label: en_US: qwen-math-plus-0919 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml 
b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml index b8601a969a..b2a2393b36 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-math-plus-latest label: en_US: qwen-math-plus-latest diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml index 4a948be597..63f4b7ff0a 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-math-plus label: en_US: qwen-math-plus diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml index bffe324a96..4da90eec3e 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-math-turbo-0919 label: en_US: qwen-math-turbo-0919 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml index 0747e96614..d29f8851dd 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to 
https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-math-turbo-latest label: en_US: qwen-math-turbo-latest diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml index dffb5557ff..2a8f7f725e 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-math-turbo label: en_US: qwen-math-turbo diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml index 8ae159f1bf..ef1841b517 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-max, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) model: qwen-max-0107 label: en_US: qwen-max-0107 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml index 93fb37254e..a2ea5df130 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-max-0403, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) model: qwen-max-0403 label: en_US: qwen-max-0403 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml index a5c9d49609..a467665f11 100644 --- 
a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-max-0428, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) model: qwen-max-0428 label: en_US: qwen-max-0428 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml index e4a6dae637..78661eaea0 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-max-0919, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) model: qwen-max-0919 label: en_US: qwen-max-0919 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml index 6fae8a7d38..6f4674576b 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-max, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) model: qwen-max-1201 label: en_US: qwen-max-1201 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml index 8e20968859..8b5f005473 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-max, for more details +# please refer to 
(https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) model: qwen-max-latest label: en_US: qwen-max-latest diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml index 9bc50c73fc..098494ff95 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-max, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) model: qwen-max-longcontext label: en_US: qwen-max-longcontext diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml index c6a64dc507..9d0d3f8db3 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-max, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) model: qwen-max label: en_US: qwen-max diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml index 430599300b..0b1a6f81df 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-plus-0206, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) model: qwen-plus-0206 label: en_US: qwen-plus-0206 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml index 
906995d2b9..7706005bb5 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-plus-0624, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) model: qwen-plus-0624 label: en_US: qwen-plus-0624 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml index b33e725dd0..348276fc08 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-plus-0723, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) model: qwen-plus-0723 label: en_US: qwen-plus-0723 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml index bb394fad81..29f125135e 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-plus-0806, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) model: qwen-plus-0806 label: en_US: qwen-plus-0806 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml index 118e304a97..905fa1e102 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-plus-0919, for more details +# please refer to 
(https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) model: qwen-plus-0919 label: en_US: qwen-plus-0919 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml index 761312bc38..c7a3549727 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-plus, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) model: qwen-plus-chat label: en_US: qwen-plus-chat diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml index 430872fb31..608f52c296 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-plus-latest, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) model: qwen-plus-latest label: en_US: qwen-plus-latest diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml index f3fce30209..9089e57255 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-plus, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) model: qwen-plus label: en_US: qwen-plus diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml index 2628d824fe..7ee0d44f2f 
100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml @@ -1,3 +1,6 @@ +# this model corresponds to qwen-turbo-0206, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) + model: qwen-turbo-0206 label: en_US: qwen-turbo-0206 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml index 8097459bf0..20a3f7eb64 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-turbo-0624, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) model: qwen-turbo-0624 label: en_US: qwen-turbo-0624 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml index e43beeb195..ba73dec363 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-turbo-0919, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) model: qwen-turbo-0919 label: en_US: qwen-turbo-0919 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml index c30cb7ca10..d785b7fe85 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-turbo, for more details +# please refer to 
(https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) model: qwen-turbo-chat label: en_US: qwen-turbo-chat diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml index e443d6888b..fe38a4283c 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-turbo-latest, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) model: qwen-turbo-latest label: en_US: qwen-turbo-latest diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml index 33f05967c2..215c9ec5fc 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml @@ -1,3 +1,5 @@ +# this model corresponds to qwen-turbo, for more details +# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) model: qwen-turbo label: en_US: qwen-turbo diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml index 63b6074d0d..d80168ffc3 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-vl-max-0201 label: en_US: qwen-vl-max-0201 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml index fd20377002..50e10226a5 100644 --- 
a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-vl-max-0809 label: en_US: qwen-vl-max-0809 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml index 31a9fb51bb..21b127f56c 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-vl-max label: en_US: qwen-vl-max diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml index 5f90cf48bc..03cb039d15 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-vl-plus-0201 label: en_US: qwen-vl-plus-0201 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml index 97820c0f3a..67b2b2ebdd 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-vl-plus-0809 label: en_US: qwen-vl-plus-0809 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml 
b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml index 6af36cd6f3..f55764c6c0 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen-vl-plus label: en_US: qwen-vl-plus diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml index 158e2c7ee1..ea157f42de 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2-math-1.5b-instruct label: en_US: qwen2-math-1.5b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml index e26a6923d1..37052a9233 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2-math-72b-instruct label: en_US: qwen2-math-72b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml index 589119b26e..e182f1c27f 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to 
https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2-math-7b-instruct label: en_US: qwen2-math-7b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml index dd608fbf76..9e75ccc1f2 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2.5-0.5b-instruct label: en_US: qwen2.5-0.5b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml index 08237b3958..67c9d31243 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2.5-1.5b-instruct label: en_US: qwen2.5-1.5b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml index 640b019703..2a38be921c 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2.5-14b-instruct label: en_US: qwen2.5-14b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml index 3a90ca7532..e6e4fbf978 100644 --- 
a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2.5-32b-instruct label: en_US: qwen2.5-32b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml index b79755eb9b..8f250379a7 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2.5-3b-instruct label: en_US: qwen2.5-3b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml index e9dd51a341..bb3cdd6141 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2.5-72b-instruct label: en_US: qwen2.5-72b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml index 04f26cf5fe..fdcd3d4275 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2.5-7b-instruct label: en_US: qwen2.5-7b-instruct diff --git 
a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml index 04f26cf5fe..fdcd3d4275 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models model: qwen2.5-7b-instruct label: en_US: qwen2.5-7b-instruct diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml index f4303c53d3..52e35d8b50 100644 --- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml +++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw model: text-embedding-v1 model_type: text-embedding model_properties: diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml index f6be3544ed..5bb6a8f424 100644 --- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml +++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw model: text-embedding-v2 model_type: text-embedding model_properties: diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml index 171a379ee2..d8af0e2b63 100644 --- 
a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml +++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml @@ -1,3 +1,4 @@ +# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw model: text-embedding-v3 model_type: text-embedding model_properties: From e5efd09ebbd3e327efdc4c96075a4723c17927a0 Mon Sep 17 00:00:00 2001 From: CXwudi Date: Thu, 26 Sep 2024 21:14:33 -0400 Subject: [PATCH 57/64] chore: massive update of the Gemini models based on latest documentation (#8822) --- .../google/llm/gemini-1.5-flash-001.yaml | 48 +++++++++++++++++++ .../google/llm/gemini-1.5-flash-002.yaml | 48 +++++++++++++++++++ .../llm/gemini-1.5-flash-8b-exp-0924.yaml | 48 +++++++++++++++++++ .../google/llm/gemini-1.5-flash-latest.yaml | 2 +- .../google/llm/gemini-1.5-flash.yaml | 48 +++++++++++++++++++ .../google/llm/gemini-1.5-pro-001.yaml | 48 +++++++++++++++++++ .../google/llm/gemini-1.5-pro-002.yaml | 48 +++++++++++++++++++ .../google/llm/gemini-1.5-pro-latest.yaml | 2 +- .../google/llm/gemini-1.5-pro.yaml | 48 +++++++++++++++++++ 9 files changed, 338 insertions(+), 2 deletions(-) create mode 100644 api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml create mode 100644 api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml create mode 100644 api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml create mode 100644 api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml create mode 100644 api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml create mode 100644 api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml create mode 100644 api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml 
b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml new file mode 100644 index 0000000000..d84e9937e0 --- /dev/null +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml @@ -0,0 +1,48 @@ +model: gemini-1.5-flash-001 +label: + en_US: Gemini 1.5 Flash 001 +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + required: false + - name: max_tokens_to_sample + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 + - name: response_format + use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml new file mode 100644 index 0000000000..2ff70564b2 --- /dev/null +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml @@ -0,0 +1,48 @@ +model: gemini-1.5-flash-002 +label: + en_US: Gemini 1.5 Flash 002 +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + required: false + - name: max_tokens_to_sample + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 + - name: response_format + use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml new file mode 100644 index 0000000000..2aea8149f4 --- /dev/null +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml @@ -0,0 +1,48 @@ +model: gemini-1.5-flash-8b-exp-0924 +label: + en_US: Gemini 1.5 Flash 8B 0924 +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + required: false + - name: max_tokens_to_sample + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 + - name: response_format + use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml index 6a0344699a..a22fcca941 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml @@ -1,6 +1,6 @@ model: gemini-1.5-flash-latest label: - en_US: Gemini 1.5 Flash + en_US: Gemini 1.5 Flash Latest model_type: llm features: - agent-thought diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml new file mode 100644 index 0000000000..dfd55c3a94 --- /dev/null +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml @@ -0,0 +1,48 @@ +model: gemini-1.5-flash +label: + en_US: Gemini 1.5 Flash +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 1048576 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + required: false + - name: max_tokens_to_sample + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 + - name: response_format + use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml new file mode 100644 index 0000000000..a1feff171d --- /dev/null +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml @@ -0,0 +1,48 @@ +model: gemini-1.5-pro-001 +label: + en_US: Gemini 1.5 Pro 001 +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 2097152 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + required: false + - name: max_tokens_to_sample + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 + - name: response_format + use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml new file mode 100644 index 0000000000..9ae07a06c5 --- /dev/null +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml @@ -0,0 +1,48 @@ +model: gemini-1.5-pro-002 +label: + en_US: Gemini 1.5 Pro 002 +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 2097152 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + required: false + - name: max_tokens_to_sample + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 + - name: response_format + use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml index 92cd6b310d..d1bf7d269d 100644 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml @@ -1,6 +1,6 @@ model: gemini-1.5-pro-latest label: - en_US: Gemini 1.5 Pro + en_US: Gemini 1.5 Pro Latest model_type: llm features: - agent-thought diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml new file mode 100644 index 0000000000..bdd70b34a2 --- /dev/null +++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml @@ -0,0 +1,48 @@ +model: gemini-1.5-pro +label: + en_US: Gemini 1.5 Pro +model_type: llm +features: + - agent-thought + - vision + - tool-call + - stream-tool-call +model_properties: + mode: chat + context_size: 2097152 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: top_k + label: + zh_Hans: 取样数量 + en_US: Top k + type: int + help: + zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 + en_US: Only sample from the top K options for each subsequent token. + required: false + - name: max_tokens_to_sample + use_template: max_tokens + required: true + default: 8192 + min: 1 + max: 8192 + - name: response_format + use_template: response_format + - name: stream + label: + zh_Hans: 流式输出 + en_US: Stream + type: boolean + help: + zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 + en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
+ default: false +pricing: + input: '0.00' + output: '0.00' + unit: '0.000001' + currency: USD From a36117e12d06707e63de5b8dc51d97bf060b4e7c Mon Sep 17 00:00:00 2001 From: Shai Perednik Date: Thu, 26 Sep 2024 21:15:33 -0400 Subject: [PATCH 58/64] Updated the YouTube channel to Dify's (#8817) --- api/core/tools/provider/builtin/youtube/youtube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/tools/provider/builtin/youtube/youtube.py b/api/core/tools/provider/builtin/youtube/youtube.py index aad876491c..07e430bcbf 100644 --- a/api/core/tools/provider/builtin/youtube/youtube.py +++ b/api/core/tools/provider/builtin/youtube/youtube.py @@ -13,7 +13,7 @@ class YahooFinanceProvider(BuiltinToolProviderController): ).invoke( user_id="", tool_parameters={ - "channel": "TOKYO GIRLS COLLECTION", + "channel": "UC2JZCsZSOudXA08cMMRCL9g", "start_date": "2020-01-01", "end_date": "2024-12-31", }, From 6fbaabc1bc4b8b7830d55bac7609afdd703242f9 Mon Sep 17 00:00:00 2001 From: zhuhao <37029601+hwzhuhao@users.noreply.github.com> Date: Fri, 27 Sep 2024 11:13:29 +0800 Subject: [PATCH 59/64] feat: add pgvecto-rs and analyticdb in docker/.env.example (#8823) --- docker/.env.example | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/docker/.env.example b/docker/.env.example index f7479791ce..d43c3edc7e 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -346,7 +346,7 @@ VOLCENGINE_TOS_REGION=your-region # ------------------------------ # The type of vector store to use. -# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`. +# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, ``chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `analyticdb`. VECTOR_STORE=weaviate # The Weaviate endpoint URL. 
Only available when VECTOR_STORE is `weaviate`. @@ -385,13 +385,30 @@ MYSCALE_PASSWORD= MYSCALE_DATABASE=dify MYSCALE_FTS_PARAMS= -# pgvector configurations, only available when VECTOR_STORE is `pgvecto-rs or pgvector` +# pgvector configurations, only available when VECTOR_STORE is `pgvector` PGVECTOR_HOST=pgvector PGVECTOR_PORT=5432 PGVECTOR_USER=postgres PGVECTOR_PASSWORD=difyai123456 PGVECTOR_DATABASE=dify +# pgvecto-rs configurations, only available when VECTOR_STORE is `pgvecto-rs` +PGVECTO_RS_HOST=pgvecto-rs +PGVECTO_RS_PORT=5432 +PGVECTO_RS_USER=postgres +PGVECTO_RS_PASSWORD=difyai123456 +PGVECTO_RS_DATABASE=dify + +# analyticdb configurations, only available when VECTOR_STORE is `analyticdb` +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_REGION_ID=cn-hangzhou +ANALYTICDB_INSTANCE_ID=gp-ab123456 +ANALYTICDB_ACCOUNT=testaccount +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE=dify +ANALYTICDB_NAMESPACE_PASSWORD=difypassword + # TiDB vector configurations, only available when VECTOR_STORE is `tidb` TIDB_VECTOR_HOST=tidb TIDB_VECTOR_PORT=4000 From d6b9587a979573fc22b380a13b80770883fc08e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Fri, 27 Sep 2024 11:13:40 +0800 Subject: [PATCH 60/64] fix: close log status option raise error (#8826) --- web/app/components/app/log/filter.tsx | 2 ++ web/app/components/app/workflow-log/filter.tsx | 2 ++ 2 files changed, 4 insertions(+) diff --git a/web/app/components/app/log/filter.tsx b/web/app/components/app/log/filter.tsx index 0552b44d16..b8d7ca5a36 100644 --- a/web/app/components/app/log/filter.tsx +++ b/web/app/components/app/log/filter.tsx @@ -55,6 +55,8 @@ const Filter: FC = ({ isChatMode, appId, queryParams, setQueryPara className='!w-[300px]' onSelect={ (item) => { + if (!item.value) + return setQueryParams({ ...queryParams, annotation_status: item.value as string }) } } diff --git a/web/app/components/app/workflow-log/filter.tsx 
b/web/app/components/app/workflow-log/filter.tsx index d239c39d2c..58b5252c07 100644 --- a/web/app/components/app/workflow-log/filter.tsx +++ b/web/app/components/app/workflow-log/filter.tsx @@ -23,6 +23,8 @@ const Filter: FC = ({ queryParams, setQueryParams }: IFilterProps) className='!min-w-[100px]' onSelect={ (item) => { + if (!item.value) + return setQueryParams({ ...queryParams, status: item.value as string }) } } From 4c1063e1c589d239df7287350464c4a7d729cab2 Mon Sep 17 00:00:00 2001 From: 8bitpd <51897400+lpdink@users.noreply.github.com> Date: Fri, 27 Sep 2024 12:05:21 +0800 Subject: [PATCH 61/64] fix: AnalyticdbVector retrieval scores (#8803) --- .../vdb/analyticdb/analyticdb_vector.py | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py index 612542dab1..6dcd98dcfd 100644 --- a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py +++ b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py @@ -40,19 +40,8 @@ class AnalyticdbConfig(BaseModel): class AnalyticdbVector(BaseVector): - _instance = None - _init = False - - def __new__(cls, *args, **kwargs): - if cls._instance is None: - cls._instance = super().__new__(cls) - return cls._instance - def __init__(self, collection_name: str, config: AnalyticdbConfig): - # collection_name must be updated every time self._collection_name = collection_name.lower() - if AnalyticdbVector._init: - return try: from alibabacloud_gpdb20160503.client import Client from alibabacloud_tea_openapi import models as open_api_models @@ -62,7 +51,6 @@ class AnalyticdbVector(BaseVector): self._client_config = open_api_models.Config(user_agent="dify", **config.to_analyticdb_client_params()) self._client = Client(self._client_config) self._initialize() - AnalyticdbVector._init = True def _initialize(self) -> None: cache_key = f"vector_indexing_{self.config.instance_id}" @@ 
-257,11 +245,14 @@ class AnalyticdbVector(BaseVector): documents = [] for match in response.body.matches.match: if match.score > score_threshold: + metadata = json.loads(match.metadata.get("metadata_")) + metadata["score"] = match.score doc = Document( page_content=match.metadata.get("page_content"), - metadata=json.loads(match.metadata.get("metadata_")), + metadata=metadata, ) documents.append(doc) + documents = sorted(documents, key=lambda x: x.metadata["score"], reverse=True) return documents def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: @@ -286,12 +277,14 @@ class AnalyticdbVector(BaseVector): for match in response.body.matches.match: if match.score > score_threshold: metadata = json.loads(match.metadata.get("metadata_")) + metadata["score"] = match.score doc = Document( page_content=match.metadata.get("page_content"), vector=match.metadata.get("vector"), metadata=metadata, ) documents.append(doc) + documents = sorted(documents, key=lambda x: x.metadata["score"], reverse=True) return documents def delete(self) -> None: From 29275c7447c4afbe2a92fe23e31fbebf79b1541e Mon Sep 17 00:00:00 2001 From: zhuhao <37029601+hwzhuhao@users.noreply.github.com> Date: Fri, 27 Sep 2024 12:11:56 +0800 Subject: [PATCH 62/64] feat: deprecate mistral model for siliconflow (#8828) --- .../siliconflow/llm/mistral-7b-instruct-v0.2.yaml | 1 + .../siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml index 27664eab6c..89fb153ba0 100644 --- a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml +++ b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml @@ -28,3 +28,4 @@ pricing: output: '0' unit: '0.000001' currency: RMB +deprecated: true diff --git 
a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml index fd7aada428..2785e7496f 100644 --- a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml +++ b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml @@ -28,3 +28,4 @@ pricing: output: '1.26' unit: '0.000001' currency: RMB +deprecated: true From bb781764b829b83471a0546cbced7c842e826231 Mon Sep 17 00:00:00 2001 From: HowardChan Date: Fri, 27 Sep 2024 12:13:00 +0800 Subject: [PATCH 63/64] Add Llama3.2 models in Groq provider (#8831) --- .../groq/llm/llama-3.2-11b-text-preview.yaml | 25 +++++++++++++++++++ .../groq/llm/llama-3.2-1b-preview.yaml | 25 +++++++++++++++++++ .../groq/llm/llama-3.2-3b-preview.yaml | 25 +++++++++++++++++++ .../groq/llm/llama-3.2-90b-text-preview.yaml | 25 +++++++++++++++++++ 4 files changed, 100 insertions(+) create mode 100644 api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml create mode 100644 api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml create mode 100644 api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml create mode 100644 api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml new file mode 100644 index 0000000000..019d453723 --- /dev/null +++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml @@ -0,0 +1,25 @@ +model: llama-3.2-11b-text-preview +label: + zh_Hans: Llama 3.2 11B Text (Preview) + en_US: Llama 3.2 11B Text (Preview) +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: 
temperature + - name: top_p + use_template: top_p + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 8192 +pricing: + input: '0.05' + output: '0.1' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml new file mode 100644 index 0000000000..a44e4ff508 --- /dev/null +++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml @@ -0,0 +1,25 @@ +model: llama-3.2-1b-preview +label: + zh_Hans: Llama 3.2 1B Text (Preview) + en_US: Llama 3.2 1B Text (Preview) +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 8192 +pricing: + input: '0.05' + output: '0.1' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml new file mode 100644 index 0000000000..f2fdd0a05e --- /dev/null +++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml @@ -0,0 +1,25 @@ +model: llama-3.2-3b-preview +label: + zh_Hans: Llama 3.2 3B Text (Preview) + en_US: Llama 3.2 3B Text (Preview) +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 8192 +pricing: + input: '0.05' + output: '0.1' + unit: '0.000001' + currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml new 
file mode 100644 index 0000000000..3b34e7c079 --- /dev/null +++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml @@ -0,0 +1,25 @@ +model: llama-3.2-90b-text-preview +label: + zh_Hans: Llama 3.2 90B Text (Preview) + en_US: Llama 3.2 90B Text (Preview) +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 131072 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 8192 +pricing: + input: '0.05' + output: '0.1' + unit: '0.000001' + currency: USD From 0603359e2dea807f8cac2cfe5ac68aeb081627e4 Mon Sep 17 00:00:00 2001 From: CXwudi Date: Fri, 27 Sep 2024 01:49:03 -0400 Subject: [PATCH 64/64] fix: delete harm catalog settings for gemini (#8829) --- .../model_providers/google/llm/llm.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/api/core/model_runtime/model_providers/google/llm/llm.py b/api/core/model_runtime/model_providers/google/llm/llm.py index 3fc6787a44..e686ad08d9 100644 --- a/api/core/model_runtime/model_providers/google/llm/llm.py +++ b/api/core/model_runtime/model_providers/google/llm/llm.py @@ -9,8 +9,8 @@ import google.ai.generativelanguage as glm import google.generativeai as genai import requests from google.api_core import exceptions -from google.generativeai import client -from google.generativeai.types import ContentType, GenerateContentResponse, HarmBlockThreshold, HarmCategory +from google.generativeai.client import _ClientManager +from google.generativeai.types import ContentType, GenerateContentResponse from google.generativeai.types.content_types import to_part from PIL import Image @@ -200,24 +200,16 @@ class GoogleLargeLanguageModel(LargeLanguageModel): history.append(content) # Create a new ClientManager with tenant's API key - new_client_manager = client._ClientManager() + new_client_manager = 
_ClientManager() new_client_manager.configure(api_key=credentials["google_api_key"]) new_custom_client = new_client_manager.make_client("generative") google_model._client = new_custom_client - safety_settings = { - HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE, - HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE, - HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE, - HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE, - } - response = google_model.generate_content( contents=history, generation_config=genai.types.GenerationConfig(**config_kwargs), stream=stream, - safety_settings=safety_settings, tools=self._convert_tools_to_glm_tool(tools) if tools else None, request_options={"timeout": 600}, )