mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git
synced 2025-08-15 14:25:53 +08:00

Merge branch 'feat/support-remove-first-and-remove-last-in-variable-assigner' into deploy/dev
This commit is contained in commit 39c651edb3.
@@ -34,4 +34,4 @@ if you see such an error message when you open this project in Codespaces:

A simple workaround is to change the `/signin` endpoint to another one, sign in with your GitHub account, close the tab, and then change it back to the `/signin` endpoint. Everything will then work fine.

The reason is that the `/signin` endpoint is not allowed in Codespaces; details can be found [here](https://github.com/orgs/community/discussions/5204).
@@ -2,7 +2,7 @@
// README at: https://github.com/devcontainers/templates/tree/main/src/anaconda
{
  "name": "Python 3.12",
  "build": {
    "context": "..",
    "dockerfile": "Dockerfile"
  },
@@ -1,3 +1,3 @@
This file is copied into the container along with environment.yml* from the parent
folder. This file is included to prevent the Dockerfile COPY instruction from
failing if no environment.yml is found.
@@ -5,18 +5,35 @@ root = true

# Unix-style newlines with a newline ending every file
[*]
+charset = utf-8
end_of_line = lf
insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.py]
+indent_size = 4
+indent_style = space
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+
+[*.toml]
+indent_size = 4
+indent_style = space
+
+# Markdown and MDX are whitespace sensitive languages.
+# Do not remove trailing spaces.
+[*.{md,mdx}]
+trim_trailing_whitespace = false

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,tsx}]
-charset = utf-8
indent_style = space
indent_size = 2

-# Matches the exact files either package.json or .travis.yml
-[{package.json,.travis.yml}]
+# Matches the exact files package.json
+[package.json]
indent_style = space
indent_size = 2
.gitattributes (vendored, 2 lines changed)
@@ -1,5 +1,5 @@
# Ensure that .sh scripts use LF as line separator, even if they are checked out
# to a Windows (NTFS) file system by a user of Docker for Windows.
# These .sh scripts will be run from the container after `docker compose up -d`.
# If they appear to be CRLF style, Dash in the container will fail to execute
# them.
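For reference, the attribute rule these comments describe is usually a single line; a sketch of the typical form (the hunk shown here does not include the actual pattern):

```
# force LF endings for shell scripts, regardless of checkout platform
*.sh text eol=lf
```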
.github/linters/editorconfig-checker.json (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
+{
+  "Verbose": false,
+  "Debug": false,
+  "IgnoreDefaults": false,
+  "SpacesAfterTabs": false,
+  "NoColor": false,
+  "Exclude": [
+    "^web/public/vs/",
+    "^web/public/pdf.worker.min.mjs$",
+    "web/app/components/base/icons/src/vender/"
+  ],
+  "AllowedContentTypes": [],
+  "PassedFiles": [],
+  "Disable": {
+    "EndOfLine": false,
+    "Indentation": false,
+    "IndentSize": true,
+    "InsertFinalNewline": false,
+    "TrimTrailingWhitespace": false,
+    "MaxLineLength": false
+  }
+}
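To run the same check locally before pushing, the configuration can be passed to the checker directly; a sketch, assuming the editorconfig-checker binary is installed and accepts a config path as described in its documentation:

```bash
# validate the working tree against .editorconfig using the CI configuration
editorconfig-checker --config .github/linters/editorconfig-checker.json
```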
.github/workflows/style.yml (vendored, 17 lines changed)
@@ -9,6 +9,12 @@ concurrency:
  group: style-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

+permissions:
+  checks: write
+  statuses: write
+  contents: read
+
jobs:
  python-style:
    name: Python Style
@@ -163,3 +169,14 @@ jobs:
          VALIDATE_DOCKERFILE_HADOLINT: true
          VALIDATE_XML: true
          VALIDATE_YAML: true
+
+      - name: EditorConfig checks
+        uses: super-linter/super-linter/slim@v7
+        env:
+          DEFAULT_BRANCH: main
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          IGNORE_GENERATED_FILES: true
+          IGNORE_GITIGNORED_FILES: true
+          # EditorConfig validation
+          VALIDATE_EDITORCONFIG: true
+          EDITORCONFIG_FILE_NAME: editorconfig-checker.json
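This job can be approximated locally with super-linter's documented RUN_LOCAL mode; an illustrative invocation (image tag and mount path follow super-linter's docs, so verify them against the pinned v7 slim variant):

```bash
docker run --rm \
  -e RUN_LOCAL=true \
  -e VALIDATE_EDITORCONFIG=true \
  -e EDITORCONFIG_FILE_NAME=editorconfig-checker.json \
  -v "$PWD":/tmp/lint \
  ghcr.io/super-linter/super-linter:slim-v7
```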
@@ -90,4 +90,4 @@ We recommend reviewing this document carefully before proceeding with the configuration

Feel free to reach out if you run into any problems during the setup process.

## Getting Help

If you ever get stuck or have an urgent question while contributing, just send us your queries via the related GitHub issue, or join our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.
@@ -90,4 +90,4 @@ We recommend reviewing this document carefully before proceeding with the configuration

Feel free to contact us if you encounter any problems during the configuration process.

## Getting Help

If you ever get stuck or have an urgent question while contributing, simply send us your questions via the related GitHub issue, or join our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.
@@ -90,4 +90,4 @@ Don't forget to link an existing issue or open a new one in your PR description

Feel free to reach out at any time if you run into problems during the setup process.

## Getting Help

If you get stuck or have an urgent question while contributing, send us your questions via the related GitHub issue, or join our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.
@@ -90,4 +90,4 @@ We recommend reviewing this document carefully before proceeding with the configuration

Feel free to get in touch if you encounter any problems during the setup process.

## Getting Help

If you get stuck or have an urgent question while contributing, simply send your questions through the related GitHub issue, or join our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.
@@ -90,4 +90,4 @@ We recommend reviewing this document carefully before proceeding with the setup, as

Feel free to contact us if you run into any problems during the setup process.

## Getting Help

If you get stuck or have a burning question while contributing, just send us your questions via the related GitHub issue, or join our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.
README_SI.md (518 lines changed)
@@ -1,259 +1,259 @@

<p align="center">
  📌 <a href="https://dify.ai/blog/introducing-dify-workflow-file-upload-a-demo-on-ai-podcast">Introducing Dify Workflow File Upload: Recreate the Google NotebookLM Podcast</a>
</p>

<p align="center">
  <a href="https://cloud.dify.ai">Dify Cloud</a> ·
  <a href="https://docs.dify.ai/getting-started/install-self-hosted">Self-hosting</a> ·
  <a href="https://docs.dify.ai">Documentation</a> ·
  <a href="https://dify.ai/pricing">Overview of Dify product offerings</a>
</p>

<p align="center">
  <a href="https://dify.ai" target="_blank">
    <img alt="Static Badge" src="https://img.shields.io/badge/Product-F04438"></a>
  <a href="https://dify.ai/pricing" target="_blank">
    <img alt="Static Badge" src="https://img.shields.io/badge/free-pricing?logo=free&color=%20%23155EEF&label=pricing&labelColor=%20%23528bff"></a>
  <a href="https://discord.gg/FngNHpbcY7" target="_blank">
    <img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
        alt="chat on Discord"></a>
  <a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
        alt="follow on X(Twitter)"></a>
  <a href="https://www.linkedin.com/company/langgenius/" target="_blank">
    <img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
        alt="follow on LinkedIn"></a>
  <a href="https://hub.docker.com/u/langgenius" target="_blank">
    <img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
  <a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
    <img alt="Commits last month" src="https://img.shields.io/github/commit-activity/m/langgenius/dify?labelColor=%20%2332b583&color=%20%2312b76a"></a>
  <a href="https://github.com/langgenius/dify/" target="_blank">
    <img alt="Issues closed" src="https://img.shields.io/github/issues-search?query=repo%3Alanggenius%2Fdify%20is%3Aclosed&label=issues%20closed&labelColor=%20%237d89b0&color=%20%235d6b98"></a>
  <a href="https://github.com/langgenius/dify/discussions/" target="_blank">
    <img alt="Discussion posts" src="https://img.shields.io/github/discussions/langgenius/dify?labelColor=%20%239b8afb&color=%20%237a5af8"></a>
</p>

<p align="center">
  <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
  <a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
  <a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
  <a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
  <a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
  <a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
  <a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
  <a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
  <a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
  <a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
  <a href="./README_SI.md"><img alt="README Slovenščina" src="https://img.shields.io/badge/Sloven%C5%A1%C4%8Dina-d9d9d9"></a>
  <a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

Dify is an open-source LLM app development platform. Its intuitive interface combines agentic AI workflow, RAG pipeline, agent capabilities, model management, observability features, and more, letting you quickly move from prototype to production.

## Quick start

> Before installing Dify, make sure your machine meets the following minimum system requirements:
>
> - CPU >= 2 Core
> - RAM >= 4 GiB

</br>

The easiest way to start the Dify server is via docker compose. Before running Dify with the following commands, make sure Docker and Docker Compose are installed on your machine:

```bash
cd dify
cd docker
cp .env.example .env
docker compose up -d
```

After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process.

#### Getting help

Please refer to our [FAQ](https://docs.dify.ai/getting-started/install-self-hosted/faqs) if you run into problems setting up Dify. If you are still having trouble, reach out to [the community or us](#community--contact).

> If you would like to contribute to Dify or do additional development, refer to our guide to [deploying from source code](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)

## Key features

**1. Workflow**:
Build and test powerful AI workflows on a visual canvas, leveraging all of the following features and more.

https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa

**2. Comprehensive model support**:
Seamless integration with hundreds of proprietary/open-source LLMs from dozens of inference providers and self-hosted solutions, covering GPT, Mistral, Llama3, and any OpenAI-API-compatible models. The full list of supported model providers can be found [here](https://docs.dify.ai/getting-started/readme/model-providers).

**3. Prompt IDE**:
An intuitive interface for crafting prompts, comparing model performance, and adding additional features such as text-to-speech to a chat-based app.

**4. RAG Pipeline**:
Extensive RAG capabilities covering everything from document ingestion to retrieval, with support for text extraction from PDFs, PPTs, and other common document formats.

**5. Agent capabilities**:
You can define agents based on LLM function calling or ReAct, and add pre-built or custom tools for the agent. Dify provides 50+ built-in tools for AI agents, such as Google Search, DALL·E, Stable Diffusion, and WolframAlpha.

**6. LLMOps**:
Monitor and analyze application logs and performance over time. You can continuously improve prompts, datasets, and models based on production data and annotations.

**7. Backend-as-a-Service**:
All of Dify's offerings come with corresponding APIs, so you can effortlessly integrate Dify into your own business logic.

## Feature comparison

<table style="width: 100%;">
  <tr>
    <th align="center">Feature</th>
    <th align="center">Dify.AI</th>
    <th align="center">LangChain</th>
    <th align="center">Flowise</th>
    <th align="center">OpenAI Assistants API</th>
  </tr>
  <tr>
    <td align="center">Programming approach</td>
    <td align="center">API + app-oriented</td>
    <td align="center">Python code</td>
    <td align="center">App-oriented</td>
    <td align="center">API-oriented</td>
  </tr>
  <tr>
    <td align="center">Supported LLMs</td>
    <td align="center">Rich variety</td>
    <td align="center">Rich variety</td>
    <td align="center">Rich variety</td>
    <td align="center">OpenAI only</td>
  </tr>
  <tr>
    <td align="center">RAG engine</td>
    <td align="center">✅</td>
    <td align="center">✅</td>
    <td align="center">✅</td>
    <td align="center">✅</td>
  </tr>
  <tr>
    <td align="center">Agent</td>
    <td align="center">✅</td>
    <td align="center">✅</td>
    <td align="center">❌</td>
    <td align="center">✅</td>
  </tr>
  <tr>
    <td align="center">Workflow</td>
    <td align="center">✅</td>
    <td align="center">❌</td>
    <td align="center">✅</td>
    <td align="center">❌</td>
  </tr>
  <tr>
    <td align="center">Observability</td>
    <td align="center">✅</td>
    <td align="center">✅</td>
    <td align="center">❌</td>
    <td align="center">❌</td>
  </tr>
  <tr>
    <td align="center">Enterprise features (SSO/access control)</td>
    <td align="center">✅</td>
    <td align="center">❌</td>
    <td align="center">❌</td>
    <td align="center">❌</td>
  </tr>
  <tr>
    <td align="center">Local deployment</td>
    <td align="center">✅</td>
    <td align="center">✅</td>
    <td align="center">✅</td>
    <td align="center">❌</td>
  </tr>
</table>

## Using Dify

- **Cloud </br>**
  We host a Dify Cloud service for anyone to try with zero setup. It provides all the capabilities of the self-deployed version, and includes 200 free GPT-4 calls in the sandbox plan.

- **Self-hosting Dify Community Edition</br>**
  Quickly get Dify running in your environment with this [starter guide](#quick-start). Use our [documentation](https://docs.dify.ai) for further references and more in-depth instructions.

- **Dify for enterprises/organizations</br>**
  We provide additional enterprise-centric features. Log your questions through this chatbot or send us an email to discuss enterprise needs. </br>
  > For startups and small businesses using AWS, check out Dify Premium on AWS Marketplace and deploy it to your own AWS VPC with one click. It's an affordable AMI offering with the option to create apps with custom logo and branding.

## Staying ahead

Star Dify on GitHub and be instantly notified of new releases.

## Advanced setup

If you need to customize the configuration, please refer to the comments in our .env.example file and update the corresponding values in your .env file. Additionally, you may need to adjust the docker-compose.yaml file itself, for example changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, re-run docker-compose up -d. You can find the full list of available environment variables here.
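A typical sequence to apply such changes looks like this (a sketch following the quick-start layout above):

```bash
cd docker
# pick up edits to .env / docker-compose.yaml
docker compose down
docker compose up -d
```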
If you'd like to configure a highly available setup, community-contributed Helm Charts and YAML files are available that allow Dify to be deployed on Kubernetes.

- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [YAML file by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
- [YAML file by @wyy-holding](https://github.com/wyy-holding/dify-k8s)

#### Using Terraform for deployment

Deploy Dify to a cloud platform with a single click using [terraform](https://www.terraform.io/)

##### Azure Global

- [Azure Terraform by @nikawang](https://github.com/nikawang/dify-azure-terraform)

##### Google Cloud

- [Google Cloud Terraform by @sotazum](https://github.com/DeNA/dify-google-cloud-terraform)

#### Using AWS CDK for deployment

Deploy Dify to AWS using [CDK](https://aws.amazon.com/cdk/)

##### AWS

- [AWS CDK by @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws)

## Contributing

For those who'd like to contribute code, see our contribution guide. At the same time, please consider supporting Dify by sharing it on social media and at events and conferences.

> We are looking for contributors to help translate Dify into languages other than Mandarin or English. If you are interested in helping, please see the i18n README for more information, and leave us a comment in the `global-users` channel of our Discord community server.

## Community & contact

* [GitHub Discussions](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
* [GitHub Issues](https://github.com/langgenius/dify/issues). Best for: bugs you encounter using Dify.AI, and feature proposals. See our [contribution guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [X(Twitter)](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.

**Contributors**

<a href="https://github.com/langgenius/dify/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=langgenius/dify" />
</a>

## Star history

[](https://star-history.com/#langgenius/dify&Date)

## Security disclosure

To protect your privacy, please avoid posting security issues on GitHub. Instead, send your questions to security@dify.ai and we will provide you with a more detailed answer.

## License

This repository is available under the [Dify Open Source License](LICENSE), which is essentially Apache 2.0 with a few additional restrictions.
@@ -16,4 +16,4 @@ logs
.ruff_cache

# venv
.venv
@@ -52,7 +52,6 @@ def initialize_extensions(app: DifyApp):
        ext_mail,
        ext_migrate,
        ext_otel,
-        ext_otel_patch,
        ext_proxy_fix,
        ext_redis,
        ext_repositories,
@@ -85,7 +84,6 @@ def initialize_extensions(app: DifyApp):
        ext_proxy_fix,
        ext_blueprints,
        ext_commands,
-        ext_otel_patch,  # Apply patch before initializing OpenTelemetry
        ext_otel,
    ]
    for ext in extensions:
api/commands.py (337 lines changed)
@@ -17,6 +17,7 @@ from core.rag.models.document import Document
from events.app_event import app_was_created
from extensions.ext_database import db
from extensions.ext_redis import redis_client
+from extensions.ext_storage import storage
from libs.helper import email as email_validate
from libs.password import hash_password, password_pattern, valid_password
from libs.rsa import generate_key_pair
@@ -443,13 +444,13 @@ def convert_to_agent_apps():
            WHERE a.mode = 'chat'
                AND am.agent_mode is not null
                AND (
                    am.agent_mode like '%"strategy": "function_call"%'
                    OR am.agent_mode like '%"strategy": "react"%'
                )
                AND (
                    am.agent_mode like '{"enabled": true%'
                    OR am.agent_mode like '{"max_iteration": %'
                ) ORDER BY a.created_at DESC LIMIT 1000
        """

        with db.engine.begin() as conn:
@@ -815,3 +816,331 @@ def clear_free_plan_tenant_expired_logs(days: int, batch: int, tenant_ids: list[
    ClearFreePlanTenantExpiredLogs.process(days, batch, tenant_ids)

    click.echo(click.style("Clear free plan tenant expired logs completed.", fg="green"))
+
+
+@click.option("-f", "--force", is_flag=True, help="Skip user confirmation and force the command to execute.")
+@click.command("clear-orphaned-file-records", help="Clear orphaned file records.")
+def clear_orphaned_file_records(force: bool):
+    """
+    Clear orphaned file records in the database.
+    """
+
+    # define tables and columns to process
+    files_tables = [
+        {"table": "upload_files", "id_column": "id", "key_column": "key"},
+        {"table": "tool_files", "id_column": "id", "key_column": "file_key"},
+    ]
+    ids_tables = [
+        {"type": "uuid", "table": "message_files", "column": "upload_file_id"},
+        {"type": "text", "table": "documents", "column": "data_source_info"},
+        {"type": "text", "table": "document_segments", "column": "content"},
+        {"type": "text", "table": "messages", "column": "answer"},
+        {"type": "text", "table": "workflow_node_executions", "column": "inputs"},
+        {"type": "text", "table": "workflow_node_executions", "column": "process_data"},
+        {"type": "text", "table": "workflow_node_executions", "column": "outputs"},
+        {"type": "text", "table": "conversations", "column": "introduction"},
+        {"type": "text", "table": "conversations", "column": "system_instruction"},
+        {"type": "json", "table": "messages", "column": "inputs"},
+        {"type": "json", "table": "messages", "column": "message"},
+    ]
+
+    # notify user and ask for confirmation
+    click.echo(
+        click.style(
+            "This command will first find and delete orphaned file records from the message_files table,", fg="yellow"
+        )
+    )
+    click.echo(
+        click.style(
+            "and then it will find and delete orphaned file records in the following tables:",
+            fg="yellow",
+        )
+    )
+    for files_table in files_tables:
+        click.echo(click.style(f"- {files_table['table']}", fg="yellow"))
+    click.echo(
+        click.style("The following tables and columns will be scanned to find orphaned file records:", fg="yellow")
+    )
+    for ids_table in ids_tables:
+        click.echo(click.style(f"- {ids_table['table']} ({ids_table['column']})", fg="yellow"))
+    click.echo("")
+
+    click.echo(click.style("!!! USE WITH CAUTION !!!", fg="red"))
+    click.echo(
+        click.style(
+            (
+                "Since not all patterns have been fully tested, "
+                "please note that this command may delete unintended file records."
+            ),
+            fg="yellow",
+        )
+    )
+    click.echo(
+        click.style("This cannot be undone. Please make sure to back up your database before proceeding.", fg="yellow")
+    )
+    click.echo(
+        click.style(
+            (
+                "It is also recommended to run this during the maintenance window, "
+                "as this may cause high load on your instance."
+            ),
+            fg="yellow",
+        )
+    )
+    if not force:
+        click.confirm("Do you want to proceed?", abort=True)
+
+    # start the cleanup process
+    click.echo(click.style("Starting orphaned file records cleanup.", fg="white"))
+
+    # clean up the orphaned records in the message_files table where message_id doesn't exist in messages table
+    try:
+        click.echo(
+            click.style("- Listing message_files records where message_id doesn't exist in messages table", fg="white")
+        )
+        query = (
+            "SELECT mf.id, mf.message_id "
+            "FROM message_files mf LEFT JOIN messages m ON mf.message_id = m.id "
+            "WHERE m.id IS NULL"
+        )
+        orphaned_message_files = []
+        with db.engine.begin() as conn:
+            rs = conn.execute(db.text(query))
+            for i in rs:
+                orphaned_message_files.append({"id": str(i[0]), "message_id": str(i[1])})
+
+        if orphaned_message_files:
+            click.echo(click.style(f"Found {len(orphaned_message_files)} orphaned message_files records:", fg="white"))
+            for record in orphaned_message_files:
+                click.echo(click.style(f"  - id: {record['id']}, message_id: {record['message_id']}", fg="black"))
+
+            if not force:
+                click.confirm(
+                    (
+                        f"Do you want to proceed "
+                        f"to delete all {len(orphaned_message_files)} orphaned message_files records?"
+                    ),
+                    abort=True,
+                )
+
+            click.echo(click.style("- Deleting orphaned message_files records", fg="white"))
+            query = "DELETE FROM message_files WHERE id IN :ids"
+            with db.engine.begin() as conn:
+                conn.execute(db.text(query), {"ids": tuple([record["id"] for record in orphaned_message_files])})
+            click.echo(
+                click.style(f"Removed {len(orphaned_message_files)} orphaned message_files records.", fg="green")
+            )
+        else:
+            click.echo(click.style("No orphaned message_files records found. There is nothing to delete.", fg="green"))
+    except Exception as e:
+        click.echo(click.style(f"Error deleting orphaned message_files records: {str(e)}", fg="red"))
+
+    # clean up the orphaned records in the rest of the *_files tables
+    try:
+        # fetch file id and keys from each table
+        all_files_in_tables = []
+        for files_table in files_tables:
+            click.echo(click.style(f"- Listing file records in table {files_table['table']}", fg="white"))
+            query = f"SELECT {files_table['id_column']}, {files_table['key_column']} FROM {files_table['table']}"
+            with db.engine.begin() as conn:
+                rs = conn.execute(db.text(query))
+                for i in rs:
+                    all_files_in_tables.append({"table": files_table["table"], "id": str(i[0]), "key": i[1]})
+        click.echo(click.style(f"Found {len(all_files_in_tables)} files in tables.", fg="white"))
+
+        # fetch referred table and columns
+        guid_regexp = "[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"
+        all_ids_in_tables = []
+        for ids_table in ids_tables:
+            query = ""
+            if ids_table["type"] == "uuid":
+                click.echo(
+                    click.style(
+                        f"- Listing file ids in column {ids_table['column']} in table {ids_table['table']}", fg="white"
+                    )
+                )
+                query = (
+                    f"SELECT {ids_table['column']} FROM {ids_table['table']} WHERE {ids_table['column']} IS NOT NULL"
+                )
+                with db.engine.begin() as conn:
+                    rs = conn.execute(db.text(query))
+                    for i in rs:
+                        all_ids_in_tables.append({"table": ids_table["table"], "id": str(i[0])})
+            elif ids_table["type"] == "text":
+                click.echo(
+                    click.style(
+                        f"- Listing file-id-like strings in column {ids_table['column']} in table {ids_table['table']}",
+                        fg="white",
+                    )
+                )
+                query = (
+                    f"SELECT regexp_matches({ids_table['column']}, '{guid_regexp}', 'g') AS extracted_id "
+                    f"FROM {ids_table['table']}"
+                )
+                with db.engine.begin() as conn:
+                    rs = conn.execute(db.text(query))
+                    for i in rs:
+                        for j in i[0]:
+                            all_ids_in_tables.append({"table": ids_table["table"], "id": j})
+            elif ids_table["type"] == "json":
+                click.echo(
+                    click.style(
+                        (
+                            f"- Listing file-id-like JSON string in column {ids_table['column']} "
+                            f"in table {ids_table['table']}"
+                        ),
+                        fg="white",
+                    )
+                )
+                query = (
+                    f"SELECT regexp_matches({ids_table['column']}::text, '{guid_regexp}', 'g') AS extracted_id "
+                    f"FROM {ids_table['table']}"
+                )
+                with db.engine.begin() as conn:
+                    rs = conn.execute(db.text(query))
+                    for i in rs:
+                        for j in i[0]:
+                            all_ids_in_tables.append({"table": ids_table["table"], "id": j})
+        click.echo(click.style(f"Found {len(all_ids_in_tables)} file ids in tables.", fg="white"))
+
+    except Exception as e:
+        click.echo(click.style(f"Error fetching keys: {str(e)}", fg="red"))
+        return
+
+    # find orphaned files
+    all_files = [file["id"] for file in all_files_in_tables]
+    all_ids = [file["id"] for file in all_ids_in_tables]
+    orphaned_files = list(set(all_files) - set(all_ids))
+    if not orphaned_files:
+        click.echo(click.style("No orphaned file records found. There is nothing to delete.", fg="green"))
+        return
+    click.echo(click.style(f"Found {len(orphaned_files)} orphaned file records.", fg="white"))
+    for file in orphaned_files:
+        click.echo(click.style(f"- orphaned file id: {file}", fg="black"))
+    if not force:
+        click.confirm(f"Do you want to proceed to delete all {len(orphaned_files)} orphaned file records?", abort=True)
+
+    # delete orphaned records for each file
+    try:
+        for files_table in files_tables:
+            click.echo(click.style(f"- Deleting orphaned file records in table {files_table['table']}", fg="white"))
+            query = f"DELETE FROM {files_table['table']} WHERE {files_table['id_column']} IN :ids"
+            with db.engine.begin() as conn:
+                conn.execute(db.text(query), {"ids": tuple(orphaned_files)})
+    except Exception as e:
+        click.echo(click.style(f"Error deleting orphaned file records: {str(e)}", fg="red"))
+        return
+    click.echo(click.style(f"Removed {len(orphaned_files)} orphaned file records.", fg="green"))
+
+
+@click.option("-f", "--force", is_flag=True, help="Skip user confirmation and force the command to execute.")
+@click.command("remove-orphaned-files-on-storage", help="Remove orphaned files on the storage.")
+def remove_orphaned_files_on_storage(force: bool):
+    """
+    Remove orphaned files on the storage.
+    """
+
+    # define tables and columns to process
+    files_tables = [
+        {"table": "upload_files", "key_column": "key"},
+        {"table": "tool_files", "key_column": "file_key"},
+    ]
+    storage_paths = ["image_files", "tools", "upload_files"]
+
+    # notify user and ask for confirmation
+    click.echo(click.style("This command will find and remove orphaned files on the storage,", fg="yellow"))
+    click.echo(
+        click.style("by comparing the files on the storage with the records in the following tables:", fg="yellow")
+    )
+    for files_table in files_tables:
+        click.echo(click.style(f"- {files_table['table']}", fg="yellow"))
+    click.echo(click.style("The following paths on the storage will be scanned to find orphaned files:", fg="yellow"))
+    for storage_path in storage_paths:
+        click.echo(click.style(f"- {storage_path}", fg="yellow"))
+    click.echo("")
+
+    click.echo(click.style("!!! USE WITH CAUTION !!!", fg="red"))
+    click.echo(
+        click.style(
+            "Currently, this command will work only for opendal based storage (STORAGE_TYPE=opendal).", fg="yellow"
+        )
+    )
+    click.echo(
+        click.style(
+            "Since not all patterns have been fully tested, please note that this command may delete unintended files.",
+            fg="yellow",
+        )
+    )
+    click.echo(
+        click.style("This cannot be undone. Please make sure to back up your storage before proceeding.", fg="yellow")
+    )
+    click.echo(
+        click.style(
+            (
+                "It is also recommended to run this during the maintenance window, "
+                "as this may cause high load on your instance."
+            ),
+            fg="yellow",
+        )
+    )
+    if not force:
+        click.confirm("Do you want to proceed?", abort=True)
+
+    # start the cleanup process
+    click.echo(click.style("Starting orphaned files cleanup.", fg="white"))
+
+    # fetch file id and keys from each table
+    all_files_in_tables = []
+    try:
+        for files_table in files_tables:
+            click.echo(click.style(f"- Listing files from table {files_table['table']}", fg="white"))
+            query = f"SELECT {files_table['key_column']} FROM {files_table['table']}"
+            with db.engine.begin() as conn:
+                rs = conn.execute(db.text(query))
+                for i in rs:
+                    all_files_in_tables.append(str(i[0]))
+        click.echo(click.style(f"Found {len(all_files_in_tables)} files in tables.", fg="white"))
+    except Exception as e:
+        click.echo(click.style(f"Error fetching keys: {str(e)}", fg="red"))
+
+    all_files_on_storage = []
+    for storage_path in storage_paths:
+        try:
+            click.echo(click.style(f"- Scanning files on storage path {storage_path}", fg="white"))
+            files = storage.scan(path=storage_path, files=True, directories=False)
+            all_files_on_storage.extend(files)
+        except FileNotFoundError as e:
+            click.echo(click.style(f"  -> Skipping path {storage_path} as it does not exist.", fg="yellow"))
+            continue
+        except Exception as e:
+            click.echo(click.style(f"  -> Error scanning files on storage path {storage_path}: {str(e)}", fg="red"))
+            continue
+    click.echo(click.style(f"Found {len(all_files_on_storage)} files on storage.", fg="white"))
+
+    # find orphaned files
+    orphaned_files = list(set(all_files_on_storage) - set(all_files_in_tables))
+    if not orphaned_files:
+        click.echo(click.style("No orphaned files found. There is nothing to remove.", fg="green"))
+        return
+    click.echo(click.style(f"Found {len(orphaned_files)} orphaned files.", fg="white"))
+    for file in orphaned_files:
+        click.echo(click.style(f"- orphaned file: {file}", fg="black"))
+    if not force:
+        click.confirm(f"Do you want to proceed to remove all {len(orphaned_files)} orphaned files?", abort=True)
+
+    # delete orphaned files
+    removed_files = 0
+    error_files = 0
+    for file in orphaned_files:
+        try:
+            storage.delete(file)
+            removed_files += 1
+            click.echo(click.style(f"- Removing orphaned file: {file}", fg="white"))
+        except Exception as e:
+            error_files += 1
+            click.echo(click.style(f"- Error deleting orphaned file {file}: {str(e)}", fg="red"))
+            continue
+    if error_files == 0:
+        click.echo(click.style(f"Removed {removed_files} orphaned files without errors.", fg="green"))
+    else:
+        click.echo(click.style(f"Removed {removed_files} orphaned files, with {error_files} errors.", fg="yellow"))
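Both new commands follow the same Click pattern as the existing maintenance commands; a usage sketch, assuming they are registered on Dify's usual Flask CLI entry point and run from the api/ directory:

```bash
# interactive run with confirmation prompts
flask clear-orphaned-file-records

# non-interactive run; --force skips every confirmation
flask remove-orphaned-files-on-storage --force
```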
@@ -1,4 +1,5 @@
-from typing import Optional
+import enum
+from typing import Literal, Optional
 
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
@@ -9,6 +10,14 @@ class OpenSearchConfig(BaseSettings):
     Configuration settings for OpenSearch
     """
 
+    class AuthMethod(enum.StrEnum):
+        """
+        Authentication method for OpenSearch
+        """
+
+        BASIC = "basic"
+        AWS_MANAGED_IAM = "aws_managed_iam"
+
     OPENSEARCH_HOST: Optional[str] = Field(
         description="Hostname or IP address of the OpenSearch server (e.g., 'localhost' or 'opensearch.example.com')",
         default=None,
@@ -19,6 +28,16 @@ class OpenSearchConfig(BaseSettings):
         default=9200,
     )
 
+    OPENSEARCH_SECURE: bool = Field(
+        description="Whether to use SSL/TLS encrypted connection for OpenSearch (True for HTTPS, False for HTTP)",
+        default=False,
+    )
+
+    OPENSEARCH_AUTH_METHOD: AuthMethod = Field(
+        description="Authentication method for OpenSearch connection (default is 'basic')",
+        default=AuthMethod.BASIC,
+    )
+
    OPENSEARCH_USER: Optional[str] = Field(
        description="Username for authenticating with OpenSearch",
        default=None,
@@ -29,7 +48,11 @@ class OpenSearchConfig(BaseSettings):
         default=None,
     )
 
-    OPENSEARCH_SECURE: bool = Field(
-        description="Whether to use SSL/TLS encrypted connection for OpenSearch (True for HTTPS, False for HTTP)",
-        default=False,
+    OPENSEARCH_AWS_REGION: Optional[str] = Field(
+        description="AWS region for OpenSearch (e.g. 'us-west-2')",
+        default=None,
+    )
+
+    OPENSEARCH_AWS_SERVICE: Optional[Literal["es", "aoss"]] = Field(
+        description="AWS service for OpenSearch (e.g. 'aoss' for OpenSearch Serverless)", default=None
     )
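
The new `AuthMethod` enum and AWS fields imply the OpenSearch client is built differently for basic auth versus AWS managed IAM. A sketch of how a client factory might branch on these settings, assuming the `opensearch-py` and `boto3` libraries; the actual wiring lives in Dify's OpenSearch vector-store implementation and may differ:

```python
import boto3
from opensearchpy import AWSV4SignerAuth, OpenSearch


def build_client(config) -> OpenSearch:
    # `config` is assumed to be an OpenSearchConfig-like settings object
    common = {
        "hosts": [{"host": config.OPENSEARCH_HOST, "port": config.OPENSEARCH_PORT}],
        "use_ssl": config.OPENSEARCH_SECURE,
    }
    if config.OPENSEARCH_AUTH_METHOD == "aws_managed_iam":
        credentials = boto3.Session().get_credentials()
        # service is "es" for managed domains, "aoss" for OpenSearch Serverless
        auth = AWSV4SignerAuth(credentials, config.OPENSEARCH_AWS_REGION, config.OPENSEARCH_AWS_SERVICE)
        return OpenSearch(http_auth=auth, **common)
    return OpenSearch(http_auth=(config.OPENSEARCH_USER, config.OPENSEARCH_PASSWORD), **common)
```

Since `AuthMethod` is a `StrEnum`, comparing it against the plain string `"aws_managed_iam"` works as written.
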
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
 
     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="1.3.0",
+        default="1.3.1",
     )
 
     COMMIT_SHA: str = Field(
@@ -16,11 +16,25 @@ AUDIO_EXTENSIONS.extend([ext.upper() for ext in AUDIO_EXTENSIONS])
 
 
 if dify_config.ETL_TYPE == "Unstructured":
-    DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls"]
+    DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls", "vtt", "properties"]
     DOCUMENT_EXTENSIONS.extend(("doc", "docx", "csv", "eml", "msg", "pptx", "xml", "epub"))
     if dify_config.UNSTRUCTURED_API_URL:
         DOCUMENT_EXTENSIONS.append("ppt")
     DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])
 else:
-    DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls", "docx", "csv"]
+    DOCUMENT_EXTENSIONS = [
+        "txt",
+        "markdown",
+        "md",
+        "mdx",
+        "pdf",
+        "html",
+        "htm",
+        "xlsx",
+        "xls",
+        "docx",
+        "csv",
+        "vtt",
+        "properties",
+    ]
     DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])
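
Both branches add "vtt" and "properties" to the supported document extensions, and both end with the same extend-with-`upper()` idiom. That idiom makes extension checks effectively case-insensitive without touching any matching code, because the list simply holds both spellings:

```python
# Self-contained illustration of the extend-with-upper() idiom used above.
DOCUMENT_EXTENSIONS = ["txt", "vtt", "properties"]
DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])

print("VTT" in DOCUMENT_EXTENSIONS)  # True
print("vtt" in DOCUMENT_EXTENSIONS)  # True
```
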
@@ -186,7 +186,7 @@ class AnnotationUpdateDeleteApi(Resource):
         app_id = str(app_id)
         annotation_id = str(annotation_id)
         AppAnnotationService.delete_app_annotation(app_id, annotation_id)
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
 
 class AnnotationBatchImportApi(Resource):
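
A long run of hunks in this commit applies the same change: DELETE endpoints now answer `204 No Content`, the conventional status for a successful deletion, instead of `200`. The `(body, 204)` tuples work because flask-restful treats a returned tuple as `(body, status_code)`. A minimal sketch of the pattern these hunks converge on; `Item` and `store` are illustrative, not Dify classes:

```python
from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

store: dict[str, str] = {"42": "example"}


class Item(Resource):
    def delete(self, item_id: str):
        store.pop(item_id, None)
        # 204 No Content is the conventional reply to a successful DELETE
        return {"result": "success"}, 204


api.add_resource(Item, "/items/<string:item_id>")
```

Strictly speaking, RFC 9110 says a 204 response carries no content, so most clients will ignore the `{"result": "success"}` body; it is kept here for backward compatibility with existing callers.
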
@@ -84,7 +84,7 @@ class TraceAppConfigApi(Resource):
             result = OpsService.delete_tracing_app_config(app_id=app_id, tracing_provider=args["tracing_provider"])
             if not result:
                 raise TracingConfigNotExist()
-            return {"result": "success"}
+            return {"result": "success"}, 204
         except Exception as e:
             raise BadRequest(str(e))
 
@@ -65,7 +65,7 @@ class ApiKeyAuthDataSourceBindingDelete(Resource):
 
         ApiKeyAuthService.delete_provider_auth(current_user.current_tenant_id, binding_id)
 
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
 
 api.add_resource(ApiKeyAuthDataSource, "/api-key-auth/data-source")
@@ -40,7 +40,7 @@ from core.indexing_runner import IndexingRunner
 from core.model_manager import ModelManager
 from core.model_runtime.entities.model_entities import ModelType
 from core.model_runtime.errors.invoke import InvokeAuthorizationError
-from core.plugin.manager.exc import PluginDaemonClientSideError
+from core.plugin.impl.exc import PluginDaemonClientSideError
 from core.rag.extractor.entity.extract_setting import ExtractSetting
 from extensions.ext_database import db
 from extensions.ext_redis import redis_client
@@ -131,7 +131,7 @@ class DatasetDocumentSegmentListApi(Resource):
         except services.errors.account.NoPermissionError as e:
             raise Forbidden(str(e))
         SegmentService.delete_segments(segment_ids, document, dataset)
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
 
 class DatasetDocumentSegmentApi(Resource):
@@ -333,7 +333,7 @@ class DatasetDocumentSegmentUpdateApi(Resource):
         except services.errors.account.NoPermissionError as e:
             raise Forbidden(str(e))
         SegmentService.delete_segment(segment, document, dataset)
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
 
 class DatasetDocumentSegmentBatchImportApi(Resource):
@@ -590,7 +590,7 @@ class ChildChunkUpdateApi(Resource):
             SegmentService.delete_child_chunk(child_chunk, dataset)
         except ChildChunkDeleteIndexServiceError as e:
             raise ChildChunkDeleteIndexError(str(e))
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
     @setup_required
     @login_required
@@ -135,7 +135,7 @@ class ExternalApiTemplateApi(Resource):
             raise Forbidden()
 
         ExternalDatasetService.delete_external_knowledge_api(current_user.current_tenant_id, external_knowledge_api_id)
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
 
 class ExternalApiUseCheckApi(Resource):
@@ -82,7 +82,7 @@ class DatasetMetadataApi(Resource):
         DatasetService.check_dataset_permission(dataset, current_user)
 
         MetadataService.delete_metadata(dataset_id_str, metadata_id_str)
-        return 200
+        return {"result": "success"}, 204
 
 
 class DatasetMetadataBuiltInFieldApi(Resource):
@@ -113,7 +113,7 @@ class InstalledAppApi(InstalledAppResource):
         db.session.delete(installed_app)
         db.session.commit()
 
-        return {"result": "success", "message": "App uninstalled successfully"}
+        return {"result": "success", "message": "App uninstalled successfully"}, 204
 
     def patch(self, installed_app):
         parser = reqparse.RequestParser()
@@ -72,7 +72,7 @@ class SavedMessageApi(InstalledAppResource):
 
         SavedMessageService.delete(app_model, current_user, message_id)
 
-        return {"result": "success"}
+        return {"result": "success"}, 204
 
 
 api.add_resource(
@@ -99,7 +99,7 @@ class APIBasedExtensionDetailAPI(Resource):
 
         APIBasedExtensionService.delete(extension_data_from_db)
 
-        return {"result": "success"}
+        return {"result": "success"}, 204
 
 
 api.add_resource(CodeBasedExtensionAPI, "/code-based-extension")
@@ -86,7 +86,7 @@ class TagUpdateDeleteApi(Resource):
 
         TagService.delete_tag(tag_id)
 
-        return 200
+        return 204
 
 
 class TagBindingCreateApi(Resource):
@@ -5,7 +5,7 @@ from werkzeug.exceptions import Forbidden
 from controllers.console import api
 from controllers.console.wraps import account_initialization_required, setup_required
 from core.model_runtime.utils.encoders import jsonable_encoder
-from core.plugin.manager.exc import PluginPermissionDeniedError
+from core.plugin.impl.exc import PluginPermissionDeniedError
 from libs.login import login_required
 from services.plugin.endpoint_service import EndpointService
 
@@ -10,7 +10,7 @@ from controllers.console import api
 from controllers.console.workspace import plugin_permission_required
 from controllers.console.wraps import account_initialization_required, setup_required
 from core.model_runtime.utils.encoders import jsonable_encoder
-from core.plugin.manager.exc import PluginDaemonClientSideError
+from core.plugin.impl.exc import PluginDaemonClientSideError
 from libs.login import login_required
 from models.account import TenantPluginPermission
 from services.plugin.plugin_permission_service import PluginPermissionService
@@ -70,12 +70,26 @@ class FilePreviewApi(Resource):
             direct_passthrough=True,
             headers={},
         )
+        # add Accept-Ranges header for audio/video files
+        if upload_file.mime_type in [
+            "audio/mpeg",
+            "audio/wav",
+            "audio/mp4",
+            "audio/ogg",
+            "audio/flac",
+            "audio/aac",
+            "video/mp4",
+            "video/webm",
+            "video/quicktime",
+            "audio/x-m4a",
+        ]:
+            response.headers["Accept-Ranges"] = "bytes"
         if upload_file.size > 0:
             response.headers["Content-Length"] = str(upload_file.size)
         if args["as_attachment"]:
             encoded_filename = quote(upload_file.name)
             response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}"
             response.headers["Content-Type"] = "application/octet-stream"
 
         return response
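
Advertising `Accept-Ranges: bytes` tells browser media players that they may seek by issuing Range requests, which is what makes scrubbing in audio/video previews possible. A client-side sketch using the `requests` library against a hypothetical preview URL; note that the header only advertises support, and actually answering `206 Partial Content` depends on the underlying streaming honoring the `Range` header:

```python
import requests

url = "https://example.com/files/some-file-id/preview"  # illustrative URL
resp = requests.get(url, headers={"Range": "bytes=0-1023"}, timeout=10)

print(resp.status_code)                   # 206 if ranges are honored, else 200
print(resp.headers.get("Content-Range"))  # e.g. "bytes 0-1023/1048576"
```
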
@@ -79,7 +79,7 @@ class AnnotationListApi(Resource):
 class AnnotationUpdateDeleteApi(Resource):
     @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
     @marshal_with(annotation_fields)
-    def post(self, app_model: App, end_user: EndUser, annotation_id):
+    def put(self, app_model: App, end_user: EndUser, annotation_id):
         if not current_user.is_editor:
             raise Forbidden()
 
@@ -98,7 +98,7 @@ class AnnotationUpdateDeleteApi(Resource):
 
         annotation_id = str(annotation_id)
         AppAnnotationService.delete_app_annotation(app_model.id, annotation_id)
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
 
 api.add_resource(AnnotationReplyActionApi, "/apps/annotation-reply/<string:action>")
@@ -72,7 +72,7 @@ class ConversationDetailApi(Resource):
             ConversationService.delete(app_model, conversation_id, end_user)
         except services.errors.conversation.ConversationNotExistsError:
             raise NotFound("Conversation Not Exists.")
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
 
 class ConversationRenameApi(Resource):
@@ -323,7 +323,7 @@ class DocumentDeleteApi(DatasetApiResource):
         except services.errors.document.DocumentIndexingError:
             raise DocumentIndexingError("Cannot delete document during indexing.")
 
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
 
 class DocumentListApi(DatasetApiResource):
@@ -63,7 +63,7 @@ class DatasetMetadataServiceApi(DatasetApiResource):
         DatasetService.check_dataset_permission(dataset, current_user)
 
         MetadataService.delete_metadata(dataset_id_str, metadata_id_str)
-        return 200
+        return 204
 
 
 class DatasetMetadataBuiltInFieldServiceApi(DatasetApiResource):
@@ -159,7 +159,7 @@ class DatasetSegmentApi(DatasetApiResource):
         if not segment:
             raise NotFound("Segment not found.")
         SegmentService.delete_segment(segment, document, dataset)
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
     @cloud_edition_billing_resource_check("vector_space", "dataset")
     def post(self, tenant_id, dataset_id, document_id, segment_id):
@@ -344,7 +344,7 @@ class DatasetChildChunkApi(DatasetApiResource):
         except ChildChunkDeleteIndexServiceError as e:
             raise ChildChunkDeleteIndexError(str(e))
 
-        return {"result": "success"}, 200
+        return {"result": "success"}, 204
 
     @cloud_edition_billing_resource_check("vector_space", "dataset")
     @cloud_edition_billing_knowledge_limit_check("add_segment", "dataset")
@@ -67,7 +67,7 @@ class SavedMessageApi(WebApiResource):
 
         SavedMessageService.delete(app_model, end_user, message_id)
 
-        return {"result": "success"}
+        return {"result": "success"}, 204
 
 
 api.add_resource(SavedMessageListApi, "/saved-messages")
@@ -69,6 +69,13 @@ class CotAgentRunner(BaseAgentRunner, ABC):
         tool_instances, prompt_messages_tools = self._init_prompt_tools()
         self._prompt_messages_tools = prompt_messages_tools
 
+        # fix metadata filter not work
+        if app_config.dataset is not None:
+            metadata_filtering_conditions = app_config.dataset.retrieve_config.metadata_filtering_conditions
+            for key, dataset_retriever_tool in tool_instances.items():
+                if hasattr(dataset_retriever_tool, "retrieval_tool"):
+                    dataset_retriever_tool.retrieval_tool.metadata_filtering_conditions = metadata_filtering_conditions
+
         function_call_state = True
         llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None}
         final_answer = ""
@@ -45,6 +45,13 @@ class FunctionCallAgentRunner(BaseAgentRunner):
         # convert tools into ModelRuntime Tool format
         tool_instances, prompt_messages_tools = self._init_prompt_tools()
 
+        # fix metadata filter not work
+        if app_config.dataset is not None:
+            metadata_filtering_conditions = app_config.dataset.retrieve_config.metadata_filtering_conditions
+            for key, dataset_retriever_tool in tool_instances.items():
+                if hasattr(dataset_retriever_tool, "retrieval_tool"):
+                    dataset_retriever_tool.retrieval_tool.metadata_filtering_conditions = metadata_filtering_conditions
+
         assert app_config.agent
 
         iteration_step = 1
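
Both agent runners receive the same fix: the app-level metadata filtering conditions are copied onto each dataset retriever tool, using an `hasattr()` duck-type check so tools without a wrapped retrieval tool are skipped. The shape of that propagation in isolation, with illustrative stand-ins for Dify's tool objects:

```python
from dataclasses import dataclass, field


@dataclass
class RetrievalTool:  # stand-in for Dify's inner retrieval tool
    metadata_filtering_conditions: dict | None = None


@dataclass
class DatasetRetrieverTool:  # stand-in for the wrapping tool instance
    retrieval_tool: RetrievalTool = field(default_factory=RetrievalTool)


def propagate_conditions(tool_instances: dict, conditions: dict | None) -> None:
    for tool in tool_instances.values():
        # duck-typed guard mirrors the hasattr() check in the diff
        if hasattr(tool, "retrieval_tool"):
            tool.retrieval_tool.metadata_filtering_conditions = conditions


tools = {"kb": DatasetRetrieverTool()}
propagate_conditions(tools, {"field": "category", "value": "faq"})
print(tools["kb"].retrieval_tool.metadata_filtering_conditions)
```
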
@@ -1,4 +1,4 @@
 ENGLISH_REACT_COMPLETION_PROMPT_TEMPLATES = """Respond to the human as helpfully and accurately as possible.
 
 {{instruction}}
 
@@ -47,7 +47,7 @@ Thought:""" # noqa: E501
 ENGLISH_REACT_COMPLETION_AGENT_SCRATCHPAD_TEMPLATES = """Observation: {{observation}}
 Thought:"""
 
 ENGLISH_REACT_CHAT_PROMPT_TEMPLATES = """Respond to the human as helpfully and accurately as possible.
 
 {{instruction}}
 
@@ -4,7 +4,7 @@ from typing import Any, Optional
 from core.agent.entities import AgentInvokeMessage
 from core.agent.plugin_entities import AgentStrategyEntity, AgentStrategyParameter
 from core.agent.strategy.base import BaseAgentStrategy
-from core.plugin.manager.agent import PluginAgentManager
+from core.plugin.impl.agent import PluginAgentClient
 from core.plugin.utils.converter import convert_parameters_to_plugin_format
 
 
@@ -42,7 +42,7 @@ class PluginAgentStrategy(BaseAgentStrategy):
         """
         Invoke the agent strategy.
         """
-        manager = PluginAgentManager()
+        manager = PluginAgentClient()
 
         initialized_params = self.initialize_parameters(params)
         params = convert_parameters_to_plugin_format(initialized_params)
@@ -25,8 +25,8 @@ from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotA
 from core.model_runtime.errors.invoke import InvokeAuthorizationError
 from core.ops.ops_trace_manager import TraceQueueManager
 from core.prompt.utils.get_thread_messages_length import get_thread_messages_length
-from core.repository import RepositoryFactory
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
+from core.workflow.repository import RepositoryFactory
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
 from extensions.ext_database import db
 from factories import file_factory
 from models.account import Account
@@ -62,10 +62,10 @@ from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage
 from core.model_runtime.entities.llm_entities import LLMUsage
 from core.model_runtime.utils.encoders import jsonable_encoder
 from core.ops.ops_trace_manager import TraceQueueManager
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
 from core.workflow.enums import SystemVariableKey
 from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
 from core.workflow.nodes import NodeType
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
 from events.message_event import message_was_created
 from extensions.ext_database import db
 from models import Conversation, EndUser, Message, MessageFile
@@ -23,8 +23,8 @@ from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerat
 from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse
 from core.model_runtime.errors.invoke import InvokeAuthorizationError
 from core.ops.ops_trace_manager import TraceQueueManager
-from core.repository import RepositoryFactory
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
+from core.workflow.repository import RepositoryFactory
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
 from extensions.ext_database import db
 from factories import file_factory
 from models import Account, App, EndUser, Workflow
@@ -54,8 +54,8 @@ from core.app.entities.task_entities import (
 from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline
 from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage
 from core.ops.ops_trace_manager import TraceQueueManager
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
 from core.workflow.enums import SystemVariableKey
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
 from extensions.ext_database import db
 from models.account import Account
 from models.enums import CreatedByRole
@@ -49,12 +49,12 @@ from core.file import FILE_MODEL_IDENTITY, File
 from core.model_runtime.utils.encoders import jsonable_encoder
 from core.ops.entities.trace_entity import TraceTaskName
 from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
 from core.tools.tool_manager import ToolManager
 from core.workflow.entities.node_entities import NodeRunMetadataKey
 from core.workflow.enums import SystemVariableKey
 from core.workflow.nodes import NodeType
 from core.workflow.nodes.tool.entities import ToolNodeData
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
 from core.workflow.workflow_entry import WorkflowEntry
 from models.account import Account
 from models.enums import CreatedByRole, WorkflowRunTriggeredFrom
@@ -381,6 +381,8 @@ class WorkflowCycleManage:
         workflow_node_execution.elapsed_time = elapsed_time
         workflow_node_execution.execution_metadata = execution_metadata
 
+        self._workflow_node_execution_repository.update(workflow_node_execution)
+
         return workflow_node_execution
 
     def _handle_workflow_node_execution_retried(
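
The added `update()` call makes the repository, not the caller, responsible for persisting the mutated execution record: mutate the domain object, hand it back, return it. A minimal protocol sketch of that shape; Dify's actual `WorkflowNodeExecutionRepository` is richer than this:

```python
from typing import Protocol


class NodeExecution:  # illustrative stand-in for the execution record
    elapsed_time: float = 0.0


class NodeExecutionRepository(Protocol):
    def update(self, execution: NodeExecution) -> None: ...


def finish(execution: NodeExecution, repo: NodeExecutionRepository, elapsed: float) -> NodeExecution:
    execution.elapsed_time = elapsed
    repo.update(execution)  # persistence is the repository's job, not the caller's
    return execution
```
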
@@ -1 +1 @@
 1
@@ -10,13 +10,13 @@ class NodeJsTemplateTransformer(TemplateTransformer):
             f"""
             // declare main function
             {cls._code_placeholder}
 
             // decode and prepare input object
             var inputs_obj = JSON.parse(Buffer.from('{cls._inputs_placeholder}', 'base64').toString('utf-8'))
 
             // execute main function
             var output_obj = main(inputs_obj)
 
             // convert output to json and print
             var output_json = JSON.stringify(output_obj)
             var result = `<<RESULT>>${{output_json}}<<RESULT>>`
@@ -21,20 +21,20 @@ class Jinja2TemplateTransformer(TemplateTransformer):
             import jinja2
             template = jinja2.Template('''{cls._code_placeholder}''')
             return template.render(**inputs)
 
             import json
             from base64 import b64decode
 
             # decode and prepare input dict
             inputs_obj = json.loads(b64decode('{cls._inputs_placeholder}').decode('utf-8'))
 
             # execute main function
             output = main(**inputs_obj)
 
             # convert output and print
             result = f'''<<RESULT>>{{output}}<<RESULT>>'''
             print(result)
 
             """)
         return runner_script
 
@@ -43,15 +43,15 @@ class Jinja2TemplateTransformer(TemplateTransformer):
         preload_script = dedent("""
             import jinja2
             from base64 import b64decode
 
             def _jinja2_preload_():
                 # prepare jinja2 environment, load template and render before to avoid sandbox issue
                 template = jinja2.Template('{{s}}')
                 template.render(s='a')
 
             if __name__ == '__main__':
                 _jinja2_preload_()
 
             """)
 
         return preload_script
@@ -9,16 +9,16 @@ class Python3TemplateTransformer(TemplateTransformer):
         runner_script = dedent(f"""
             # declare main function
             {cls._code_placeholder}
 
             import json
             from base64 import b64decode
 
             # decode and prepare input dict
             inputs_obj = json.loads(b64decode('{cls._inputs_placeholder}').decode('utf-8'))
 
             # execute main function
             output_obj = main(**inputs_obj)
 
             # convert output to json and print
             output_json = json.dumps(output_obj, indent=4)
             result = f'''<<RESULT>>{{output_json}}<<RESULT>>'''
@@ -3,6 +3,8 @@ import logging
 import re
 from typing import Optional, cast
 
+import json_repair
+
 from core.llm_generator.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
 from core.llm_generator.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
 from core.llm_generator.prompts import (
@@ -366,7 +368,20 @@ class LLMGenerator:
                 ),
             )
 
-            generated_json_schema = cast(str, response.message.content)
+            raw_content = response.message.content
+
+            if not isinstance(raw_content, str):
+                raise ValueError(f"LLM response content must be a string, got: {type(raw_content)}")
+
+            try:
+                parsed_content = json.loads(raw_content)
+            except json.JSONDecodeError:
+                parsed_content = json_repair.loads(raw_content)
+
+            if not isinstance(parsed_content, dict | list):
+                raise ValueError(f"Failed to parse structured output from llm: {raw_content}")
+
+            generated_json_schema = json.dumps(parsed_content, indent=2, ensure_ascii=False)
             return {"output": generated_json_schema, "error": ""}
 
         except InvokeError as e:
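
The hunk replaces a blind `cast` with a parse-then-repair strategy: try strict `json` first, fall back to `json_repair` for the almost-JSON that LLMs often emit (trailing commas, single quotes, chatty prefixes), then re-serialize so the caller always receives canonical JSON. The same strategy in isolation:

```python
import json

import json_repair


def parse_llm_json(raw: str) -> str:
    try:
        parsed = json.loads(raw)
    except json.JSONDecodeError:
        parsed = json_repair.loads(raw)
    if not isinstance(parsed, dict | list):
        raise ValueError(f"not a JSON object or array: {raw!r}")
    # re-serialize so downstream consumers always see canonical JSON
    return json.dumps(parsed, indent=2, ensure_ascii=False)


print(parse_llm_json("{'type': 'object',}"))  # repaired into valid JSON
```
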
@@ -1,5 +1,5 @@
 # Written by YORKI MINAKO🤡, Edited by Xiaoyi
 CONVERSATION_TITLE_PROMPT = """You need to decompose the user's input into "subject" and "intention" in order to accurately figure out what the user's input language actually is.
 Notice: the language type user uses could be diverse, which can be English, Chinese, Italian, Español, Arabic, Japanese, French, and etc.
 ENSURE your output is in the SAME language as the user's input!
 Your output is restricted only to: (Input language) Intention + Subject(short as possible)
@@ -58,7 +58,7 @@ User Input: yo, 你今天咋样?
     "Your Output": "查询今日我的状态☺️"
 }
 
 User Input:
 """ # noqa: E501
 
 PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE = (
@@ -163,11 +163,11 @@ Here is a task description for which I would like you to create a high-quality p
 {{TASK_DESCRIPTION}}
 </task_description>
 Based on task description, please create a well-structured prompt template that another AI could use to consistently complete the task. The prompt template should include:
 - Do not include <input> or <output> section and variables in the prompt, assume user will add them at their own will.
 - Clear instructions for the AI that will be using this prompt, demarcated with <instruction> tags. The instructions should provide step-by-step directions on how to complete the task using the input variables. Also Specifies in the instructions that the output should not contain any xml tag.
 - Relevant examples if needed to clarify the task further, demarcated with <example> tags. Do not include variables in the prompt. Give three pairs of input and output examples.
 - Include other relevant sections demarcated with appropriate XML tags like <examples>, <instruction>.
 - Use the same language as task description.
 - Output in ``` xml ``` and start with <instruction>
 Please generate the full prompt template with at least 300 words and output only the prompt template.
 """ # noqa: E501
@@ -178,28 +178,28 @@ Here is a task description for which I would like you to create a high-quality p
 {{TASK_DESCRIPTION}}
 </task_description>
 Based on task description, please create a well-structured prompt template that another AI could use to consistently complete the task. The prompt template should include:
 - Descriptive variable names surrounded by {{ }} (two curly brackets) to indicate where the actual values will be substituted in. Choose variable names that clearly indicate the type of value expected. Variable names have to be composed of number, english alphabets and underline and nothing else.
 - Clear instructions for the AI that will be using this prompt, demarcated with <instruction> tags. The instructions should provide step-by-step directions on how to complete the task using the input variables. Also Specifies in the instructions that the output should not contain any xml tag.
 - Relevant examples if needed to clarify the task further, demarcated with <example> tags. Do not use curly brackets any other than in <instruction> section.
 - Any other relevant sections demarcated with appropriate XML tags like <input>, <output>, etc.
 - Use the same language as task description.
 - Output in ``` xml ``` and start with <instruction>
 Please generate the full prompt template and output only the prompt template.
 """ # noqa: E501
 
 RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE = """
 I need to extract the following information from the input text. The <information to be extracted> tag specifies the 'type', 'description' and 'required' of the information to be extracted.
 <information to be extracted>
 variables name bounded two double curly brackets. Variable name has to be composed of number, english alphabets and underline and nothing else.
 </information to be extracted>
 
 Step 1: Carefully read the input and understand the structure of the expected output.
 Step 2: Extract relevant parameters from the provided text based on the name and description of object.
 Step 3: Structure the extracted parameters to JSON object as specified in <structure>.
 Step 4: Ensure that the list of variable_names is properly formatted and valid. The output should not contain any XML tags. Output an empty list if there is no valid variable name in input text.
 
 ### Structure
 Here is the structure of the expected output, I should always follow the output structure.
 ["variable_name_1", "variable_name_2"]
 
 ### Input Text
@@ -214,13 +214,13 @@ I should always output a valid list. Output nothing other than the list of varia
 
 RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE = """
 <instruction>
 Step 1: Identify the purpose of the chatbot from the variable {{TASK_DESCRIPTION}} and infer chatbot's tone (e.g., friendly, professional, etc.) to add personality traits.
 Step 2: Create a coherent and engaging opening statement.
 Step 3: Ensure the output is welcoming and clearly explains what the chatbot is designed to do. Do not include any XML tags in the output.
 Please use the same language as the user's input language. If user uses chinese then generate opening statement in chinese, if user uses english then generate opening statement in english.
 Example Input:
 Provide customer support for an e-commerce website
 Example Output:
 Welcome! I'm here to assist you with any questions or issues you might have with your shopping experience. Whether you're looking for product information, need help with your order, or have any other inquiries, feel free to ask. I'm friendly, helpful, and ready to support you in any way I can.
 <Task>
 Here is the task description: {{INPUT_TEXT}}
@@ -276,15 +276,15 @@ Your task is to convert simple user descriptions into properly formatted JSON Sc
     {
         "type": "object",
         "properties": {
             "email": {
                 "type": "string",
                 "format": "email"
             },
             "password": {
                 "type": "string",
                 "minLength": 8
             },
             "age": {
                 "type": "integer",
                 "minimum": 18
             }
         }
@@ -307,4 +307,4 @@ Runtime Errors:
 """
 ```
 
 For interface method details, see: [Interfaces](./interfaces.md). For specific implementations, refer to: [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model_providers/anthropic/llm/llm.py).
@@ -170,4 +170,4 @@ Runtime Errors:
 """
 ```
 
 For interface method explanations, see: [Interfaces](./interfaces.md). For detailed implementation, refer to: [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model_providers/anthropic/llm/llm.py).
@@ -294,4 +294,4 @@ provider_credential_schema:
 """
 ```
 
 For interface method descriptions, see: [Interfaces](./interfaces.md); for a concrete implementation, refer to: [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model_providers/anthropic/llm/llm.py).
@@ -169,4 +169,4 @@ pricing: # pricing information
 """
 ```
 
 For interface method descriptions, see: [Interfaces](./interfaces.md); for a concrete implementation, refer to: [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model_providers/anthropic/llm/llm.py).
@@ -26,7 +26,7 @@ from core.model_runtime.errors.invoke import (
 )
 from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer
 from core.plugin.entities.plugin_daemon import PluginDaemonInnerError, PluginModelProviderEntity
-from core.plugin.manager.model import PluginModelManager
+from core.plugin.impl.model import PluginModelClient
 
 
 class AIModel(BaseModel):
@@ -141,7 +141,7 @@ class AIModel(BaseModel):
         :param credentials: model credentials
         :return: model schema
         """
-        plugin_model_manager = PluginModelManager()
+        plugin_model_manager = PluginModelClient()
         cache_key = f"{self.tenant_id}:{self.plugin_id}:{self.provider_name}:{self.model_type.value}:{model}"
         # sort credentials
         sorted_credentials = sorted(credentials.items()) if credentials else []
@@ -2,7 +2,7 @@ import logging
 import time
 import uuid
 from collections.abc import Generator, Sequence
-from typing import Optional, Union
+from typing import Optional, Union, cast
 
 from pydantic import ConfigDict
 
@@ -20,7 +20,8 @@ from core.model_runtime.entities.model_entities import (
     PriceType,
 )
 from core.model_runtime.model_providers.__base.ai_model import AIModel
-from core.plugin.manager.model import PluginModelManager
+from core.model_runtime.utils.helper import convert_llm_result_chunk_to_str
+from core.plugin.impl.model import PluginModelClient
 
 logger = logging.getLogger(__name__)
 
@@ -140,7 +141,7 @@ class LargeLanguageModel(AIModel):
         result: Union[LLMResult, Generator[LLMResultChunk, None, None]]
 
         try:
-            plugin_model_manager = PluginModelManager()
+            plugin_model_manager = PluginModelClient()
             result = plugin_model_manager.invoke_llm(
                 tenant_id=self.tenant_id,
                 user_id=user or "unknown",
@@ -280,7 +281,9 @@ class LargeLanguageModel(AIModel):
                 callbacks=callbacks,
             )
 
-            assistant_message.content += chunk.delta.message.content
+            text = convert_llm_result_chunk_to_str(chunk.delta.message.content)
+            current_content = cast(str, assistant_message.content)
+            assistant_message.content = current_content + text
             real_model = chunk.model
             if chunk.delta.usage:
                 usage = chunk.delta.usage
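
The raw `+=` breaks when a chunk's delta content is not a plain string, since message content may also arrive as a list of content parts (or be absent). The diff routes it through `convert_llm_result_chunk_to_str` instead; that helper's body is not shown here, so the following is only a plausible reconstruction of what such a normalizer does, with an illustrative name:

```python
def convert_chunk_content_to_str(content) -> str:  # illustrative stand-in
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    # assume a list of content parts, each carrying a text-like payload
    return "".join(getattr(part, "data", str(part)) for part in content)


assistant_text = ""
for piece in ["Hel", None, "lo"]:
    assistant_text += convert_chunk_content_to_str(piece)
print(assistant_text)  # "Hello"
```
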
@ -326,7 +329,7 @@ class LargeLanguageModel(AIModel):
|
|||||||
:return:
|
:return:
|
||||||
"""
|
"""
|
||||||
if dify_config.PLUGIN_BASED_TOKEN_COUNTING_ENABLED:
|
if dify_config.PLUGIN_BASED_TOKEN_COUNTING_ENABLED:
|
||||||
plugin_model_manager = PluginModelManager()
|
plugin_model_manager = PluginModelClient()
|
||||||
return plugin_model_manager.get_llm_num_tokens(
|
return plugin_model_manager.get_llm_num_tokens(
|
||||||
tenant_id=self.tenant_id,
|
tenant_id=self.tenant_id,
|
||||||
user_id="unknown",
|
user_id="unknown",
|
||||||
|
@ -5,7 +5,7 @@ from pydantic import ConfigDict
|
|||||||
|
|
||||||
from core.model_runtime.entities.model_entities import ModelType
|
from core.model_runtime.entities.model_entities import ModelType
|
||||||
from core.model_runtime.model_providers.__base.ai_model import AIModel
|
from core.model_runtime.model_providers.__base.ai_model import AIModel
|
||||||
from core.plugin.manager.model import PluginModelManager
|
from core.plugin.impl.model import PluginModelClient
|
||||||
|
|
||||||
|
|
||||||
class ModerationModel(AIModel):
|
class ModerationModel(AIModel):
|
||||||
@ -31,7 +31,7 @@ class ModerationModel(AIModel):
|
|||||||
self.started_at = time.perf_counter()
|
self.started_at = time.perf_counter()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
plugin_model_manager = PluginModelManager()
|
plugin_model_manager = PluginModelClient()
|
||||||
return plugin_model_manager.invoke_moderation(
|
return plugin_model_manager.invoke_moderation(
|
||||||
tenant_id=self.tenant_id,
|
tenant_id=self.tenant_id,
|
||||||
user_id=user or "unknown",
|
user_id=user or "unknown",
|
||||||
|
@ -3,7 +3,7 @@ from typing import Optional
|
|||||||
from core.model_runtime.entities.model_entities import ModelType
|
from core.model_runtime.entities.model_entities import ModelType
|
||||||
from core.model_runtime.entities.rerank_entities import RerankResult
|
from core.model_runtime.entities.rerank_entities import RerankResult
|
||||||
from core.model_runtime.model_providers.__base.ai_model import AIModel
|
from core.model_runtime.model_providers.__base.ai_model import AIModel
|
||||||
from core.plugin.manager.model import PluginModelManager
|
from core.plugin.impl.model import PluginModelClient
|
||||||
|
|
||||||
|
|
||||||
class RerankModel(AIModel):
|
class RerankModel(AIModel):
|
||||||
@ -36,7 +36,7 @@ class RerankModel(AIModel):
|
|||||||
:return: rerank result
|
:return: rerank result
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
plugin_model_manager = PluginModelManager()
|
plugin_model_manager = PluginModelClient()
|
||||||
return plugin_model_manager.invoke_rerank(
|
return plugin_model_manager.invoke_rerank(
|
||||||
tenant_id=self.tenant_id,
|
tenant_id=self.tenant_id,
|
||||||
user_id=user or "unknown",
|
user_id=user or "unknown",
|
||||||
|
@@ -4,7 +4,7 @@ from pydantic import ConfigDict

 from core.model_runtime.entities.model_entities import ModelType
 from core.model_runtime.model_providers.__base.ai_model import AIModel
-from core.plugin.manager.model import PluginModelManager
+from core.plugin.impl.model import PluginModelClient


 class Speech2TextModel(AIModel):
@@ -28,7 +28,7 @@ class Speech2TextModel(AIModel):
         :return: text for given audio file
         """
         try:
-            plugin_model_manager = PluginModelManager()
+            plugin_model_manager = PluginModelClient()
             return plugin_model_manager.invoke_speech_to_text(
                 tenant_id=self.tenant_id,
                 user_id=user or "unknown",
@@ -6,7 +6,7 @@ from core.entities.embedding_type import EmbeddingInputType
 from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType
 from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
 from core.model_runtime.model_providers.__base.ai_model import AIModel
-from core.plugin.manager.model import PluginModelManager
+from core.plugin.impl.model import PluginModelClient


 class TextEmbeddingModel(AIModel):
@@ -38,7 +38,7 @@ class TextEmbeddingModel(AIModel):
         :return: embeddings result
         """
         try:
-            plugin_model_manager = PluginModelManager()
+            plugin_model_manager = PluginModelClient()
             return plugin_model_manager.invoke_text_embedding(
                 tenant_id=self.tenant_id,
                 user_id=user or "unknown",
@@ -61,7 +61,7 @@ class TextEmbeddingModel(AIModel):
         :param texts: texts to embed
         :return:
         """
-        plugin_model_manager = PluginModelManager()
+        plugin_model_manager = PluginModelClient()
         return plugin_model_manager.get_text_embedding_num_tokens(
             tenant_id=self.tenant_id,
             user_id="unknown",
@@ -6,7 +6,7 @@ from pydantic import ConfigDict

 from core.model_runtime.entities.model_entities import ModelType
 from core.model_runtime.model_providers.__base.ai_model import AIModel
-from core.plugin.manager.model import PluginModelManager
+from core.plugin.impl.model import PluginModelClient

 logger = logging.getLogger(__name__)

@@ -42,7 +42,7 @@ class TTSModel(AIModel):
         :return: translated audio file
         """
         try:
-            plugin_model_manager = PluginModelManager()
+            plugin_model_manager = PluginModelClient()
             return plugin_model_manager.invoke_tts(
                 tenant_id=self.tenant_id,
                 user_id=user or "unknown",
@@ -65,7 +65,7 @@ class TTSModel(AIModel):
         :param credentials: The credentials required to access the TTS model.
         :return: A list of voices supported by the TTS model.
         """
-        plugin_model_manager = PluginModelManager()
+        plugin_model_manager = PluginModelClient()
         return plugin_model_manager.get_tts_model_voices(
             tenant_id=self.tenant_id,
             user_id="unknown",
@@ -22,8 +22,8 @@ from core.model_runtime.schema_validators.model_credential_schema_validator impo
 from core.model_runtime.schema_validators.provider_credential_schema_validator import ProviderCredentialSchemaValidator
 from core.plugin.entities.plugin import ModelProviderID
 from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
-from core.plugin.manager.asset import PluginAssetManager
-from core.plugin.manager.model import PluginModelManager
+from core.plugin.impl.asset import PluginAssetManager
+from core.plugin.impl.model import PluginModelClient

 logger = logging.getLogger(__name__)

@@ -40,7 +40,7 @@ class ModelProviderFactory:
         self.provider_position_map = {}

         self.tenant_id = tenant_id
-        self.plugin_model_manager = PluginModelManager()
+        self.plugin_model_manager = PluginModelClient()

         if not self.provider_position_map:
             # get the path of current classes
@@ -1,6 +1,8 @@
 import pydantic
 from pydantic import BaseModel

+from core.model_runtime.entities.message_entities import PromptMessageContentUnionTypes
+

 def dump_model(model: BaseModel) -> dict:
     if hasattr(pydantic, "model_dump"):
@@ -8,3 +10,18 @@ def dump_model(model: BaseModel) -> dict:
         return pydantic.model_dump(model)  # type: ignore
     else:
         return model.model_dump()
+
+
+def convert_llm_result_chunk_to_str(content: None | str | list[PromptMessageContentUnionTypes]) -> str:
+    if content is None:
+        message_text = ""
+    elif isinstance(content, str):
+        message_text = content
+    elif isinstance(content, list):
+        # Assuming the list contains PromptMessageContent objects with a "data" attribute
+        message_text = "".join(
+            item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item) for item in content
+        )
+    else:
+        message_text = str(content)
+    return message_text
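A behavior sketch for the new helper; `_Part` is a hypothetical stand-in for a `PromptMessageContent` object carrying a string `data` attribute:

    class _Part:
        def __init__(self, data):
            self.data = data

    assert convert_llm_result_chunk_to_str(None) == ""
    assert convert_llm_result_chunk_to_str("hello") == "hello"
    # list content: string "data" attributes are concatenated, anything else is str()-ed
    assert convert_llm_result_chunk_to_str([_Part("a"), _Part("b")]) == "ab"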
@@ -1 +1 @@
-3
+3
@@ -1 +1 @@
-2
+2
@@ -1 +1 @@
-1
+1
@@ -29,7 +29,7 @@ from core.ops.langfuse_trace.entities.langfuse_trace_entity import (
     UnitEnum,
 )
 from core.ops.utils import filter_none_values
-from core.repository.repository_factory import RepositoryFactory
+from core.workflow.repository.repository_factory import RepositoryFactory
 from extensions.ext_database import db
 from models.model import EndUser

@@ -28,7 +28,7 @@ from core.ops.langsmith_trace.entities.langsmith_trace_entity import (
     LangSmithRunUpdateModel,
 )
 from core.ops.utils import filter_none_values, generate_dotted_order
-from core.repository.repository_factory import RepositoryFactory
+from core.workflow.repository.repository_factory import RepositoryFactory
 from extensions.ext_database import db
 from models.model import EndUser, MessageFile

@@ -22,7 +22,7 @@ from core.ops.entities.trace_entity import (
     TraceTaskName,
     WorkflowTraceInfo,
 )
-from core.repository.repository_factory import RepositoryFactory
+from core.workflow.repository.repository_factory import RepositoryFactory
 from extensions.ext_database import db
 from models.model import EndUser, MessageFile

@@ -72,7 +72,7 @@ class PluginAppBackwardsInvocation(BaseBackwardsInvocation):
                 raise ValueError("missing query")

             return cls.invoke_chat_app(app, user, conversation_id, query, stream, inputs, files)
-        elif app.mode == AppMode.WORKFLOW.value:
+        elif app.mode == AppMode.WORKFLOW:
             return cls.invoke_workflow_app(app, user, stream, inputs, files)
         elif app.mode == AppMode.COMPLETION:
             return cls.invoke_completion_app(app, user, stream, inputs, files)
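Dropping `.value` is safe because `AppMode` is a `StrEnum`: members compare equal to their raw string values, so the member itself can be matched directly against `app.mode`. A minimal sketch of the semantics (simplified stand-in enum, not the real `AppMode`):

    from enum import StrEnum

    class AppMode(StrEnum):
        WORKFLOW = "workflow"
        COMPLETION = "completion"

    assert AppMode.WORKFLOW == "workflow"        # StrEnum members equal their values
    assert AppMode.WORKFLOW.value == "workflow"  # so the explicit .value is redundant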
@@ -239,8 +239,8 @@ class PluginModelBackwardsInvocation(BaseBackwardsInvocation):
             content = payload.text

         SUMMARY_PROMPT = """You are a professional language researcher, you are interested in the language
 and you can quickly aimed at the main point of an webpage and reproduce it in your own words but
 retain the original meaning and keep the key points.
 however, the text you got is too long, what you got is possible a part of the text.
 Please summarize the text you got.

@@ -1,6 +1,7 @@
+from collections.abc import Mapping
 from datetime import datetime
 from enum import StrEnum
-from typing import Generic, Optional, TypeVar
+from typing import Any, Generic, Optional, TypeVar

 from pydantic import BaseModel, ConfigDict, Field

@@ -158,3 +159,11 @@ class PluginInstallTaskStartResponse(BaseModel):
 class PluginUploadResponse(BaseModel):
     unique_identifier: str = Field(description="The unique identifier of the plugin.")
     manifest: PluginDeclaration
+
+
+class PluginOAuthAuthorizationUrlResponse(BaseModel):
+    authorization_url: str = Field(description="The URL of the authorization.")
+
+
+class PluginOAuthCredentialsResponse(BaseModel):
+    credentials: Mapping[str, Any] = Field(description="The credentials of the OAuth.")
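The two new models give the OAuth flow typed responses. A minimal construction sketch; every value here is a placeholder:

    url_resp = PluginOAuthAuthorizationUrlResponse(
        authorization_url="https://example.com/oauth/authorize",  # placeholder
    )
    cred_resp = PluginOAuthCredentialsResponse(
        credentials={"access_token": "placeholder"},
    )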
@@ -6,10 +6,10 @@ from core.plugin.entities.plugin import GenericProviderID
 from core.plugin.entities.plugin_daemon import (
     PluginAgentProviderEntity,
 )
-from core.plugin.manager.base import BasePluginManager
+from core.plugin.impl.base import BasePluginClient


-class PluginAgentManager(BasePluginManager):
+class PluginAgentClient(BasePluginClient):
     def fetch_agent_strategy_providers(self, tenant_id: str) -> list[PluginAgentProviderEntity]:
         """
         Fetch agent providers for the given tenant.
@@ -1,7 +1,7 @@
-from core.plugin.manager.base import BasePluginManager
+from core.plugin.impl.base import BasePluginClient


-class PluginAssetManager(BasePluginManager):
+class PluginAssetManager(BasePluginClient):
     def fetch_asset(self, tenant_id: str, id: str) -> bytes:
         """
         Fetch an asset by id.
@@ -18,7 +18,7 @@ from core.model_runtime.errors.invoke import (
 )
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.plugin.entities.plugin_daemon import PluginDaemonBasicResponse, PluginDaemonError, PluginDaemonInnerError
-from core.plugin.manager.exc import (
+from core.plugin.impl.exc import (
     PluginDaemonBadRequestError,
     PluginDaemonInternalServerError,
     PluginDaemonNotFoundError,
@@ -37,7 +37,7 @@ T = TypeVar("T", bound=(BaseModel | dict | list | bool | str))
 logger = logging.getLogger(__name__)


-class BasePluginManager:
+class BasePluginClient:
     def _request(
         self,
         method: str,
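The rename from `BasePluginManager` to `BasePluginClient` (and of the `core.plugin.manager` package to `core.plugin.impl`) reflects what these classes actually are: thin HTTP clients for the plugin daemon. A hedged sketch of the call pattern the subclasses share; `ExampleResponse` and the `example/ping` path are illustrative, not part of the codebase:

    from pydantic import BaseModel

    from core.plugin.impl.base import BasePluginClient

    class ExampleResponse(BaseModel):  # hypothetical payload model
        ok: bool

    class ExampleClient(BasePluginClient):
        def ping(self, tenant_id: str) -> ExampleResponse:
            # Deserializes the daemon's response envelope into the given model,
            # the same pattern used by OAuthHandler further down in this diff.
            return self._request_with_plugin_daemon_response(
                "GET",
                f"plugin/{tenant_id}/dispatch/example/ping",  # illustrative endpoint
                ExampleResponse,
            )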
@@ -1,9 +1,9 @@
 from pydantic import BaseModel

-from core.plugin.manager.base import BasePluginManager
+from core.plugin.impl.base import BasePluginClient


-class PluginDebuggingManager(BasePluginManager):
+class PluginDebuggingClient(BasePluginClient):
     def get_debugging_key(self, tenant_id: str) -> str:
         """
         Get the debugging key for the given tenant.
@@ -1,8 +1,8 @@
 from core.plugin.entities.endpoint import EndpointEntityWithInstance
-from core.plugin.manager.base import BasePluginManager
+from core.plugin.impl.base import BasePluginClient


-class PluginEndpointManager(BasePluginManager):
+class PluginEndpointClient(BasePluginClient):
     def create_endpoint(
         self, tenant_id: str, user_id: str, plugin_unique_identifier: str, name: str, settings: dict
     ) -> bool:
@@ -18,10 +18,10 @@ from core.plugin.entities.plugin_daemon import (
     PluginTextEmbeddingNumTokensResponse,
     PluginVoicesResponse,
 )
-from core.plugin.manager.base import BasePluginManager
+from core.plugin.impl.base import BasePluginClient


-class PluginModelManager(BasePluginManager):
+class PluginModelClient(BasePluginClient):
     def fetch_model_providers(self, tenant_id: str) -> Sequence[PluginModelProviderEntity]:
         """
         Fetch model providers for the given tenant.
98 api/core/plugin/impl/oauth.py (new file)
@@ -0,0 +1,98 @@
+from collections.abc import Mapping
+from typing import Any
+
+from werkzeug import Request
+
+from core.plugin.entities.plugin_daemon import PluginOAuthAuthorizationUrlResponse, PluginOAuthCredentialsResponse
+from core.plugin.impl.base import BasePluginClient
+
+
+class OAuthHandler(BasePluginClient):
+    def get_authorization_url(
+        self,
+        tenant_id: str,
+        user_id: str,
+        plugin_id: str,
+        provider: str,
+        system_credentials: Mapping[str, Any],
+    ) -> PluginOAuthAuthorizationUrlResponse:
+        return self._request_with_plugin_daemon_response(
+            "POST",
+            f"plugin/{tenant_id}/dispatch/oauth/get_authorization_url",
+            PluginOAuthAuthorizationUrlResponse,
+            data={
+                "user_id": user_id,
+                "data": {
+                    "provider": provider,
+                    "system_credentials": system_credentials,
+                },
+            },
+            headers={
+                "X-Plugin-ID": plugin_id,
+                "Content-Type": "application/json",
+            },
+        )
+
+    def get_credentials(
+        self,
+        tenant_id: str,
+        user_id: str,
+        plugin_id: str,
+        provider: str,
+        system_credentials: Mapping[str, Any],
+        request: Request,
+    ) -> PluginOAuthCredentialsResponse:
+        """
+        Get credentials from the given request.
+        """
+
+        # encode request to raw http request
+        raw_request_bytes = self._convert_request_to_raw_data(request)
+
+        return self._request_with_plugin_daemon_response(
+            "POST",
+            f"plugin/{tenant_id}/dispatch/oauth/get_credentials",
+            PluginOAuthCredentialsResponse,
+            data={
+                "user_id": user_id,
+                "data": {
+                    "provider": provider,
+                    "system_credentials": system_credentials,
+                    "raw_request_bytes": raw_request_bytes,
+                },
+            },
+            headers={
+                "X-Plugin-ID": plugin_id,
+                "Content-Type": "application/json",
+            },
+        )
+
+    def _convert_request_to_raw_data(self, request: Request) -> bytes:
+        """
+        Convert a Request object to raw HTTP data.
+
+        Args:
+            request: The Request object to convert.
+
+        Returns:
+            The raw HTTP data as bytes.
+        """
+        # Start with the request line
+        method = request.method
+        path = request.path
+        protocol = request.headers.get("HTTP_VERSION", "HTTP/1.1")
+        raw_data = f"{method} {path} {protocol}\r\n".encode()
+
+        # Add headers
+        for header_name, header_value in request.headers.items():
+            raw_data += f"{header_name}: {header_value}\r\n".encode()
+
+        # Add empty line to separate headers from body
+        raw_data += b"\r\n"
+
+        # Add body if exists
+        body = request.get_data(as_text=False)
+        if body:
+            raw_data += body
+
+        return raw_data
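A usage sketch for the new `OAuthHandler`, assuming a werkzeug `Request` for the provider's callback is in scope and the plugin daemon is reachable; every identifier and credential below is a placeholder:

    handler = OAuthHandler()

    # Step 1: send the user to the provider's consent page.
    url = handler.get_authorization_url(
        tenant_id="tenant-id",
        user_id="user-id",
        plugin_id="some/plugin",
        provider="some_provider",
        system_credentials={"client_id": "...", "client_secret": "..."},
    ).authorization_url

    # Step 2: on callback, forward the raw HTTP request so the daemon can
    # exchange the authorization code for credentials:
    # creds = handler.get_credentials(..., request=callback_request).credentials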
@@ -10,10 +10,10 @@ from core.plugin.entities.plugin import (
     PluginInstallationSource,
 )
 from core.plugin.entities.plugin_daemon import PluginInstallTask, PluginInstallTaskStartResponse, PluginUploadResponse
-from core.plugin.manager.base import BasePluginManager
+from core.plugin.impl.base import BasePluginClient


-class PluginInstallationManager(BasePluginManager):
+class PluginInstaller(BasePluginClient):
     def fetch_plugin_by_identifier(
         self,
         tenant_id: str,
@@ -5,11 +5,11 @@ from pydantic import BaseModel

 from core.plugin.entities.plugin import GenericProviderID, ToolProviderID
 from core.plugin.entities.plugin_daemon import PluginBasicBooleanResponse, PluginToolProviderEntity
-from core.plugin.manager.base import BasePluginManager
+from core.plugin.impl.base import BasePluginClient
 from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter


-class PluginToolManager(BasePluginManager):
+class PluginToolManager(BasePluginClient):
     def fetch_tool_providers(self, tenant_id: str) -> list[PluginToolProviderEntity]:
         """
         Fetch tool providers for the given tenant.
@@ -10,4 +10,4 @@
     ],
     "query_prompt": "\n\n用户:{{#query#}}",
     "stops": ["用户:"]
 }
@@ -6,4 +6,4 @@
     ],
     "query_prompt": "{{#query#}}",
     "stops": null
 }
@@ -6,4 +6,4 @@
     ],
     "query_prompt": "{{#query#}}",
     "stops": null
 }
@@ -156,8 +156,8 @@ class AnalyticdbVectorBySql:
         values = []
         id_prefix = str(uuid.uuid4()) + "_"
         sql = f"""
         INSERT INTO {self.table_name}
         (id, ref_doc_id, vector, page_content, metadata_, to_tsvector)
         VALUES (%s, %s, %s, %s, %s, to_tsvector('zh_cn', %s));
         """
         for i, doc in enumerate(documents):
@@ -242,7 +242,7 @@ class AnalyticdbVectorBySql:
             where_clause += f"AND metadata_->>'document_id' IN ({document_ids})"
         with self._get_cursor() as cur:
             cur.execute(
                 f"""SELECT id, vector, page_content, metadata_,
                 ts_rank(to_tsvector, to_tsquery_from_text(%s, 'zh_cn'), 32) AS score
                 FROM {self.table_name}
                 WHERE to_tsvector@@to_tsquery_from_text(%s, 'zh_cn') {where_clause}
@@ -27,8 +27,8 @@ class MilvusConfig(BaseModel):

     uri: str  # Milvus server URI
     token: Optional[str] = None  # Optional token for authentication
-    user: str  # Username for authentication
-    password: str  # Password for authentication
+    user: Optional[str] = None  # Username for authentication
+    password: Optional[str] = None  # Password for authentication
     batch_size: int = 100  # Batch size for operations
     database: str = "default"  # Database name
     enable_hybrid_search: bool = False  # Flag to enable hybrid search
@@ -43,10 +43,11 @@ class MilvusConfig(BaseModel):
         """
         if not values.get("uri"):
             raise ValueError("config MILVUS_URI is required")
-        if not values.get("user"):
-            raise ValueError("config MILVUS_USER is required")
-        if not values.get("password"):
-            raise ValueError("config MILVUS_PASSWORD is required")
+        if not values.get("token"):
+            if not values.get("user"):
+                raise ValueError("config MILVUS_USER is required")
+            if not values.get("password"):
+                raise ValueError("config MILVUS_PASSWORD is required")
         return values

     def to_milvus_params(self):
@@ -356,11 +357,14 @@ class MilvusVector(BaseVector):
             )
             redis_client.set(collection_exist_cache_key, 1, ex=3600)

-    def _init_client(self, config) -> MilvusClient:
+    def _init_client(self, config: MilvusConfig) -> MilvusClient:
         """
         Initialize and return a Milvus client.
         """
-        client = MilvusClient(uri=config.uri, user=config.user, password=config.password, db_name=config.database)
+        if config.token:
+            client = MilvusClient(uri=config.uri, token=config.token, db_name=config.database)
+        else:
+            client = MilvusClient(uri=config.uri, user=config.user, password=config.password, db_name=config.database)
         return client

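With these changes, Milvus authentication resolves in two mutually exclusive ways: a `token` (e.g. an API key or a `user:password` string) takes precedence, and the separate `user`/`password` pair is only required, and only validated, when no token is set. A condensed sketch of the resulting behavior, assuming `pymilvus` is installed:

    from pymilvus import MilvusClient

    def make_client(uri, token=None, user=None, password=None, db="default"):
        # Mirrors MilvusVector._init_client: token wins; otherwise fall back
        # to user/password authentication.
        if token:
            return MilvusClient(uri=uri, token=token, db_name=db)
        return MilvusClient(uri=uri, user=user, password=password, db_name=db)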
@@ -203,7 +203,7 @@ class OceanBaseVector(BaseVector):

         full_sql = f"""SELECT metadata, text, MATCH (text) AGAINST (:query) AS score
             FROM {self._collection_name}
             WHERE MATCH (text) AGAINST (:query) > 0
             {where_clause}
             ORDER BY score DESC
             LIMIT {top_k}"""
@@ -59,12 +59,12 @@ CREATE TABLE IF NOT EXISTS {table_name} (
 """

 SQL_CREATE_INDEX_PQ = """
 CREATE INDEX IF NOT EXISTS embedding_{table_name}_pq_idx ON {table_name}
 USING hnsw (embedding vector_cosine_ops) WITH (m = 16, ef_construction = 64, enable_pq=on, pq_m={pq_m});
 """

 SQL_CREATE_INDEX = """
 CREATE INDEX IF NOT EXISTS embedding_cosine_{table_name}_idx ON {table_name}
 USING hnsw (embedding vector_cosine_ops) WITH (m = 16, ef_construction = 64);
 """

@@ -1,10 +1,9 @@
 import json
 import logging
-import ssl
-from typing import Any, Optional
+from typing import Any, Literal, Optional
 from uuid import uuid4

-from opensearchpy import OpenSearch, helpers
+from opensearchpy import OpenSearch, Urllib3AWSV4SignerAuth, Urllib3HttpConnection, helpers
 from opensearchpy.helpers import BulkIndexError
 from pydantic import BaseModel, model_validator

@@ -24,9 +23,12 @@ logger = logging.getLogger(__name__)
 class OpenSearchConfig(BaseModel):
     host: str
     port: int
+    secure: bool = False
+    auth_method: Literal["basic", "aws_managed_iam"] = "basic"
     user: Optional[str] = None
     password: Optional[str] = None
-    secure: bool = False
+    aws_region: Optional[str] = None
+    aws_service: Optional[str] = None

     @model_validator(mode="before")
     @classmethod
@@ -35,24 +37,40 @@ class OpenSearchConfig(BaseModel):
             raise ValueError("config OPENSEARCH_HOST is required")
         if not values.get("port"):
             raise ValueError("config OPENSEARCH_PORT is required")
+        if values.get("auth_method") == "aws_managed_iam":
+            if not values.get("aws_region"):
+                raise ValueError("config OPENSEARCH_AWS_REGION is required for AWS_MANAGED_IAM auth method")
+            if not values.get("aws_service"):
+                raise ValueError("config OPENSEARCH_AWS_SERVICE is required for AWS_MANAGED_IAM auth method")
         return values

-    def create_ssl_context(self) -> ssl.SSLContext:
-        ssl_context = ssl.create_default_context()
-        ssl_context.check_hostname = False
-        ssl_context.verify_mode = ssl.CERT_NONE  # Disable Certificate Validation
-        return ssl_context
+    def create_aws_managed_iam_auth(self) -> Urllib3AWSV4SignerAuth:
+        import boto3  # type: ignore
+
+        return Urllib3AWSV4SignerAuth(
+            credentials=boto3.Session().get_credentials(),
+            region=self.aws_region,
+            service=self.aws_service,  # type: ignore[arg-type]
+        )

     def to_opensearch_params(self) -> dict[str, Any]:
         params = {
             "hosts": [{"host": self.host, "port": self.port}],
             "use_ssl": self.secure,
             "verify_certs": self.secure,
+            "connection_class": Urllib3HttpConnection,
+            "pool_maxsize": 20,
         }
-        if self.user and self.password:
+
+        if self.auth_method == "basic":
+            logger.info("Using basic authentication for OpenSearch Vector DB")
             params["http_auth"] = (self.user, self.password)
-        if self.secure:
-            params["ssl_context"] = self.create_ssl_context()
+        elif self.auth_method == "aws_managed_iam":
+            logger.info("Using AWS managed IAM role for OpenSearch Vector DB")
+            params["http_auth"] = self.create_aws_managed_iam_auth()
+
         return params
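A sketch of what the two auth paths produce, assuming `opensearch-py` with urllib3 connection classes and `boto3` credentials are available; host names and credentials are placeholders:

    basic = OpenSearchConfig(host="localhost", port=9200, secure=True,
                             auth_method="basic", user="admin", password="admin")
    basic.to_opensearch_params()
    # -> hosts/use_ssl/verify_certs plus connection_class=Urllib3HttpConnection,
    #    pool_maxsize=20, and http_auth=("admin", "admin")

    iam = OpenSearchConfig(host="search-example.us-east-1.es.amazonaws.com", port=443,
                           secure=True, auth_method="aws_managed_iam",
                           aws_region="us-east-1", aws_service="es")  # "aoss" for serverless
    iam.to_opensearch_params()
    # -> same base params, with http_auth set to a Urllib3AWSV4SignerAuth that
    #    SigV4-signs each request using boto3.Session().get_credentials()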
@@ -76,16 +94,23 @@ class OpenSearchVector(BaseVector):
             action = {
                 "_op_type": "index",
                 "_index": self._collection_name.lower(),
-                "_id": uuid4().hex,
                 "_source": {
                     Field.CONTENT_KEY.value: documents[i].page_content,
                     Field.VECTOR.value: embeddings[i],  # Make sure you pass an array here
                     Field.METADATA_KEY.value: documents[i].metadata,
                 },
             }
+            # See https://github.com/langchain-ai/langchainjs/issues/4346#issuecomment-1935123377
+            if self._client_config.aws_service not in ["aoss"]:
+                action["_id"] = uuid4().hex
             actions.append(action)

-        helpers.bulk(self._client, actions)
+        helpers.bulk(
+            client=self._client,
+            actions=actions,
+            timeout=30,
+            max_retries=3,
+        )

     def get_ids_by_metadata_field(self, key: str, value: str):
         query = {"query": {"term": {f"{Field.METADATA_KEY.value}.{key}": value}}}
@@ -234,6 +259,7 @@ class OpenSearchVector(BaseVector):
                 },
             }

+        logger.info(f"Creating OpenSearch index {self._collection_name.lower()}")
         self._client.indices.create(index=self._collection_name.lower(), body=index_body)

         redis_client.set(collection_exist_cache_key, 1, ex=3600)
@@ -252,9 +278,12 @@ class OpenSearchVectorFactory(AbstractVectorFactory):
         open_search_config = OpenSearchConfig(
             host=dify_config.OPENSEARCH_HOST or "localhost",
             port=dify_config.OPENSEARCH_PORT,
+            secure=dify_config.OPENSEARCH_SECURE,
+            auth_method=dify_config.OPENSEARCH_AUTH_METHOD.value,
             user=dify_config.OPENSEARCH_USER,
             password=dify_config.OPENSEARCH_PASSWORD,
-            secure=dify_config.OPENSEARCH_SECURE,
+            aws_region=dify_config.OPENSEARCH_AWS_REGION,
+            aws_service=dify_config.OPENSEARCH_AWS_SERVICE,
         )

         return OpenSearchVector(collection_name=collection_name, config=open_search_config)
@@ -59,8 +59,8 @@ CREATE TABLE IF NOT EXISTS {table_name} (
 )
 """
 SQL_CREATE_INDEX = """
 CREATE INDEX IF NOT EXISTS idx_docs_{table_name} ON {table_name}(text)
 INDEXTYPE IS CTXSYS.CONTEXT PARAMETERS
 ('FILTER CTXSYS.NULL_FILTER SECTION GROUP CTXSYS.HTML_SECTION_GROUP LEXER world_lexer')
 """

@@ -164,7 +164,7 @@ class OracleVector(BaseVector):
         with conn.cursor() as cur:
             try:
                 cur.execute(
                     f"""INSERT INTO {self.table_name} (id, text, meta, embedding)
                     VALUES (:1, :2, :3, :4)""",
                     value,
                 )
@@ -227,8 +227,8 @@ class OracleVector(BaseVector):
             conn.outputtypehandler = self.output_type_handler
             with conn.cursor() as cur:
                 cur.execute(
                     f"""SELECT meta, text, vector_distance(embedding,(select to_vector(:1) from dual),cosine)
                     AS distance FROM {self.table_name}
                     {where_clause} ORDER BY distance fetch first {top_k} rows only""",
                     [numpy.array(query_vector)],
                 )
@@ -290,7 +290,7 @@ class OracleVector(BaseVector):
                     document_ids = ", ".join(f"'{id}'" for id in document_ids_filter)
                     where_clause = f" AND metadata->>'document_id' in ({document_ids}) "
                 cur.execute(
                     f"""select meta, text, embedding FROM {self.table_name}
                     WHERE CONTAINS(text, :kk, 1) > 0 {where_clause}
                     order by score(1) desc fetch first {top_k} rows only""",
                     kk=" ACCUM ".join(entities),
@@ -61,7 +61,7 @@ CREATE TABLE IF NOT EXISTS {table_name} (
 """

 SQL_CREATE_INDEX = """
 CREATE INDEX IF NOT EXISTS embedding_cosine_v1_idx ON {table_name}
 USING hnsw (embedding vector_cosine_ops) WITH (m = 16, ef_construction = 64);
 """

@@ -58,7 +58,7 @@ CREATE TABLE IF NOT EXISTS {table_name} (
 """

 SQL_CREATE_INDEX = """
 CREATE INDEX IF NOT EXISTS embedding_cosine_v1_idx ON {table_name}
 USING hnsw (embedding floatvector_cosine_ops) WITH (m = 16, ef_construction = 64);
 """

@@ -205,9 +205,9 @@ class TiDBVector(BaseVector):

         with Session(self._engine) as session:
             select_statement = sql_text(f"""
                 SELECT meta, text, distance
                 FROM (
                     SELECT
                         meta,
                         text,
                         {tidb_dist_func}(vector, :query_vector_str) AS distance
@@ -20,7 +20,7 @@ class WaterCrawlProvider:
         }
         if options.get("crawl_sub_pages", True):
             spider_options["page_limit"] = options.get("limit", 1)
-            spider_options["max_depth"] = options.get("depth", 1)
+            spider_options["max_depth"] = options.get("max_depth", 1)
             spider_options["include_paths"] = options.get("includes", "").split(",") if options.get("includes") else []
             spider_options["exclude_paths"] = options.get("excludes", "").split(",") if options.get("excludes") else []

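The one-word fix matters: the options dict carries a `max_depth` key, so the old `options.get("depth", 1)` lookup always fell back to a depth of 1. For example:

    options = {"crawl_sub_pages": True, "limit": 10, "max_depth": 3}
    options.get("depth", 1)      # before: 1 -- the "depth" key never exists
    options.get("max_depth", 1)  # after: 3 -- matches the key actually sent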
@@ -52,14 +52,16 @@ class RerankModelRunner(BaseRerankRunner):
         rerank_documents = []

         for result in rerank_result.docs:
-            # format document
-            rerank_document = Document(
-                page_content=result.text,
-                metadata=documents[result.index].metadata,
-                provider=documents[result.index].provider,
-            )
-            if rerank_document.metadata is not None:
-                rerank_document.metadata["score"] = result.score
-                rerank_documents.append(rerank_document)
+            if score_threshold is None or result.score >= score_threshold:
+                # format document
+                rerank_document = Document(
+                    page_content=result.text,
+                    metadata=documents[result.index].metadata,
+                    provider=documents[result.index].provider,
+                )
+                if rerank_document.metadata is not None:
+                    rerank_document.metadata["score"] = result.score
+                    rerank_documents.append(rerank_document)

-        return rerank_documents
+        rerank_documents.sort(key=lambda x: x.metadata.get("score", 0.0), reverse=True)
+        return rerank_documents[:top_n] if top_n else rerank_documents
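Together the changes make the runner honor `score_threshold` while collecting results, then return them sorted by score and truncated to `top_n`. A small sketch of the selection logic on bare scores (the real code operates on `Document` objects):

    def select(scores, score_threshold=None, top_n=None):
        # keep only results at or above the threshold, best first, at most top_n
        kept = [s for s in scores if score_threshold is None or s >= score_threshold]
        kept.sort(reverse=True)
        return kept[:top_n] if top_n else kept

    assert select([0.2, 0.9, 0.5], score_threshold=0.4, top_n=1) == [0.9]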
Some files were not shown because too many files have changed in this diff.