diff --git a/.devcontainer/README.md b/.devcontainer/README.md
index df12a3c2d6..2b18630a21 100644
--- a/.devcontainer/README.md
+++ b/.devcontainer/README.md
@@ -34,4 +34,4 @@ if you see such error message when you open this project in codespaces:

a simple workaround is to change the `/signin` endpoint to another one, log in with your GitHub account and close the tab, then change it back to the `/signin` endpoint. After that, everything works again.
-The reason is `signin` endpoint is not allowed in codespaces, details can be found [here](https://github.com/orgs/community/discussions/5204)
\ No newline at end of file
+The reason is that the `signin` endpoint is not allowed in Codespaces; details can be found [here](https://github.com/orgs/community/discussions/5204)
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 339ad60ce0..8246544061 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -2,7 +2,7 @@
// README at: https://github.com/devcontainers/templates/tree/main/src/anaconda
{
"name": "Python 3.12",
- "build": {
+ "build": {
"context": "..",
"dockerfile": "Dockerfile"
},
diff --git a/.devcontainer/noop.txt b/.devcontainer/noop.txt
index dde8dc3c10..49de88dbd4 100644
--- a/.devcontainer/noop.txt
+++ b/.devcontainer/noop.txt
@@ -1,3 +1,3 @@
This file is copied into the container along with environment.yml* from the parent
-folder. This file is included to prevents the Dockerfile COPY instruction from
-failing if no environment.yml is found.
\ No newline at end of file
+folder. This file is included to prevent the Dockerfile COPY instruction from
+failing if no environment.yml is found.
diff --git a/web/.editorconfig b/.editorconfig
similarity index 51%
rename from web/.editorconfig
rename to .editorconfig
index e1d3f0b992..374da0b5d2 100644
--- a/web/.editorconfig
+++ b/.editorconfig
@@ -5,18 +5,35 @@ root = true
# Unix-style newlines with a newline ending every file
[*]
+charset = utf-8
end_of_line = lf
insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.py]
+indent_size = 4
+indent_style = space
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+
+[*.toml]
+indent_size = 4
+indent_style = space
+
+# Markdown and MDX are whitespace-sensitive languages.
+# Do not remove trailing spaces.
+[*.{md,mdx}]
+trim_trailing_whitespace = false
# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,tsx}]
-charset = utf-8
indent_style = space
indent_size = 2
-
-# Matches the exact files either package.json or .travis.yml
-[{package.json,.travis.yml}]
+# Matches the exact file package.json
+[package.json]
indent_style = space
indent_size = 2
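The new `.editorconfig` rules above are resolved per file by any EditorConfig-aware editor or linter. As a quick sanity check of what a given file ends up with, here is a minimal sketch using the `editorconfig` Python package (an assumption; any EditorConfig core implementation resolves the same answers):

```python
# pip install editorconfig  -- the EditorConfig Python core (assumed available)
import os

from editorconfig import EditorConfigError, get_properties


def show_effective_rules(path: str) -> None:
    """Print the EditorConfig properties that apply to a single file."""
    try:
        # get_properties() walks up from the file until it finds root = true
        options = get_properties(os.path.abspath(path))
    except EditorConfigError as exc:
        print(f"EditorConfig lookup failed: {exc}")
        return
    for key, value in options.items():
        print(f"{key} = {value}")


# e.g. *.md keeps trailing whitespace, *.py gets 4-space indents
show_effective_rules("README.md")
show_effective_rules("api/app.py")
```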
diff --git a/.gitattributes b/.gitattributes
index a10da53408..a32a39f65c 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,5 +1,5 @@
# Ensure that .sh scripts use LF as line separator, even if they are checked out
-# to Windows(NTFS) file-system, by a user of Docker for Windows.
+# to Windows(NTFS) file-system, by a user of Docker for Windows.
# These .sh scripts will be run from the Container after `docker compose up -d`.
# If they appear to be CRLF style, Dash from the Container will fail to execute
# them.
diff --git a/.github/linters/editorconfig-checker.json b/.github/linters/editorconfig-checker.json
new file mode 100644
index 0000000000..ce6e9ae341
--- /dev/null
+++ b/.github/linters/editorconfig-checker.json
@@ -0,0 +1,22 @@
+{
+ "Verbose": false,
+ "Debug": false,
+ "IgnoreDefaults": false,
+ "SpacesAfterTabs": false,
+ "NoColor": false,
+ "Exclude": [
+ "^web/public/vs/",
+ "^web/public/pdf.worker.min.mjs$",
+ "web/app/components/base/icons/src/vender/"
+ ],
+ "AllowedContentTypes": [],
+ "PassedFiles": [],
+ "Disable": {
+ "EndOfLine": false,
+ "Indentation": false,
+ "IndentSize": true,
+ "InsertFinalNewline": false,
+ "TrimTrailingWhitespace": false,
+ "MaxLineLength": false
+ }
+}
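For reference, `editorconfig-checker` treats each `Exclude` entry as a regular expression matched against the relative file path, which is why two of the entries above are anchored and one is not. A small illustrative re-implementation of that matching rule (hypothetical, for clarity only):

```python
import re

# Same entries as in the JSON config above
EXCLUDE = [
    r"^web/public/vs/",                            # anchored: only paths starting here
    r"^web/public/pdf.worker.min.mjs$",            # anchored both ends: one exact file
    r"web/app/components/base/icons/src/vender/",  # unanchored: matches anywhere in the path
]


def is_excluded(path: str) -> bool:
    return any(re.search(pattern, path) for pattern in EXCLUDE)


assert is_excluded("web/public/vs/editor/editor.main.js")
assert is_excluded("web/public/pdf.worker.min.mjs")
assert not is_excluded("web/app/components/base/button.tsx")
```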
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index 98e5fd5150..56f9b433f3 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -9,6 +9,12 @@ concurrency:
group: style-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
+permissions:
+ checks: write
+ statuses: write
+ contents: read
+
+
jobs:
python-style:
name: Python Style
@@ -163,3 +169,14 @@ jobs:
VALIDATE_DOCKERFILE_HADOLINT: true
VALIDATE_XML: true
VALIDATE_YAML: true
+
+ - name: EditorConfig checks
+ uses: super-linter/super-linter/slim@v7
+ env:
+ DEFAULT_BRANCH: main
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ IGNORE_GENERATED_FILES: true
+ IGNORE_GITIGNORED_FILES: true
+ # EditorConfig validation
+ VALIDATE_EDITORCONFIG: true
+ EDITORCONFIG_FILE_NAME: editorconfig-checker.json
diff --git a/CONTRIBUTING_ES.md b/CONTRIBUTING_ES.md
index 261aa0fda1..98cbb5b457 100644
--- a/CONTRIBUTING_ES.md
+++ b/CONTRIBUTING_ES.md
@@ -90,4 +90,4 @@ Recomendamos revisar este documento cuidadosamente antes de proceder con la conf
No dudes en contactarnos si encuentras algún problema durante el proceso de configuración.
## Obteniendo Ayuda
-Si alguna vez te quedas atascado o tienes una pregunta urgente mientras contribuyes, simplemente envíanos tus consultas a través del issue relacionado de GitHub, o únete a nuestro [Discord](https://discord.gg/8Tpq4AcN9c) para una charla rápida.
\ No newline at end of file
+Si alguna vez te quedas atascado o tienes una pregunta urgente mientras contribuyes, simplemente envíanos tus consultas a través del issue relacionado de GitHub, o únete a nuestro [Discord](https://discord.gg/8Tpq4AcN9c) para una charla rápida.
diff --git a/CONTRIBUTING_FR.md b/CONTRIBUTING_FR.md
index c3418f86cc..fc8410dfd6 100644
--- a/CONTRIBUTING_FR.md
+++ b/CONTRIBUTING_FR.md
@@ -90,4 +90,4 @@ Nous recommandons de revoir attentivement ce document avant de procéder à la c
N'hésitez pas à nous contacter si vous rencontrez des problèmes pendant le processus de configuration.
## Obtenir de l'aide
-Si jamais vous êtes bloqué ou avez une question urgente en contribuant, envoyez-nous simplement vos questions via le problème GitHub concerné, ou rejoignez notre [Discord](https://discord.gg/8Tpq4AcN9c) pour une discussion rapide.
\ No newline at end of file
+Si jamais vous êtes bloqué ou avez une question urgente en contribuant, envoyez-nous simplement vos questions via le problème GitHub concerné, ou rejoignez notre [Discord](https://discord.gg/8Tpq4AcN9c) pour une discussion rapide.
diff --git a/CONTRIBUTING_KR.md b/CONTRIBUTING_KR.md
index fcf44d495a..78d3f38c47 100644
--- a/CONTRIBUTING_KR.md
+++ b/CONTRIBUTING_KR.md
@@ -90,4 +90,4 @@ PR 설명에 기존 이슈를 연결하거나 새 이슈를 여는 것을 잊지
설정 과정에서 문제가 발생하면 언제든지 연락해 주세요.
## 도움 받기
-기여하는 동안 막히거나 긴급한 질문이 있으면, 관련 GitHub 이슈를 통해 질문을 보내거나, 빠른 대화를 위해 우리의 [Discord](https://discord.gg/8Tpq4AcN9c)에 참여하세요.
\ No newline at end of file
+기여하는 동안 막히거나 긴급한 질문이 있으면, 관련 GitHub 이슈를 통해 질문을 보내거나, 빠른 대화를 위해 우리의 [Discord](https://discord.gg/8Tpq4AcN9c)에 참여하세요.
diff --git a/CONTRIBUTING_PT.md b/CONTRIBUTING_PT.md
index bba76c17ee..7347fd7f9c 100644
--- a/CONTRIBUTING_PT.md
+++ b/CONTRIBUTING_PT.md
@@ -90,4 +90,4 @@ Recomendamos revisar este documento cuidadosamente antes de prosseguir com a con
Sinta-se à vontade para entrar em contato se encontrar quaisquer problemas durante o processo de configuração.
## Obtendo Ajuda
-Se você ficar preso ou tiver uma dúvida urgente enquanto contribui, simplesmente envie suas perguntas através do problema relacionado no GitHub, ou entre no nosso [Discord](https://discord.gg/8Tpq4AcN9c) para uma conversa rápida.
\ No newline at end of file
+Se você ficar preso ou tiver uma dúvida urgente enquanto contribui, simplesmente envie suas perguntas através do problema relacionado no GitHub, ou entre no nosso [Discord](https://discord.gg/8Tpq4AcN9c) para uma conversa rápida.
diff --git a/CONTRIBUTING_TR.md b/CONTRIBUTING_TR.md
index 4e216d22a4..681f05689b 100644
--- a/CONTRIBUTING_TR.md
+++ b/CONTRIBUTING_TR.md
@@ -90,4 +90,4 @@ Kuruluma geçmeden önce bu belgeyi dikkatlice incelemenizi öneririz, çünkü
Kurulum süreci sırasında herhangi bir sorunla karşılaşırsanız bizimle iletişime geçmekten çekinmeyin.
## Yardım Almak
-Katkıda bulunurken takılırsanız veya yanıcı bir sorunuz olursa, sorularınızı ilgili GitHub sorunu aracılığıyla bize gönderin veya hızlı bir sohbet için [Discord'umuza](https://discord.gg/8Tpq4AcN9c) katılın.
\ No newline at end of file
+Katkıda bulunurken takılırsanız veya yakıcı bir sorunuz olursa, sorularınızı ilgili GitHub sorunu aracılığıyla bize gönderin veya hızlı bir sohbet için [Discord'umuza](https://discord.gg/8Tpq4AcN9c) katılın.
diff --git a/README_SI.md b/README_SI.md
index caa5975973..9a38b558b4 100644
--- a/README_SI.md
+++ b/README_SI.md
@@ -1,259 +1,259 @@
-
-
-
-
-
-Dify je odprtokodna platforma za razvoj aplikacij LLM. Njegov intuitivni vmesnik združuje agentski potek dela z umetno inteligenco, cevovod RAG, zmogljivosti agentov, upravljanje modelov, funkcije opazovanja in več, kar vam omogoča hiter prehod od prototipa do proizvodnje.
-
-## Hitri začetek
-> Preden namestite Dify, se prepričajte, da vaša naprava izpolnjuje naslednje minimalne sistemske zahteve:
->
->- CPU >= 2 Core
->- RAM >= 4 GiB
-
-
-
-Najlažji način za zagon strežnika Dify je prek docker compose . Preden zaženete Dify z naslednjimi ukazi, se prepričajte, da sta Docker in Docker Compose nameščena na vašem računalniku:
-
-```bash
-cd dify
-cd docker
-cp .env.example .env
-docker compose up -d
-```
-
-Po zagonu lahko dostopate do nadzorne plošče Dify v brskalniku na [http://localhost/install](http://localhost/install) in začnete postopek inicializacije.
-
-#### Iskanje pomoči
-Prosimo, glejte naša pogosta vprašanja [FAQ](https://docs.dify.ai/getting-started/install-self-hosted/faqs) če naletite na težave pri nastavitvi Dify. Če imate še vedno težave, se obrnite na [skupnost ali nas](#community--contact).
-
-> Če želite prispevati k Difyju ali narediti dodaten razvoj, glejte naš vodnik za [uvajanje iz izvorne kode](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
-
-## Ključne značilnosti
-**1. Potek dela**:
- Zgradite in preizkusite zmogljive poteke dela AI na vizualnem platnu, pri čemer izkoristite vse naslednje funkcije in več.
-
-
- https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa
-
-
-
-**2. Celovita podpora za modele**:
- Brezhibna integracija s stotinami lastniških/odprtokodnih LLM-jev ducatov ponudnikov sklepanja in samostojnih rešitev, ki pokrivajo GPT, Mistral, Llama3 in vse modele, združljive z API-jem OpenAI. Celoten seznam podprtih ponudnikov modelov najdete [tukaj](https://docs.dify.ai/getting-started/readme/model-providers).
-
-
-
-
-**3. Prompt IDE**:
- intuitivni vmesnik za ustvarjanje pozivov, primerjavo zmogljivosti modela in dodajanje dodatnih funkcij, kot je pretvorba besedila v govor, aplikaciji, ki temelji na klepetu.
-
-**4. RAG Pipeline**:
- E Obsežne zmogljivosti RAG, ki pokrivajo vse od vnosa dokumenta do priklica, s podporo za ekstrakcijo besedila iz datotek PDF, PPT in drugih običajnih formatov dokumentov.
-
-**5. Agent capabilities**:
- definirate lahko agente, ki temeljijo na klicanju funkcij LLM ali ReAct, in dodate vnaprej izdelana orodja ali orodja po meri za agenta. Dify ponuja več kot 50 vgrajenih orodij za agente AI, kot so Google Search, DALL·E, Stable Diffusion in WolframAlpha.
-
-**6. LLMOps**:
- Spremljajte in analizirajte dnevnike aplikacij in učinkovitost skozi čas. Pozive, nabore podatkov in modele lahko nenehno izboljšujete na podlagi proizvodnih podatkov in opomb.
-
-**7. Backend-as-a-Service**:
- AVse ponudbe Difyja so opremljene z ustreznimi API-ji, tako da lahko Dify brez težav integrirate v svojo poslovno logiko.
-
-## Primerjava Funkcij
-
-
-
-
Funkcija
-
Dify.AI
-
LangChain
-
Flowise
-
OpenAI Assistants API
-
-
-
Programski pristop
-
API + usmerjeno v aplikacije
-
Python koda
-
Usmerjeno v aplikacije
-
Usmerjeno v API
-
-
-
Podprti LLM-ji
-
Bogata izbira
-
Bogata izbira
-
Bogata izbira
-
Samo OpenAI
-
-
-
RAG pogon
-
✅
-
✅
-
✅
-
✅
-
-
-
Agent
-
✅
-
✅
-
❌
-
✅
-
-
-
Potek dela
-
✅
-
❌
-
✅
-
❌
-
-
-
Spremljanje
-
✅
-
✅
-
❌
-
❌
-
-
-
Funkcija za podjetja (SSO/nadzor dostopa)
-
✅
-
❌
-
❌
-
❌
-
-
-
Lokalna namestitev
-
✅
-
✅
-
✅
-
❌
-
-
-
-## Uporaba Dify
-
-- **Cloud **
-Gostimo storitev Dify Cloud za vsakogar, ki jo lahko preizkusite brez nastavitev. Zagotavlja vse zmožnosti različice za samostojno namestitev in vključuje 200 brezplačnih klicev GPT-4 v načrtu peskovnika.
-
-- **Self-hosting Dify Community Edition**
-Hitro zaženite Dify v svojem okolju s tem [začetnim vodnikom](#quick-start) . Za dodatne reference in podrobnejša navodila uporabite našo [dokumentacijo](https://docs.dify.ai) .
-
-
-- **Dify za podjetja/organizacije**
-Ponujamo dodatne funkcije, osredotočene na podjetja. Zabeležite svoja vprašanja prek tega klepetalnega robota ali nam pošljite e-pošto, da se pogovorimo o potrebah podjetja.
- > Za novoustanovljena podjetja in mala podjetja, ki uporabljajo AWS, si oglejte Dify Premium na AWS Marketplace in ga z enim klikom uvedite v svoj AWS VPC. To je cenovno ugodna ponudba AMI z možnostjo ustvarjanja aplikacij z logotipom in blagovno znamko po meri.
-
-
-## Staying ahead
-
-Star Dify on GitHub and be instantly notified of new releases.
-
-
-
-
-## Napredne nastavitve
-
-Če morate prilagoditi konfiguracijo, si oglejte komentarje v naši datoteki .env.example in posodobite ustrezne vrednosti v svoji .env datoteki. Poleg tega boste morda morali prilagoditi docker-compose.yamlsamo datoteko, na primer spremeniti različice slike, preslikave vrat ali namestitve nosilca, glede na vaše specifično okolje in zahteve za uvajanje. Po kakršnih koli spremembah ponovno zaženite docker-compose up -d. Celoten seznam razpoložljivih spremenljivk okolja najdete tukaj .
-
-Če želite konfigurirati visoko razpoložljivo nastavitev, so na voljo Helm Charts in datoteke YAML, ki jih prispeva skupnost, ki omogočajo uvedbo Difyja v Kubernetes.
-
-- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
-- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
-- [YAML file by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
-- [YAML file by @wyy-holding](https://github.com/wyy-holding/dify-k8s)
-
-#### Uporaba Terraform za uvajanje
-
-namestite Dify v Cloud Platform z enim klikom z uporabo [terraform](https://www.terraform.io/)
-
-##### Azure Global
-- [Azure Terraform by @nikawang](https://github.com/nikawang/dify-azure-terraform)
-
-##### Google Cloud
-- [Google Cloud Terraform by @sotazum](https://github.com/DeNA/dify-google-cloud-terraform)
-
-#### Uporaba AWS CDK za uvajanje
-
-Uvedite Dify v AWS z uporabo [CDK](https://aws.amazon.com/cdk/)
-
-##### AWS
-- [AWS CDK by @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws)
-
-## Prispevam
-
-Za tiste, ki bi radi prispevali kodo, si oglejte naš vodnik za prispevke . Hkrati vas prosimo, da podprete Dify tako, da ga delite na družbenih medijih ter na dogodkih in konferencah.
-
-
-
-> Iščemo sodelavce za pomoč pri prevajanju Difyja v jezike, ki niso mandarinščina ali angleščina. Če želite pomagati, si oglejte i18n README za več informacij in nam pustite komentar v global-userskanalu našega strežnika skupnosti Discord .
-
-## Skupnost in stik
-
-* [Github Discussion](https://github.com/langgenius/dify/discussions). Najboljše za: izmenjavo povratnih informacij in postavljanje vprašanj.
-* [GitHub Issues](https://github.com/langgenius/dify/issues). Najboljše za: hrošče, na katere naletite pri uporabi Dify.AI, in predloge funkcij. Oglejte si naš [vodnik za prispevke](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
-* [Discord](https://discord.gg/FngNHpbcY7). Najboljše za: deljenje vaših aplikacij in druženje s skupnostjo.
-* [X(Twitter)](https://twitter.com/dify_ai). Najboljše za: deljenje vaših aplikacij in druženje s skupnostjo.
-
-**Contributors**
-
-
-
-
-
-## Star history
-
-[](https://star-history.com/#langgenius/dify&Date)
-
-
-## Varnostno razkritje
-
-Zaradi zaščite vaše zasebnosti se izogibajte objavljanju varnostnih vprašanj na GitHub. Namesto tega pošljite vprašanja na security@dify.ai in zagotovili vam bomo podrobnejši odgovor.
-
-## Licenca
-
-To skladišče je na voljo pod [odprtokodno licenco Dify](LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami.
+
+
+
+
+
+Dify je odprtokodna platforma za razvoj aplikacij LLM. Njegov intuitivni vmesnik združuje agentski potek dela z umetno inteligenco, cevovod RAG, zmogljivosti agentov, upravljanje modelov, funkcije opazovanja in več, kar vam omogoča hiter prehod od prototipa do proizvodnje.
+
+## Hitri začetek
+> Preden namestite Dify, se prepričajte, da vaša naprava izpolnjuje naslednje minimalne sistemske zahteve:
+>
+>- CPU >= 2 Core
+>- RAM >= 4 GiB
+
+
+
+Najlažji način za zagon strežnika Dify je prek `docker compose`. Preden zaženete Dify z naslednjimi ukazi, se prepričajte, da sta Docker in Docker Compose nameščena na vašem računalniku:
+
+```bash
+cd dify
+cd docker
+cp .env.example .env
+docker compose up -d
+```
+
+Po zagonu lahko dostopate do nadzorne plošče Dify v brskalniku na [http://localhost/install](http://localhost/install) in začnete postopek inicializacije.
+
+#### Iskanje pomoči
+Prosimo, glejte naša pogosta vprašanja [FAQ](https://docs.dify.ai/getting-started/install-self-hosted/faqs), če naletite na težave pri nastavitvi Dify. Če imate še vedno težave, se obrnite na [skupnost ali nas](#community--contact).
+
+> Če želite prispevati k Difyju ali narediti dodaten razvoj, glejte naš vodnik za [uvajanje iz izvorne kode](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
+
+## Ključne značilnosti
+**1. Potek dela**:
+ Zgradite in preizkusite zmogljive poteke dela AI na vizualnem platnu, pri čemer izkoristite vse naslednje funkcije in več.
+
+
+ https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa
+
+
+
+**2. Celovita podpora za modele**:
+ Brezhibna integracija s stotinami lastniških/odprtokodnih LLM-jev ducatov ponudnikov sklepanja in samostojnih rešitev, ki pokrivajo GPT, Mistral, Llama3 in vse modele, združljive z API-jem OpenAI. Celoten seznam podprtih ponudnikov modelov najdete [tukaj](https://docs.dify.ai/getting-started/readme/model-providers).
+
+
+
+
+**3. Prompt IDE**:
+ Intuitivni vmesnik za ustvarjanje pozivov, primerjavo zmogljivosti modela in dodajanje dodatnih funkcij, kot je pretvorba besedila v govor, aplikaciji, ki temelji na klepetu.
+
+**4. RAG Pipeline**:
+ Obsežne zmogljivosti RAG, ki pokrivajo vse od vnosa dokumenta do priklica, s podporo za ekstrakcijo besedila iz datotek PDF, PPT in drugih običajnih formatov dokumentov.
+
+**5. Agent capabilities**:
+ Definirate lahko agente, ki temeljijo na klicanju funkcij LLM ali ReAct, in dodate vnaprej izdelana orodja ali orodja po meri za agenta. Dify ponuja več kot 50 vgrajenih orodij za agente AI, kot so Google Search, DALL·E, Stable Diffusion in WolframAlpha.
+
+**6. LLMOps**:
+ Spremljajte in analizirajte dnevnike aplikacij in učinkovitost skozi čas. Pozive, nabore podatkov in modele lahko nenehno izboljšujete na podlagi proizvodnih podatkov in opomb.
+
+**7. Backend-as-a-Service**:
+ Vse ponudbe Difyja so opremljene z ustreznimi API-ji, tako da lahko Dify brez težav integrirate v svojo poslovno logiko.
+
+## Primerjava Funkcij
+
+
+
+
Funkcija
+
Dify.AI
+
LangChain
+
Flowise
+
OpenAI Assistants API
+
+
+
Programski pristop
+
API + usmerjeno v aplikacije
+
Python koda
+
Usmerjeno v aplikacije
+
Usmerjeno v API
+
+
+
Podprti LLM-ji
+
Bogata izbira
+
Bogata izbira
+
Bogata izbira
+
Samo OpenAI
+
+
+
RAG pogon
+
✅
+
✅
+
✅
+
✅
+
+
+
Agent
+
✅
+
✅
+
❌
+
✅
+
+
+
Potek dela
+
✅
+
❌
+
✅
+
❌
+
+
+
Spremljanje
+
✅
+
✅
+
❌
+
❌
+
+
+
Funkcija za podjetja (SSO/nadzor dostopa)
+
✅
+
❌
+
❌
+
❌
+
+
+
Lokalna namestitev
+
✅
+
✅
+
✅
+
❌
+
+
+
+## Uporaba Dify
+
+- **Cloud**
+Gostimo storitev Dify Cloud za vsakogar, ki jo lahko preizkusite brez nastavitev. Zagotavlja vse zmožnosti različice za samostojno namestitev in vključuje 200 brezplačnih klicev GPT-4 v načrtu peskovnika.
+
+- **Self-hosting Dify Community Edition**
+Hitro zaženite Dify v svojem okolju s tem [začetnim vodnikom](#quick-start). Za dodatne reference in podrobnejša navodila uporabite našo [dokumentacijo](https://docs.dify.ai).
+
+
+- **Dify za podjetja/organizacije**
+Ponujamo dodatne funkcije, osredotočene na podjetja. Zabeležite svoja vprašanja prek tega klepetalnega robota ali nam pošljite e-pošto, da se pogovorimo o potrebah podjetja.
+ > Za novoustanovljena podjetja in mala podjetja, ki uporabljajo AWS, si oglejte Dify Premium na AWS Marketplace in ga z enim klikom uvedite v svoj AWS VPC. To je cenovno ugodna ponudba AMI z možnostjo ustvarjanja aplikacij z logotipom in blagovno znamko po meri.
+
+
+## Staying ahead
+
+Star Dify on GitHub and be instantly notified of new releases.
+
+
+
+
+## Napredne nastavitve
+
+Če morate prilagoditi konfiguracijo, si oglejte komentarje v naši datoteki `.env.example` in posodobite ustrezne vrednosti v svoji datoteki `.env`. Poleg tega boste morda morali prilagoditi samo datoteko `docker-compose.yaml`, na primer spremeniti različice slike, preslikave vrat ali namestitve nosilca, glede na vaše specifično okolje in zahteve za uvajanje. Po kakršnih koli spremembah ponovno zaženite `docker-compose up -d`. Celoten seznam razpoložljivih spremenljivk okolja najdete tukaj.
+
+Če želite konfigurirati visoko razpoložljivo nastavitev, so na voljo Helm Charts in datoteke YAML, ki jih prispeva skupnost, ki omogočajo uvedbo Difyja v Kubernetes.
+
+- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
+- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
+- [YAML file by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
+- [YAML file by @wyy-holding](https://github.com/wyy-holding/dify-k8s)
+
+#### Uporaba Terraform za uvajanje
+
+Namestite Dify v Cloud Platform z enim klikom z uporabo [terraform](https://www.terraform.io/)
+
+##### Azure Global
+- [Azure Terraform by @nikawang](https://github.com/nikawang/dify-azure-terraform)
+
+##### Google Cloud
+- [Google Cloud Terraform by @sotazum](https://github.com/DeNA/dify-google-cloud-terraform)
+
+#### Uporaba AWS CDK za uvajanje
+
+Uvedite Dify v AWS z uporabo [CDK](https://aws.amazon.com/cdk/)
+
+##### AWS
+- [AWS CDK by @KevinZhao](https://github.com/aws-samples/solution-for-deploying-dify-on-aws)
+
+## Prispevam
+
+Za tiste, ki bi radi prispevali kodo, si oglejte naš [vodnik za prispevke](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md). Hkrati vas prosimo, da podprete Dify tako, da ga delite na družbenih medijih ter na dogodkih in konferencah.
+
+
+
+> Iščemo sodelavce za pomoč pri prevajanju Difyja v jezike, ki niso mandarinščina ali angleščina. Če želite pomagati, si oglejte i18n README za več informacij in nam pustite komentar v kanalu global-users našega strežnika skupnosti Discord.
+
+## Skupnost in stik
+
+* [Github Discussion](https://github.com/langgenius/dify/discussions). Najboljše za: izmenjavo povratnih informacij in postavljanje vprašanj.
+* [GitHub Issues](https://github.com/langgenius/dify/issues). Najboljše za: hrošče, na katere naletite pri uporabi Dify.AI, in predloge funkcij. Oglejte si naš [vodnik za prispevke](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
+* [Discord](https://discord.gg/FngNHpbcY7). Najboljše za: deljenje vaših aplikacij in druženje s skupnostjo.
+* [X(Twitter)](https://twitter.com/dify_ai). Najboljše za: deljenje vaših aplikacij in druženje s skupnostjo.
+
+**Contributors**
+
+
+
+
+
+## Star history
+
+[](https://star-history.com/#langgenius/dify&Date)
+
+
+## Varnostno razkritje
+
+Zaradi zaščite vaše zasebnosti se izogibajte objavljanju varnostnih vprašanj na GitHub. Namesto tega pošljite vprašanja na security@dify.ai in zagotovili vam bomo podrobnejši odgovor.
+
+## Licenca
+
+To skladišče je na voljo pod [odprtokodno licenco Dify](LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami.
diff --git a/api/.dockerignore b/api/.dockerignore
index 447edcda08..a0ce59d221 100644
--- a/api/.dockerignore
+++ b/api/.dockerignore
@@ -16,4 +16,4 @@ logs
.ruff_cache
# venv
-.venv
\ No newline at end of file
+.venv
diff --git a/api/app_factory.py b/api/app_factory.py
index 9648d770ab..586f2ded9e 100644
--- a/api/app_factory.py
+++ b/api/app_factory.py
@@ -52,7 +52,6 @@ def initialize_extensions(app: DifyApp):
ext_mail,
ext_migrate,
ext_otel,
- ext_otel_patch,
ext_proxy_fix,
ext_redis,
ext_repositories,
@@ -85,7 +84,6 @@ def initialize_extensions(app: DifyApp):
ext_proxy_fix,
ext_blueprints,
ext_commands,
- ext_otel_patch, # Apply patch before initializing OpenTelemetry
ext_otel,
]
for ext in extensions:
diff --git a/api/commands.py b/api/commands.py
index c5394c6f87..07bc6cd927 100644
--- a/api/commands.py
+++ b/api/commands.py
@@ -17,6 +17,7 @@ from core.rag.models.document import Document
from events.app_event import app_was_created
from extensions.ext_database import db
from extensions.ext_redis import redis_client
+from extensions.ext_storage import storage
from libs.helper import email as email_validate
from libs.password import hash_password, password_pattern, valid_password
from libs.rsa import generate_key_pair
@@ -443,13 +444,13 @@ def convert_to_agent_apps():
WHERE a.mode = 'chat'
AND am.agent_mode is not null
AND (
- am.agent_mode like '%"strategy": "function_call"%'
+ am.agent_mode like '%"strategy": "function_call"%'
OR am.agent_mode like '%"strategy": "react"%'
- )
+ )
AND (
- am.agent_mode like '{"enabled": true%'
+ am.agent_mode like '{"enabled": true%'
OR am.agent_mode like '{"max_iteration": %'
- ) ORDER BY a.created_at DESC LIMIT 1000
+ ) ORDER BY a.created_at DESC LIMIT 1000
"""
with db.engine.begin() as conn:
@@ -815,3 +816,331 @@ def clear_free_plan_tenant_expired_logs(days: int, batch: int, tenant_ids: list[
ClearFreePlanTenantExpiredLogs.process(days, batch, tenant_ids)
click.echo(click.style("Clear free plan tenant expired logs completed.", fg="green"))
+
+
+@click.option("-f", "--force", is_flag=True, help="Skip user confirmation and force the command to execute.")
+@click.command("clear-orphaned-file-records", help="Clear orphaned file records.")
+def clear_orphaned_file_records(force: bool):
+ """
+ Clear orphaned file records in the database.
+ """
+
+ # define tables and columns to process
+ files_tables = [
+ {"table": "upload_files", "id_column": "id", "key_column": "key"},
+ {"table": "tool_files", "id_column": "id", "key_column": "file_key"},
+ ]
+ ids_tables = [
+ {"type": "uuid", "table": "message_files", "column": "upload_file_id"},
+ {"type": "text", "table": "documents", "column": "data_source_info"},
+ {"type": "text", "table": "document_segments", "column": "content"},
+ {"type": "text", "table": "messages", "column": "answer"},
+ {"type": "text", "table": "workflow_node_executions", "column": "inputs"},
+ {"type": "text", "table": "workflow_node_executions", "column": "process_data"},
+ {"type": "text", "table": "workflow_node_executions", "column": "outputs"},
+ {"type": "text", "table": "conversations", "column": "introduction"},
+ {"type": "text", "table": "conversations", "column": "system_instruction"},
+ {"type": "json", "table": "messages", "column": "inputs"},
+ {"type": "json", "table": "messages", "column": "message"},
+ ]
+
+ # notify user and ask for confirmation
+ click.echo(
+ click.style(
+ "This command will first find and delete orphaned file records from the message_files table,", fg="yellow"
+ )
+ )
+ click.echo(
+ click.style(
+ "and then it will find and delete orphaned file records in the following tables:",
+ fg="yellow",
+ )
+ )
+ for files_table in files_tables:
+ click.echo(click.style(f"- {files_table['table']}", fg="yellow"))
+ click.echo(
+ click.style("The following tables and columns will be scanned to find orphaned file records:", fg="yellow")
+ )
+ for ids_table in ids_tables:
+ click.echo(click.style(f"- {ids_table['table']} ({ids_table['column']})", fg="yellow"))
+ click.echo("")
+
+ click.echo(click.style("!!! USE WITH CAUTION !!!", fg="red"))
+ click.echo(
+ click.style(
+ (
+ "Since not all patterns have been fully tested, "
+ "please note that this command may delete unintended file records."
+ ),
+ fg="yellow",
+ )
+ )
+ click.echo(
+ click.style("This cannot be undone. Please make sure to back up your database before proceeding.", fg="yellow")
+ )
+ click.echo(
+ click.style(
+ (
+ "It is also recommended to run this during the maintenance window, "
+ "as this may cause high load on your instance."
+ ),
+ fg="yellow",
+ )
+ )
+ if not force:
+ click.confirm("Do you want to proceed?", abort=True)
+
+ # start the cleanup process
+ click.echo(click.style("Starting orphaned file records cleanup.", fg="white"))
+
+ # clean up the orphaned records in the message_files table where message_id doesn't exist in messages table
+ try:
+ click.echo(
+ click.style("- Listing message_files records where message_id doesn't exist in messages table", fg="white")
+ )
+ query = (
+ "SELECT mf.id, mf.message_id "
+ "FROM message_files mf LEFT JOIN messages m ON mf.message_id = m.id "
+ "WHERE m.id IS NULL"
+ )
+ orphaned_message_files = []
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(query))
+ for i in rs:
+ orphaned_message_files.append({"id": str(i[0]), "message_id": str(i[1])})
+
+ if orphaned_message_files:
+ click.echo(click.style(f"Found {len(orphaned_message_files)} orphaned message_files records:", fg="white"))
+ for record in orphaned_message_files:
+ click.echo(click.style(f" - id: {record['id']}, message_id: {record['message_id']}", fg="black"))
+
+ if not force:
+ click.confirm(
+ (
+ f"Do you want to proceed "
+ f"to delete all {len(orphaned_message_files)} orphaned message_files records?"
+ ),
+ abort=True,
+ )
+
+ click.echo(click.style("- Deleting orphaned message_files records", fg="white"))
+ query = "DELETE FROM message_files WHERE id IN :ids"
+ with db.engine.begin() as conn:
+ conn.execute(db.text(query), {"ids": tuple([record["id"] for record in orphaned_message_files])})
+ click.echo(
+ click.style(f"Removed {len(orphaned_message_files)} orphaned message_files records.", fg="green")
+ )
+ else:
+ click.echo(click.style("No orphaned message_files records found. There is nothing to delete.", fg="green"))
+ except Exception as e:
+ click.echo(click.style(f"Error deleting orphaned message_files records: {str(e)}", fg="red"))
+
+ # clean up the orphaned records in the rest of the *_files tables
+ try:
+ # fetch file id and keys from each table
+ all_files_in_tables = []
+ for files_table in files_tables:
+ click.echo(click.style(f"- Listing file records in table {files_table['table']}", fg="white"))
+ query = f"SELECT {files_table['id_column']}, {files_table['key_column']} FROM {files_table['table']}"
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(query))
+ for i in rs:
+ all_files_in_tables.append({"table": files_table["table"], "id": str(i[0]), "key": i[1]})
+ click.echo(click.style(f"Found {len(all_files_in_tables)} files in tables.", fg="white"))
+
+ # fetch referred table and columns
+ guid_regexp = "[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"
+ all_ids_in_tables = []
+ for ids_table in ids_tables:
+ query = ""
+ if ids_table["type"] == "uuid":
+ click.echo(
+ click.style(
+ f"- Listing file ids in column {ids_table['column']} in table {ids_table['table']}", fg="white"
+ )
+ )
+ query = (
+ f"SELECT {ids_table['column']} FROM {ids_table['table']} WHERE {ids_table['column']} IS NOT NULL"
+ )
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(query))
+ for i in rs:
+ all_ids_in_tables.append({"table": ids_table["table"], "id": str(i[0])})
+ elif ids_table["type"] == "text":
+ click.echo(
+ click.style(
+ f"- Listing file-id-like strings in column {ids_table['column']} in table {ids_table['table']}",
+ fg="white",
+ )
+ )
+ query = (
+ f"SELECT regexp_matches({ids_table['column']}, '{guid_regexp}', 'g') AS extracted_id "
+ f"FROM {ids_table['table']}"
+ )
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(query))
+ for i in rs:
+ for j in i[0]:
+ all_ids_in_tables.append({"table": ids_table["table"], "id": j})
+ elif ids_table["type"] == "json":
+ click.echo(
+ click.style(
+ (
+ f"- Listing file-id-like JSON string in column {ids_table['column']} "
+ f"in table {ids_table['table']}"
+ ),
+ fg="white",
+ )
+ )
+ query = (
+ f"SELECT regexp_matches({ids_table['column']}::text, '{guid_regexp}', 'g') AS extracted_id "
+ f"FROM {ids_table['table']}"
+ )
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(query))
+ for i in rs:
+ for j in i[0]:
+ all_ids_in_tables.append({"table": ids_table["table"], "id": j})
+ click.echo(click.style(f"Found {len(all_ids_in_tables)} file ids in tables.", fg="white"))
+
+ except Exception as e:
+ click.echo(click.style(f"Error fetching keys: {str(e)}", fg="red"))
+ return
+
+ # find orphaned files
+ all_files = [file["id"] for file in all_files_in_tables]
+ all_ids = [file["id"] for file in all_ids_in_tables]
+ orphaned_files = list(set(all_files) - set(all_ids))
+ if not orphaned_files:
+ click.echo(click.style("No orphaned file records found. There is nothing to delete.", fg="green"))
+ return
+ click.echo(click.style(f"Found {len(orphaned_files)} orphaned file records.", fg="white"))
+ for file in orphaned_files:
+ click.echo(click.style(f"- orphaned file id: {file}", fg="black"))
+ if not force:
+ click.confirm(f"Do you want to proceed to delete all {len(orphaned_files)} orphaned file records?", abort=True)
+
+ # delete orphaned records for each file
+ try:
+ for files_table in files_tables:
+ click.echo(click.style(f"- Deleting orphaned file records in table {files_table['table']}", fg="white"))
+ query = f"DELETE FROM {files_table['table']} WHERE {files_table['id_column']} IN :ids"
+ with db.engine.begin() as conn:
+ conn.execute(db.text(query), {"ids": tuple(orphaned_files)})
+ except Exception as e:
+ click.echo(click.style(f"Error deleting orphaned file records: {str(e)}", fg="red"))
+ return
+ click.echo(click.style(f"Removed {len(orphaned_files)} orphaned file records.", fg="green"))
+
+
+@click.option("-f", "--force", is_flag=True, help="Skip user confirmation and force the command to execute.")
+@click.command("remove-orphaned-files-on-storage", help="Remove orphaned files on the storage.")
+def remove_orphaned_files_on_storage(force: bool):
+ """
+ Remove orphaned files on the storage.
+ """
+
+ # define tables and columns to process
+ files_tables = [
+ {"table": "upload_files", "key_column": "key"},
+ {"table": "tool_files", "key_column": "file_key"},
+ ]
+ storage_paths = ["image_files", "tools", "upload_files"]
+
+ # notify user and ask for confirmation
+ click.echo(click.style("This command will find and remove orphaned files on the storage,", fg="yellow"))
+ click.echo(
+ click.style("by comparing the files on the storage with the records in the following tables:", fg="yellow")
+ )
+ for files_table in files_tables:
+ click.echo(click.style(f"- {files_table['table']}", fg="yellow"))
+ click.echo(click.style("The following paths on the storage will be scanned to find orphaned files:", fg="yellow"))
+ for storage_path in storage_paths:
+ click.echo(click.style(f"- {storage_path}", fg="yellow"))
+ click.echo("")
+
+ click.echo(click.style("!!! USE WITH CAUTION !!!", fg="red"))
+ click.echo(
+ click.style(
+ "Currently, this command will work only for opendal based storage (STORAGE_TYPE=opendal).", fg="yellow"
+ )
+ )
+ click.echo(
+ click.style(
+ "Since not all patterns have been fully tested, please note that this command may delete unintended files.",
+ fg="yellow",
+ )
+ )
+ click.echo(
+ click.style("This cannot be undone. Please make sure to back up your storage before proceeding.", fg="yellow")
+ )
+ click.echo(
+ click.style(
+ (
+ "It is also recommended to run this during the maintenance window, "
+ "as this may cause high load on your instance."
+ ),
+ fg="yellow",
+ )
+ )
+ if not force:
+ click.confirm("Do you want to proceed?", abort=True)
+
+ # start the cleanup process
+ click.echo(click.style("Starting orphaned files cleanup.", fg="white"))
+
+ # fetch file id and keys from each table
+ all_files_in_tables = []
+ try:
+ for files_table in files_tables:
+ click.echo(click.style(f"- Listing files from table {files_table['table']}", fg="white"))
+ query = f"SELECT {files_table['key_column']} FROM {files_table['table']}"
+ with db.engine.begin() as conn:
+ rs = conn.execute(db.text(query))
+ for i in rs:
+ all_files_in_tables.append(str(i[0]))
+ click.echo(click.style(f"Found {len(all_files_in_tables)} files in tables.", fg="white"))
+ except Exception as e:
+ click.echo(click.style(f"Error fetching keys: {str(e)}", fg="red"))
+
+ all_files_on_storage = []
+ for storage_path in storage_paths:
+ try:
+ click.echo(click.style(f"- Scanning files on storage path {storage_path}", fg="white"))
+ files = storage.scan(path=storage_path, files=True, directories=False)
+ all_files_on_storage.extend(files)
+ except FileNotFoundError as e:
+ click.echo(click.style(f" -> Skipping path {storage_path} as it does not exist.", fg="yellow"))
+ continue
+ except Exception as e:
+ click.echo(click.style(f" -> Error scanning files on storage path {storage_path}: {str(e)}", fg="red"))
+ continue
+ click.echo(click.style(f"Found {len(all_files_on_storage)} files on storage.", fg="white"))
+
+ # find orphaned files
+ orphaned_files = list(set(all_files_on_storage) - set(all_files_in_tables))
+ if not orphaned_files:
+ click.echo(click.style("No orphaned files found. There is nothing to remove.", fg="green"))
+ return
+ click.echo(click.style(f"Found {len(orphaned_files)} orphaned files.", fg="white"))
+ for file in orphaned_files:
+ click.echo(click.style(f"- orphaned file: {file}", fg="black"))
+ if not force:
+ click.confirm(f"Do you want to proceed to remove all {len(orphaned_files)} orphaned files?", abort=True)
+
+ # delete orphaned files
+ removed_files = 0
+ error_files = 0
+ for file in orphaned_files:
+ try:
+ storage.delete(file)
+ removed_files += 1
+ click.echo(click.style(f"- Removing orphaned file: {file}", fg="white"))
+ except Exception as e:
+ error_files += 1
+ click.echo(click.style(f"- Error deleting orphaned file {file}: {str(e)}", fg="red"))
+ continue
+ if error_files == 0:
+ click.echo(click.style(f"Removed {removed_files} orphaned files without errors.", fg="green"))
+ else:
+ click.echo(click.style(f"Removed {removed_files} orphaned files, with {error_files} errors.", fg="yellow"))
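Both new commands (`clear-orphaned-file-records` and `remove-orphaned-files-on-storage`) reduce to the same idea: collect every stored file id/key, collect every id still referenced anywhere (by exact column value or by GUID-shaped strings inside text/JSON columns), and treat the set difference as orphaned. A minimal sketch of that core step, with illustrative names only:

```python
import re

# Same GUID pattern the command uses to find file-id-like strings in text columns
GUID_RE = re.compile(
    r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"
)


def find_orphans(stored_ids: set[str], referencing_texts: list[str]) -> set[str]:
    """Return stored ids that no scanned text still references."""
    referenced: set[str] = set()
    for text in referencing_texts:
        referenced.update(GUID_RE.findall(text))
    return stored_ids - referenced


stored = {
    "5f1c6f3a-0000-4000-8000-000000000001",
    "5f1c6f3a-0000-4000-8000-000000000002",
}
texts = ['{"upload_file_id": "5f1c6f3a-0000-4000-8000-000000000001"}']
print(find_orphans(stored, texts))  # only the second id is orphaned
```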
diff --git a/api/configs/middleware/vdb/opensearch_config.py b/api/configs/middleware/vdb/opensearch_config.py
index 81dde4c04d..96f478e9a6 100644
--- a/api/configs/middleware/vdb/opensearch_config.py
+++ b/api/configs/middleware/vdb/opensearch_config.py
@@ -1,4 +1,5 @@
-from typing import Optional
+import enum
+from typing import Literal, Optional
from pydantic import Field, PositiveInt
from pydantic_settings import BaseSettings
@@ -9,6 +10,14 @@ class OpenSearchConfig(BaseSettings):
Configuration settings for OpenSearch
"""
+ class AuthMethod(enum.StrEnum):
+ """
+ Authentication method for OpenSearch
+ """
+
+ BASIC = "basic"
+ AWS_MANAGED_IAM = "aws_managed_iam"
+
OPENSEARCH_HOST: Optional[str] = Field(
description="Hostname or IP address of the OpenSearch server (e.g., 'localhost' or 'opensearch.example.com')",
default=None,
@@ -19,6 +28,16 @@ class OpenSearchConfig(BaseSettings):
default=9200,
)
+ OPENSEARCH_SECURE: bool = Field(
+ description="Whether to use SSL/TLS encrypted connection for OpenSearch (True for HTTPS, False for HTTP)",
+ default=False,
+ )
+
+ OPENSEARCH_AUTH_METHOD: AuthMethod = Field(
+ description="Authentication method for OpenSearch connection (default is 'basic')",
+ default=AuthMethod.BASIC,
+ )
+
OPENSEARCH_USER: Optional[str] = Field(
description="Username for authenticating with OpenSearch",
default=None,
@@ -29,7 +48,11 @@ class OpenSearchConfig(BaseSettings):
default=None,
)
- OPENSEARCH_SECURE: bool = Field(
- description="Whether to use SSL/TLS encrypted connection for OpenSearch (True for HTTPS, False for HTTP)",
- default=False,
+ OPENSEARCH_AWS_REGION: Optional[str] = Field(
+ description="AWS region for OpenSearch (e.g. 'us-west-2')",
+ default=None,
+ )
+
+ OPENSEARCH_AWS_SERVICE: Optional[Literal["es", "aoss"]] = Field(
+ description="AWS service for OpenSearch (e.g. 'aoss' for OpenSearch Serverless)", default=None
)
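The new `OPENSEARCH_AUTH_METHOD`, `OPENSEARCH_AWS_REGION`, and `OPENSEARCH_AWS_SERVICE` settings allow the vector store to authenticate with AWS-managed IAM instead of basic auth. A hedged sketch of how a client factory could branch on them, assuming the `opensearch-py` and `boto3` packages (this is not Dify's actual client code):

```python
import boto3
from opensearchpy import AWSV4SignerAuth, OpenSearch, RequestsHttpConnection


def build_client(config) -> OpenSearch:
    if config.OPENSEARCH_AUTH_METHOD == "aws_managed_iam":
        credentials = boto3.Session().get_credentials()
        # OPENSEARCH_AWS_SERVICE: "es" for managed domains, "aoss" for Serverless
        http_auth = AWSV4SignerAuth(
            credentials, config.OPENSEARCH_AWS_REGION, config.OPENSEARCH_AWS_SERVICE
        )
    else:  # AuthMethod.BASIC
        http_auth = (config.OPENSEARCH_USER, config.OPENSEARCH_PASSWORD)
    return OpenSearch(
        hosts=[{"host": config.OPENSEARCH_HOST, "port": config.OPENSEARCH_PORT}],
        http_auth=http_auth,
        use_ssl=config.OPENSEARCH_SECURE,
        verify_certs=config.OPENSEARCH_SECURE,
        connection_class=RequestsHttpConnection,
    )
```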
diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py
index a33c7727dc..c7960e1356 100644
--- a/api/configs/packaging/__init__.py
+++ b/api/configs/packaging/__init__.py
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
- default="1.3.0",
+ default="1.3.1",
)
COMMIT_SHA: str = Field(
diff --git a/api/constants/__init__.py b/api/constants/__init__.py
index 9162357466..a84de0a451 100644
--- a/api/constants/__init__.py
+++ b/api/constants/__init__.py
@@ -16,11 +16,25 @@ AUDIO_EXTENSIONS.extend([ext.upper() for ext in AUDIO_EXTENSIONS])
if dify_config.ETL_TYPE == "Unstructured":
- DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls"]
+ DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls", "vtt", "properties"]
DOCUMENT_EXTENSIONS.extend(("doc", "docx", "csv", "eml", "msg", "pptx", "xml", "epub"))
if dify_config.UNSTRUCTURED_API_URL:
DOCUMENT_EXTENSIONS.append("ppt")
DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])
else:
- DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls", "docx", "csv"]
+ DOCUMENT_EXTENSIONS = [
+ "txt",
+ "markdown",
+ "md",
+ "mdx",
+ "pdf",
+ "html",
+ "htm",
+ "xlsx",
+ "xls",
+ "docx",
+ "csv",
+ "vtt",
+ "properties",
+ ]
DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])
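The `.vtt` and `.properties` additions widen the upload allow-list for both ETL paths; note the final `extend` keeps an uppercase twin of every entry. A hypothetical sketch of how such an allow-list is typically consulted:

```python
DOCUMENT_EXTENSIONS = ["txt", "md", "pdf", "vtt", "properties"]  # abridged
DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])  # same trick as above


def is_supported_document(filename: str) -> bool:
    # exact-case membership: "meeting.vtt" and "meeting.VTT" pass, "meeting.Vtt" does not
    return filename.rsplit(".", 1)[-1] in DOCUMENT_EXTENSIONS


assert is_supported_document("meeting.vtt")
assert is_supported_document("CONFIG.PROPERTIES")
assert not is_supported_document("archive.zip")
```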
diff --git a/api/controllers/console/app/annotation.py b/api/controllers/console/app/annotation.py
index fcd8ed1882..48353a63af 100644
--- a/api/controllers/console/app/annotation.py
+++ b/api/controllers/console/app/annotation.py
@@ -186,7 +186,7 @@ class AnnotationUpdateDeleteApi(Resource):
app_id = str(app_id)
annotation_id = str(annotation_id)
AppAnnotationService.delete_app_annotation(app_id, annotation_id)
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
class AnnotationBatchImportApi(Resource):
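This 200 → 204 change repeats across the DELETE handlers in the files below: a successful delete now signals success through the status code alone, in line with HTTP semantics for `204 No Content`. The client-side implication, sketched with a hypothetical endpoint URL and token:

```python
import requests

resp = requests.delete(
    "https://example.com/console/api/apps/<app_id>/annotations/<annotation_id>",  # hypothetical URL
    headers={"Authorization": "Bearer <token>"},
)
# Success is indicated by the status alone; a 204 response carries no usable body,
# so check the code rather than calling resp.json().
assert resp.status_code == 204
```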
diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py
index dd25af8ebf..7176440e16 100644
--- a/api/controllers/console/app/ops_trace.py
+++ b/api/controllers/console/app/ops_trace.py
@@ -84,7 +84,7 @@ class TraceAppConfigApi(Resource):
result = OpsService.delete_tracing_app_config(app_id=app_id, tracing_provider=args["tracing_provider"])
if not result:
raise TracingConfigNotExist()
- return {"result": "success"}
+ return {"result": "success"}, 204
except Exception as e:
raise BadRequest(str(e))
diff --git a/api/controllers/console/auth/data_source_bearer_auth.py b/api/controllers/console/auth/data_source_bearer_auth.py
index ea00c2b8c2..5f0762e4a5 100644
--- a/api/controllers/console/auth/data_source_bearer_auth.py
+++ b/api/controllers/console/auth/data_source_bearer_auth.py
@@ -65,7 +65,7 @@ class ApiKeyAuthDataSourceBindingDelete(Resource):
ApiKeyAuthService.delete_provider_auth(current_user.current_tenant_id, binding_id)
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
api.add_resource(ApiKeyAuthDataSource, "/api-key-auth/data-source")
diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py
index 0b40312368..3588abeff5 100644
--- a/api/controllers/console/datasets/datasets_document.py
+++ b/api/controllers/console/datasets/datasets_document.py
@@ -40,7 +40,7 @@ from core.indexing_runner import IndexingRunner
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError
-from core.plugin.manager.exc import PluginDaemonClientSideError
+from core.plugin.impl.exc import PluginDaemonClientSideError
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from extensions.ext_redis import redis_client
diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py
index 696aaa94db..5c54ecbe81 100644
--- a/api/controllers/console/datasets/datasets_segments.py
+++ b/api/controllers/console/datasets/datasets_segments.py
@@ -131,7 +131,7 @@ class DatasetDocumentSegmentListApi(Resource):
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
SegmentService.delete_segments(segment_ids, document, dataset)
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
class DatasetDocumentSegmentApi(Resource):
@@ -333,7 +333,7 @@ class DatasetDocumentSegmentUpdateApi(Resource):
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
SegmentService.delete_segment(segment, document, dataset)
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
class DatasetDocumentSegmentBatchImportApi(Resource):
@@ -590,7 +590,7 @@ class ChildChunkUpdateApi(Resource):
SegmentService.delete_child_chunk(child_chunk, dataset)
except ChildChunkDeleteIndexServiceError as e:
raise ChildChunkDeleteIndexError(str(e))
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
@setup_required
@login_required
diff --git a/api/controllers/console/datasets/external.py b/api/controllers/console/datasets/external.py
index 2c031172bf..aee8323f23 100644
--- a/api/controllers/console/datasets/external.py
+++ b/api/controllers/console/datasets/external.py
@@ -135,7 +135,7 @@ class ExternalApiTemplateApi(Resource):
raise Forbidden()
ExternalDatasetService.delete_external_knowledge_api(current_user.current_tenant_id, external_knowledge_api_id)
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
class ExternalApiUseCheckApi(Resource):
diff --git a/api/controllers/console/datasets/metadata.py b/api/controllers/console/datasets/metadata.py
index fc9711169f..e4cac40ca1 100644
--- a/api/controllers/console/datasets/metadata.py
+++ b/api/controllers/console/datasets/metadata.py
@@ -82,7 +82,7 @@ class DatasetMetadataApi(Resource):
DatasetService.check_dataset_permission(dataset, current_user)
MetadataService.delete_metadata(dataset_id_str, metadata_id_str)
- return 200
+ return {"result": "success"}, 204
class DatasetMetadataBuiltInFieldApi(Resource):
diff --git a/api/controllers/console/explore/installed_app.py b/api/controllers/console/explore/installed_app.py
index 86550b2bdf..132da11878 100644
--- a/api/controllers/console/explore/installed_app.py
+++ b/api/controllers/console/explore/installed_app.py
@@ -113,7 +113,7 @@ class InstalledAppApi(InstalledAppResource):
db.session.delete(installed_app)
db.session.commit()
- return {"result": "success", "message": "App uninstalled successfully"}
+ return {"result": "success", "message": "App uninstalled successfully"}, 204
def patch(self, installed_app):
parser = reqparse.RequestParser()
diff --git a/api/controllers/console/explore/saved_message.py b/api/controllers/console/explore/saved_message.py
index 9f0c496645..3a1655d0ee 100644
--- a/api/controllers/console/explore/saved_message.py
+++ b/api/controllers/console/explore/saved_message.py
@@ -72,7 +72,7 @@ class SavedMessageApi(InstalledAppResource):
SavedMessageService.delete(app_model, current_user, message_id)
- return {"result": "success"}
+ return {"result": "success"}, 204
api.add_resource(
diff --git a/api/controllers/console/extension.py b/api/controllers/console/extension.py
index ed6cedb220..833da0d03c 100644
--- a/api/controllers/console/extension.py
+++ b/api/controllers/console/extension.py
@@ -99,7 +99,7 @@ class APIBasedExtensionDetailAPI(Resource):
APIBasedExtensionService.delete(extension_data_from_db)
- return {"result": "success"}
+ return {"result": "success"}, 204
api.add_resource(CodeBasedExtensionAPI, "/code-based-extension")
diff --git a/api/controllers/console/tag/tags.py b/api/controllers/console/tag/tags.py
index da83f64019..0d0d7ae95f 100644
--- a/api/controllers/console/tag/tags.py
+++ b/api/controllers/console/tag/tags.py
@@ -86,7 +86,7 @@ class TagUpdateDeleteApi(Resource):
TagService.delete_tag(tag_id)
- return 200
+ return {"result": "success"}, 204
class TagBindingCreateApi(Resource):
diff --git a/api/controllers/console/workspace/endpoint.py b/api/controllers/console/workspace/endpoint.py
index 46dee20f8b..aa1a78935d 100644
--- a/api/controllers/console/workspace/endpoint.py
+++ b/api/controllers/console/workspace/endpoint.py
@@ -5,7 +5,7 @@ from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.wraps import account_initialization_required, setup_required
from core.model_runtime.utils.encoders import jsonable_encoder
-from core.plugin.manager.exc import PluginPermissionDeniedError
+from core.plugin.impl.exc import PluginPermissionDeniedError
from libs.login import login_required
from services.plugin.endpoint_service import EndpointService
diff --git a/api/controllers/console/workspace/plugin.py b/api/controllers/console/workspace/plugin.py
index e9c1884c60..6f9ae18750 100644
--- a/api/controllers/console/workspace/plugin.py
+++ b/api/controllers/console/workspace/plugin.py
@@ -10,7 +10,7 @@ from controllers.console import api
from controllers.console.workspace import plugin_permission_required
from controllers.console.wraps import account_initialization_required, setup_required
from core.model_runtime.utils.encoders import jsonable_encoder
-from core.plugin.manager.exc import PluginDaemonClientSideError
+from core.plugin.impl.exc import PluginDaemonClientSideError
from libs.login import login_required
from models.account import TenantPluginPermission
from services.plugin.plugin_permission_service import PluginPermissionService
diff --git a/api/controllers/files/image_preview.py b/api/controllers/files/image_preview.py
index 5adfe16a79..9199069585 100644
--- a/api/controllers/files/image_preview.py
+++ b/api/controllers/files/image_preview.py
@@ -70,12 +70,26 @@ class FilePreviewApi(Resource):
direct_passthrough=True,
headers={},
)
+ # add Accept-Ranges header for audio/video files
+ if upload_file.mime_type in [
+ "audio/mpeg",
+ "audio/wav",
+ "audio/mp4",
+ "audio/ogg",
+ "audio/flac",
+ "audio/aac",
+ "video/mp4",
+ "video/webm",
+ "video/quicktime",
+ "audio/x-m4a",
+ ]:
+ response.headers["Accept-Ranges"] = "bytes"
if upload_file.size > 0:
response.headers["Content-Length"] = str(upload_file.size)
if args["as_attachment"]:
encoded_filename = quote(upload_file.name)
response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}"
- response.headers["Content-Type"] = "application/octet-stream"
+ response.headers["Content-Type"] = "application/octet-stream"
return response
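Advertising `Accept-Ranges: bytes` tells browsers they can seek within audio/video by issuing ranged GETs instead of re-downloading the whole file. A hypothetical client probe showing the interaction (the URL is illustrative):

```python
import requests

url = "https://example.com/files/<file_id>/file-preview"  # hypothetical preview URL

head = requests.head(url)
if head.headers.get("Accept-Ranges") == "bytes":
    # Ask for the first KiB only; a range-capable server answers 206 Partial Content,
    # otherwise it falls back to 200 with the full body.
    part = requests.get(url, headers={"Range": "bytes=0-1023"})
    print(part.status_code, len(part.content))
```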
diff --git a/api/controllers/service_api/app/annotation.py b/api/controllers/service_api/app/annotation.py
index cffa3665b1..c50f551faf 100644
--- a/api/controllers/service_api/app/annotation.py
+++ b/api/controllers/service_api/app/annotation.py
@@ -79,7 +79,7 @@ class AnnotationListApi(Resource):
class AnnotationUpdateDeleteApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
@marshal_with(annotation_fields)
- def post(self, app_model: App, end_user: EndUser, annotation_id):
+ def put(self, app_model: App, end_user: EndUser, annotation_id):
if not current_user.is_editor:
raise Forbidden()
@@ -98,7 +98,7 @@ class AnnotationUpdateDeleteApi(Resource):
annotation_id = str(annotation_id)
AppAnnotationService.delete_app_annotation(app_model.id, annotation_id)
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
api.add_resource(AnnotationReplyActionApi, "/apps/annotation-reply/")
diff --git a/api/controllers/service_api/app/conversation.py b/api/controllers/service_api/app/conversation.py
index 55600a3fd0..dfc357e1ab 100644
--- a/api/controllers/service_api/app/conversation.py
+++ b/api/controllers/service_api/app/conversation.py
@@ -72,7 +72,7 @@ class ConversationDetailApi(Resource):
ConversationService.delete(app_model, conversation_id, end_user)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
class ConversationRenameApi(Resource):
diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py
index eec6afc9ef..9e943e2b2d 100644
--- a/api/controllers/service_api/dataset/document.py
+++ b/api/controllers/service_api/dataset/document.py
@@ -323,7 +323,7 @@ class DocumentDeleteApi(DatasetApiResource):
except services.errors.document.DocumentIndexingError:
raise DocumentIndexingError("Cannot delete document during indexing.")
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
class DocumentListApi(DatasetApiResource):
diff --git a/api/controllers/service_api/dataset/metadata.py b/api/controllers/service_api/dataset/metadata.py
index 298c8a8df8..35578eae54 100644
--- a/api/controllers/service_api/dataset/metadata.py
+++ b/api/controllers/service_api/dataset/metadata.py
@@ -63,7 +63,7 @@ class DatasetMetadataServiceApi(DatasetApiResource):
DatasetService.check_dataset_permission(dataset, current_user)
MetadataService.delete_metadata(dataset_id_str, metadata_id_str)
- return 200
+ return {"result": "success"}, 204
class DatasetMetadataBuiltInFieldServiceApi(DatasetApiResource):
diff --git a/api/controllers/service_api/dataset/segment.py b/api/controllers/service_api/dataset/segment.py
index 2a79e15cc5..95753cfd67 100644
--- a/api/controllers/service_api/dataset/segment.py
+++ b/api/controllers/service_api/dataset/segment.py
@@ -159,7 +159,7 @@ class DatasetSegmentApi(DatasetApiResource):
if not segment:
raise NotFound("Segment not found.")
SegmentService.delete_segment(segment, document, dataset)
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
@cloud_edition_billing_resource_check("vector_space", "dataset")
def post(self, tenant_id, dataset_id, document_id, segment_id):
@@ -344,7 +344,7 @@ class DatasetChildChunkApi(DatasetApiResource):
except ChildChunkDeleteIndexServiceError as e:
raise ChildChunkDeleteIndexError(str(e))
- return {"result": "success"}, 200
+ return {"result": "success"}, 204
@cloud_edition_billing_resource_check("vector_space", "dataset")
@cloud_edition_billing_knowledge_limit_check("add_segment", "dataset")
diff --git a/api/controllers/web/saved_message.py b/api/controllers/web/saved_message.py
index 6a9b818907..ab2d4abcd3 100644
--- a/api/controllers/web/saved_message.py
+++ b/api/controllers/web/saved_message.py
@@ -67,7 +67,7 @@ class SavedMessageApi(WebApiResource):
SavedMessageService.delete(app_model, end_user, message_id)
- return {"result": "success"}
+ return {"result": "success"}, 204
api.add_resource(SavedMessageListApi, "/saved-messages")
diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py
index feb8abf6ef..de3b7e1ad7 100644
--- a/api/core/agent/cot_agent_runner.py
+++ b/api/core/agent/cot_agent_runner.py
@@ -69,6 +69,13 @@ class CotAgentRunner(BaseAgentRunner, ABC):
tool_instances, prompt_messages_tools = self._init_prompt_tools()
self._prompt_messages_tools = prompt_messages_tools
+ # fix: metadata filters were not applied; propagate them to the dataset retriever tools
+ if app_config.dataset is not None:
+ metadata_filtering_conditions = app_config.dataset.retrieve_config.metadata_filtering_conditions
+ for key, dataset_retriever_tool in tool_instances.items():
+ if hasattr(dataset_retriever_tool, "retrieval_tool"):
+ dataset_retriever_tool.retrieval_tool.metadata_filtering_conditions = metadata_filtering_conditions
+
function_call_state = True
llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None}
final_answer = ""
diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py
index a1110e7709..874bd6b93b 100644
--- a/api/core/agent/fc_agent_runner.py
+++ b/api/core/agent/fc_agent_runner.py
@@ -45,6 +45,13 @@ class FunctionCallAgentRunner(BaseAgentRunner):
# convert tools into ModelRuntime Tool format
tool_instances, prompt_messages_tools = self._init_prompt_tools()
+ # fix: metadata filters were not applied; propagate them to the dataset retriever tools
+ if app_config.dataset is not None:
+ metadata_filtering_conditions = app_config.dataset.retrieve_config.metadata_filtering_conditions
+ for key, dataset_retriever_tool in tool_instances.items():
+ if hasattr(dataset_retriever_tool, "retrieval_tool"):
+ dataset_retriever_tool.retrieval_tool.metadata_filtering_conditions = metadata_filtering_conditions
+
assert app_config.agent
iteration_step = 1
diff --git a/api/core/agent/prompt/template.py b/api/core/agent/prompt/template.py
index ef64fd29fc..f5ba2119f4 100644
--- a/api/core/agent/prompt/template.py
+++ b/api/core/agent/prompt/template.py
@@ -1,4 +1,4 @@
-ENGLISH_REACT_COMPLETION_PROMPT_TEMPLATES = """Respond to the human as helpfully and accurately as possible.
+ENGLISH_REACT_COMPLETION_PROMPT_TEMPLATES = """Respond to the human as helpfully and accurately as possible.
{{instruction}}
@@ -47,7 +47,7 @@ Thought:""" # noqa: E501
ENGLISH_REACT_COMPLETION_AGENT_SCRATCHPAD_TEMPLATES = """Observation: {{observation}}
Thought:"""
-ENGLISH_REACT_CHAT_PROMPT_TEMPLATES = """Respond to the human as helpfully and accurately as possible.
+ENGLISH_REACT_CHAT_PROMPT_TEMPLATES = """Respond to the human as helpfully and accurately as possible.
{{instruction}}
diff --git a/api/core/agent/strategy/plugin.py b/api/core/agent/strategy/plugin.py
index a4b25f46e6..79b074cf95 100644
--- a/api/core/agent/strategy/plugin.py
+++ b/api/core/agent/strategy/plugin.py
@@ -4,7 +4,7 @@ from typing import Any, Optional
from core.agent.entities import AgentInvokeMessage
from core.agent.plugin_entities import AgentStrategyEntity, AgentStrategyParameter
from core.agent.strategy.base import BaseAgentStrategy
-from core.plugin.manager.agent import PluginAgentManager
+from core.plugin.impl.agent import PluginAgentClient
from core.plugin.utils.converter import convert_parameters_to_plugin_format
@@ -42,7 +42,7 @@ class PluginAgentStrategy(BaseAgentStrategy):
"""
Invoke the agent strategy.
"""
- manager = PluginAgentManager()
+ manager = PluginAgentClient()
initialized_params = self.initialize_parameters(params)
params = convert_parameters_to_plugin_format(initialized_params)
diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py
index 6079b51daa..fd0d7fafbd 100644
--- a/api/core/app/apps/advanced_chat/app_generator.py
+++ b/api/core/app/apps/advanced_chat/app_generator.py
@@ -25,8 +25,8 @@ from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotA
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.ops.ops_trace_manager import TraceQueueManager
from core.prompt.utils.get_thread_messages_length import get_thread_messages_length
-from core.repository import RepositoryFactory
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
+from core.workflow.repository import RepositoryFactory
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from extensions.ext_database import db
from factories import file_factory
from models.account import Account
diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
index 43ccaea9c0..1f4db54a9c 100644
--- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py
+++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
@@ -62,10 +62,10 @@ from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage
from core.model_runtime.entities.llm_entities import LLMUsage
from core.model_runtime.utils.encoders import jsonable_encoder
from core.ops.ops_trace_manager import TraceQueueManager
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes import NodeType
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from events.message_event import message_was_created
from extensions.ext_database import db
from models import Conversation, EndUser, Message, MessageFile
diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py
index 6be3a7331d..9c3d78a338 100644
--- a/api/core/app/apps/workflow/app_generator.py
+++ b/api/core/app/apps/workflow/app_generator.py
@@ -23,8 +23,8 @@ from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerat
from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.ops.ops_trace_manager import TraceQueueManager
-from core.repository import RepositoryFactory
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
+from core.workflow.repository import RepositoryFactory
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from extensions.ext_database import db
from factories import file_factory
from models import Account, App, EndUser, Workflow
diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py
index 68131a7463..67cad9c998 100644
--- a/api/core/app/apps/workflow/generate_task_pipeline.py
+++ b/api/core/app/apps/workflow/generate_task_pipeline.py
@@ -54,8 +54,8 @@ from core.app.entities.task_entities import (
from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline
from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage
from core.ops.ops_trace_manager import TraceQueueManager
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from core.workflow.enums import SystemVariableKey
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from extensions.ext_database import db
from models.account import Account
from models.enums import CreatedByRole
diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py
index 38e7c9eb12..09e2ee74e6 100644
--- a/api/core/app/task_pipeline/workflow_cycle_manage.py
+++ b/api/core/app/task_pipeline/workflow_cycle_manage.py
@@ -49,12 +49,12 @@ from core.file import FILE_MODEL_IDENTITY, File
from core.model_runtime.utils.encoders import jsonable_encoder
from core.ops.entities.trace_entity import TraceTaskName
from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
-from core.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from core.tools.tool_manager import ToolManager
from core.workflow.entities.node_entities import NodeRunMetadataKey
from core.workflow.enums import SystemVariableKey
from core.workflow.nodes import NodeType
from core.workflow.nodes.tool.entities import ToolNodeData
+from core.workflow.repository.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from core.workflow.workflow_entry import WorkflowEntry
from models.account import Account
from models.enums import CreatedByRole, WorkflowRunTriggeredFrom
@@ -381,6 +381,9 @@ class WorkflowCycleManage:
workflow_node_execution.elapsed_time = elapsed_time
workflow_node_execution.execution_metadata = execution_metadata
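+ # persist the finished execution through the repository so the stored record reflects its final state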
+ self._workflow_node_execution_repository.update(workflow_node_execution)
+
return workflow_node_execution
def _handle_workflow_node_execution_retried(
diff --git a/api/core/external_data_tool/api/__builtin__ b/api/core/external_data_tool/api/__builtin__
index 56a6051ca2..d00491fd7e 100644
--- a/api/core/external_data_tool/api/__builtin__
+++ b/api/core/external_data_tool/api/__builtin__
@@ -1 +1 @@
-1
\ No newline at end of file
+1
diff --git a/api/core/helper/code_executor/javascript/javascript_transformer.py b/api/core/helper/code_executor/javascript/javascript_transformer.py
index d67a0903aa..62489cdf29 100644
--- a/api/core/helper/code_executor/javascript/javascript_transformer.py
+++ b/api/core/helper/code_executor/javascript/javascript_transformer.py
@@ -10,13 +10,13 @@ class NodeJsTemplateTransformer(TemplateTransformer):
f"""
// declare main function
{cls._code_placeholder}
-
+
// decode and prepare input object
var inputs_obj = JSON.parse(Buffer.from('{cls._inputs_placeholder}', 'base64').toString('utf-8'))
-
+
// execute main function
var output_obj = main(inputs_obj)
-
+
// convert output to json and print
var output_json = JSON.stringify(output_obj)
var result = `<>${{output_json}}<>`
diff --git a/api/core/helper/code_executor/jinja2/jinja2_transformer.py b/api/core/helper/code_executor/jinja2/jinja2_transformer.py
index 63d58edbc7..54c78cdf92 100644
--- a/api/core/helper/code_executor/jinja2/jinja2_transformer.py
+++ b/api/core/helper/code_executor/jinja2/jinja2_transformer.py
@@ -21,20 +21,20 @@ class Jinja2TemplateTransformer(TemplateTransformer):
import jinja2
template = jinja2.Template('''{cls._code_placeholder}''')
return template.render(**inputs)
-
+
import json
from base64 import b64decode
-
+
# decode and prepare input dict
inputs_obj = json.loads(b64decode('{cls._inputs_placeholder}').decode('utf-8'))
-
+
# execute main function
output = main(**inputs_obj)
-
+
# convert output and print
result = f'''<>{{output}}<>'''
print(result)
-
+
""")
return runner_script
@@ -43,15 +43,15 @@ class Jinja2TemplateTransformer(TemplateTransformer):
preload_script = dedent("""
import jinja2
from base64 import b64decode
-
+
def _jinja2_preload_():
# prepare jinja2 environment, load template and render before to avoid sandbox issue
template = jinja2.Template('{{s}}')
template.render(s='a')
-
+
if __name__ == '__main__':
_jinja2_preload_()
-
+
""")
return preload_script
diff --git a/api/core/helper/code_executor/python3/python3_transformer.py b/api/core/helper/code_executor/python3/python3_transformer.py
index 75a5a44d08..836fd273ae 100644
--- a/api/core/helper/code_executor/python3/python3_transformer.py
+++ b/api/core/helper/code_executor/python3/python3_transformer.py
@@ -9,16 +9,16 @@ class Python3TemplateTransformer(TemplateTransformer):
runner_script = dedent(f"""
# declare main function
{cls._code_placeholder}
-
+
import json
from base64 import b64decode
-
+
# decode and prepare input dict
inputs_obj = json.loads(b64decode('{cls._inputs_placeholder}').decode('utf-8'))
-
+
# execute main function
output_obj = main(**inputs_obj)
-
+
# convert output to json and print
output_json = json.dumps(output_obj, indent=4)
result = f'''<>{{output_json}}<>'''
diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index d5d2ca60fa..e5dbc30689 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -3,6 +3,8 @@ import logging
import re
from typing import Optional, cast
+import json_repair
+
from core.llm_generator.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
from core.llm_generator.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
from core.llm_generator.prompts import (
@@ -366,7 +368,22 @@
),
)
- generated_json_schema = cast(str, response.message.content)
+ raw_content = response.message.content
+
+ if not isinstance(raw_content, str):
+ raise ValueError(f"LLM response content must be a string, got: {type(raw_content)}")
+
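+ # try strict JSON parsing first; fall back to json_repair for malformed LLM output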
+ try:
+ parsed_content = json.loads(raw_content)
+ except json.JSONDecodeError:
+ parsed_content = json_repair.loads(raw_content)
+
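+ # json_repair may yield a bare scalar; only an object or array is a usable schema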
+ if not isinstance(parsed_content, dict | list):
+ raise ValueError(f"Failed to parse structured output from LLM: {raw_content}")
+
+ generated_json_schema = json.dumps(parsed_content, indent=2, ensure_ascii=False)
return {"output": generated_json_schema, "error": ""}
except InvokeError as e:
diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py
index fad7cea01c..34ea3aec26 100644
--- a/api/core/llm_generator/prompts.py
+++ b/api/core/llm_generator/prompts.py
@@ -1,5 +1,5 @@
# Written by YORKI MINAKO🤡, Edited by Xiaoyi
-CONVERSATION_TITLE_PROMPT = """You need to decompose the user's input into "subject" and "intention" in order to accurately figure out what the user's input language actually is.
+CONVERSATION_TITLE_PROMPT = """You need to decompose the user's input into "subject" and "intention" in order to accurately figure out what the user's input language actually is.
Notice: the language type user uses could be diverse, which can be English, Chinese, Italian, Español, Arabic, Japanese, French, and etc.
ENSURE your output is in the SAME language as the user's input!
Your output is restricted only to: (Input language) Intention + Subject(short as possible)
@@ -58,7 +58,7 @@ User Input: yo, 你今天咋样?
"Your Output": "查询今日我的状态☺️"
}
-User Input:
+User Input:
""" # noqa: E501
PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE = (
@@ -163,11 +163,11 @@ Here is a task description for which I would like you to create a high-quality p
{{TASK_DESCRIPTION}}
Based on task description, please create a well-structured prompt template that another AI could use to consistently complete the task. The prompt template should include:
-- Do not include or