From a7c1e4c7ae21f1eb8fb228d7fa7f9185362e5d62 Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Mon, 22 Jul 2024 17:23:19 +0800
Subject: [PATCH 01/19] chore: remove support email from readme (#6530)
---
README.md | 1 -
README_AR.md | 1 -
README_ES.md | 1 -
README_FR.md | 1 -
README_JA.md | 3 +--
README_KL.md | 1 -
README_KR.md | 1 -
7 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/README.md b/README.md
index 40a6837c42..5d8221bcbd 100644
--- a/README.md
+++ b/README.md
@@ -216,7 +216,6 @@ At the same time, please consider supporting Dify by sharing it on social media
* [Github Discussion](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
* [GitHub Issues](https://github.com/langgenius/dify/issues). Best for: bugs you encounter using Dify.AI, and feature proposals. See our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
-* [Email](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Best for: questions you have about using Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [Twitter](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
diff --git a/README_AR.md b/README_AR.md
index 35be2ba9b6..c91602721e 100644
--- a/README_AR.md
+++ b/README_AR.md
@@ -199,7 +199,6 @@ docker compose up -d
## المجتمع والاتصال
* [مناقشة Github](https://github.com/langgenius/dify/discussions). الأفضل لـ: مشاركة التعليقات وطرح الأسئلة.
* [المشكلات على GitHub](https://github.com/langgenius/dify/issues). الأفضل لـ: الأخطاء التي تواجهها في استخدام Dify.AI، واقتراحات الميزات. انظر [دليل المساهمة](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
-* [البريد الإلكتروني](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). الأفضل لـ: الأسئلة التي تتعلق باستخدام Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). الأفضل لـ: مشاركة تطبيقاتك والترفيه مع المجتمع.
* [تويتر](https://twitter.com/dify_ai). الأفضل لـ: مشاركة تطبيقاتك والترفيه مع المجتمع.
diff --git a/README_ES.md b/README_ES.md
index ed613be8d4..84c06a2503 100644
--- a/README_ES.md
+++ b/README_ES.md
@@ -224,7 +224,6 @@ Al mismo tiempo, considera apoyar a Dify compartiéndolo en redes sociales y en
* [Discusión en GitHub](https://github.com/langgenius/dify/discussions). Lo mejor para: compartir comentarios y hacer preguntas.
* [Reporte de problemas en GitHub](https://github.com/langgenius/dify/issues). Lo mejor para: errores que encuentres usando Dify.AI y propuestas de características. Consulta nuestra [Guía de contribución](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
-* [Correo electrónico](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Lo mejor para: preguntas que tengas sobre el uso de Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). Lo mejor para: compartir tus aplicaciones y pasar el rato con la comunidad.
* [Twitter](https://twitter.com/dify_ai). Lo mejor para: compartir tus aplicaciones y pasar el rato con la comunidad.
diff --git a/README_FR.md b/README_FR.md
index 6f09773bf2..768c9390d8 100644
--- a/README_FR.md
+++ b/README_FR.md
@@ -222,7 +222,6 @@ Dans le même temps, veuillez envisager de soutenir Dify en le partageant sur le
* [Discussion GitHub](https://github.com/langgenius/dify/discussions). Meilleur pour: partager des commentaires et poser des questions.
* [Problèmes GitHub](https://github.com/langgenius/dify/issues). Meilleur pour: les bogues que vous rencontrez en utilisant Dify.AI et les propositions de fonctionnalités. Consultez notre [Guide de contribution](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
-* [E-mail](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Meilleur pour: les questions que vous avez sur l'utilisation de Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). Meilleur pour: partager vos applications et passer du temps avec la communauté.
* [Twitter](https://twitter.com/dify_ai). Meilleur pour: partager vos applications et passer du temps avec la communauté.
diff --git a/README_JA.md b/README_JA.md
index 55f6e173fd..f4cccd5271 100644
--- a/README_JA.md
+++ b/README_JA.md
@@ -221,7 +221,6 @@ docker compose up -d
* [Github Discussion](https://github.com/langgenius/dify/discussions). 主に: フィードバックの共有や質問。
* [GitHub Issues](https://github.com/langgenius/dify/issues). 主に: Dify.AIを使用する際に発生するエラーや問題については、[貢献ガイド](CONTRIBUTING_JA.md)を参照してください
-* [Email](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). 主に: Dify.AIの使用に関する質問。
* [Discord](https://discord.gg/FngNHpbcY7). 主に: アプリケーションの共有やコミュニティとの交流。
* [Twitter](https://twitter.com/dify_ai). 主に: アプリケーションの共有やコミュニティとの交流。
@@ -239,7 +238,7 @@ docker compose up -d
diff --git a/README_KL.md b/README_KL.md
index 7fdc0b5181..6a15f39bc6 100644
--- a/README_KL.md
+++ b/README_KL.md
@@ -224,7 +224,6 @@ At the same time, please consider supporting Dify by sharing it on social media
* [Github Discussion](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
* [GitHub Issues](https://github.com/langgenius/dify/issues). Best for: bugs you encounter using Dify.AI, and feature proposals. See our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
-* [Email](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Best for: questions you have about using Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [Twitter](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
diff --git a/README_KR.md b/README_KR.md
index fa1980a99f..bb15fac8ef 100644
--- a/README_KR.md
+++ b/README_KR.md
@@ -214,7 +214,6 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했
* [Github 토론](https://github.com/langgenius/dify/discussions). 피드백 공유 및 질문하기에 적합합니다.
* [GitHub 이슈](https://github.com/langgenius/dify/issues). Dify.AI 사용 중 발견한 버그와 기능 제안에 적합합니다. [기여 가이드](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)를 참조하세요.
-* [이메일](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Dify.AI 사용에 대한 질문하기에 적합합니다.
* [디스코드](https://discord.gg/FngNHpbcY7). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
* [트위터](https://twitter.com/dify_ai). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
From dc7335cdf8bb4a2eb6e14b5fe759b69e320f4f13 Mon Sep 17 00:00:00 2001
From: Joel
Date: Mon, 22 Jul 2024 18:16:33 +0800
Subject: [PATCH 02/19] chore: use node specify llm to auto generate prompt
(#6525)
---
.../config-prompt/simple-prompt-input.tsx | 10 ++
.../config/automatic/get-automatic-res.tsx | 12 +-
.../nodes/_base/components/prompt/editor.tsx | 5 +-
.../llm/components/config-prompt-item.tsx | 5 +-
.../nodes/llm/components/config-prompt.tsx | 6 +-
.../llm/components/prompt-generator-btn.tsx | 6 +
.../components/workflow/nodes/llm/panel.tsx | 1 +
web/service/debug.ts | 1 +
web/types/app.ts | 109 +++++++++---------
9 files changed, 98 insertions(+), 57 deletions(-)
diff --git a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
index b0a140fc97..da46d3a609 100644
--- a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
+++ b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
@@ -14,6 +14,7 @@ import PromptEditorHeightResizeWrap from './prompt-editor-height-resize-wrap'
import cn from '@/utils/classnames'
import { type PromptVariable } from '@/models/debug'
import Tooltip from '@/app/components/base/tooltip'
+import type { CompletionParams } from '@/types/app'
import { AppType } from '@/types/app'
import { getNewVar, getVars } from '@/utils/var'
import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
@@ -58,6 +59,7 @@ const Prompt: FC = ({
const { eventEmitter } = useEventEmitterContextContext()
const {
modelConfig,
+ completionParams,
dataSets,
setModelConfig,
setPrevPromptConfig,
@@ -247,6 +249,14 @@ const Prompt: FC = ({
{showAutomatic && (
void
onFinished: (res: AutomaticRes) => void
@@ -57,6 +59,7 @@ const TryLabel: FC<{
const GetAutomaticRes: FC = ({
mode,
+ model,
isShow,
onClose,
isInLLMNode,
@@ -149,10 +152,17 @@ const GetAutomaticRes: FC = ({
return
setLoadingTrue()
try {
- const res = await generateRule({
+ const { error, ...res } = await generateRule({
instruction,
+ model_config: model,
})
setRes(res)
+ if (error) {
+ Toast.notify({
+ type: 'error',
+ message: error,
+ })
+ }
}
finally {
setLoadingFalse()
diff --git a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
index b350ff0f67..873dc0f17e 100644
--- a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
+++ b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
@@ -9,6 +9,7 @@ import { useTranslation } from 'react-i18next'
import { useBoolean } from 'ahooks'
import { BlockEnum, EditionType } from '../../../../types'
import type {
+ ModelConfig,
Node,
NodeOutPutVar,
Variable,
@@ -58,6 +59,7 @@ type Props = {
availableNodes?: Node[]
isSupportPromptGenerator?: boolean
onGenerated?: (prompt: string) => void
+ modelConfig?: ModelConfig
// for jinja
isSupportJinja?: boolean
editionType?: EditionType
@@ -90,6 +92,7 @@ const Editor: FC = ({
varList = [],
handleAddVariable,
onGenerated,
+ modelConfig,
}) => {
const { t } = useTranslation()
const { eventEmitter } = useEventEmitterContextContext()
@@ -130,7 +133,7 @@ const Editor: FC = ({
{value?.length || 0}
{isSupportPromptGenerator && (
-
+
)}
diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
index 237e90ca9a..39715a7c71 100644
--- a/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
+++ b/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
@@ -4,7 +4,7 @@ import React, { useCallback, useEffect, useState } from 'react'
import { uniqueId } from 'lodash-es'
import { useTranslation } from 'react-i18next'
import { RiQuestionLine } from '@remixicon/react'
-import type { PromptItem, Variable } from '../../../types'
+import type { ModelConfig, PromptItem, Variable } from '../../../types'
import { EditionType } from '../../../types'
import { useWorkflowStore } from '../../../store'
import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
@@ -38,6 +38,7 @@ type Props = {
availableNodes: any
varList: Variable[]
handleAddVariable: (payload: any) => void
+ modelConfig?: ModelConfig
}
const roleOptions = [
@@ -77,6 +78,7 @@ const ConfigPromptItem: FC = ({
availableNodes,
varList,
handleAddVariable,
+ modelConfig,
}) => {
const { t } = useTranslation()
const workflowStore = useWorkflowStore()
@@ -138,6 +140,7 @@ const ConfigPromptItem: FC = ({
availableNodes={availableNodes}
isSupportPromptGenerator={payload.role === PromptRole.system}
onGenerated={handleGenerated}
+ modelConfig={modelConfig}
isSupportJinja
editionType={payload.edition_type}
onEditionTypeChange={onEditionTypeChange}
diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
index 8db8425b61..2c6d725c42 100644
--- a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
+++ b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
@@ -5,7 +5,7 @@ import { useTranslation } from 'react-i18next'
import produce from 'immer'
import { ReactSortable } from 'react-sortablejs'
import { v4 as uuid4 } from 'uuid'
-import type { PromptItem, ValueSelector, Var, Variable } from '../../../types'
+import type { ModelConfig, PromptItem, ValueSelector, Var, Variable } from '../../../types'
import { EditionType, PromptRole } from '../../../types'
import useAvailableVarList from '../../_base/hooks/use-available-var-list'
import { useWorkflowStore } from '../../../store'
@@ -33,6 +33,7 @@ type Props = {
}
varList?: Variable[]
handleAddVariable: (payload: any) => void
+ modelConfig: ModelConfig
}
const ConfigPrompt: FC = ({
@@ -47,6 +48,7 @@ const ConfigPrompt: FC = ({
hasSetBlockStatus,
varList = [],
handleAddVariable,
+ modelConfig,
}) => {
const { t } = useTranslation()
const workflowStore = useWorkflowStore()
@@ -199,6 +201,7 @@ const ConfigPrompt: FC = ({
availableNodes={availableNodesWithParent}
varList={varList}
handleAddVariable={handleAddVariable}
+ modelConfig={modelConfig}
/>
)
@@ -234,6 +237,7 @@ const ConfigPrompt: FC = ({
onEditionTypeChange={handleCompletionEditionTypeChange}
handleAddVariable={handleAddVariable}
onGenerated={handleGenerated}
+ modelConfig={modelConfig}
/>
)}
diff --git a/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx b/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
index 63d123402e..ed8e7df770 100644
--- a/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
+++ b/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
@@ -7,14 +7,19 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
import { AppType } from '@/types/app'
import type { AutomaticRes } from '@/service/debug'
+import type { ModelConfig } from '@/app/components/workflow/types'
+import type { Model } from '@/types/app'
+
type Props = {
className?: string
onGenerated?: (prompt: string) => void
+ modelConfig?: ModelConfig
}
const PromptGeneratorBtn: FC = ({
className,
onGenerated,
+ modelConfig,
}) => {
const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
const handleAutomaticRes = useCallback((res: AutomaticRes) => {
@@ -32,6 +37,7 @@ const PromptGeneratorBtn: FC = ({
isShow={showAutomatic}
onClose={showAutomaticFalse}
onFinished={handleAutomaticRes}
+ model={modelConfig as Model}
isInLLMNode
/>
)}
diff --git a/web/app/components/workflow/nodes/llm/panel.tsx b/web/app/components/workflow/nodes/llm/panel.tsx
index 791dc6133d..1c2ec3c985 100644
--- a/web/app/components/workflow/nodes/llm/panel.tsx
+++ b/web/app/components/workflow/nodes/llm/panel.tsx
@@ -178,6 +178,7 @@ const Panel: FC> = ({
hasSetBlockStatus={hasSetBlockStatus}
varList={inputs.prompt_config?.jinja2_variables || []}
handleAddVariable={handleAddVariable}
+ modelConfig={model}
/>
)}
diff --git a/web/service/debug.ts b/web/service/debug.ts
index a373a0dd6a..8e90fe565f 100644
--- a/web/service/debug.ts
+++ b/web/service/debug.ts
@@ -7,6 +7,7 @@ export type AutomaticRes = {
prompt: string
variables: string[]
opening_statement: string
+ error?: string
}
export const sendChatMessage = async (appId: string, body: Record, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }: {
diff --git a/web/types/app.ts b/web/types/app.ts
index ed73e2f5f7..9432e6d05a 100644
--- a/web/types/app.ts
+++ b/web/types/app.ts
@@ -135,9 +135,64 @@ export enum AgentStrategy {
react = 'react',
}
+export type CompletionParams = {
+ /** Maximum number of tokens in the answer message returned by Completion */
+ max_tokens: number
+ /**
+ * A number between 0 and 2.
+ * The larger the number, the more random the result;
+ * otherwise, the more deterministic.
+ * When in use, choose either `temperature` or `top_p`.
+ * Default is 1.
+ */
+ temperature: number
+ /**
+ * Represents the proportion of probability mass samples to take,
+ * e.g., 0.1 means taking the top 10% probability mass samples.
+ * The determinism between the samples is basically consistent.
+ * Among these results, the `top_p` probability mass results are taken.
+ * When in use, choose either `temperature` or `top_p`.
+ * Default is 1.
+ */
+ top_p: number
+ /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
+ echo: boolean
+ /**
+ * Specify up to 4 to automatically stop generating before the text specified in `stop`.
+ * Suitable for use in chat mode.
+ * For example, specify "Q" and "A",
+ * and provide some Q&A examples as context,
+ * and the model will give out in Q&A format and stop generating before Q&A.
+ */
+ stop: string[]
+ /**
+ * A number between -2.0 and 2.0.
+ * The larger the value, the less the model will repeat topics and the more it will provide new topics.
+ */
+ presence_penalty: number
+ /**
+ * A number between -2.0 and 2.0.
+ * A lower setting will make the model appear less cultured,
+ * always repeating expressions.
+ * The difference between `frequency_penalty` and `presence_penalty`
+ * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
+ * while `presence_penalty` penalizes a word based on its occurrence in the input text.
+ */
+ frequency_penalty: number
+}
/**
* Model configuration. The backend type.
*/
+export type Model = {
+ /** LLM provider, e.g., OPENAI */
+ provider: string
+ /** Model name, e.g, gpt-3.5.turbo */
+ name: string
+ mode: ModelModeType
+ /** Default Completion call parameters */
+ completion_params: CompletionParams
+}
+
export type ModelConfig = {
opening_statement: string
suggested_questions?: string[]
@@ -174,59 +229,7 @@ export type ModelConfig = {
strategy?: AgentStrategy
tools: ToolItem[]
}
- model: {
- /** LLM provider, e.g., OPENAI */
- provider: string
- /** Model name, e.g, gpt-3.5.turbo */
- name: string
- mode: ModelModeType
- /** Default Completion call parameters */
- completion_params: {
- /** Maximum number of tokens in the answer message returned by Completion */
- max_tokens: number
- /**
- * A number between 0 and 2.
- * The larger the number, the more random the result;
- * otherwise, the more deterministic.
- * When in use, choose either `temperature` or `top_p`.
- * Default is 1.
- */
- temperature: number
- /**
- * Represents the proportion of probability mass samples to take,
- * e.g., 0.1 means taking the top 10% probability mass samples.
- * The determinism between the samples is basically consistent.
- * Among these results, the `top_p` probability mass results are taken.
- * When in use, choose either `temperature` or `top_p`.
- * Default is 1.
- */
- top_p: number
- /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
- echo: boolean
- /**
- * Specify up to 4 to automatically stop generating before the text specified in `stop`.
- * Suitable for use in chat mode.
- * For example, specify "Q" and "A",
- * and provide some Q&A examples as context,
- * and the model will give out in Q&A format and stop generating before Q&A.
- */
- stop: string[]
- /**
- * A number between -2.0 and 2.0.
- * The larger the value, the less the model will repeat topics and the more it will provide new topics.
- */
- presence_penalty: number
- /**
- * A number between -2.0 and 2.0.
- * A lower setting will make the model appear less cultured,
- * always repeating expressions.
- * The difference between `frequency_penalty` and `presence_penalty`
- * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
- * while `presence_penalty` penalizes a word based on its occurrence in the input text.
- */
- frequency_penalty: number
- }
- }
+ model: Model
dataset_configs: DatasetConfigs
file_upload?: {
image: VisionSettings
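The get-automatic-res.tsx hunks in this commit arrive partly garbled in this excerpt (the Props block and the modal JSX were stripped), so the end-to-end pattern is easiest to see as a small sketch: the LLM node's own model configuration rides along on the generate request as model_config, and the new optional error field on AutomaticRes is surfaced as an error toast rather than being rendered as a result. generateRule, Toast.notify, model_config and the AutomaticRes fields come from the hunks above; the import paths and the exact request/response shape are assumptions.

    // Minimal sketch, not the component itself: generate a prompt with the
    // node-specific model and report a server-side failure as a toast.
    import Toast from '@/app/components/base/toast' // path assumed
    import { generateRule } from '@/service/debug'
    import type { AutomaticRes } from '@/service/debug'
    import type { Model } from '@/types/app' // type extracted by this commit

    export async function generatePromptForNode(
      instruction: string,
      model?: Model, // undefined outside an LLM node; the node's own model inside one
    ): Promise<AutomaticRes | null> {
      const { error, ...res } = await generateRule({
        instruction,
        model_config: model,
      })
      if (error) {
        Toast.notify({ type: 'error', message: error })
        return null
      }
      return res // { prompt, variables, opening_statement }
    }

The revert in PATCH 10 below removes exactly this wiring again, so the two commits read most easily side by side.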
From 71a72114111b5849d6a8f36e9ad1ec0f6e635027 Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Mon, 22 Jul 2024 19:56:46 +0800
Subject: [PATCH 03/19] Feat/add email support for pro and team (#6533)
---
.../header/account-dropdown/index.tsx | 15 +++++++++++
web/app/components/header/utils/util.ts | 25 +++++++++++++++++++
web/i18n/de-DE/common.ts | 1 +
web/i18n/en-US/common.ts | 1 +
web/i18n/es-ES/common.ts | 1 +
web/i18n/fr-FR/common.ts | 1 +
web/i18n/hi-IN/common.ts | 1 +
web/i18n/it-IT/common.ts | 1 +
web/i18n/ja-JP/common.ts | 1 +
web/i18n/ko-KR/common.ts | 1 +
web/i18n/pl-PL/common.ts | 1 +
web/i18n/pt-BR/common.ts | 1 +
web/i18n/ro-RO/common.ts | 1 +
web/i18n/uk-UA/common.ts | 1 +
web/i18n/vi-VN/common.ts | 1 +
web/i18n/zh-Hans/common.ts | 1 +
web/i18n/zh-Hant/common.ts | 1 +
17 files changed, 55 insertions(+)
create mode 100644 web/app/components/header/utils/util.ts
diff --git a/web/app/components/header/account-dropdown/index.tsx b/web/app/components/header/account-dropdown/index.tsx
index 006c0311e0..2298bff82d 100644
--- a/web/app/components/header/account-dropdown/index.tsx
+++ b/web/app/components/header/account-dropdown/index.tsx
@@ -8,6 +8,7 @@ import Link from 'next/link'
import { Menu, Transition } from '@headlessui/react'
import Indicator from '../indicator'
import AccountAbout from '../account-about'
+import { mailToSupport } from '../utils/util'
import WorkplaceSelector from './workplace-selector'
import classNames from '@/utils/classnames'
import I18n from '@/context/i18n'
@@ -18,6 +19,9 @@ import { ArrowUpRight } from '@/app/components/base/icons/src/vender/line/arrows
import { LogOut01 } from '@/app/components/base/icons/src/vender/line/general'
import { useModalContext } from '@/context/modal-context'
import { LanguagesSupported } from '@/i18n/language'
+import { useProviderContext } from '@/context/provider-context'
+import { Plan } from '@/app/components/billing/type'
+
export type IAppSelecotr = {
isMobile: boolean
}
@@ -34,6 +38,8 @@ export default function AppSelector({ isMobile }: IAppSelecotr) {
const { t } = useTranslation()
const { userProfile, langeniusVersionInfo } = useAppContext()
const { setShowAccountSettingModal } = useModalContext()
+ const { plan } = useProviderContext()
+ const canEmailSupport = plan.type === Plan.professional || plan.type === Plan.team || plan.type === Plan.enterprise
const handleLogout = async () => {
await logout({
@@ -105,6 +111,15 @@ export default function AppSelector({ isMobile }: IAppSelecotr) {
+
+
+ }
{
+ let mailtoLink = `mailto:${email}`
+
+ if (subject)
+ mailtoLink += `?subject=${encodeURIComponent(subject)}`
+
+ if (body)
+ mailtoLink += `&body=${encodeURIComponent(body)}`
+
+ return mailtoLink
+}
+
+export const mailToSupport = (account: string, plan: string, version: string) => {
+ const subject = `Technical Support Request ${plan} ${account}`
+ const body = `
+ Please do not remove the following information:
+ -----------------------------------------------
+ Current Plan: ${plan}
+ Account: ${account}
+ Version: ${version}
+ Platform:
+ Problem Description:
+ `
+ return generateMailToLink('support@dify.ai', subject, body)
+}
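The new-file header for web/app/components/header/utils/util.ts and the helper's opening line were lost in this excerpt (the hunk jumps from the account-dropdown changes straight into the function body), so the complete helper can only be reconstructed. Everything after the first line below matches the diff; the signature itself is inferred from how email, subject and body are used.

    // Plausible reconstruction of web/app/components/header/utils/util.ts.
    // Only the arrow-function signature is inferred; the body matches the hunk above.
    const generateMailToLink = (email: string, subject?: string, body?: string): string => {
      let mailtoLink = `mailto:${email}`

      if (subject)
        mailtoLink += `?subject=${encodeURIComponent(subject)}`

      if (body) // assumes a subject is always passed; otherwise this would need to start with '?'
        mailtoLink += `&body=${encodeURIComponent(body)}`

      return mailtoLink
    }

    export const mailToSupport = (account: string, plan: string, version: string) => {
      const subject = `Technical Support Request ${plan} ${account}`
      const body = `
        Please do not remove the following information:
        -----------------------------------------------
        Current Plan: ${plan}
        Account: ${account}
        Version: ${version}
        Platform:
        Problem Description:
      `
      return generateMailToLink('support@dify.ai', subject, body)
    }

In the account dropdown the resulting link is only rendered when canEmailSupport is true, i.e. for professional, team and enterprise plans.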
diff --git a/web/i18n/de-DE/common.ts b/web/i18n/de-DE/common.ts
index 9b5e4efc74..bd0ef66e55 100644
--- a/web/i18n/de-DE/common.ts
+++ b/web/i18n/de-DE/common.ts
@@ -119,6 +119,7 @@ const translation = {
},
userProfile: {
settings: 'Einstellungen',
+ emailSupport: 'E-Mail-Support',
workspace: 'Arbeitsbereich',
createWorkspace: 'Arbeitsbereich erstellen',
helpCenter: 'Hilfe',
diff --git a/web/i18n/en-US/common.ts b/web/i18n/en-US/common.ts
index 0514763b62..5c7a3d1d0e 100644
--- a/web/i18n/en-US/common.ts
+++ b/web/i18n/en-US/common.ts
@@ -124,6 +124,7 @@ const translation = {
},
userProfile: {
settings: 'Settings',
+ emailSupport: 'Email Support',
workspace: 'Workspace',
createWorkspace: 'Create Workspace',
helpCenter: 'Help',
diff --git a/web/i18n/es-ES/common.ts b/web/i18n/es-ES/common.ts
index e60c5441a7..4afc06b098 100644
--- a/web/i18n/es-ES/common.ts
+++ b/web/i18n/es-ES/common.ts
@@ -124,6 +124,7 @@ const translation = {
},
userProfile: {
settings: 'Configuraciones',
+ emailSupport: 'Soporte de Correo Electrónico',
workspace: 'Espacio de trabajo',
createWorkspace: 'Crear espacio de trabajo',
helpCenter: 'Ayuda',
diff --git a/web/i18n/fr-FR/common.ts b/web/i18n/fr-FR/common.ts
index 6d15638ff1..bdd94e7a16 100644
--- a/web/i18n/fr-FR/common.ts
+++ b/web/i18n/fr-FR/common.ts
@@ -119,6 +119,7 @@ const translation = {
},
userProfile: {
settings: 'Paramètres',
+ emailSupport: 'Support par courriel',
workspace: 'Espace de travail',
createWorkspace: 'Créer un Espace de Travail',
helpCenter: 'Aide',
diff --git a/web/i18n/hi-IN/common.ts b/web/i18n/hi-IN/common.ts
index 739f96c5f9..6f1385cb3d 100644
--- a/web/i18n/hi-IN/common.ts
+++ b/web/i18n/hi-IN/common.ts
@@ -128,6 +128,7 @@ const translation = {
},
userProfile: {
settings: 'सेटिंग्स',
+ emailSupport: 'सहायता',
workspace: 'वर्कस्पेस',
createWorkspace: 'वर्कस्पेस बनाएं',
helpCenter: 'सहायता',
diff --git a/web/i18n/it-IT/common.ts b/web/i18n/it-IT/common.ts
index 2cebc0c18a..cc9c34e2dc 100644
--- a/web/i18n/it-IT/common.ts
+++ b/web/i18n/it-IT/common.ts
@@ -129,6 +129,7 @@ const translation = {
},
userProfile: {
settings: 'Impostazioni',
+ emailSupport: 'Supporto Email',
workspace: 'Workspace',
createWorkspace: 'Crea Workspace',
helpCenter: 'Aiuto',
diff --git a/web/i18n/ja-JP/common.ts b/web/i18n/ja-JP/common.ts
index e5a5b18cae..b92a2ccd21 100644
--- a/web/i18n/ja-JP/common.ts
+++ b/web/i18n/ja-JP/common.ts
@@ -119,6 +119,7 @@ const translation = {
},
userProfile: {
settings: '設定',
+ emailSupport: 'サポート',
workspace: 'ワークスペース',
createWorkspace: 'ワークスペースを作成',
helpCenter: 'ヘルプ',
diff --git a/web/i18n/ko-KR/common.ts b/web/i18n/ko-KR/common.ts
index c3fe03f3ac..d517dc9346 100644
--- a/web/i18n/ko-KR/common.ts
+++ b/web/i18n/ko-KR/common.ts
@@ -115,6 +115,7 @@ const translation = {
},
userProfile: {
settings: '설정',
+ emailSupport: '이메일 지원',
workspace: '작업 공간',
createWorkspace: '작업 공간 만들기',
helpCenter: '도움말 센터',
diff --git a/web/i18n/pl-PL/common.ts b/web/i18n/pl-PL/common.ts
index a86dfb39d2..d9916e9d38 100644
--- a/web/i18n/pl-PL/common.ts
+++ b/web/i18n/pl-PL/common.ts
@@ -124,6 +124,7 @@ const translation = {
},
userProfile: {
settings: 'Ustawienia',
+ emailSupport: 'Wsparcie e-mail',
workspace: 'Przestrzeń robocza',
createWorkspace: 'Utwórz przestrzeń roboczą',
helpCenter: 'Pomoc',
diff --git a/web/i18n/pt-BR/common.ts b/web/i18n/pt-BR/common.ts
index 9906d4f69d..b82434d523 100644
--- a/web/i18n/pt-BR/common.ts
+++ b/web/i18n/pt-BR/common.ts
@@ -119,6 +119,7 @@ const translation = {
},
userProfile: {
settings: 'Configurações',
+ emailSupport: 'Suporte por e-mail',
workspace: 'Espaço de trabalho',
createWorkspace: 'Criar Espaço de Trabalho',
helpCenter: 'Ajuda',
diff --git a/web/i18n/ro-RO/common.ts b/web/i18n/ro-RO/common.ts
index 3af0357f09..d61fc63f5c 100644
--- a/web/i18n/ro-RO/common.ts
+++ b/web/i18n/ro-RO/common.ts
@@ -118,6 +118,7 @@ const translation = {
},
userProfile: {
settings: 'Setări',
+ emailSupport: 'Suport prin email',
workspace: 'Spațiu de lucru',
createWorkspace: 'Creează Spațiu de lucru',
helpCenter: 'Ajutor',
diff --git a/web/i18n/uk-UA/common.ts b/web/i18n/uk-UA/common.ts
index 1d2ad45c99..ededdcb228 100644
--- a/web/i18n/uk-UA/common.ts
+++ b/web/i18n/uk-UA/common.ts
@@ -119,6 +119,7 @@ const translation = {
},
userProfile: {
settings: 'Налаштування',
+ emailSupport: 'Підтримка по електронній пошті',
workspace: 'Робочий простір',
createWorkspace: 'Створити робочий простір',
helpCenter: 'Довідковий центр',
diff --git a/web/i18n/vi-VN/common.ts b/web/i18n/vi-VN/common.ts
index f547360b7d..ff0172fafc 100644
--- a/web/i18n/vi-VN/common.ts
+++ b/web/i18n/vi-VN/common.ts
@@ -118,6 +118,7 @@ const translation = {
},
userProfile: {
settings: 'Cài đặt',
+ emailSupport: 'Hỗ trợ qua Email',
workspace: 'Không gian làm việc',
createWorkspace: 'Tạo Không gian làm việc',
helpCenter: 'Trung tâm trợ giúp',
diff --git a/web/i18n/zh-Hans/common.ts b/web/i18n/zh-Hans/common.ts
index 20bdd6b02d..e0072e2cba 100644
--- a/web/i18n/zh-Hans/common.ts
+++ b/web/i18n/zh-Hans/common.ts
@@ -124,6 +124,7 @@ const translation = {
},
userProfile: {
settings: '设置',
+ emailSupport: '邮件支持',
workspace: '工作空间',
createWorkspace: '创建工作空间',
helpCenter: '帮助文档',
diff --git a/web/i18n/zh-Hant/common.ts b/web/i18n/zh-Hant/common.ts
index eb7b80bcbe..78c34d3351 100644
--- a/web/i18n/zh-Hant/common.ts
+++ b/web/i18n/zh-Hant/common.ts
@@ -119,6 +119,7 @@ const translation = {
},
userProfile: {
settings: '設定',
+ emailSupport: '電子郵件支援',
workspace: '工作空間',
createWorkspace: '建立工作空間',
helpCenter: '幫助文件',
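The JSX this commit adds to account-dropdown/index.tsx was stripped from its hunk earlier in the excerpt (only blank "+" lines and a closing brace survive), so the menu entry can only be sketched. canEmailSupport, mailToSupport and the common.userProfile.emailSupport key come from the surviving lines; the standalone component shape, userProfile's email field and langeniusVersionInfo.current_version are assumptions for illustration.

    // Sketch only — in the real patch this markup lives inline in the dropdown menu.
    // Rendered for professional/team/enterprise plans; opens a prefilled support email.
    import { useTranslation } from 'react-i18next'
    import { mailToSupport } from '../utils/util'

    type Props = {
      canEmailSupport: boolean
      email: string // e.g. userProfile.email (assumed field)
      planType: string // plan.type from the provider context
      version: string // e.g. langeniusVersionInfo.current_version (assumed field)
    }

    const EmailSupportItem = ({ canEmailSupport, email, planType, version }: Props) => {
      const { t } = useTranslation()
      if (!canEmailSupport)
        return null
      return (
        <a href={mailToSupport(email, planType, version)} target='_blank' rel='noopener noreferrer'>
          {t('common.userProfile.emailSupport')}
        </a>
      )
    }

    export default EmailSupportItem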
From 617847e3c0b9aad0b1e0eca40198c453c6b652e0 Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Mon, 22 Jul 2024 22:58:07 +0800
Subject: [PATCH 04/19] fix(api/services/app_generate_service.py): Remove wrong
type hints. (#6535)
---
api/libs/helper.py | 3 ++-
api/services/app_generate_service.py | 2 +-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/api/libs/helper.py b/api/libs/helper.py
index 335c6688f4..15cd65dd6a 100644
--- a/api/libs/helper.py
+++ b/api/libs/helper.py
@@ -15,6 +15,7 @@ from zoneinfo import available_timezones
from flask import Response, current_app, stream_with_context
from flask_restful import fields
+from core.app.features.rate_limiting.rate_limit import RateLimitGenerator
from extensions.ext_redis import redis_client
from models.account import Account
@@ -159,7 +160,7 @@ def generate_text_hash(text: str) -> str:
return sha256(hash_text.encode()).hexdigest()
-def compact_generate_response(response: Union[dict, Generator]) -> Response:
+def compact_generate_response(response: Union[dict, RateLimitGenerator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py
index e894570b97..cff4ba8af9 100644
--- a/api/services/app_generate_service.py
+++ b/api/services/app_generate_service.py
@@ -21,7 +21,7 @@ class AppGenerateService:
args: Any,
invoke_from: InvokeFrom,
streaming: bool = True,
- ) -> Union[dict, Generator[dict, None, None]]:
+ ):
"""
App Content Generate
:param app_model: app model
From cd7fa8027aea9658257d5289314f65ba337eda28 Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Mon, 22 Jul 2024 22:58:22 +0800
Subject: [PATCH 05/19] fix(api/core/model_manager.py): Avoid mutation during
iteration. (#6536)
---
api/core/model_manager.py | 3 +--
api/core/rag/datasource/keyword/keyword_base.py | 3 +--
api/core/rag/datasource/vdb/vector_base.py | 3 +--
api/core/rag/datasource/vdb/vector_factory.py | 3 +--
api/services/model_load_balancing_service.py | 3 +--
5 files changed, 5 insertions(+), 10 deletions(-)
diff --git a/api/core/model_manager.py b/api/core/model_manager.py
index dc7556f09a..8e99ad3dec 100644
--- a/api/core/model_manager.py
+++ b/api/core/model_manager.py
@@ -410,10 +410,9 @@ class LBModelManager:
self._model = model
self._load_balancing_configs = load_balancing_configs
- for load_balancing_config in self._load_balancing_configs:
+ for load_balancing_config in self._load_balancing_configs[:]: # Iterate over a shallow copy of the list
if load_balancing_config.name == "__inherit__":
if not managed_credentials:
- # FIXME: Mutation to loop iterable `self._load_balancing_configs` during iteration
# remove __inherit__ if managed credentials is not provided
self._load_balancing_configs.remove(load_balancing_config)
else:
diff --git a/api/core/rag/datasource/keyword/keyword_base.py b/api/core/rag/datasource/keyword/keyword_base.py
index 67bc6df6fd..b77c6562b2 100644
--- a/api/core/rag/datasource/keyword/keyword_base.py
+++ b/api/core/rag/datasource/keyword/keyword_base.py
@@ -38,11 +38,10 @@ class BaseKeyword(ABC):
raise NotImplementedError
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
- for text in texts:
+ for text in texts[:]:
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
- # FIXME: Mutation to loop iterable `texts` during iteration
texts.remove(text)
return texts
diff --git a/api/core/rag/datasource/vdb/vector_base.py b/api/core/rag/datasource/vdb/vector_base.py
index 0b1d58856c..3f70e8b608 100644
--- a/api/core/rag/datasource/vdb/vector_base.py
+++ b/api/core/rag/datasource/vdb/vector_base.py
@@ -57,11 +57,10 @@ class BaseVector(ABC):
raise NotImplementedError
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
- for text in texts:
+ for text in texts[:]:
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
- # FIXME: Mutation to loop iterable `texts` during iteration
texts.remove(text)
return texts
diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py
index 509273e8ea..fad60ecf45 100644
--- a/api/core/rag/datasource/vdb/vector_factory.py
+++ b/api/core/rag/datasource/vdb/vector_factory.py
@@ -153,11 +153,10 @@ class Vector:
return CacheEmbedding(embedding_model)
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
- for text in texts:
+ for text in texts[:]:
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
- # FIXME: Mutation to loop iterable `texts` during iteration
texts.remove(text)
return texts
diff --git a/api/services/model_load_balancing_service.py b/api/services/model_load_balancing_service.py
index 4f59b86c12..0983839996 100644
--- a/api/services/model_load_balancing_service.py
+++ b/api/services/model_load_balancing_service.py
@@ -131,9 +131,8 @@ class ModelLoadBalancingService:
load_balancing_configs.insert(0, inherit_config)
else:
# move the inherit configuration to the first
- for i, load_balancing_config in enumerate(load_balancing_configs):
+ for i, load_balancing_config in enumerate(load_balancing_configs[:]):
if load_balancing_config.name == '__inherit__':
- # FIXME: Mutation to loop iterable `load_balancing_configs` during iteration
inherit_config = load_balancing_configs.pop(i)
load_balancing_configs.insert(0, inherit_config)
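Each hunk in this commit applies the same fix: the loop walks a shallow copy (texts[:], load_balancing_configs[:]) while the removals still apply to the original list, which is what makes the old FIXME comments removable. The snippet below illustrates the pitfall in TypeScript, the language used elsewhere in this series; the patched code itself is the Python shown above, where [...docs] plays the role of texts[:].

    // Why the shallow copy matters: removing from the array you are iterating over
    // shifts later elements left, so the element right after each removal gets skipped.
    type Doc = { docId: string }

    function filterDuplicates(docs: Doc[], exists: (id: string) => boolean): Doc[] {
      // Buggy shape (what the FIXME comments flagged):
      //   for (const doc of docs) { if (exists(doc.docId)) docs.splice(docs.indexOf(doc), 1) }
      //   -> with docs = [a, b, c] and a, b already indexed, b is skipped and survives.

      // Patched shape: iterate a shallow copy, mutate the original.
      for (const doc of [...docs]) {
        if (exists(doc.docId))
          docs.splice(docs.indexOf(doc), 1)
      }
      return docs
    }

    // Example: adjacent duplicates are removed as well.
    const seen = new Set(['a', 'b'])
    console.log(filterDuplicates([{ docId: 'a' }, { docId: 'b' }, { docId: 'c' }], id => seen.has(id)))
    // -> [ { docId: 'c' } ]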
From 5e2f3ec6f092e7e2370ee1e4ccba50ee1cefdd0d Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Tue, 23 Jul 2024 09:44:27 +0800
Subject: [PATCH 06/19] update discussion template (#6544)
---
.../DISCUSSION_TEMPLATE/feature_request.yml | 40 +++++++++++++++++++
.github/DISCUSSION_TEMPLATE/general.yml | 25 ++++++++++++
.github/DISCUSSION_TEMPLATE/help.yml | 33 +++++++++++++++
3 files changed, 98 insertions(+)
create mode 100644 .github/DISCUSSION_TEMPLATE/feature_request.yml
create mode 100644 .github/DISCUSSION_TEMPLATE/general.yml
create mode 100644 .github/DISCUSSION_TEMPLATE/help.yml
diff --git a/.github/DISCUSSION_TEMPLATE/feature_request.yml b/.github/DISCUSSION_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000000..009e60eb74
--- /dev/null
+++ b/.github/DISCUSSION_TEMPLATE/feature_request.yml
@@ -0,0 +1,40 @@
+name: "Feature or enhancement request"
+description: Propose something new.
+labels:
+ - enhancement
+body:
+ - type: checkboxes
+ attributes:
+ label: Self Checks
+ description: "To make sure we get to you in time, please check the following :)"
+ options:
+ - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
+ required: true
+ - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
+ required: true
+ - label: "请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+ required: true
+ - label: "Please do not modify this template :) and fill in all the required fields."
+ required: true
+ - type: textarea
+ attributes:
+ label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
+ placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: 2. Additional context or comments
+ placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
+ validations:
+ required: false
+ - type: checkboxes
+ attributes:
+ label: 3. Can you help us with this feature?
+ description: Let us know! This is not a commitment, but a starting point for collaboration.
+ options:
+ - label: I am interested in contributing to this feature.
+ required: false
+ - type: markdown
+ attributes:
+ value: Please limit one request per issue.
diff --git a/.github/DISCUSSION_TEMPLATE/general.yml b/.github/DISCUSSION_TEMPLATE/general.yml
new file mode 100644
index 0000000000..c3ffee5948
--- /dev/null
+++ b/.github/DISCUSSION_TEMPLATE/general.yml
@@ -0,0 +1,25 @@
+name: "General Discussion"
+description: "General discussion about the project."
+body:
+ - type: checkboxes
+ attributes:
+ label: Self Checks
+ description: "To make sure we get to you in time, please check the following :)"
+ options:
+ - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
+ required: true
+ - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
+ required: true
+ - label: "请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+ required: true
+ - label: "Please do not modify this template :) and fill in all the required fields."
+ required: true
+ - type: textarea
+ attributes:
+ label: Content
+ placeholder: Please describe the content you would like to discuss.
+ validations:
+ required: true
+ - type: markdown
+ attributes:
+ value: Please limit one request per issue.
diff --git a/.github/DISCUSSION_TEMPLATE/help.yml b/.github/DISCUSSION_TEMPLATE/help.yml
new file mode 100644
index 0000000000..02ad65e39c
--- /dev/null
+++ b/.github/DISCUSSION_TEMPLATE/help.yml
@@ -0,0 +1,33 @@
+name: "Help"
+description: "Request for help."
+labels:
+ - enhancement
+body:
+ - type: checkboxes
+ attributes:
+ label: Self Checks
+ description: "To make sure we get to you in time, please check the following :)"
+ options:
+ - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
+ required: true
+ - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
+ required: true
+ - label: "请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+ required: true
+ - label: "Please do not modify this template :) and fill in all the required fields."
+ required: true
+ - type: textarea
+ attributes:
+ label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
+ placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: 2. Additional context or comments
+ placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
+ validations:
+ required: false
+ - type: markdown
+ attributes:
+ value: Please limit one request per issue.
From 7208ea1da9405fce032298bd8e92a172c3f22303 Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Tue, 23 Jul 2024 09:58:47 +0800
Subject: [PATCH 07/19] fix: template (#6545)
---
.github/DISCUSSION_TEMPLATE/feature_request.yml | 3 +--
.github/DISCUSSION_TEMPLATE/general.yml | 3 +--
.github/DISCUSSION_TEMPLATE/help.yml | 3 +--
3 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/.github/DISCUSSION_TEMPLATE/feature_request.yml b/.github/DISCUSSION_TEMPLATE/feature_request.yml
index 009e60eb74..979247a351 100644
--- a/.github/DISCUSSION_TEMPLATE/feature_request.yml
+++ b/.github/DISCUSSION_TEMPLATE/feature_request.yml
@@ -1,5 +1,4 @@
-name: "Feature or enhancement request"
-description: Propose something new.
+title: "Feature or enhancement request"
labels:
- enhancement
body:
diff --git a/.github/DISCUSSION_TEMPLATE/general.yml b/.github/DISCUSSION_TEMPLATE/general.yml
index c3ffee5948..5af61ea64c 100644
--- a/.github/DISCUSSION_TEMPLATE/general.yml
+++ b/.github/DISCUSSION_TEMPLATE/general.yml
@@ -1,5 +1,4 @@
-name: "General Discussion"
-description: "General discussion about the project."
+title: "General Discussion"
body:
- type: checkboxes
attributes:
diff --git a/.github/DISCUSSION_TEMPLATE/help.yml b/.github/DISCUSSION_TEMPLATE/help.yml
index 02ad65e39c..e6a38c46dc 100644
--- a/.github/DISCUSSION_TEMPLATE/help.yml
+++ b/.github/DISCUSSION_TEMPLATE/help.yml
@@ -1,5 +1,4 @@
-name: "Help"
-description: "Request for help."
+title: "Help"
labels:
- enhancement
body:
From 66765acf00215fba5508bc561d291f16e4ae6613 Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Tue, 23 Jul 2024 10:01:19 +0800
Subject: [PATCH 08/19] Update help.yml (#6546)
---
.github/DISCUSSION_TEMPLATE/help.yml | 2 --
1 file changed, 2 deletions(-)
diff --git a/.github/DISCUSSION_TEMPLATE/help.yml b/.github/DISCUSSION_TEMPLATE/help.yml
index e6a38c46dc..abebaa9727 100644
--- a/.github/DISCUSSION_TEMPLATE/help.yml
+++ b/.github/DISCUSSION_TEMPLATE/help.yml
@@ -1,6 +1,4 @@
title: "Help"
-labels:
- - enhancement
body:
- type: checkboxes
attributes:
From e80412df23cb81d7deace76198736fed4e7f3f08 Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Tue, 23 Jul 2024 10:07:54 +0800
Subject: [PATCH 09/19] feat: rename template (#6547)
---
.../{feature_request.yml => suggestion.yml} | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
rename .github/DISCUSSION_TEMPLATE/{feature_request.yml => suggestion.yml} (96%)
diff --git a/.github/DISCUSSION_TEMPLATE/feature_request.yml b/.github/DISCUSSION_TEMPLATE/suggestion.yml
similarity index 96%
rename from .github/DISCUSSION_TEMPLATE/feature_request.yml
rename to .github/DISCUSSION_TEMPLATE/suggestion.yml
index 979247a351..0893a10b2d 100644
--- a/.github/DISCUSSION_TEMPLATE/feature_request.yml
+++ b/.github/DISCUSSION_TEMPLATE/suggestion.yml
@@ -1,6 +1,4 @@
-title: "Feature or enhancement request"
-labels:
- - enhancement
+title: Suggestions for New Features
body:
- type: checkboxes
attributes:
From d726473c6d81924b49442155498e3742a42a2d06 Mon Sep 17 00:00:00 2001
From: Joel
Date: Tue, 23 Jul 2024 13:31:32 +0800
Subject: [PATCH 10/19] Revert "chore: use node specify llm to auto generate
prompt" (#6555)
---
.../config-prompt/simple-prompt-input.tsx | 10 --
.../config/automatic/get-automatic-res.tsx | 12 +-
.../nodes/_base/components/prompt/editor.tsx | 5 +-
.../llm/components/config-prompt-item.tsx | 5 +-
.../nodes/llm/components/config-prompt.tsx | 6 +-
.../llm/components/prompt-generator-btn.tsx | 6 -
.../components/workflow/nodes/llm/panel.tsx | 1 -
web/service/debug.ts | 1 -
web/types/app.ts | 109 +++++++++---------
9 files changed, 57 insertions(+), 98 deletions(-)
diff --git a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
index da46d3a609..b0a140fc97 100644
--- a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
+++ b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
@@ -14,7 +14,6 @@ import PromptEditorHeightResizeWrap from './prompt-editor-height-resize-wrap'
import cn from '@/utils/classnames'
import { type PromptVariable } from '@/models/debug'
import Tooltip from '@/app/components/base/tooltip'
-import type { CompletionParams } from '@/types/app'
import { AppType } from '@/types/app'
import { getNewVar, getVars } from '@/utils/var'
import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
@@ -59,7 +58,6 @@ const Prompt: FC = ({
const { eventEmitter } = useEventEmitterContextContext()
const {
modelConfig,
- completionParams,
dataSets,
setModelConfig,
setPrevPromptConfig,
@@ -249,14 +247,6 @@ const Prompt: FC = ({
{showAutomatic && (
void
onFinished: (res: AutomaticRes) => void
@@ -59,7 +57,6 @@ const TryLabel: FC<{
const GetAutomaticRes: FC = ({
mode,
- model,
isShow,
onClose,
isInLLMNode,
@@ -152,17 +149,10 @@ const GetAutomaticRes: FC = ({
return
setLoadingTrue()
try {
- const { error, ...res } = await generateRule({
+ const res = await generateRule({
instruction,
- model_config: model,
})
setRes(res)
- if (error) {
- Toast.notify({
- type: 'error',
- message: error,
- })
- }
}
finally {
setLoadingFalse()
diff --git a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
index 873dc0f17e..b350ff0f67 100644
--- a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
+++ b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
@@ -9,7 +9,6 @@ import { useTranslation } from 'react-i18next'
import { useBoolean } from 'ahooks'
import { BlockEnum, EditionType } from '../../../../types'
import type {
- ModelConfig,
Node,
NodeOutPutVar,
Variable,
@@ -59,7 +58,6 @@ type Props = {
availableNodes?: Node[]
isSupportPromptGenerator?: boolean
onGenerated?: (prompt: string) => void
- modelConfig?: ModelConfig
// for jinja
isSupportJinja?: boolean
editionType?: EditionType
@@ -92,7 +90,6 @@ const Editor: FC = ({
varList = [],
handleAddVariable,
onGenerated,
- modelConfig,
}) => {
const { t } = useTranslation()
const { eventEmitter } = useEventEmitterContextContext()
@@ -133,7 +130,7 @@ const Editor: FC = ({
{value?.length || 0}
{isSupportPromptGenerator && (
-
+
)}
diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
index 39715a7c71..237e90ca9a 100644
--- a/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
+++ b/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
@@ -4,7 +4,7 @@ import React, { useCallback, useEffect, useState } from 'react'
import { uniqueId } from 'lodash-es'
import { useTranslation } from 'react-i18next'
import { RiQuestionLine } from '@remixicon/react'
-import type { ModelConfig, PromptItem, Variable } from '../../../types'
+import type { PromptItem, Variable } from '../../../types'
import { EditionType } from '../../../types'
import { useWorkflowStore } from '../../../store'
import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
@@ -38,7 +38,6 @@ type Props = {
availableNodes: any
varList: Variable[]
handleAddVariable: (payload: any) => void
- modelConfig?: ModelConfig
}
const roleOptions = [
@@ -78,7 +77,6 @@ const ConfigPromptItem: FC = ({
availableNodes,
varList,
handleAddVariable,
- modelConfig,
}) => {
const { t } = useTranslation()
const workflowStore = useWorkflowStore()
@@ -140,7 +138,6 @@ const ConfigPromptItem: FC = ({
availableNodes={availableNodes}
isSupportPromptGenerator={payload.role === PromptRole.system}
onGenerated={handleGenerated}
- modelConfig={modelConfig}
isSupportJinja
editionType={payload.edition_type}
onEditionTypeChange={onEditionTypeChange}
diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
index 2c6d725c42..8db8425b61 100644
--- a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
+++ b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
@@ -5,7 +5,7 @@ import { useTranslation } from 'react-i18next'
import produce from 'immer'
import { ReactSortable } from 'react-sortablejs'
import { v4 as uuid4 } from 'uuid'
-import type { ModelConfig, PromptItem, ValueSelector, Var, Variable } from '../../../types'
+import type { PromptItem, ValueSelector, Var, Variable } from '../../../types'
import { EditionType, PromptRole } from '../../../types'
import useAvailableVarList from '../../_base/hooks/use-available-var-list'
import { useWorkflowStore } from '../../../store'
@@ -33,7 +33,6 @@ type Props = {
}
varList?: Variable[]
handleAddVariable: (payload: any) => void
- modelConfig: ModelConfig
}
const ConfigPrompt: FC = ({
@@ -48,7 +47,6 @@ const ConfigPrompt: FC = ({
hasSetBlockStatus,
varList = [],
handleAddVariable,
- modelConfig,
}) => {
const { t } = useTranslation()
const workflowStore = useWorkflowStore()
@@ -201,7 +199,6 @@ const ConfigPrompt: FC = ({
availableNodes={availableNodesWithParent}
varList={varList}
handleAddVariable={handleAddVariable}
- modelConfig={modelConfig}
/>
)
@@ -237,7 +234,6 @@ const ConfigPrompt: FC = ({
onEditionTypeChange={handleCompletionEditionTypeChange}
handleAddVariable={handleAddVariable}
onGenerated={handleGenerated}
- modelConfig={modelConfig}
/>
)}
diff --git a/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx b/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
index ed8e7df770..63d123402e 100644
--- a/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
+++ b/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
@@ -7,19 +7,14 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
import { AppType } from '@/types/app'
import type { AutomaticRes } from '@/service/debug'
-import type { ModelConfig } from '@/app/components/workflow/types'
-import type { Model } from '@/types/app'
-
type Props = {
className?: string
onGenerated?: (prompt: string) => void
- modelConfig?: ModelConfig
}
const PromptGeneratorBtn: FC = ({
className,
onGenerated,
- modelConfig,
}) => {
const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
const handleAutomaticRes = useCallback((res: AutomaticRes) => {
@@ -37,7 +32,6 @@ const PromptGeneratorBtn: FC = ({
isShow={showAutomatic}
onClose={showAutomaticFalse}
onFinished={handleAutomaticRes}
- model={modelConfig as Model}
isInLLMNode
/>
)}
diff --git a/web/app/components/workflow/nodes/llm/panel.tsx b/web/app/components/workflow/nodes/llm/panel.tsx
index 1c2ec3c985..791dc6133d 100644
--- a/web/app/components/workflow/nodes/llm/panel.tsx
+++ b/web/app/components/workflow/nodes/llm/panel.tsx
@@ -178,7 +178,6 @@ const Panel: FC> = ({
hasSetBlockStatus={hasSetBlockStatus}
varList={inputs.prompt_config?.jinja2_variables || []}
handleAddVariable={handleAddVariable}
- modelConfig={model}
/>
)}
diff --git a/web/service/debug.ts b/web/service/debug.ts
index 8e90fe565f..a373a0dd6a 100644
--- a/web/service/debug.ts
+++ b/web/service/debug.ts
@@ -7,7 +7,6 @@ export type AutomaticRes = {
prompt: string
variables: string[]
opening_statement: string
- error?: string
}
export const sendChatMessage = async (appId: string, body: Record, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }: {
diff --git a/web/types/app.ts b/web/types/app.ts
index 9432e6d05a..ed73e2f5f7 100644
--- a/web/types/app.ts
+++ b/web/types/app.ts
@@ -135,64 +135,9 @@ export enum AgentStrategy {
react = 'react',
}
-export type CompletionParams = {
- /** Maximum number of tokens in the answer message returned by Completion */
- max_tokens: number
- /**
- * A number between 0 and 2.
- * The larger the number, the more random the result;
- * otherwise, the more deterministic.
- * When in use, choose either `temperature` or `top_p`.
- * Default is 1.
- */
- temperature: number
- /**
- * Represents the proportion of probability mass samples to take,
- * e.g., 0.1 means taking the top 10% probability mass samples.
- * The determinism between the samples is basically consistent.
- * Among these results, the `top_p` probability mass results are taken.
- * When in use, choose either `temperature` or `top_p`.
- * Default is 1.
- */
- top_p: number
- /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
- echo: boolean
- /**
- * Specify up to 4 to automatically stop generating before the text specified in `stop`.
- * Suitable for use in chat mode.
- * For example, specify "Q" and "A",
- * and provide some Q&A examples as context,
- * and the model will give out in Q&A format and stop generating before Q&A.
- */
- stop: string[]
- /**
- * A number between -2.0 and 2.0.
- * The larger the value, the less the model will repeat topics and the more it will provide new topics.
- */
- presence_penalty: number
- /**
- * A number between -2.0 and 2.0.
- * A lower setting will make the model appear less cultured,
- * always repeating expressions.
- * The difference between `frequency_penalty` and `presence_penalty`
- * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
- * while `presence_penalty` penalizes a word based on its occurrence in the input text.
- */
- frequency_penalty: number
-}
/**
* Model configuration. The backend type.
*/
-export type Model = {
- /** LLM provider, e.g., OPENAI */
- provider: string
- /** Model name, e.g, gpt-3.5.turbo */
- name: string
- mode: ModelModeType
- /** Default Completion call parameters */
- completion_params: CompletionParams
-}
-
export type ModelConfig = {
opening_statement: string
suggested_questions?: string[]
@@ -229,7 +174,59 @@ export type ModelConfig = {
strategy?: AgentStrategy
tools: ToolItem[]
}
- model: Model
+ model: {
+ /** LLM provider, e.g., OPENAI */
+ provider: string
+ /** Model name, e.g, gpt-3.5.turbo */
+ name: string
+ mode: ModelModeType
+ /** Default Completion call parameters */
+ completion_params: {
+ /** Maximum number of tokens in the answer message returned by Completion */
+ max_tokens: number
+ /**
+ * A number between 0 and 2.
+ * The larger the number, the more random the result;
+ * otherwise, the more deterministic.
+ * When in use, choose either `temperature` or `top_p`.
+ * Default is 1.
+ */
+ temperature: number
+ /**
+ * Represents the proportion of probability mass samples to take,
+ * e.g., 0.1 means taking the top 10% probability mass samples.
+ * The determinism between the samples is basically consistent.
+ * Among these results, the `top_p` probability mass results are taken.
+ * When in use, choose either `temperature` or `top_p`.
+ * Default is 1.
+ */
+ top_p: number
+ /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
+ echo: boolean
+ /**
+ * Specify up to 4 to automatically stop generating before the text specified in `stop`.
+ * Suitable for use in chat mode.
+ * For example, specify "Q" and "A",
+ * and provide some Q&A examples as context,
+ * and the model will give out in Q&A format and stop generating before Q&A.
+ */
+ stop: string[]
+ /**
+ * A number between -2.0 and 2.0.
+ * The larger the value, the less the model will repeat topics and the more it will provide new topics.
+ */
+ presence_penalty: number
+ /**
+ * A number between -2.0 and 2.0.
+ * A lower setting will make the model appear less cultured,
+ * always repeating expressions.
+ * The difference between `frequency_penalty` and `presence_penalty`
+ * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
+ * while `presence_penalty` penalizes a word based on its occurrence in the input text.
+ */
+ frequency_penalty: number
+ }
+ }
dataset_configs: DatasetConfigs
file_upload?: {
image: VisionSettings
From 155e708540b6d88c578b03dd8fe8b441c944e998 Mon Sep 17 00:00:00 2001
From: Joel
Date: Tue, 23 Jul 2024 13:35:35 +0800
Subject: [PATCH 11/19] Revert "chore: improve prompt auto generator" (#6556)
---
.../app/configuration/config-prompt/index.tsx | 9 -
.../config-prompt/simple-prompt-input.tsx | 63 ++---
.../config/automatic/automatic-btn.tsx | 14 +-
.../config/automatic/get-automatic-res.tsx | 260 +++++++-----------
.../config/automatic/style.module.css | 7 -
.../icons/assets/vender/other/generator.svg | 4 -
.../icons/src/vender/other/Generator.json | 37 ---
.../base/icons/src/vender/other/Generator.tsx | 16 --
.../base/icons/src/vender/other/index.ts | 1 -
.../solid/general/QuestionTriangle.json | 2 +-
.../nodes/_base/components/prompt/editor.tsx | 9 -
.../llm/components/config-prompt-item.tsx | 14 +-
.../nodes/llm/components/config-prompt.tsx | 14 +-
.../llm/components/prompt-generator-btn.tsx | 42 ---
web/i18n/de-DE/app-debug.ts | 19 +-
web/i18n/en-US/app-debug.ts | 64 +----
web/i18n/fr-FR/app-debug.ts | 19 +-
web/i18n/ja-JP/app-debug.ts | 17 ++
web/i18n/zh-Hans/app-debug.ts | 61 +---
web/i18n/zh-Hant/app-debug.ts | 19 +-
20 files changed, 237 insertions(+), 454 deletions(-)
delete mode 100644 web/app/components/app/configuration/config/automatic/style.module.css
delete mode 100644 web/app/components/base/icons/assets/vender/other/generator.svg
delete mode 100644 web/app/components/base/icons/src/vender/other/Generator.json
delete mode 100644 web/app/components/base/icons/src/vender/other/Generator.tsx
delete mode 100644 web/app/components/base/icons/src/vender/other/index.ts
delete mode 100644 web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
diff --git a/web/app/components/app/configuration/config-prompt/index.tsx b/web/app/components/app/configuration/config-prompt/index.tsx
index 7e40fdc84e..bea4a9e455 100644
--- a/web/app/components/app/configuration/config-prompt/index.tsx
+++ b/web/app/components/app/configuration/config-prompt/index.tsx
@@ -19,9 +19,6 @@ export type IPromptProps = {
promptTemplate: string
promptVariables: PromptVariable[]
readonly?: boolean
- noTitle?: boolean
- gradientBorder?: boolean
- editorHeight?: number
onChange?: (prompt: string, promptVariables: PromptVariable[]) => void
}
@@ -29,10 +26,7 @@ const Prompt: FC = ({
mode,
promptTemplate,
promptVariables,
- noTitle,
- gradientBorder,
readonly = false,
- editorHeight,
onChange,
}) => {
const { t } = useTranslation()
@@ -105,9 +99,6 @@ const Prompt: FC = ({
promptVariables={promptVariables}
readonly={readonly}
onChange={onChange}
- noTitle={noTitle}
- gradientBorder={gradientBorder}
- editorHeight={editorHeight}
/>
)
}
diff --git a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
index b0a140fc97..a15f538227 100644
--- a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
+++ b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
@@ -28,7 +28,6 @@ import { useEventEmitterContextContext } from '@/context/event-emitter'
import { ADD_EXTERNAL_DATA_TOOL } from '@/app/components/app/configuration/config-var'
import { INSERT_VARIABLE_VALUE_BLOCK_COMMAND } from '@/app/components/base/prompt-editor/plugins/variable-block'
import { PROMPT_EDITOR_UPDATE_VALUE_BY_EVENT_EMITTER } from '@/app/components/base/prompt-editor/plugins/update-block'
-import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
export type ISimplePromptInput = {
mode: AppType
@@ -36,9 +35,6 @@ export type ISimplePromptInput = {
promptVariables: PromptVariable[]
readonly?: boolean
onChange?: (promp: string, promptVariables: PromptVariable[]) => void
- noTitle?: boolean
- gradientBorder?: boolean
- editorHeight?: number
}
const Prompt: FC = ({
@@ -47,14 +43,8 @@ const Prompt: FC = ({
promptVariables,
readonly = false,
onChange,
- noTitle,
- gradientBorder,
- editorHeight: initEditorHeight,
}) => {
const { t } = useTranslation()
- const media = useBreakpoints()
- const isMobile = media === MediaType.mobile
-
const { eventEmitter } = useEventEmitterContextContext()
const {
modelConfig,
@@ -126,11 +116,6 @@ const Prompt: FC = ({
const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
const handleAutomaticRes = (res: AutomaticRes) => {
- // put eventEmitter in first place to prevent overwrite the configs.prompt_variables.But another problem is that prompt won't hight the prompt_variables.
- eventEmitter?.emit({
- type: PROMPT_EDITOR_UPDATE_VALUE_BY_EVENT_EMITTER,
- payload: res.prompt,
- } as any)
const newModelConfig = produce(modelConfig, (draft) => {
draft.configs.prompt_template = res.prompt
draft.configs.prompt_variables = res.variables.map(key => ({ key, name: key, type: 'string', required: true }))
@@ -140,35 +125,36 @@ const Prompt: FC = ({
if (mode !== AppType.completion)
setIntroduction(res.opening_statement)
showAutomaticFalse()
+ eventEmitter?.emit({
+ type: PROMPT_EDITOR_UPDATE_VALUE_BY_EVENT_EMITTER,
+ payload: res.prompt,
+ } as any)
}
- const minHeight = initEditorHeight || 228
+ const minHeight = 228
const [editorHeight, setEditorHeight] = useState(minHeight)
return (
-