diff --git a/.devcontainer/post_create_command.sh b/.devcontainer/post_create_command.sh
index e80f9d30aa..d879876d8a 100755
--- a/.devcontainer/post_create_command.sh
+++ b/.devcontainer/post_create_command.sh
@@ -1,11 +1,12 @@
#!/bin/bash
-cd web && npm install
+npm add -g pnpm@9.12.2
+cd web && pnpm install
pipx install poetry
echo 'alias start-api="cd /workspaces/dify/api && poetry run python -m flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc
echo 'alias start-worker="cd /workspaces/dify/api && poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion"' >> ~/.bashrc
-echo 'alias start-web="cd /workspaces/dify/web && npm run dev"' >> ~/.bashrc
+echo 'alias start-web="cd /workspaces/dify/web && pnpm dev"' >> ~/.bashrc
echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify up -d"' >> ~/.bashrc
echo 'alias stop-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify down"' >> ~/.bashrc
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index 9276d1f2fd..433647b155 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -72,16 +72,16 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
with:
node-version: 20
- cache: yarn
+ cache: pnpm
cache-dependency-path: ./web/package.json
- name: Web dependencies
if: steps.changed-files.outputs.any_changed == 'true'
- run: yarn install --frozen-lockfile
+ run: pnpm install --frozen-lockfile
- name: Web style check
if: steps.changed-files.outputs.any_changed == 'true'
- run: yarn run lint
+ run: pnpm run lint
superlinter:
name: SuperLinter
diff --git a/.github/workflows/tool-test-sdks.yaml b/.github/workflows/tool-test-sdks.yaml
index fb4bcb9d66..d3a4592eb5 100644
--- a/.github/workflows/tool-test-sdks.yaml
+++ b/.github/workflows/tool-test-sdks.yaml
@@ -32,10 +32,10 @@ jobs:
with:
node-version: ${{ matrix.node-version }}
cache: ''
- cache-dependency-path: 'yarn.lock'
+ cache-dependency-path: 'pnpm-lock.yaml'
- name: Install Dependencies
- run: yarn install
+ run: pnpm install
- name: Test
- run: yarn test
+ run: pnpm test
diff --git a/.github/workflows/translate-i18n-base-on-english.yml b/.github/workflows/translate-i18n-base-on-english.yml
index 3f51b3b2c7..b45793a05f 100644
--- a/.github/workflows/translate-i18n-base-on-english.yml
+++ b/.github/workflows/translate-i18n-base-on-english.yml
@@ -38,11 +38,11 @@ jobs:
- name: Install dependencies
if: env.FILES_CHANGED == 'true'
- run: yarn install --frozen-lockfile
+ run: pnpm install --frozen-lockfile
- name: Run npm script
if: env.FILES_CHANGED == 'true'
- run: npm run auto-gen-i18n
+ run: pnpm run auto-gen-i18n
- name: Create Pull Request
if: env.FILES_CHANGED == 'true'
diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml
index 5aee64b8e6..d9f310c811 100644
--- a/.github/workflows/web-tests.yml
+++ b/.github/workflows/web-tests.yml
@@ -34,13 +34,13 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
with:
node-version: 20
- cache: yarn
+ cache: pnpm
cache-dependency-path: ./web/package.json
- name: Install dependencies
if: steps.changed-files.outputs.any_changed == 'true'
- run: yarn install --frozen-lockfile
+ run: pnpm install --frozen-lockfile
- name: Run tests
if: steps.changed-files.outputs.any_changed == 'true'
- run: yarn test
+ run: pnpm test
diff --git a/.gitignore b/.gitignore
index ca95df4515..4d4af6efed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -194,3 +194,6 @@ api/.vscode
.idea/
.vscode
+
+# pnpm
+/.pnpm-store
diff --git a/api/core/helper/ssrf_proxy.py b/api/core/helper/ssrf_proxy.py
index 2e422cf444..c7a834a6b9 100644
--- a/api/core/helper/ssrf_proxy.py
+++ b/api/core/helper/ssrf_proxy.py
@@ -60,17 +60,20 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
if response.status_code not in STATUS_FORCELIST:
return response
else:
- logging.warning(f"Received status code {response.status_code} for URL {url} which is in the force list")
+ logging.warning(
+ f"Received status code {response.status_code} for URL {url} which is in the force list")
except httpx.RequestError as e:
- logging.warning(f"Request to URL {url} failed on attempt {retries + 1}: {e}")
+ logging.warning(f"Request to URL {url} failed on attempt {
+ retries + 1}: {e}")
if max_retries == 0:
raise
retries += 1
if retries <= max_retries:
time.sleep(BACKOFF_FACTOR * (2 ** (retries - 1)))
- raise MaxRetriesExceededError(f"Reached maximum retries ({max_retries}) for URL {url}")
+ raise MaxRetriesExceededError(
+ f"Reached maximum retries ({max_retries}) for URL {url}")
def get(url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
diff --git a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py
index d7a14207e9..3b3e47578a 100644
--- a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py
+++ b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py
@@ -17,7 +17,8 @@ from extensions.ext_redis import redis_client
from models.dataset import Dataset
logger = logging.getLogger(__name__)
-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
+logging.basicConfig(level=logging.INFO,
+ format="%(asctime)s - %(levelname)s - %(message)s")
logging.getLogger("lindorm").setLevel(logging.WARN)
ROUTING_FIELD = "routing_field"
@@ -134,7 +135,8 @@ class LindormVectorStore(BaseVector):
self._client.delete(index=self._collection_name, id=id, params=params)
self.refresh()
else:
- logger.warning(f"DELETE BY ID: ID {id} does not exist in the index.")
+ logger.warning(
+ f"DELETE BY ID: ID {id} does not exist in the index.")
def delete(self) -> None:
if self._using_ugc:
@@ -145,7 +147,8 @@ class LindormVectorStore(BaseVector):
self.refresh()
else:
if self._client.indices.exists(index=self._collection_name):
- self._client.indices.delete(index=self._collection_name, params={"timeout": 60})
+ self._client.indices.delete(
+ index=self._collection_name, params={"timeout": 60})
logger.info("Delete index success")
else:
logger.warning(f"Index '{self._collection_name}' does not exist. No deletion performed.")
@@ -168,7 +171,8 @@ class LindormVectorStore(BaseVector):
raise ValueError("All elements in query_vector should be floats")
top_k = kwargs.get("top_k", 10)
- query = default_vector_search_query(query_vector=query_vector, k=top_k, **kwargs)
+ query = default_vector_search_query(
+ query_vector=query_vector, k=top_k, **kwargs)
try:
params = {}
if self._using_ugc:
@@ -220,7 +224,8 @@ class LindormVectorStore(BaseVector):
routing=routing,
routing_field=self._routing_field,
)
- response = self._client.search(index=self._collection_name, body=full_text_query)
+ response = self._client.search(
+ index=self._collection_name, body=full_text_query)
docs = []
for hit in response["hits"]["hits"]:
docs.append(
@@ -238,7 +243,8 @@ class LindormVectorStore(BaseVector):
with redis_client.lock(lock_name, timeout=20):
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
if redis_client.get(collection_exist_cache_key):
- logger.info(f"Collection {self._collection_name} already exists.")
+ logger.info(
+ f"Collection {self._collection_name} already exists.")
return
if self._client.indices.exists(index=self._collection_name):
logger.info(f"{self._collection_name.lower()} already exists.")
@@ -258,10 +264,13 @@ class LindormVectorStore(BaseVector):
hnsw_ef_construction = kwargs.pop("hnsw_ef_construction", 500)
ivfpq_m = kwargs.pop("ivfpq_m", dimension)
nlist = kwargs.pop("nlist", 1000)
- centroids_use_hnsw = kwargs.pop("centroids_use_hnsw", True if nlist >= 5000 else False)
+ centroids_use_hnsw = kwargs.pop(
+ "centroids_use_hnsw", True if nlist >= 5000 else False)
centroids_hnsw_m = kwargs.pop("centroids_hnsw_m", 24)
- centroids_hnsw_ef_construct = kwargs.pop("centroids_hnsw_ef_construct", 500)
- centroids_hnsw_ef_search = kwargs.pop("centroids_hnsw_ef_search", 100)
+ centroids_hnsw_ef_construct = kwargs.pop(
+ "centroids_hnsw_ef_construct", 500)
+ centroids_hnsw_ef_search = kwargs.pop(
+ "centroids_hnsw_ef_search", 100)
mapping = default_text_mapping(
dimension,
method_name,
@@ -281,7 +290,8 @@ class LindormVectorStore(BaseVector):
using_ugc=self._using_ugc,
**kwargs,
)
- self._client.indices.create(index=self._collection_name.lower(), body=mapping)
+ self._client.indices.create(
+ index=self._collection_name.lower(), body=mapping)
redis_client.set(collection_exist_cache_key, 1, ex=3600)
# logger.info(f"create index success: {self._collection_name}")
@@ -347,7 +357,8 @@ def default_text_mapping(dimension: int, method_name: str, **kwargs: Any) -> dic
}
if excludes_from_source:
- mapping["mappings"]["_source"] = {"excludes": excludes_from_source} # e.g. {"excludes": ["vector_field"]}
+ # e.g. {"excludes": ["vector_field"]}
+ mapping["mappings"]["_source"] = {"excludes": excludes_from_source}
if using_ugc and method_name == "ivfpq":
mapping["settings"]["index"]["knn_routing"] = True
@@ -385,7 +396,8 @@ def default_text_search_query(
# build complex search_query when either of must/must_not/should/filter is specified
if must:
if not isinstance(must, list):
- raise RuntimeError(f"unexpected [must] clause with {type(filters)}")
+ raise RuntimeError(
+ f"unexpected [must] clause with {type(filters)}")
if query_clause not in must:
must.append(query_clause)
else:
@@ -395,19 +407,22 @@ def default_text_search_query(
if must_not:
if not isinstance(must_not, list):
- raise RuntimeError(f"unexpected [must_not] clause with {type(filters)}")
+ raise RuntimeError(
+ f"unexpected [must_not] clause with {type(filters)}")
boolean_query["must_not"] = must_not
if should:
if not isinstance(should, list):
- raise RuntimeError(f"unexpected [should] clause with {type(filters)}")
+ raise RuntimeError(
+ f"unexpected [should] clause with {type(filters)}")
boolean_query["should"] = should
if minimum_should_match != 0:
boolean_query["minimum_should_match"] = minimum_should_match
if filters:
if not isinstance(filters, list):
- raise RuntimeError(f"unexpected [filter] clause with {type(filters)}")
+ raise RuntimeError(
+ f"unexpected [filter] clause with {type(filters)}")
boolean_query["filter"] = filters
search_query = {"size": k, "query": {"bool": boolean_query}}
diff --git a/api/core/rag/extractor/word_extractor.py b/api/core/rag/extractor/word_extractor.py
index d93de5fef9..08adad3dd0 100644
--- a/api/core/rag/extractor/word_extractor.py
+++ b/api/core/rag/extractor/word_extractor.py
@@ -50,7 +50,7 @@ class WordExtractor(BaseExtractor):
self.web_path = self.file_path
# TODO: use a better way to handle the file
- self.temp_file = tempfile.NamedTemporaryFile() # noqa: SIM115
+ self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py
index 0ec44eefac..9aa43f58d5 100644
--- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py
+++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py
@@ -44,11 +44,13 @@ class QuestionClassifierNode(LLMNode):
variable_pool = self.graph_runtime_state.variable_pool
# extract variables
- variable = variable_pool.get(node_data.query_variable_selector) if node_data.query_variable_selector else None
+ variable = variable_pool.get(
+ node_data.query_variable_selector) if node_data.query_variable_selector else None
query = variable.value if variable else None
variables = {"query": query}
# fetch model config
- model_instance, model_config = self._fetch_model_config(node_data.model)
+ model_instance, model_config = self._fetch_model_config(
+ node_data.model)
# fetch memory
memory = self._fetch_memory(
node_data_memory=node_data.memory,
@@ -56,7 +58,8 @@ class QuestionClassifierNode(LLMNode):
)
# fetch instruction
node_data.instruction = node_data.instruction or ""
- node_data.instruction = variable_pool.convert_template(node_data.instruction).text
+ node_data.instruction = variable_pool.convert_template(
+ node_data.instruction).text
files = (
self._fetch_files(
@@ -178,12 +181,15 @@ class QuestionClassifierNode(LLMNode):
variable_mapping = {"query": node_data.query_variable_selector}
variable_selectors = []
if node_data.instruction:
- variable_template_parser = VariableTemplateParser(template=node_data.instruction)
- variable_selectors.extend(variable_template_parser.extract_variable_selectors())
+ variable_template_parser = VariableTemplateParser(
+ template=node_data.instruction)
+ variable_selectors.extend(
+ variable_template_parser.extract_variable_selectors())
for variable_selector in variable_selectors:
variable_mapping[variable_selector.variable] = variable_selector.value_selector
- variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}
+ variable_mapping = {node_id + "." + key: value for key,
+ value in variable_mapping.items()}
return variable_mapping
@@ -204,7 +210,8 @@ class QuestionClassifierNode(LLMNode):
context: Optional[str],
) -> int:
prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True)
- prompt_template = self._get_prompt_template(node_data, query, None, 2000)
+ prompt_template = self._get_prompt_template(
+ node_data, query, None, 2000)
prompt_messages = prompt_transform.get_prompt(
prompt_template=prompt_template,
inputs={},
@@ -217,13 +224,15 @@ class QuestionClassifierNode(LLMNode):
)
rest_tokens = 2000
- model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
+ model_context_tokens = model_config.model_schema.model_properties.get(
+ ModelPropertyKey.CONTEXT_SIZE)
if model_context_tokens:
model_instance = ModelInstance(
provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
)
- curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)
+ curr_message_tokens = model_instance.get_llm_num_tokens(
+ prompt_messages)
max_tokens = 0
for parameter_rule in model_config.model_schema.parameter_rules:
@@ -264,7 +273,8 @@ class QuestionClassifierNode(LLMNode):
prompt_messages: list[LLMNodeChatModelMessage] = []
if model_mode == ModelMode.CHAT:
system_prompt_messages = LLMNodeChatModelMessage(
- role=PromptMessageRole.SYSTEM, text=QUESTION_CLASSIFIER_SYSTEM_PROMPT.format(histories=memory_str)
+ role=PromptMessageRole.SYSTEM, text=QUESTION_CLASSIFIER_SYSTEM_PROMPT.format(
+ histories=memory_str)
)
prompt_messages.append(system_prompt_messages)
user_prompt_message_1 = LLMNodeChatModelMessage(
@@ -305,4 +315,5 @@ class QuestionClassifierNode(LLMNode):
)
else:
- raise InvalidModelTypeError(f"Model mode {model_mode} not support.")
+ raise InvalidModelTypeError(
+ f"Model mode {model_mode} not support.")
diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py
index 58b910e17b..285384ee6e 100644
--- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py
+++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py
@@ -68,7 +68,8 @@ def test_executor_with_json_body_and_object_variable():
system_variables={},
user_inputs={},
)
- variable_pool.add(["pre_node_id", "object"], {"name": "John Doe", "age": 30, "email": "john@example.com"})
+ variable_pool.add(["pre_node_id", "object"], {
+ "name": "John Doe", "age": 30, "email": "john@example.com"})
# Prepare the node data
node_data = HttpRequestNodeData(
@@ -123,7 +124,8 @@ def test_executor_with_json_body_and_nested_object_variable():
system_variables={},
user_inputs={},
)
- variable_pool.add(["pre_node_id", "object"], {"name": "John Doe", "age": 30, "email": "john@example.com"})
+ variable_pool.add(["pre_node_id", "object"], {
+ "name": "John Doe", "age": 30, "email": "john@example.com"})
# Prepare the node data
node_data = HttpRequestNodeData(
diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py
index 97bacada74..1375d835d3 100644
--- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py
+++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py
@@ -18,6 +18,14 @@ from models.enums import UserFrom
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
+def test_plain_text_to_dict():
+ assert _plain_text_to_dict("aa\n cc:") == {"aa": "", "cc": ""}
+ assert _plain_text_to_dict("aa:bb\n cc:dd") == {"aa": "bb", "cc": "dd"}
+ assert _plain_text_to_dict("aa:bb\n cc:dd\n") == {"aa": "bb", "cc": "dd"}
+ assert _plain_text_to_dict("aa:bb\n\n cc : dd\n\n") == {
+ "aa": "bb", "cc": "dd"}
+
+
def test_http_request_node_binary_file(monkeypatch):
data = HttpRequestNodeData(
title="test",
@@ -183,7 +191,8 @@ def test_http_request_node_form_with_file(monkeypatch):
def attr_checker(*args, **kwargs):
assert kwargs["data"] == {"name": "test"}
- assert kwargs["files"] == {"file": (None, b"test", "application/octet-stream")}
+ assert kwargs["files"] == {
+ "file": (None, b"test", "application/octet-stream")}
return httpx.Response(200, content=b"")
monkeypatch.setattr(
diff --git a/docker/.env.example b/docker/.env.example
index f3866a05e9..f85a3d94ee 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -513,7 +513,7 @@ TENCENT_VECTOR_DB_SHARD=1
TENCENT_VECTOR_DB_REPLICAS=2
# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch`
-ELASTICSEARCH_HOST=elasticsearch
+ELASTICSEARCH_HOST=0.0.0.0
ELASTICSEARCH_PORT=9200
ELASTICSEARCH_USERNAME=elastic
ELASTICSEARCH_PASSWORD=elastic
diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml
index b1885edf33..0e85452f21 100644
--- a/docker/docker-compose-template.yaml
+++ b/docker/docker-compose-template.yaml
@@ -451,7 +451,7 @@ services:
milvus-standalone:
container_name: milvus-standalone
- image: milvusdb/milvus:v2.3.1
+ image: milvusdb/milvus:v2.5.0-beta
profiles:
- milvus
command: [ 'milvus', 'run', 'standalone' ]
@@ -535,20 +535,28 @@ services:
container_name: elasticsearch
profiles:
- elasticsearch
+ - elasticsearch-ja
restart: always
volumes:
+ - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
- dify_es01_data:/usr/share/elasticsearch/data
environment:
ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
+ VECTOR_STORE: ${VECTOR_STORE:-}
cluster.name: dify-es-cluster
node.name: dify-es0
discovery.type: single-node
- xpack.license.self_generated.type: trial
+ xpack.license.self_generated.type: basic
xpack.security.enabled: 'true'
xpack.security.enrollment.enabled: 'false'
xpack.security.http.ssl.enabled: 'false'
ports:
- ${ELASTICSEARCH_PORT:-9200}:9200
+ deploy:
+ resources:
+ limits:
+ memory: 2g
+ entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
healthcheck:
test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
interval: 30s
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 20c8117bd1..68d5097e49 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -474,6 +474,8 @@ services:
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
+ MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-}
+ MARKETPLACE_URL: ${MARKETPLACE_URL:-}
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
diff --git a/sdks/nodejs-client/babel.config.json b/sdks/nodejs-client/babel.config.json
deleted file mode 100644
index 0639bf7643..0000000000
--- a/sdks/nodejs-client/babel.config.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "presets": [
- "@babel/preset-env"
- ]
-}
\ No newline at end of file
diff --git a/web/.env.example b/web/.env.example
index 2decef02fa..b568bcfcc3 100644
--- a/web/.env.example
+++ b/web/.env.example
@@ -10,6 +10,10 @@ NEXT_PUBLIC_API_PREFIX=http://localhost:5001/console/api
# console or api domain.
# example: http://udify.app/api
NEXT_PUBLIC_PUBLIC_API_PREFIX=http://localhost:5001/api
+# The API PREFIX for MARKETPLACE
+NEXT_PUBLIC_MARKETPLACE_API_PREFIX=http://localhost:5002/api
+# The URL for MARKETPLACE
+NEXT_PUBLIC_MARKETPLACE_URL_PREFIX=
# SENTRY
NEXT_PUBLIC_SENTRY_DSN=
@@ -26,5 +30,7 @@ NEXT_PUBLIC_TEXT_GENERATION_TIMEOUT_MS=60000
# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
NEXT_PUBLIC_CSP_WHITELIST=
+# GitHub Access Token, used for invoking the GitHub API
+NEXT_PUBLIC_GITHUB_ACCESS_TOKEN=
# The maximum number of top-k value for RAG.
NEXT_PUBLIC_TOP_K_MAX_VALUE=10
diff --git a/web/.eslintignore b/web/.eslintignore
deleted file mode 100644
index 8a8bc38d80..0000000000
--- a/web/.eslintignore
+++ /dev/null
@@ -1,7 +0,0 @@
-/**/node_modules/*
-node_modules/
-
-dist/
-build/
-out/
-.next/
\ No newline at end of file
diff --git a/web/.eslintrc.json b/web/.eslintrc.json
deleted file mode 100644
index 18b6bc6016..0000000000
--- a/web/.eslintrc.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
- "extends": [
- "next",
- "@antfu",
- "plugin:storybook/recommended"
- ],
- "rules": {
- "@typescript-eslint/consistent-type-definitions": [
- "error",
- "type"
- ],
- "@typescript-eslint/no-var-requires": "off",
- "no-console": "off",
- "indent": "off",
- "@typescript-eslint/indent": [
- "error",
- 2,
- {
- "SwitchCase": 1,
- "flatTernaryExpressions": false,
- "ignoredNodes": [
- "PropertyDefinition[decorators]",
- "TSUnionType",
- "FunctionExpression[params]:has(Identifier[decorators])"
- ]
- }
- ],
- "react-hooks/exhaustive-deps": "warn",
- "react/display-name": "warn"
- }
-}
diff --git a/web/.gitignore b/web/.gitignore
index cb8fbe77ac..048c5f6485 100644
--- a/web/.gitignore
+++ b/web/.gitignore
@@ -44,12 +44,11 @@ package-lock.json
.pnp.cjs
.pnp.loader.mjs
.yarn/
-.yarnrc.yml
-
-# pmpm
-pnpm-lock.yaml
.favorites.json
+
+# storybook
+/storybook-static
*storybook.log
# mise
diff --git a/web/.husky/pre-commit b/web/.husky/pre-commit
index d9290e1853..cca8abe27a 100755
--- a/web/.husky/pre-commit
+++ b/web/.husky/pre-commit
@@ -1,6 +1,3 @@
-#!/usr/bin/env bash
-. "$(dirname -- "$0")/_/husky.sh"
-
# get the list of modified files
files=$(git diff --cached --name-only)
@@ -50,7 +47,7 @@ fi
if $web_modified; then
echo "Running ESLint on web module"
cd ./web || exit 1
- npx lint-staged
+ lint-staged
echo "Running unit tests check"
modified_files=$(git diff --cached --name-only -- utils | grep -v '\.spec\.ts$' || true)
@@ -63,7 +60,7 @@ if $web_modified; then
# check if the test file exists
if [ -f "../$test_file" ]; then
echo "Detected changes in $file, running corresponding unit tests..."
- npm run test "../$test_file"
+ pnpm run test "../$test_file"
if [ $? -ne 0 ]; then
echo "Unit tests failed. Please fix the errors before committing."
diff --git a/web/.storybook/main.ts b/web/.storybook/main.ts
index 74e95821de..fecf774e98 100644
--- a/web/.storybook/main.ts
+++ b/web/.storybook/main.ts
@@ -1,19 +1,19 @@
import type { StorybookConfig } from '@storybook/nextjs'
const config: StorybookConfig = {
- // stories: ['../stories/**/*.mdx', '../stories/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
- stories: ['../app/components/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
- addons: [
- '@storybook/addon-onboarding',
- '@storybook/addon-links',
- '@storybook/addon-essentials',
- '@chromatic-com/storybook',
- '@storybook/addon-interactions',
- ],
- framework: {
- name: '@storybook/nextjs',
- options: {},
- },
- staticDirs: ['../public'],
+ // stories: ['../stories/**/*.mdx', '../stories/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
+ stories: ['../app/components/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
+ addons: [
+ '@storybook/addon-onboarding',
+ '@storybook/addon-links',
+ '@storybook/addon-essentials',
+ '@chromatic-com/storybook',
+ '@storybook/addon-interactions',
+ ],
+ framework: {
+ name: '@storybook/nextjs',
+ options: {},
+ },
+ staticDirs: ['../public'],
}
export default config
diff --git a/web/.storybook/preview.tsx b/web/.storybook/preview.tsx
index 49cd24e974..55328602f9 100644
--- a/web/.storybook/preview.tsx
+++ b/web/.storybook/preview.tsx
@@ -1,6 +1,6 @@
import React from 'react'
import type { Preview } from '@storybook/react'
-import { withThemeByDataAttribute } from '@storybook/addon-themes';
+import { withThemeByDataAttribute } from '@storybook/addon-themes'
import I18nServer from '../app/components/i18n-server'
import '../app/styles/globals.css'
@@ -8,30 +8,30 @@ import '../app/styles/markdown.scss'
import './storybook.css'
export const decorators = [
- withThemeByDataAttribute({
- themes: {
- light: 'light',
- dark: 'dark',
- },
- defaultTheme: 'light',
- attributeName: 'data-theme',
- }),
- Story => {
- return
{t(`${prefixCustomize}.way1.name`)}
+{t(`${prefixCustomize}.way1.name`)}
+{t(`${prefixCustomize}.way1.step3`)}+{t(`${prefixCustomize}.way1.step3Tip`)}+NEXT_PUBLIC_APP_ID={`'${appId}'`}
NEXT_PUBLIC_APP_KEY={'\'\''}
NEXT_PUBLIC_API_URL={`'${api_base_url}'`} @@ -94,9 +94,9 @@ const CustomizeModal: FC= ({
{t(`${prefixCustomize}.way2.name`)}
+{t(`${prefixCustomize}.way2.name`)}