Merge branch 'fix/structured-output-error-variable' into deploy/dev

This commit is contained in:
Novice 2025-05-06 12:45:23 +08:00
commit 6c787ff4b2
29 changed files with 2287 additions and 2177 deletions

View File

@ -88,3 +88,6 @@ jobs:
- name: Run Workflow
run: uv run --project api bash dev/pytest/pytest_workflow.sh
- name: Run Tool
run: uv run --project api bash dev/pytest/pytest_tools.sh

View File

@ -90,3 +90,4 @@
```bash
uv run -P api bash dev/pytest/pytest_all_tests.sh
```

View File

@ -398,6 +398,11 @@ class InnerAPIConfig(BaseSettings):
default=False,
)
INNER_API_KEY: Optional[str] = Field(
description="API key for accessing the internal API",
default=None,
)
class LoggingConfig(BaseSettings):
"""

View File

@ -1,3 +1,5 @@
from urllib.parse import quote
from flask import Response
from flask_restful import Resource, reqparse # type: ignore
from werkzeug.exceptions import Forbidden, NotFound
@ -46,7 +48,8 @@ class ToolFilePreviewApi(Resource):
if tool_file.size > 0:
response.headers["Content-Length"] = str(tool_file.size)
if args["as_attachment"]:
response.headers["Content-Disposition"] = f"attachment; filename={tool_file.name}"
encoded_filename = quote(tool_file.name)
response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}"
return response

View File

@ -18,7 +18,7 @@ def enterprise_inner_api_only(view):
# get header 'X-Inner-Api-Key'
inner_api_key = request.headers.get("X-Inner-Api-Key")
if not inner_api_key or inner_api_key != dify_config.INNER_API_KEY_FOR_PLUGIN:
if not inner_api_key or inner_api_key != dify_config.INNER_API_KEY:
abort(401)
return view(*args, **kwargs)

View File

@ -7,7 +7,7 @@ from collections.abc import Generator, Mapping
from typing import Any, Optional, Union, cast
from flask import Flask, current_app
from sqlalchemy import Integer, and_, or_, text
from sqlalchemy import Float, and_, or_, text
from sqlalchemy import cast as sqlalchemy_cast
from core.app.app_config.entities import (
@ -1005,28 +1005,24 @@ class DatasetRetrieval:
if isinstance(value, str):
filters.append(DatasetDocument.doc_metadata[metadata_name] == f'"{value}"')
else:
filters.append(
sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Integer) == value
)
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Float) == value)
case "is not" | "≠":
if isinstance(value, str):
filters.append(DatasetDocument.doc_metadata[metadata_name] != f'"{value}"')
else:
filters.append(
sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Integer) != value
)
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Float) != value)
case "empty":
filters.append(DatasetDocument.doc_metadata[metadata_name].is_(None))
case "not empty":
filters.append(DatasetDocument.doc_metadata[metadata_name].isnot(None))
case "before" | "<":
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Integer) < value)
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Float) < value)
case "after" | ">":
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Integer) > value)
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Float) > value)
case "≤" | "<=":
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Integer) <= value)
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Float) <= value)
case "≥" | ">=":
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Integer) >= value)
filters.append(sqlalchemy_cast(DatasetDocument.doc_metadata[metadata_name].astext, Float) >= value)
case _:
pass
return filters

View File

@ -2,7 +2,7 @@ METADATA_FILTER_SYSTEM_PROMPT = """
### Job Description',
You are a text metadata extract engine that extract text's metadata based on user input and set the metadata value
### Task
Your task is to ONLY extract the metadatas that exist in the input text from the provided metadata list and Use the following operators ["=", "!=", ">", "<", ">=", "<="] to express logical relationships, then return result in JSON format with the key "metadata_fields" and value "metadata_field_value" and comparison operator "comparison_operator".
Your task is to ONLY extract the metadatas that exist in the input text from the provided metadata list and Use the following operators ["contains", "not contains", "start with", "end with", "is", "is not", "empty", "not empty", "=", "≠", ">", "<", "≥", "≤", "before", "after"] to express logical relationships, then return result in JSON format with the key "metadata_fields" and value "metadata_field_value" and comparison operator "comparison_operator".
### Format
The input text is in the variable input_text. Metadata are specified as a list in the variable metadata_fields.
### Constraint

View File

@ -4,6 +4,7 @@ import hmac
import logging
import os
import time
from collections.abc import Generator
from mimetypes import guess_extension, guess_type
from typing import Optional, Union
from uuid import uuid4
@ -215,7 +216,7 @@ class ToolFileManager:
return blob, tool_file.mimetype
def get_file_generator_by_tool_file_id(self, tool_file_id: str):
def get_file_generator_by_tool_file_id(self, tool_file_id: str) -> tuple[Optional[Generator], Optional[ToolFile]]:
"""
get file binary

View File

@ -60,7 +60,7 @@ class ToolFileMessageTransformer:
mimetype = meta.get("mime_type", "application/octet-stream")
# get filename from meta
filename = meta.get("file_name", None)
filename = meta.get("filename", None)
# if message is str, encode it to bytes
if not isinstance(message.message, ToolInvokeMessage.BlobMessage):

View File

@ -262,7 +262,10 @@ class Executor:
headers[authorization.config.header] = f"Bearer {authorization.config.api_key}"
elif self.auth.config.type == "basic":
credentials = authorization.config.api_key
encoded_credentials = base64.b64encode(credentials.encode("utf-8")).decode("utf-8")
if ":" in credentials:
encoded_credentials = base64.b64encode(credentials.encode("utf-8")).decode("utf-8")
else:
encoded_credentials = credentials
headers[authorization.config.header] = f"Basic {encoded_credentials}"
elif self.auth.config.type == "custom":
headers[authorization.config.header] = authorization.config.api_key or ""

View File

@ -6,7 +6,7 @@ from collections import defaultdict
from collections.abc import Mapping, Sequence
from typing import Any, Optional, cast
from sqlalchemy import Integer, and_, func, or_, text
from sqlalchemy import Float, and_, func, or_, text
from sqlalchemy import cast as sqlalchemy_cast
from core.app.app_config.entities import DatasetRetrieveConfigEntity
@ -32,11 +32,11 @@ from core.workflow.nodes.knowledge_retrieval.template_prompts import (
METADATA_FILTER_COMPLETION_PROMPT,
METADATA_FILTER_SYSTEM_PROMPT,
METADATA_FILTER_USER_PROMPT_1,
METADATA_FILTER_USER_PROMPT_2,
METADATA_FILTER_USER_PROMPT_3,
)
from core.workflow.nodes.llm.entities import LLMNodeChatModelMessage, LLMNodeCompletionModelPromptTemplate
from core.workflow.nodes.llm.node import LLMNode
from core.workflow.nodes.question_classifier.template_prompts import QUESTION_CLASSIFIER_USER_PROMPT_2
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs.json_in_md_parser import parse_and_check_json_markdown
@ -493,24 +493,24 @@ class KnowledgeRetrievalNode(LLMNode):
if isinstance(value, str):
filters.append(Document.doc_metadata[metadata_name] == f'"{value}"')
else:
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Integer) == value)
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Float) == value)
case "is not" | "≠":
if isinstance(value, str):
filters.append(Document.doc_metadata[metadata_name] != f'"{value}"')
else:
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Integer) != value)
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Float) != value)
case "empty":
filters.append(Document.doc_metadata[metadata_name].is_(None))
case "not empty":
filters.append(Document.doc_metadata[metadata_name].isnot(None))
case "before" | "<":
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Integer) < value)
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Float) < value)
case "after" | ">":
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Integer) > value)
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Float) > value)
case "≤" | "<=":
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Integer) <= value)
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Float) <= value)
case "≥" | ">=":
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Integer) >= value)
filters.append(sqlalchemy_cast(Document.doc_metadata[metadata_name].astext, Float) >= value)
case _:
pass
return filters
@ -618,7 +618,7 @@ class KnowledgeRetrievalNode(LLMNode):
)
prompt_messages.append(assistant_prompt_message_1)
user_prompt_message_2 = LLMNodeChatModelMessage(
role=PromptMessageRole.USER, text=QUESTION_CLASSIFIER_USER_PROMPT_2
role=PromptMessageRole.USER, text=METADATA_FILTER_USER_PROMPT_2
)
prompt_messages.append(user_prompt_message_2)
assistant_prompt_message_2 = LLMNodeChatModelMessage(

View File

@ -2,7 +2,7 @@ METADATA_FILTER_SYSTEM_PROMPT = """
### Job Description',
You are a text metadata extract engine that extract text's metadata based on user input and set the metadata value
### Task
Your task is to ONLY extract the metadatas that exist in the input text from the provided metadata list and Use the following operators ["=", "!=", ">", "<", ">=", "<="] to express logical relationships, then return result in JSON format with the key "metadata_fields" and value "metadata_field_value" and comparison operator "comparison_operator".
Your task is to ONLY extract the metadatas that exist in the input text from the provided metadata list and Use the following operators ["contains", "not contains", "start with", "end with", "is", "is not", "empty", "not empty", "=", "≠", ">", "<", "≥", "≤", "before", "after"] to express logical relationships, then return result in JSON format with the key "metadata_fields" and value "metadata_field_value" and comparison operator "comparison_operator".
### Format
The input text is in the variable input_text. Metadata are specified as a list in the variable metadata_fields.
### Constraint

View File

@ -149,7 +149,7 @@ class LLMNode(BaseNode[LLMNodeData]):
self._llm_file_saver = llm_file_saver
def _run(self) -> Generator[NodeEvent | InNodeEvent, None, None]:
def process_structured_output(text: str) -> Optional[dict[str, Any] | list[Any]]:
def process_structured_output(text: str) -> Optional[dict[str, Any]]:
"""Process structured output if enabled"""
if not self.node_data.structured_output_enabled or not self.node_data.structured_output:
return None
@ -797,18 +797,22 @@ class LLMNode(BaseNode[LLMNodeData]):
stop = model_config.stop
return filtered_prompt_messages, stop
def _parse_structured_output(self, result_text: str) -> dict[str, Any] | list[Any]:
structured_output: dict[str, Any] | list[Any] = {}
def _parse_structured_output(self, result_text: str) -> dict[str, Any]:
structured_output: dict[str, Any] = {}
try:
parsed = json.loads(result_text)
if not isinstance(parsed, (dict | list)):
if not isinstance(parsed, dict):
raise LLMNodeError(f"Failed to parse structured output: {result_text}")
structured_output = parsed
except json.JSONDecodeError as e:
# if the result_text is not a valid json, try to repair it
parsed = json_repair.loads(result_text)
if not isinstance(parsed, (dict | list)):
raise LLMNodeError(f"Failed to parse structured output: {result_text}")
if not isinstance(parsed, dict):
# handle reasoning model like deepseek-r1 got '<think>\n\n</think>\n' prefix
if isinstance(parsed, list):
parsed = next((item for item in parsed if isinstance(item, dict)), {})
else:
raise LLMNodeError(f"Failed to parse structured output: {result_text}")
structured_output = parsed
return structured_output

View File

@ -10,7 +10,7 @@ dependencies = [
"boto3==1.35.99",
"bs4~=0.0.1",
"cachetools~=5.3.0",
"celery~=5.4.0",
"celery~=5.5.2",
"chardet~=5.1.0",
"flask~=3.1.0",
"flask-compress~=1.17",
@ -120,6 +120,7 @@ dev = [
"types-defusedxml~=0.7.0",
"types-deprecated~=1.2.15",
"types-docutils~=0.21.0",
"types-jsonschema~=4.23.0",
"types-flask-cors~=5.0.0",
"types-flask-migrate~=4.1.0",
"types-gevent~=24.11.0",

View File

@ -5,6 +5,8 @@ import httpx
import pytest
from _pytest.monkeypatch import MonkeyPatch
from core.helper import ssrf_proxy
class MockedHttp:
@staticmethod
@ -29,6 +31,6 @@ class MockedHttp:
@pytest.fixture
def setup_http_mock(request, monkeypatch: MonkeyPatch):
monkeypatch.setattr(httpx, "request", MockedHttp.httpx_request)
monkeypatch.setattr(ssrf_proxy, "make_request", MockedHttp.httpx_request)
yield
monkeypatch.undo()

View File

@ -34,10 +34,11 @@ parameters = {
def test_api_tool(setup_http_mock):
tool = ApiTool(
entity=ToolEntity(
identity=ToolIdentity(provider="", author="", name="", label=I18nObject()),
identity=ToolIdentity(provider="", author="", name="", label=I18nObject(en_US="test tool")),
),
api_bundle=ApiToolBundle(**tool_bundle),
runtime=ToolRuntime(tenant_id="", credentials={"auth_type": "none"}),
provider_id="test_tool",
)
headers = tool.assembling_request(parameters)
response = tool.do_http_request(tool.api_bundle.server_url, tool.api_bundle.method, headers, parameters)

View File

@ -185,3 +185,38 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_model_mock):
assert item.run_result.process_data is not None
assert "sunny" in json.dumps(item.run_result.process_data)
assert "what's the weather today?" in json.dumps(item.run_result.process_data)
def test_extract_json():
node = init_llm_node(
config={
"id": "llm",
"data": {
"title": "123",
"type": "llm",
"model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
"prompt_config": {
"structured_output": {
"enabled": True,
"schema": {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "number"}},
},
}
},
"prompt_template": [{"role": "user", "text": "{{#sys.query#}}"}],
"memory": None,
"context": {"enabled": False},
"vision": {"enabled": False},
},
},
)
llm_texts = [
'<think>\n\n</think>{"name": "test", "age": 123', # reasoning model (deepseek-r1)
'{"name":"test","age":123}', # json schema model (gpt-4o)
'{\n "name": "test",\n "age": 123\n}', # small model (llama-3.2-1b)
'```json\n{"name": "test", "age": 123}\n```', # json markdown (deepseek-chat)
'{"name":"test",age:123}', # without quotes (qwen-2.5-0.5b)
]
result = {"name": "test", "age": 123}
assert all(node._parse_structured_output(item) == result for item in llm_texts)

View File

@ -100,3 +100,9 @@ def test_flask_configs(example_env_file):
assert str(config["CODE_EXECUTION_ENDPOINT"]) == "http://sandbox:8194/"
assert str(URL(str(config["CODE_EXECUTION_ENDPOINT"])) / "v1") == "http://sandbox:8194/v1"
def test_inner_api_config_exist():
config = DifyConfig()
assert config.INNER_API is False
assert config.INNER_API_KEY is None

4245
api/uv.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -3,5 +3,5 @@
set -x
# run mypy checks
uv run --directory api --dev \
python -m mypy --install-types --non-interactive .
uv run --directory api --dev --with pip \
python -m mypy --install-types --non-interactive --cache-fine-grained --sqlite-cache .

View File

@ -1,4 +1,4 @@
#!/bin/bash
set -x
pytest api/tests/integration_tests/tools/test_all_provider.py
pytest api/tests/integration_tests/tools

View File

@ -76,7 +76,7 @@ const AppPublisher = ({
const appDetail = useAppStore(state => state.appDetail)
const { app_base_url: appBaseURL = '', access_token: accessToken = '' } = appDetail?.site ?? {}
const appMode = (appDetail?.mode !== 'completion' && appDetail?.mode !== 'workflow') ? 'chat' : appDetail.mode
const appURL = `${appBaseURL}/${basePath}/${appMode}/${accessToken}`
const appURL = `${appBaseURL}${basePath}/${appMode}/${accessToken}`
const isChatApp = ['chat', 'agent-chat', 'completion'].includes(appDetail?.mode || '')
const language = useGetLanguage()

View File

@ -32,6 +32,7 @@ const FileImageItem = ({
}: FileImageItemProps) => {
const { id, progress, base64Url, url, name } = file
const [imagePreviewUrl, setImagePreviewUrl] = useState('')
const download_url = url ? `${url}&as_attachment=true` : base64Url
return (
<>
@ -84,7 +85,7 @@ const FileImageItem = ({
className='absolute bottom-0.5 right-0.5 flex h-6 w-6 items-center justify-center rounded-lg bg-components-actionbar-bg shadow-md'
onClick={(e) => {
e.stopPropagation()
downloadFile(url || base64Url || '', name)
downloadFile(download_url || '', name)
}}
>
<RiDownloadLine className='h-4 w-4 text-text-tertiary' />

View File

@ -45,6 +45,7 @@ const FileItem = ({
let tmp_preview_url = url || base64Url
if (!tmp_preview_url && file?.originalFile)
tmp_preview_url = URL.createObjectURL(file.originalFile.slice()).toString()
const download_url = url ? `${url}&as_attachment=true` : base64Url
return (
<>
@ -93,13 +94,13 @@ const FileItem = ({
}
</div>
{
showDownloadAction && tmp_preview_url && (
showDownloadAction && download_url && (
<ActionButton
size='m'
className='absolute -right-1 -top-1 hidden group-hover/file-item:flex'
onClick={(e) => {
e.stopPropagation()
downloadFile(tmp_preview_url || '', name)
downloadFile(download_url || '', name)
}}
>
<RiDownloadLine className='h-3.5 w-3.5 text-text-tertiary' />

View File

@ -120,10 +120,8 @@ const ComponentPicker = ({
}, [editor, checkForTriggerMatch, triggerString])
const handleClose = useCallback(() => {
ReactDOM.flushSync(() => {
const escapeEvent = new KeyboardEvent('keydown', { key: 'Escape' })
editor.dispatchCommand(KEY_ESCAPE_COMMAND, escapeEvent)
})
const escapeEvent = new KeyboardEvent('keydown', { key: 'Escape' })
editor.dispatchCommand(KEY_ESCAPE_COMMAND, escapeEvent)
}, [editor])
const renderMenu = useCallback<MenuRenderFn<PickerBlockMenuOption>>((
@ -132,7 +130,11 @@ const ComponentPicker = ({
) => {
if (!(anchorElementRef.current && (allFlattenOptions.length || workflowVariableBlock?.show)))
return null
refs.setReference(anchorElementRef.current)
setTimeout(() => {
if (anchorElementRef.current)
refs.setReference(anchorElementRef.current)
}, 0)
return (
<>
@ -149,7 +151,6 @@ const ComponentPicker = ({
visibility: isPositioned ? 'visible' : 'hidden',
}}
ref={refs.setFloating}
data-testid="component-picker-container"
>
{
workflowVariableBlock?.show && (
@ -173,7 +174,7 @@ const ComponentPicker = ({
<div className='my-1 h-px w-full -translate-x-1 bg-divider-subtle'></div>
)
}
<div data-testid="options-list">
<div>
{
options.map((option, index) => (
<Fragment key={option.key}>

View File

@ -1,6 +1,8 @@
'use client'
import type { FC } from 'react'
import React from 'react'
import { useContext } from 'use-context-selector'
import I18n from '@/context/i18n'
import {
RiArrowRightUpLine,
RiBugLine,
@ -9,12 +11,14 @@ import { useTranslation } from 'react-i18next'
import KeyValueItem from '../base/key-value-item'
import Tooltip from '@/app/components/base/tooltip'
import Button from '@/app/components/base/button'
import { getDocsUrl } from '@/app/components/plugins/utils'
import { useDebugKey } from '@/service/use-plugins'
const i18nPrefix = 'plugin.debugInfo'
const DebugInfo: FC = () => {
const { t } = useTranslation()
const { locale } = useContext(I18n)
const { data: info, isLoading } = useDebugKey()
// info.key likes 4580bdb7-b878-471c-a8a4-bfd760263a53 mask the middle part using *.
@ -30,7 +34,7 @@ const DebugInfo: FC = () => {
<>
<div className='flex items-center gap-1 self-stretch'>
<span className='system-sm-semibold flex shrink-0 grow basis-0 flex-col items-start justify-center text-text-secondary'>{t(`${i18nPrefix}.title`)}</span>
<a href='https://docs.dify.ai/plugins/quick-start/develop-plugins/debug-plugin' target='_blank' className='flex cursor-pointer items-center gap-0.5 text-text-accent-light-mode-only'>
<a href={getDocsUrl(locale, '/plugins/quick-start/debug-plugin')} target='_blank' className='flex cursor-pointer items-center gap-0.5 text-text-accent-light-mode-only'>
<span className='system-xs-medium'>{t(`${i18nPrefix}.viewDocs`)}</span>
<RiArrowRightUpLine className='h-3 w-3' />
</a>

View File

@ -34,10 +34,10 @@ import {
import type { Dependency } from '../types'
import type { PluginDeclaration, PluginManifestInMarket } from '../types'
import { sleep } from '@/utils'
import { getDocsUrl } from '@/app/components/plugins/utils'
import { fetchBundleInfoFromMarketPlace, fetchManifestFromMarketPlace } from '@/service/plugins'
import { marketplaceApiPrefix } from '@/config'
import { SUPPORT_INSTALL_LOCAL_FILE_EXTENSIONS } from '@/config'
import { LanguagesSupported } from '@/i18n/language'
import I18n from '@/context/i18n'
import { noop } from 'lodash-es'
import { PLUGIN_TYPE_SEARCH_MAP } from '../marketplace/plugin-type-switch'
@ -187,7 +187,7 @@ const PluginPage = ({
isExploringMarketplace && (
<>
<Link
href={`https://docs.dify.ai/${locale === LanguagesSupported[1] ? 'v/zh-hans/' : ''}plugins/publish-plugins/publish-to-dify-marketplace`}
href={getDocsUrl(locale, '/plugins/publish-plugins/publish-to-dify-marketplace/README')}
target='_blank'
>
<Button

View File

@ -1,3 +1,5 @@
import { LanguagesSupported } from '@/i18n/language'
import {
categoryKeys,
tagKeys,
@ -10,3 +12,15 @@ export const getValidTagKeys = (tags: string[]) => {
export const getValidCategoryKeys = (category?: string) => {
return categoryKeys.find(key => key === category)
}
export const getDocsUrl = (locale: string, path: string) => {
let localePath = 'en'
if (locale === LanguagesSupported[1])
localePath = 'zh-hans'
else if (locale === LanguagesSupported[7])
localePath = 'ja-jp'
return `https://docs.dify.ai/${localePath}${path}`
}

View File

@ -134,18 +134,33 @@ const findExceptVarInObject = (obj: any, filterVar: (payload: Var, selector: Val
const { children } = obj
const isStructuredOutput = !!(children as StructuredOutput)?.schema?.properties
let childrenResult: Var[] | StructuredOutput | undefined
if (isStructuredOutput) {
childrenResult = findExceptVarInStructuredOutput(children, filterVar)
}
else if (Array.isArray(children)) {
childrenResult = children.filter((item: Var) => {
const { children: itemChildren } = item
const currSelector = [...value_selector, item.variable]
if (!itemChildren)
return filterVar(item, currSelector)
const filteredObj = findExceptVarInObject(item, filterVar, currSelector, false) // File doesn't contain file children
return filteredObj.children && (filteredObj.children as Var[])?.length > 0
})
}
else {
childrenResult = []
}
const res: Var = {
variable: obj.variable,
type: isFile ? VarType.file : VarType.object,
children: isStructuredOutput ? findExceptVarInStructuredOutput(children, filterVar) : children.filter((item: Var) => {
const { children } = item
const currSelector = [...value_selector, item.variable]
if (!children)
return filterVar(item, currSelector)
const obj = findExceptVarInObject(item, filterVar, currSelector, false) // File doesn't contains file children
return obj.children && (obj.children as Var[])?.length > 0
}),
children: childrenResult,
}
return res
}