fix: typos and improve naming conventions (#8687)
commit bef83a4d2e
parent 8cc9e68363
@@ -652,7 +652,7 @@ where sites.id is null limit 1000"""
                 app_was_created.send(app, account=account)
             except Exception as e:
                 failed_app_ids.append(app_id)
-                click.echo(click.style("FFailed to fix missing site for app {}".format(app_id), fg="red"))
+                click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red"))
                 logging.exception(f"Fix app related site missing issue failed, error: {e}")
                 continue
 
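The corrected message also shows the error-reporting pattern this command uses: style the text, echo it, record the failure, and move on. A minimal self-contained sketch of that pattern (the app IDs and the failing operation are hypothetical placeholders):

    import click

    failed_app_ids = []
    for app_id in ["app-1", "app-2"]:  # hypothetical IDs for illustration
        try:
            raise RuntimeError("site missing")  # stand-in for the real fix-up work
        except Exception:
            failed_app_ids.append(app_id)
            # click.style wraps the text in ANSI color codes; click.echo prints it
            click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red"))
            continue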
@@ -57,7 +57,7 @@ class AsyncCompletions(BaseAPI):
         if temperature <= 0:
             do_sample = False
             temperature = 0.01
-            # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)") # noqa: E501
+            # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperature不生效)") # noqa: E501
         if temperature >= 1:
             temperature = 0.99
             # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间")
@@ -60,7 +60,7 @@ class Completions(BaseAPI):
         if temperature <= 0:
             do_sample = False
             temperature = 0.01
-            # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)") # noqa: E501
+            # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperature不生效)") # noqa: E501
         if temperature >= 1:
             temperature = 0.99
             # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间")
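Both the synchronous and asynchronous clients carry the same guard; the Chinese comments say that the valid range of `temperature` is the open interval (0.0, 1.0) and that `do_sample` is rewritten to false (so `top_p`/`temperature` take no effect). The typo fix only touches the comment, but the rule itself is easy to state. A minimal sketch, with names mirroring the diff:

    def clamp_sampling_params(temperature: float, do_sample: bool = True) -> tuple[float, bool]:
        """Clamp temperature into the open interval (0.0, 1.0), as both hunks above do."""
        if temperature <= 0:
            # sampling is disabled and temperature pinned just above zero
            do_sample = False
            temperature = 0.01
        if temperature >= 1:
            # pinned just below one
            temperature = 0.99
        return temperature, do_sample

    assert clamp_sampling_params(0.0) == (0.01, False)
    assert clamp_sampling_params(1.5) == (0.99, True)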
@@ -630,8 +630,7 @@ def validate_type(*, type_: type[_T], value: object) -> _T:
     return cast(_T, _validate_non_model_type(type_=type_, value=value))
 
 
-# our use of subclasssing here causes weirdness for type checkers,
-# so we just pretend that we don't subclass
+# Subclassing here confuses type checkers, so we treat this class as non-inheriting.
 if TYPE_CHECKING:
     GenericModel = BaseModel
 else:
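The rewritten comment describes a standard workaround: at type-check time `GenericModel` is presented as a plain alias of `BaseModel`, while at runtime it can really subclass it. A sketch of the pattern, assuming pydantic is available (the runtime body is a hypothetical stand-in):

    from typing import TYPE_CHECKING

    from pydantic import BaseModel

    if TYPE_CHECKING:
        # type checkers see a plain alias, sidestepping the subclassing weirdness
        GenericModel = BaseModel
    else:
        class GenericModel(BaseModel):
            # hypothetical runtime-only customizations would live here
            pass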
@@ -169,7 +169,7 @@ class BaseSyncPage(BasePage[_T], Generic[_T]):
     # Pydantic uses a custom `__iter__` method to support casting BaseModels
     # to dictionaries. e.g. dict(model).
     # As we want to support `for item in page`, this is inherently incompatible
-    # with the default pydantic behaviour. It is not possible to support both
+    # with the default pydantic behavior. It is not possible to support both
     # use cases at once. Fortunately, this is not a big deal as all other pydantic
     # methods should continue to work as expected as there is an alternative method
     # to cast a model to a dictionary, model.dict(), which is used internally
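The comment block records a genuine tradeoff: pydantic models implement `__iter__` so that `dict(model)` works, and a page class that overrides `__iter__` to support `for item in page` gives that up, keeping `model.dict()` as the fallback. A small illustration, where `Page` is a hypothetical model rather than the SDK's class:

    from pydantic import BaseModel

    class Item(BaseModel):
        id: int

    # default pydantic behavior: __iter__ yields (field, value) pairs, so dict() works
    print(dict(Item(id=1)))  # {'id': 1}

    class Page(BaseModel):
        items: list[Item] = []

        def __iter__(self):  # iterate items instead; dict(page) no longer works
            return iter(self.items)

    page = Page(items=[Item(id=1), Item(id=2)])
    print([item.id for item in page])  # [1, 2]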
@@ -356,16 +356,16 @@ class HttpClient:
             **kwargs,
         )
 
-    def _object_to_formfata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]:
+    def _object_to_formdata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]:
         items = []
 
         if isinstance(value, Mapping):
             for k, v in value.items():
-                items.extend(self._object_to_formfata(f"{key}[{k}]", v))
+                items.extend(self._object_to_formdata(f"{key}[{k}]", v))
             return items
         if isinstance(value, list | tuple):
             for v in value:
-                items.extend(self._object_to_formfata(key + "[]", v))
+                items.extend(self._object_to_formdata(key + "[]", v))
             return items
 
         def _primitive_value_to_str(val) -> str:
@@ -385,7 +385,7 @@ class HttpClient:
         return [(key, str_data)]
 
     def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
-        items = flatten(list(starmap(self._object_to_formfata, data.items())))
+        items = flatten(list(starmap(self._object_to_formdata, data.items())))
 
         serialized: dict[str, object] = {}
         for key, value in items:
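Taken together, the renamed helpers recursively flatten nested mappings and sequences into bracketed multipart form keys (`user[name]`, `tags[]`, ...), and `_make_multipartform` drives them with `starmap`. A self-contained sketch of that flattening, standing in for the SDK's private helpers and its `flatten` utility:

    from collections.abc import Mapping
    from itertools import chain, starmap

    def object_to_formdata(key: str, value: object) -> list[tuple[str, str]]:
        # mappings recurse with bracketed sub-keys: user -> user[name]
        if isinstance(value, Mapping):
            items = []
            for k, v in value.items():
                items.extend(object_to_formdata(f"{key}[{k}]", v))
            return items
        # lists and tuples recurse with an empty index: tags -> tags[]
        if isinstance(value, (list, tuple)):
            items = []
            for v in value:
                items.extend(object_to_formdata(key + "[]", v))
            return items
        return [(key, str(value))]

    def make_formdata(data: Mapping[str, object]) -> list[tuple[str, str]]:
        # starmap feeds each (key, value) pair to the flattener; chain merges the results
        return list(chain.from_iterable(starmap(object_to_formdata, data.items())))

    print(make_formdata({"user": {"name": "ada"}, "tags": ["a", "b"]}))
    # [('user[name]', 'ada'), ('tags[]', 'a'), ('tags[]', 'b')]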
@@ -620,7 +620,7 @@ class HttpClient:
         stream: bool,
         stream_cls: type[StreamResponse] | None,
     ) -> ResponseT:
-        # _legacy_response with raw_response_header to paser method
+        # _legacy_response with raw_response_header to parser method
         if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
             return cast(
                 ResponseT,
@@ -87,7 +87,7 @@ class LegacyAPIResponse(Generic[R]):
 
         For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
 
-        You can customise the type that the response is parsed into through
+        You can customize the type that the response is parsed into through
         the `to` argument, e.g.
 
         ```py
@@ -252,7 +252,7 @@ class APIResponse(BaseAPIResponse[R]):
 
         For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
 
-        You can customise the type that the response is parsed into through
+        You can customize the type that the response is parsed into through
         the `to` argument, e.g.
 
         ```py
@@ -363,7 +363,7 @@ class StreamAlreadyConsumed(ZhipuAIError): # noqa: N818
         # ^ error
     ```
 
-    If you want this behaviour you'll need to either manually accumulate the response
+    If you want this behavior you'll need to either manually accumulate the response
     content or call `await response.read()` before iterating over the stream.
     """
 
@@ -1,8 +1,8 @@
-from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessinfo
+from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessInfo
 
 __all__ = [
     "DocumentData",
     "DocumentObject",
-    "DocumentSuccessinfo",
+    "DocumentSuccessInfo",
     "DocumentFailedInfo",
 ]
@@ -2,10 +2,10 @@ from typing import Optional
 
 from ....core import BaseModel
 
-__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessinfo", "DocumentFailedInfo"]
+__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessInfo", "DocumentFailedInfo"]
 
 
-class DocumentSuccessinfo(BaseModel):
+class DocumentSuccessInfo(BaseModel):
     documentId: Optional[str] = None
     """文件id"""
     filename: Optional[str] = None
@@ -24,7 +24,7 @@ class DocumentFailedInfo(BaseModel):
 class DocumentObject(BaseModel):
     """文档信息"""
 
-    successInfos: Optional[list[DocumentSuccessinfo]] = None
+    successInfos: Optional[list[DocumentSuccessInfo]] = None
     """上传成功的文件信息"""
     failedInfos: Optional[list[DocumentFailedInfo]] = None
     """上传失败的文件信息"""
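After the rename, `DocumentSuccessInfo` follows CapWords and agrees across the import, `__all__`, and the `DocumentObject` field type; the Chinese docstrings gloss as "file ID", "document info", and "info for files that uploaded successfully / failed to upload". A minimal usage sketch, assuming plain pydantic models and illustrative field values:

    from typing import Optional

    from pydantic import BaseModel

    class DocumentSuccessInfo(BaseModel):
        documentId: Optional[str] = None  # file ID
        filename: Optional[str] = None

    class DocumentObject(BaseModel):
        """Document info: which uploads succeeded."""

        successInfos: Optional[list[DocumentSuccessInfo]] = None

    doc = DocumentObject(successInfos=[DocumentSuccessInfo(documentId="doc-1", filename="a.pdf")])
    print(doc.successInfos[0].filename)  # a.pdf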
@@ -22,8 +22,8 @@ class EndStreamProcessor(StreamProcessor):
         for end_node_id, _ in self.end_stream_param.end_stream_variable_selector_mapping.items():
             self.route_position[end_node_id] = 0
         self.current_stream_chunk_generating_node_ids: dict[str, list[str]] = {}
-        self.has_outputed = False
-        self.outputed_node_ids = set()
+        self.has_output = False
+        self.output_node_ids = set()
 
     def process(self, generator: Generator[GraphEngineEvent, None, None]) -> Generator[GraphEngineEvent, None, None]:
         for event in generator:
@@ -34,11 +34,11 @@ class EndStreamProcessor(StreamProcessor):
                 yield event
             elif isinstance(event, NodeRunStreamChunkEvent):
                 if event.in_iteration_id:
-                    if self.has_outputed and event.node_id not in self.outputed_node_ids:
+                    if self.has_output and event.node_id not in self.output_node_ids:
                         event.chunk_content = "\n" + event.chunk_content
 
-                    self.outputed_node_ids.add(event.node_id)
-                    self.has_outputed = True
+                    self.output_node_ids.add(event.node_id)
+                    self.has_output = True
                     yield event
                     continue
 
@@ -53,11 +53,11 @@ class EndStreamProcessor(StreamProcessor):
                     )
 
                     if stream_out_end_node_ids:
-                        if self.has_outputed and event.node_id not in self.outputed_node_ids:
+                        if self.has_output and event.node_id not in self.output_node_ids:
                             event.chunk_content = "\n" + event.chunk_content
 
-                        self.outputed_node_ids.add(event.node_id)
-                        self.has_outputed = True
+                        self.output_node_ids.add(event.node_id)
+                        self.has_output = True
                         yield event
             elif isinstance(event, NodeRunSucceededEvent):
                 yield event
@@ -124,11 +124,11 @@ class EndStreamProcessor(StreamProcessor):
 
                 if text:
                     current_node_id = value_selector[0]
-                    if self.has_outputed and current_node_id not in self.outputed_node_ids:
+                    if self.has_output and current_node_id not in self.output_node_ids:
                         text = "\n" + text
 
-                    self.outputed_node_ids.add(current_node_id)
-                    self.has_outputed = True
+                    self.output_node_ids.add(current_node_id)
+                    self.has_output = True
                     yield NodeRunStreamChunkEvent(
                         id=event.id,
                         node_id=event.node_id,
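The renamed check sites and the `__init__` fields all implement one separator rule: once anything has been streamed (`has_output`) and the next chunk comes from a node that has not emitted yet (`output_node_ids`), prepend a newline so the outputs of different nodes don't run together. The rule in isolation, as a sketch over hypothetical (node_id, chunk) pairs:

    has_output = False
    output_node_ids: set[str] = set()

    def separate(node_id: str, chunk: str) -> str:
        """Prepend a newline when a new node starts emitting after earlier output."""
        global has_output
        if has_output and node_id not in output_node_ids:
            chunk = "\n" + chunk
        output_node_ids.add(node_id)
        has_output = True
        return chunk

    print([separate(n, c) for n, c in [("a", "x"), ("a", "y"), ("b", "z")]])
    # ['x', 'y', '\nz']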
@@ -334,9 +334,9 @@ export const useChat = (
       const newChatList = produce(chatListRef.current, (draft) => {
         const index = draft.findIndex(item => item.id === responseItem.id)
         if (index !== -1) {
-          const requestion = draft[index - 1]
+          const question = draft[index - 1]
           draft[index - 1] = {
-            ...requestion,
+            ...question,
           }
           draft[index] = {
             ...draft[index],
@@ -88,7 +88,7 @@ const ImagePreview: FC<ImagePreviewProps> = ({
   })
 }
 
-const imageTobase64ToBlob = (base64: string, type = 'image/png'): Blob => {
+const imageBase64ToBlob = (base64: string, type = 'image/png'): Blob => {
   const byteCharacters = atob(base64)
   const byteArrays = []
 
@@ -109,7 +109,7 @@ const ImagePreview: FC<ImagePreviewProps> = ({
   const shareImage = async () => {
     try {
       const base64Data = url.split(',')[1]
-      const blob = imageTobase64ToBlob(base64Data, 'image/png')
+      const blob = imageBase64ToBlob(base64Data, 'image/png')
 
       await navigator.clipboard.write([
         new ClipboardItem({
@@ -424,7 +424,7 @@ Workflow applications offers non-session support and is ideal for translation, a
   />
   <Row>
     <Col>
-      Returns worklfow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order.
+      Returns workflow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order.
 
       ### Query
 
@@ -185,7 +185,7 @@ export const useWorkflowRun = () => {
       draft.forEach((edge) => {
         edge.data = {
           ...edge.data,
-          _runned: false,
+          _run: false,
         }
       })
     })
@@ -292,7 +292,7 @@ export const useWorkflowRun = () => {
       const newEdges = produce(edges, (draft) => {
         draft.forEach((edge) => {
           if (edge.target === data.node_id && incomeNodesId.includes(edge.source))
-            edge.data = { ...edge.data, _runned: true } as any
+            edge.data = { ...edge.data, _run: true } as any
         })
       })
       setEdges(newEdges)
@@ -416,7 +416,7 @@ export const useWorkflowRun = () => {
       const edge = draft.find(edge => edge.target === data.node_id && edge.source === prevNodeId)
 
       if (edge)
-        edge.data = { ...edge.data, _runned: true } as any
+        edge.data = { ...edge.data, _run: true } as any
       })
       setEdges(newEdges)
 
|