Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git (synced 2025-08-14 04:15:52 +08:00)
fix: fix vertex gemini 2.0 flash 001 schema (#18405)
Co-authored-by: achmad-kautsar <achmad.kautsar@insignia.co.id>
parent 161ff432f1
commit b26e20fe34
@@ -5,11 +5,6 @@ model_type: llm
 features:
   - agent-thought
   - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
 model_properties:
   mode: chat
   context_size: 1048576
@@ -20,20 +15,21 @@ parameter_rules:
     use_template: top_p
   - name: top_k
     label:
-      zh_Hans: 取样数量
       en_US: Top k
     type: int
     help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
       en_US: Only sample from the top K options for each subsequent token.
     required: false
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
   - name: max_output_tokens
     use_template: max_tokens
+    required: true
     default: 8192
     min: 1
     max: 8192
-  - name: json_schema
-    use_template: json_schema
 pricing:
   input: '0.00'
   output: '0.00'
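Taken together, the YAML hunks above narrow the published schema for this model: the tool-call, stream-tool-call, document, video, and audio feature flags are removed, the json_schema parameter rule is dropped, presence_penalty and frequency_penalty rules are added, and max_output_tokens becomes required with a default of 8192 (min 1, max 8192). As a rough caller-side illustration of the resulting parameter set only, not Dify's actual types, a TypeScript sketch might look like this (ModelCompletionParams and its fields are assumptions):

// Hypothetical mirror of the updated parameter_rules; illustration only, not Dify code.
interface ModelCompletionParams {
  top_p?: number
  top_k?: number              // int; "Only sample from the top K options for each subsequent token."
  presence_penalty?: number   // newly exposed by this change
  frequency_penalty?: number  // newly exposed by this change
  max_output_tokens: number   // now required; default 8192, min 1, max 8192
}

// Defaults consistent with the schema above.
const defaultParams: ModelCompletionParams = {
  max_output_tokens: 8192,
}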
@@ -1,6 +1,8 @@
 import { UUID_NIL } from './constants'
 import type { IChatItem } from './chat/type'
 import type { ChatItem, ChatItemInTree } from './types'
+import { addFileInfos, sortAgentSorts } from '../../tools/utils'
+import { getProcessedFilesFromResponse } from '../file-uploader/utils'
 
 async function decodeBase64AndDecompress(base64String: string) {
   const binaryString = atob(base64String)
@@ -19,6 +21,60 @@ function getProcessedInputsFromUrlParams(): Record<string, any> {
   return inputs
 }
 
+function appendQAToChatList(chatList: ChatItem[], item: any) {
+  // we append answer first and then question since will reverse the whole chatList later
+  const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || []
+  chatList.push({
+    id: item.id,
+    content: item.answer,
+    agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files),
+    feedback: item.feedback,
+    isAnswer: true,
+    citation: item.retriever_resources,
+    message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))),
+  })
+  const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || []
+  chatList.push({
+    id: `question-${item.id}`,
+    content: item.query,
+    isAnswer: false,
+    message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))),
+  })
+}
+
+/**
+ * Computes the latest thread messages from all messages of the conversation.
+ * Same logic as backend codebase `api/core/prompt/utils/extract_thread_messages.py`
+ *
+ * @param fetchedMessages - The history chat list data from the backend, sorted by created_at in descending order. This includes all flattened history messages of the conversation.
+ * @returns An array of ChatItems representing the latest thread.
+ */
+function getPrevChatList(fetchedMessages: any[]) {
+  const ret: ChatItem[] = []
+  let nextMessageId = null
+
+  for (const item of fetchedMessages) {
+    if (!item.parent_message_id) {
+      appendQAToChatList(ret, item)
+      break
+    }
+
+    if (!nextMessageId) {
+      appendQAToChatList(ret, item)
+      nextMessageId = item.parent_message_id
+    }
+    else {
+      if (item.id === nextMessageId || nextMessageId === UUID_NIL) {
+        appendQAToChatList(ret, item)
+        nextMessageId = item.parent_message_id
+      }
+    }
+  }
+  return ret.reverse()
+}
+
 function isValidGeneratedAnswer(item?: ChatItem | ChatItemInTree): boolean {
   return !!item && item.isAnswer && !item.id.startsWith('answer-placeholder-') && !item.isOpeningStatement
 }
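The TypeScript hunks above add a previous-chat-list builder to the web app's chat utilities: appendQAToChatList splits one backend message record into an answer ChatItem and a question ChatItem (answer first, because the whole list is reversed at the end), and getPrevChatList walks the newest-first fetchedMessages array, following parent_message_id links so that only the latest thread is kept, with a UUID_NIL parent treated as legacy data that has no threading. As a minimal, self-contained sketch of that traversal, here is the same walk over simplified message objects; SimpleMessage, extractLatestThread, and the sample data are hypothetical stand-ins, not Dify code:

// Sketch of the thread-walking logic added above, on assumed message shapes.
const UUID_NIL = '00000000-0000-0000-0000-000000000000'

interface SimpleMessage {
  id: string
  parent_message_id: string | null
}

function extractLatestThread(messages: SimpleMessage[]): SimpleMessage[] {
  const thread: SimpleMessage[] = []
  let nextMessageId: string | null = null

  for (const item of messages) {
    // A message with no parent is the conversation root: take it and stop.
    if (!item.parent_message_id) {
      thread.push(item)
      break
    }
    // The first (newest) message is always part of the latest thread.
    if (!nextMessageId) {
      thread.push(item)
      nextMessageId = item.parent_message_id
    }
    // Otherwise keep only the message the previous one points to;
    // a UUID_NIL pointer means legacy data without threading, so keep everything.
    else if (item.id === nextMessageId || nextMessageId === UUID_NIL) {
      thread.push(item)
      nextMessageId = item.parent_message_id
    }
  }
  return thread.reverse() // oldest first, ready for rendering
}

// Example: messages 3 and 4 share parent 2; only the newer branch (4) is kept.
const fetched: SimpleMessage[] = [
  { id: '4', parent_message_id: '2' }, // newest
  { id: '3', parent_message_id: '2' }, // branch not on the latest thread
  { id: '2', parent_message_id: '1' },
  { id: '1', parent_message_id: null },
]
console.log(extractLatestThread(fetched).map(m => m.id)) // ['1', '2', '4']

The real getPrevChatList additionally turns each kept record into a question/answer pair with files, agent thoughts, feedback, and citations attached; the final hunk below just adds it to the module's exports.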
@@ -164,6 +220,7 @@ function getThreadMessages(tree: ChatItemInTree[], targetMessageId?: string): Ch
 export {
   getProcessedInputsFromUrlParams,
   isValidGeneratedAnswer,
+  getPrevChatList,
   getLastAnswer,
   buildChatItemTree,
   getThreadMessages,