diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index fbe533cad..9d43c8161 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -22,7 +22,7 @@ import {
preProcessImageContent,
uploadImage,
base64Image2Blob,
- stream,
+ streamWithThink,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
@@ -294,7 +294,7 @@ export class ChatGPTApi implements LLMApi {
useChatStore.getState().currentSession().mask?.plugin || [],
);
// console.log("getAsTools", tools, funcs);
- stream(
+ streamWithThink(
chatPath,
requestPayload,
getHeaders(),
@@ -309,8 +309,12 @@ export class ChatGPTApi implements LLMApi {
delta: {
content: string;
tool_calls: ChatMessageTool[];
+ reasoning_content: string | null;
};
}>;
+
+ if (!choices?.length) return { isThinking: false, content: "" };
+
const tool_calls = choices[0]?.delta?.tool_calls;
if (tool_calls?.length > 0) {
const id = tool_calls[0]?.id;
@@ -330,7 +334,37 @@ export class ChatGPTApi implements LLMApi {
runTools[index]["function"]["arguments"] += args;
}
}
- return choices[0]?.delta?.content;
+
+ const reasoning = choices[0]?.delta?.reasoning_content;
+ const content = choices[0]?.delta?.content;
+
+ // Skip if both content and reasoning_content are empty or null
+ if (
+ (!reasoning || reasoning.trim().length === 0) &&
+ (!content || content.trim().length === 0)
+ ) {
+ return {
+ isThinking: false,
+ content: "",
+ };
+ }
+
+ if (reasoning && reasoning.trim().length > 0) {
+ return {
+ isThinking: true,
+ content: reasoning,
+ };
+ } else if (content && content.trim().length > 0) {
+ return {
+ isThinking: false,
+ content: content,
+ };
+ }
+
+ return {
+ isThinking: false,
+ content: "",
+ };
},
// processToolMessage, include tool_calls message and tool call results
(
diff --git a/app/utils/chat.ts b/app/utils/chat.ts
index b77955e6e..efc496f2c 100644
--- a/app/utils/chat.ts
+++ b/app/utils/chat.ts
@@ -400,6 +400,7 @@ export function streamWithThink(
let responseRes: Response;
let isInThinkingMode = false;
let lastIsThinking = false;
+  let lastIsThinkingTagged = false; // between <think> and </think> tags
// animate response to make it looks smooth
function animateResponseText() {
@@ -579,6 +580,23 @@ export function streamWithThink(
if (!chunk?.content || chunk.content.length === 0) {
return;
}
+
+          // deal with <think> and </think> tags start
+          if (!chunk.isThinking) {
+            if (chunk.content.startsWith("<think>")) {
+              chunk.isThinking = true;
+              chunk.content = chunk.content.slice(7).trim();
+              lastIsThinkingTagged = true;
+            } else if (chunk.content.endsWith("</think>")) {
+              chunk.isThinking = false;
+              chunk.content = chunk.content.slice(0, -8).trim();
+              lastIsThinkingTagged = false;
+            } else if (lastIsThinkingTagged) {
+              chunk.isThinking = true;
+            }
+          }
+          // deal with <think> and </think> tags end
+
// Check if thinking mode changed
const isThinkingChanged = lastIsThinking !== chunk.isThinking;
lastIsThinking = chunk.isThinking;