diff --git a/apps/api/src/controllers/v1/extract.ts b/apps/api/src/controllers/v1/extract.ts index c0e06a2d..a6f50fae 100644 --- a/apps/api/src/controllers/v1/extract.ts +++ b/apps/api/src/controllers/v1/extract.ts @@ -263,7 +263,7 @@ export async function extractController( { mode: "llm", systemPrompt: - "Always prioritize using the provided content to answer the question. Do not make up an answer. Be concise and follow the schema if provided. Here are the urls the user provided of which he wants to extract information from: " + + "Always prioritize using the provided content to answer the question. Do not make up an answer. Be concise and always follow the schema if provided. Here are the urls the user provided of which he wants to extract information from: " + links.join(", "), prompt: req.body.prompt, schema: req.body.schema, diff --git a/apps/api/src/lib/LLM-extraction/index.ts b/apps/api/src/lib/LLM-extraction/index.ts index 22e2bd04..3a98ffc9 100644 --- a/apps/api/src/lib/LLM-extraction/index.ts +++ b/apps/api/src/lib/LLM-extraction/index.ts @@ -67,12 +67,11 @@ export async function generateCompletions( export async function generateBasicCompletion(prompt: string) { const openai = new OpenAI(); - const model = process.env.MODEL_NAME ?? "gpt-4o-mini"; + const model = "gpt-4o"; const completion = await openai.chat.completions.create({ model, messages: [{ role: "user", content: prompt }], }); - return completion.choices[0].message.content; }