From a759a7ab7a3de7dd579658b27ed00d909fb78541 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Wed, 18 Dec 2024 21:45:06 -0300 Subject: [PATCH] Nick: strengthen extract system prompt to always follow the schema; pin generateBasicCompletion to gpt-4o --- apps/api/src/controllers/v1/extract.ts | 2 +- apps/api/src/lib/LLM-extraction/index.ts | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/apps/api/src/controllers/v1/extract.ts b/apps/api/src/controllers/v1/extract.ts index c0e06a2d..a6f50fae 100644 --- a/apps/api/src/controllers/v1/extract.ts +++ b/apps/api/src/controllers/v1/extract.ts @@ -263,7 +263,7 @@ export async function extractController( { mode: "llm", systemPrompt: - "Always prioritize using the provided content to answer the question. Do not make up an answer. Be concise and follow the schema if provided. Here are the urls the user provided of which he wants to extract information from: " + + "Always prioritize using the provided content to answer the question. Do not make up an answer. Be concise and follow the schema always if provided. Here are the urls the user provided of which he wants to extract information from: " + links.join(", "), prompt: req.body.prompt, schema: req.body.schema, diff --git a/apps/api/src/lib/LLM-extraction/index.ts b/apps/api/src/lib/LLM-extraction/index.ts index 22e2bd04..3a98ffc9 100644 --- a/apps/api/src/lib/LLM-extraction/index.ts +++ b/apps/api/src/lib/LLM-extraction/index.ts @@ -67,12 +67,11 @@ export async function generateCompletions( export async function generateBasicCompletion(prompt: string) { const openai = new OpenAI(); - const model = process.env.MODEL_NAME ?? "gpt-4o-mini"; + const model = "gpt-4o"; const completion = await openai.chat.completions.create({ model, messages: [{ role: "user", content: prompt }], }); - return completion.choices[0].message.content; }