diff --git a/apps/api/src/lib/generate-llmstxt/generate-llmstxt-service.ts b/apps/api/src/lib/generate-llmstxt/generate-llmstxt-service.ts
index 3528f48f..7f48872e 100644
--- a/apps/api/src/lib/generate-llmstxt/generate-llmstxt-service.ts
+++ b/apps/api/src/lib/generate-llmstxt/generate-llmstxt-service.ts
@@ -160,7 +160,7 @@ export async function performGenerateLlmsTxt(
     const { extract } = await generateCompletions({
       logger,
-      model: getModel("gpt-4o-mini"),
+      model: getModel("gpt-4o-mini", "openai"),
       options: {
         systemPrompt: "",
         mode: "llm",
diff --git a/apps/api/src/scraper/scrapeURL/transformers/llmExtract.ts b/apps/api/src/scraper/scrapeURL/transformers/llmExtract.ts
index 64edea23..b63b2f85 100644
--- a/apps/api/src/scraper/scrapeURL/transformers/llmExtract.ts
+++ b/apps/api/src/scraper/scrapeURL/transformers/llmExtract.ts
@@ -187,6 +187,7 @@ export function calculateCost(
 ) {
   const modelCosts = {
     "openai/o3-mini": { input_cost: 1.1, output_cost: 4.4 },
+    "gpt-4o-mini": { input_cost: 0.15, output_cost: 0.6 },
     "openai/gpt-4o-mini": { input_cost: 0.15, output_cost: 0.6 },
     "openai/gpt-4o": { input_cost: 2.5, output_cost: 10 },
     "google/gemini-2.0-flash-001": { input_cost: 0.15, output_cost: 0.6 },
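
For context on the second hunk: the rates in `modelCosts` appear to be USD per million tokens, and the new bare `"gpt-4o-mini"` key presumably covers lookups where the model id arrives without the `openai/` provider prefix (matching `getModel("gpt-4o-mini", "openai")` in the first hunk). Below is a minimal illustrative sketch of how such a table can be consumed; the `estimateCostUSD` helper, its signature, and the per-million-token assumption are mine, not Firecrawl's actual `calculateCost` implementation.

```ts
// Hypothetical sketch, not repository code. Rates assumed to be USD per
// 1M tokens, mirroring the modelCosts entries added in the diff above.
type ModelCost = { input_cost: number; output_cost: number };

const modelCosts: Record<string, ModelCost> = {
  "gpt-4o-mini": { input_cost: 0.15, output_cost: 0.6 },
  "openai/gpt-4o-mini": { input_cost: 0.15, output_cost: 0.6 },
};

// Illustrative helper: looks up the model's rates and converts token counts
// to dollars, returning 0 for unknown model ids instead of throwing.
function estimateCostUSD(
  model: string,
  inputTokens: number,
  outputTokens: number,
): number {
  const costs = modelCosts[model];
  if (!costs) return 0;
  return (
    (inputTokens / 1_000_000) * costs.input_cost +
    (outputTokens / 1_000_000) * costs.output_cost
  );
}

// Example: 10k prompt tokens + 2k completion tokens on gpt-4o-mini
// => 0.01 * 0.15 + 0.002 * 0.6 = 0.0027 USD
console.log(estimateCostUSD("gpt-4o-mini", 10_000, 2_000));
```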