Perplexica/src/lib/providers/openai.ts


import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';

import { getOpenaiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';

export const PROVIDER_INFO = {
  key: 'openai',
  displayName: 'OpenAI',
};

const OPENAI_MODELS_ENDPOINT = 'https://api.openai.com/v1/models';
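
/**
 * Lists the model IDs visible to the given API key via OpenAI's /v1/models
 * endpoint. Throws on a non-2xx status or an unexpected payload shape so
 * that callers can decide how to degrade.
 */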
async function fetchOpenAIModels(apiKey: string): Promise<string[]> {
  const resp = await fetch(OPENAI_MODELS_ENDPOINT, {
    method: 'GET',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
  });

  if (!resp.ok) {
    throw new Error(`OpenAI models endpoint returned ${resp.status}`);
  }

  const data = await resp.json();

  if (!data || !Array.isArray(data.data)) {
    throw new Error('Unexpected OpenAI models response format');
  }

  return data.data
    .map((model: any) => (model && model.id ? String(model.id) : undefined))
    .filter(Boolean) as string[];
}
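
/**
 * Builds a ChatModel entry for every chat-capable model the API key can see.
 * Chat models are identified heuristically: the ID must start with "gpt" or
 * "o" (the o1/o3/o4 reasoning families) and must not contain a substring that
 * marks a non-chat modality (audio, embedding, image, moderation,
 * transcription, TTS). Returns an empty record when no key is configured or
 * the listing fails, so the provider registry can skip OpenAI gracefully.
 */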
export const loadOpenAIChatModels = async () => {
  const openaiApiKey = getOpenaiApiKey();

  if (!openaiApiKey) return {};

  try {
    const modelIds = (await fetchOpenAIModels(openaiApiKey)).sort((a, b) =>
      a.localeCompare(b),
    );

    const chatModels: Record<string, ChatModel> = {};

    modelIds.forEach((model) => {
      const lid = model.toLowerCase();
      const excludedSubstrings = [
        'audio',
        'embedding',
        'image',
        'omni-moderation',
        'transcribe',
        'tts',
      ];
      const isChat =
        (lid.startsWith('gpt') || lid.startsWith('o')) &&
        !excludedSubstrings.some((s) => lid.includes(s));

      if (!isChat) return;

      chatModels[model] = {
        displayName: model,
        model: new ChatOpenAI({
          apiKey: openaiApiKey,
          modelName: model,
          // GPT-5 models only accept the default temperature of 1.
          temperature: model.includes('gpt-5') ? 1 : 0.7,
        }) as unknown as BaseChatModel,
      };
    });

    return chatModels;
  } catch (err) {
    console.error(`Error loading OpenAI chat models: ${err}`);
    return {};
  }
};
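
/**
 * Builds an EmbeddingModel entry for every model whose ID contains
 * "embedding" (e.g. text-embedding-3-small). Mirrors the chat loader's
 * error handling: an empty record is returned when no key is configured
 * or the listing fails.
 */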
export const loadOpenAIEmbeddingModels = async () => {
  const openaiApiKey = getOpenaiApiKey();

  if (!openaiApiKey) return {};

  try {
    const modelIds = (await fetchOpenAIModels(openaiApiKey)).sort((a, b) =>
      a.localeCompare(b),
    );

    const embeddingModels: Record<string, EmbeddingModel> = {};

    modelIds.forEach((model) => {
      const lid = model.toLowerCase();
      const isEmbedding = lid.includes('embedding');

      if (!isEmbedding) return;

      embeddingModels[model] = {
        displayName: model,
        model: new OpenAIEmbeddings({
          apiKey: openaiApiKey,
          modelName: model,
        }) as unknown as Embeddings,
      };
    });

    return embeddingModels;
  } catch (err) {
    console.error(`Error loading OpenAI embedding models: ${err}`);
    return {};
  }
};
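
// Illustrative usage sketch (not part of this module): the provider registry
// is assumed to await both loaders and merge the results, keying UI entries
// by model ID.
//
//   const [chatModels, embeddingModels] = await Promise.all([
//     loadOpenAIChatModels(),
//     loadOpenAIEmbeddingModels(),
//   ]);
//   console.log(Object.keys(chatModels), Object.keys(embeddingModels));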