diff --git a/README.md b/README.md
index cf81442..106a57e 100644
--- a/README.md
+++ b/README.md
@@ -78,6 +78,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
   - `OPENAI`: Your OpenAI API key. **You only need to fill this if you wish to use OpenAI's models**.
   - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
   - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
+  - `OPENROUTER`: Your OpenRouter API key. **You only need to fill this if you wish to use models via OpenRouter**.
   - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
   - `Gemini`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
   - `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
diff --git a/sample.config.toml b/sample.config.toml
index 9c27ef7..43f2dbd 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -10,6 +10,9 @@ API_KEY = ""
 [MODELS.GROQ]
 API_KEY = ""
 
+[MODELS.OPENROUTER]
+API_KEY = ""
+
 [MODELS.ANTHROPIC]
 API_KEY = ""
 
diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts
index 1fd5954..64b51b8 100644
--- a/src/app/api/config/route.ts
+++ b/src/app/api/config/route.ts
@@ -8,6 +8,7 @@ import {
   getGroqApiKey,
   getOllamaApiEndpoint,
   getOpenaiApiKey,
+  getOpenrouterApiKey,
   getDeepseekApiKey,
   getAimlApiKey,
   getLMStudioApiEndpoint,
@@ -64,6 +65,7 @@ export const GET = async (req: Request) => {
   config['anthropicApiKey'] = protectApiKey(getAnthropicApiKey());
   config['geminiApiKey'] = protectApiKey(getGeminiApiKey());
   config['deepseekApiKey'] = protectApiKey(getDeepseekApiKey());
+  config['openrouterApiKey'] = protectApiKey(getOpenrouterApiKey());
   config['customOpenaiApiKey'] = protectApiKey(getCustomOpenaiApiKey());
   config['aimlApiKey'] = protectApiKey(getAimlApiKey());
 
@@ -143,6 +145,12 @@ export const POST = async (req: Request) => {
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },
+      OPENROUTER: {
+        API_KEY: getUpdatedProtectedValue(
+          config.openrouterApiKey,
+          getOpenrouterApiKey(),
+        ),
+      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: getUpdatedProtectedValue(
diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
index ce22d23..cbdc812 100644
--- a/src/app/settings/page.tsx
+++ b/src/app/settings/page.tsx
@@ -33,6 +33,7 @@ interface SettingsType {
   };
   openaiApiKey: string;
   groqApiKey: string;
+  openrouterApiKey: string;
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
@@ -1673,6 +1674,25 @@ export default function SettingsPage() {
                     />
                   </div>
 
+                  <div className="flex flex-col space-y-1">
+                    <p className="text-black/70 dark:text-white/70 text-sm">
+                      OpenRouter API Key
+                    </p>
+                    <Input
+                      type="text"
+                      placeholder="OpenRouter API Key"
+                      value={config.openrouterApiKey}
+                      isSaving={savingStates['openrouterApiKey']}
+                      onChange={(e) => {
+                        setConfig((prev) => ({
+                          ...prev!,
+                          openrouterApiKey: e.target.value,
+                        }));
+                      }}
+                      onSave={(value) => saveConfig('openrouterApiKey', value)}
+                    />
+                  </div>
+
                   <div className="flex flex-col space-y-1">
                     <p className="text-black/70 dark:text-white/70 text-sm">
                       Anthropic API Key
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 951ce7d..4f60692 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -43,6 +43,9 @@ interface Config {
   LM_STUDIO: {
     API_URL: string;
   };
+  OPENROUTER: {
+    API_KEY: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -115,6 +118,8 @@ export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;
 
 export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;
 
+export const getOpenrouterApiKey = () => loadConfig().MODELS.OPENROUTER.API_KEY;
+
 export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;
 
 export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index f1cca27..a1dcd21 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -46,6 +46,10 @@ import {
   loadLMStudioEmbeddingsModels,
   PROVIDER_INFO as LMStudioInfo,
 } from './lmstudio';
+import {
+  loadOpenrouterChatModels,
+  PROVIDER_INFO as OpenRouterInfo,
+} from './openrouter';
 
 export const PROVIDER_METADATA = {
   openai: OpenAIInfo,
@@ -57,6 +61,7 @@ export const PROVIDER_METADATA = {
   deepseek: DeepseekInfo,
   aimlapi: AimlApiInfo,
   lmstudio: LMStudioInfo,
+  openrouter: OpenRouterInfo,
   custom_openai: {
     key: 'custom_openai',
     displayName: 'Custom OpenAI',
@@ -85,6 +90,7 @@ export const chatModelProviders: Record<
   deepseek: loadDeepseekChatModels,
   aimlapi: loadAimlApiChatModels,
   lmstudio: loadLMStudioChatModels,
+  openrouter: loadOpenrouterChatModels,
 };
 
 export const embeddingModelProviders: Record<
diff --git a/src/lib/providers/openrouter.ts b/src/lib/providers/openrouter.ts
new file mode 100644
index 0000000..ef7110d
--- /dev/null
+++ b/src/lib/providers/openrouter.ts
@@ -0,0 +1,70 @@
+export const PROVIDER_INFO = {
+  key: 'openrouter',
+  displayName: 'OpenRouter',
+};
+import { ChatOpenAI } from '@langchain/openai';
+import { getOpenrouterApiKey } from '../config';
+import { ChatModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+
+// Cached list of { displayName, key } entries from the OpenRouter catalogue.
+let openrouterChatModels: Record<string, any>[] = [];
+
+// Fetch the public model list from OpenRouter (no API key required).
+async function fetchModelList(): Promise<void> {
+  try {
+    const response = await fetch('https://openrouter.ai/api/v1/models', {
+      method: 'GET',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    });
+
+    if (!response.ok) {
+      throw new Error(`API request failed with status: ${response.status}`);
+    }
+
+    const data = await response.json();
+
+    openrouterChatModels = data.data.map((model: any) => ({
+      displayName: model.name,
+      key: model.id,
+    }));
+  } catch (error) {
+    console.error('Error fetching OpenRouter models:', error);
+  }
+}
+
+export const loadOpenrouterChatModels = async () => {
+  const openrouterApiKey = getOpenrouterApiKey();
+
+  // Skip the catalogue fetch entirely when no API key is configured.
+  if (!openrouterApiKey) return {};
+
+  await fetchModelList();
+
+  try {
+    const chatModels: Record<string, ChatModel> = {};
+
+    openrouterChatModels.forEach((model) => {
+      chatModels[model.key] = {
+        displayName: model.displayName,
+        model: new ChatOpenAI({
+          // OpenRouter exposes an OpenAI-compatible API, so ChatOpenAI can be
+          // pointed at the OpenRouter base URL directly.
+          openAIApiKey: openrouterApiKey,
+          modelName: model.key,
+          temperature: 0.7,
+          configuration: {
+            baseURL: 'https://openrouter.ai/api/v1',
+          },
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading OpenRouter models: ${err}`);
+    return {};
+  }
+};
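
A minimal usage sketch, not part of the patch: it shows how the registry entry added in src/lib/providers/index.ts could be exercised, assuming OPENROUTER.API_KEY is set in config.toml. The relative import path and the model id 'openai/gpt-4o-mini' are illustrative assumptions, not values guaranteed by the patch.

// demo.ts - exercise the loader registered above
import { chatModelProviders } from './src/lib/providers';

const main = async () => {
  // 'openrouter' dispatches to loadOpenrouterChatModels.
  const models = await chatModelProviders['openrouter']();

  // Each entry maps an OpenRouter model id to a LangChain chat model.
  for (const [key, { displayName }] of Object.entries(models)) {
    console.log(`${key} -> ${displayName}`);
  }

  // Hypothetical model id; any id returned by the OpenRouter catalogue works.
  const reply = await models['openai/gpt-4o-mini']?.model.invoke('Hello!');
  console.log(reply?.content);
};

main();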