diff --git a/app.dockerfile b/app.dockerfile
index 3433288..c3c0fd0 100644
--- a/app.dockerfile
+++ b/app.dockerfile
@@ -12,6 +12,9 @@ COPY public ./public
 RUN mkdir -p /home/perplexica/data
 RUN yarn build
 
+RUN yarn add --dev @vercel/ncc
+RUN yarn ncc build ./src/lib/db/migrate.ts -o migrator
+
 FROM node:20.18.0-slim
 WORKDIR /home/perplexica
 
@@ -21,7 +24,12 @@ COPY --from=builder /home/perplexica/.next/static ./public/_next/static
 COPY --from=builder /home/perplexica/.next/standalone ./
 COPY --from=builder /home/perplexica/data ./data
 
+COPY drizzle ./drizzle
+COPY --from=builder /home/perplexica/migrator/build ./build
+COPY --from=builder /home/perplexica/migrator/index.js ./migrate.js
+
 RUN mkdir /home/perplexica/uploads
 
-CMD ["node", "server.js"]
\ No newline at end of file
+COPY entrypoint.sh ./entrypoint.sh
+RUN chmod +x ./entrypoint.sh
+CMD ["./entrypoint.sh"]
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index b702b4e..b32e0a9 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -16,6 +16,7 @@ services:
       dockerfile: app.dockerfile
     environment:
       - SEARXNG_API_URL=http://searxng:8080
+      - DATA_DIR=/home/perplexica
     ports:
       - 3000:3000
     networks:
diff --git a/drizzle.config.ts b/drizzle.config.ts
index 58de9e0..a029112 100644
--- a/drizzle.config.ts
+++ b/drizzle.config.ts
@@ -1,10 +1,11 @@
 import { defineConfig } from 'drizzle-kit';
+import path from 'path';
 
 export default defineConfig({
   dialect: 'sqlite',
   schema: './src/lib/db/schema.ts',
   out: './drizzle',
   dbCredentials: {
-    url: './data/db.sqlite',
+    url: path.join(process.cwd(), 'data', 'db.sqlite'),
   },
 });
diff --git a/drizzle/0000_fuzzy_randall.sql b/drizzle/0000_fuzzy_randall.sql
new file mode 100644
index 0000000..0a2ff07
--- /dev/null
+++ b/drizzle/0000_fuzzy_randall.sql
@@ -0,0 +1,16 @@
+CREATE TABLE IF NOT EXISTS `chats` (
+	`id` text PRIMARY KEY NOT NULL,
+	`title` text NOT NULL,
+	`createdAt` text NOT NULL,
+	`focusMode` text NOT NULL,
+	`files` text DEFAULT '[]'
+);
+--> statement-breakpoint
+CREATE TABLE IF NOT EXISTS `messages` (
+	`id` integer PRIMARY KEY NOT NULL,
+	`content` text NOT NULL,
+	`chatId` text NOT NULL,
+	`messageId` text NOT NULL,
+	`type` text,
+	`metadata` text
+);
diff --git a/drizzle/meta/0000_snapshot.json b/drizzle/meta/0000_snapshot.json
new file mode 100644
index 0000000..850bcd3
--- /dev/null
+++ b/drizzle/meta/0000_snapshot.json
@@ -0,0 +1,116 @@
+{
+  "version": "6",
+  "dialect": "sqlite",
+  "id": "ef3a044b-0f34-40b5-babb-2bb3a909ba27",
+  "prevId": "00000000-0000-0000-0000-000000000000",
+  "tables": {
+    "chats": {
+      "name": "chats",
+      "columns": {
+        "id": {
+          "name": "id",
+          "type": "text",
+          "primaryKey": true,
+          "notNull": true,
+          "autoincrement": false
+        },
+        "title": {
+          "name": "title",
+          "type": "text",
+          "primaryKey": false,
+          "notNull": true,
+          "autoincrement": false
+        },
+        "createdAt": {
+          "name": "createdAt",
+          "type": "text",
+          "primaryKey": false,
+          "notNull": true,
+          "autoincrement": false
+        },
+        "focusMode": {
+          "name": "focusMode",
+          "type": "text",
+          "primaryKey": false,
+          "notNull": true,
+          "autoincrement": false
+        },
+        "files": {
+          "name": "files",
+          "type": "text",
+          "primaryKey": false,
+          "notNull": false,
+          "autoincrement": false,
+          "default": "'[]'"
+        }
+      },
+      "indexes": {},
+      "foreignKeys": {},
+      "compositePrimaryKeys": {},
+      "uniqueConstraints": {},
+      "checkConstraints": {}
+    },
+    "messages": {
+      "name": "messages",
+      "columns": {
+        "id": {
+          "name": "id",
+          "type": "integer",
+          "primaryKey": true,
+          "notNull": true,
"autoincrement": false + }, + "content": { + "name": "content", + "type": "text", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "chatId": { + "name": "chatId", + "type": "text", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "messageId": { + "name": "messageId", + "type": "text", + "primaryKey": false, + "notNull": true, + "autoincrement": false + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + }, + "metadata": { + "name": "metadata", + "type": "text", + "primaryKey": false, + "notNull": false, + "autoincrement": false + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "checkConstraints": {} + } + }, + "views": {}, + "enums": {}, + "_meta": { + "schemas": {}, + "tables": {}, + "columns": {} + }, + "internal": { + "indexes": {} + } +} diff --git a/drizzle/meta/_journal.json b/drizzle/meta/_journal.json new file mode 100644 index 0000000..5db59d1 --- /dev/null +++ b/drizzle/meta/_journal.json @@ -0,0 +1,13 @@ +{ + "version": "7", + "dialect": "sqlite", + "entries": [ + { + "idx": 0, + "version": "6", + "when": 1748405503809, + "tag": "0000_fuzzy_randall", + "breakpoints": true + } + ] +} diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100644 index 0000000..9f9448a --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/sh +set -e + +node migrate.js + +exec node server.js \ No newline at end of file diff --git a/sample.config.toml b/sample.config.toml index 980e99d..1db2125 100644 --- a/sample.config.toml +++ b/sample.config.toml @@ -25,5 +25,8 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434 [MODELS.DEEPSEEK] API_KEY = "" +[MODELS.LM_STUDIO] +API_URL = "" # LM Studio API URL - http://host.docker.internal:1234 + [API_ENDPOINTS] -SEARXNG = "" # SearxNG API URL - http://localhost:32768 \ No newline at end of file +SEARXNG = "" # SearxNG API URL - http://localhost:32768 diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts index 39c1f84..c1e5bbd 100644 --- a/src/app/api/config/route.ts +++ b/src/app/api/config/route.ts @@ -8,6 +8,7 @@ import { getOllamaApiEndpoint, getOpenaiApiKey, getDeepseekApiKey, + getLMStudioApiEndpoint, updateConfig, } from '@/lib/config'; import { @@ -51,6 +52,7 @@ export const GET = async (req: Request) => { config['openaiApiKey'] = getOpenaiApiKey(); config['ollamaApiUrl'] = getOllamaApiEndpoint(); + config['lmStudioApiUrl'] = getLMStudioApiEndpoint(); config['anthropicApiKey'] = getAnthropicApiKey(); config['groqApiKey'] = getGroqApiKey(); config['geminiApiKey'] = getGeminiApiKey(); @@ -93,6 +95,9 @@ export const POST = async (req: Request) => { DEEPSEEK: { API_KEY: config.deepseekApiKey, }, + LM_STUDIO: { + API_URL: config.lmStudioApiUrl, + }, CUSTOM_OPENAI: { API_URL: config.customOpenaiApiUrl, API_KEY: config.customOpenaiApiKey, diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx index 8eee9a4..6f20f01 100644 --- a/src/app/settings/page.tsx +++ b/src/app/settings/page.tsx @@ -7,6 +7,7 @@ import { Switch } from '@headlessui/react'; import ThemeSwitcher from '@/components/theme/Switcher'; import { ImagesIcon, VideoIcon } from 'lucide-react'; import Link from 'next/link'; +import { PROVIDER_METADATA } from '@/lib/providers'; interface SettingsType { chatModelProviders: { @@ -20,6 +21,7 @@ interface SettingsType { anthropicApiKey: string; geminiApiKey: string; ollamaApiUrl: string; + lmStudioApiUrl: string; deepseekApiKey: 
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
@@ -141,7 +143,7 @@ const Page = () => {
   const [selectedEmbeddingModel, setSelectedEmbeddingModel] = useState<
     string | null
   >(null);
-  const [isLoading, setIsLoading] = useState(false);
+  const [isLoading, setIsLoading] = useState(true);
   const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
   const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
   const [systemInstructions, setSystemInstructions] = useState('');
@@ -149,7 +151,6 @@ const Page = () => {
 
   useEffect(() => {
     const fetchConfig = async () => {
-      setIsLoading(true);
      const res = await fetch(`/api/config`, {
        headers: {
          'Content-Type': 'application/json',
@@ -548,8 +549,9 @@ const Page = () => {
                   (provider) => ({
                     value: provider,
                     label:
+                      (PROVIDER_METADATA as any)[provider]?.displayName ||
                       provider.charAt(0).toUpperCase() +
-                      provider.slice(1),
+                        provider.slice(1),
                   }),
                 )}
               />
@@ -690,8 +692,9 @@ const Page = () => {
                   (provider) => ({
                     value: provider,
                     label:
+                      (PROVIDER_METADATA as any)[provider]?.displayName ||
                       provider.charAt(0).toUpperCase() +
-                      provider.slice(1),
+                        provider.slice(1),
                   }),
                 )}
               />
@@ -858,6 +861,25 @@ const Page = () => {
                 onSave={(value) => saveConfig('deepseekApiKey', value)}
               />
             </div>
+
+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                LM Studio API URL
+              </p>
+              <Input
+                type="text"
+                placeholder="LM Studio API URL"
+                value={config.lmStudioApiUrl}
+                isSaving={savingStates['lmStudioApiUrl']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    lmStudioApiUrl: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('lmStudioApiUrl', value)}
+              />
+            </div>
           </div>
         </SettingsSection>
       </div>
diff --git a/src/components/Navbar.tsx b/src/components/Navbar.tsx
index 13f2da3..e406ade 100644
--- a/src/components/Navbar.tsx
+++ b/src/components/Navbar.tsx
@@ -1,8 +1,122 @@
-import { Clock, Edit, Share, Trash } from 'lucide-react';
+import { Clock, Edit, Share, Trash, FileText, FileDown } from 'lucide-react';
 import { Message } from './ChatWindow';
-import { useEffect, useState } from 'react';
+import { useEffect, useState, Fragment } from 'react';
 import { formatTimeDifference } from '@/lib/utils';
 import DeleteChat from './DeleteChat';
+import {
+  Popover,
+  PopoverButton,
+  PopoverPanel,
+  Transition,
+} from '@headlessui/react';
+import jsPDF from 'jspdf';
+
+const downloadFile = (filename: string, content: string, type: string) => {
+  const blob = new Blob([content], { type });
+  const url = URL.createObjectURL(blob);
+  const a = document.createElement('a');
+  a.href = url;
+  a.download = filename;
+  document.body.appendChild(a);
+  a.click();
+  setTimeout(() => {
+    document.body.removeChild(a);
+    URL.revokeObjectURL(url);
+  }, 0);
+};
+
+const exportAsMarkdown = (messages: Message[], title: string) => {
+  const date = new Date(messages[0]?.createdAt || Date.now()).toLocaleString();
+  let md = `# 💬 Chat Export: ${title}\n\n`;
+  md += `*Exported on: ${date}*\n\n---\n`;
+  messages.forEach((msg, idx) => {
+    md += `\n---\n`;
+    md += `**${msg.role === 'user' ? '🧑 User' : '🤖 Assistant'}** 
+`;
+    md += `*${new Date(msg.createdAt).toLocaleString()}*\n\n`;
+    md += `> ${msg.content.replace(/\n/g, '\n> ')}\n`;
+    if (msg.sources && msg.sources.length > 0) {
+      md += `\n**Citations:**\n`;
+      msg.sources.forEach((src: any, i: number) => {
+        const url = src.metadata?.url || '';
+        md += `- [${i + 1}] [${url}](${url})\n`;
+      });
+    }
+  });
+  md += '\n---\n';
+  downloadFile(`${title || 'chat'}.md`, md, 'text/markdown');
+};
+
+const exportAsPDF = (messages: Message[], title: string) => {
+  const doc = new jsPDF();
+  const date = new Date(messages[0]?.createdAt || Date.now()).toLocaleString();
+  let y = 15;
+  const pageHeight = doc.internal.pageSize.height;
+  doc.setFontSize(18);
+  doc.text(`Chat Export: ${title}`, 10, y);
+  y += 8;
+  doc.setFontSize(11);
+  doc.setTextColor(100);
+  doc.text(`Exported on: ${date}`, 10, y);
+  y += 8;
+  doc.setDrawColor(200);
+  doc.line(10, y, 200, y);
+  y += 6;
+  doc.setTextColor(30);
+  messages.forEach((msg, idx) => {
+    if (y > pageHeight - 30) {
+      doc.addPage();
+      y = 15;
+    }
+    doc.setFont('helvetica', 'bold');
+    doc.text(`${msg.role === 'user' ? 'User' : 'Assistant'}`, 10, y);
+    doc.setFont('helvetica', 'normal');
+    doc.setFontSize(10);
+    doc.setTextColor(120);
+    doc.text(`${new Date(msg.createdAt).toLocaleString()}`, 40, y);
+    y += 6;
+    doc.setTextColor(30);
+    doc.setFontSize(12);
+    const lines = doc.splitTextToSize(msg.content, 180);
+    for (let i = 0; i < lines.length; i++) {
+      if (y > pageHeight - 20) {
+        doc.addPage();
+        y = 15;
+      }
+      doc.text(lines[i], 12, y);
+      y += 6;
+    }
+    if (msg.sources && msg.sources.length > 0) {
+      doc.setFontSize(11);
+      doc.setTextColor(80);
+      if (y > pageHeight - 20) {
+        doc.addPage();
+        y = 15;
+      }
+      doc.text('Citations:', 12, y);
+      y += 5;
+      msg.sources.forEach((src: any, i: number) => {
+        const url = src.metadata?.url || '';
+        if (y > pageHeight - 15) {
+          doc.addPage();
+          y = 15;
+        }
+        doc.text(`- [${i + 1}] ${url}`, 15, y);
+        y += 5;
+      });
+      doc.setTextColor(30);
+    }
+    y += 6;
+    doc.setDrawColor(230);
+    if (y > pageHeight - 10) {
+      doc.addPage();
+      y = 15;
+    }
+    doc.line(10, y, 200, y);
+    y += 4;
+  });
+  doc.save(`${title || 'chat'}.pdf`);
+};
 
 const Navbar = ({
   chatId,
@@ -59,10 +173,39 @@ const Navbar = ({
       <p className="hidden lg:flex">{title}</p>
 
       <div className="flex flex-row items-center space-x-4">
-        <Share
-          size={17}
-          className="active:scale-95 transition duration-100 cursor-pointer"
-        />
+        <Popover className="relative">
+          <PopoverButton className="active:scale-95 transition duration-100 cursor-pointer">
+            <Share size={17} />
+          </PopoverButton>
+          <Transition
+            as={Fragment}
+            enter="transition ease-out duration-100"
+            enterFrom="opacity-0 translate-y-1"
+            enterTo="opacity-100 translate-y-0"
+            leave="transition ease-in duration-75"
+            leaveFrom="opacity-100 translate-y-0"
+            leaveTo="opacity-0 translate-y-1"
+          >
+            <PopoverPanel className="absolute right-0 mt-2 w-64 rounded-xl shadow-xl bg-light-primary dark:bg-dark-primary border border-light-200 dark:border-dark-200 z-50">
+              <div className="flex flex-col py-3 px-3 gap-2">
+                <button
+                  className="flex items-center gap-2 px-4 py-2 text-left hover:bg-light-secondary dark:hover:bg-dark-secondary transition-colors text-black dark:text-white rounded-lg font-medium"
+                  onClick={() => exportAsMarkdown(messages, title || '')}
+                >
+                  <FileText size={17} />
+                  Export as Markdown
+                </button>
+                <button
+                  className="flex items-center gap-2 px-4 py-2 text-left hover:bg-light-secondary dark:hover:bg-dark-secondary transition-colors text-black dark:text-white rounded-lg font-medium"
+                  onClick={() => exportAsPDF(messages, title || '')}
+                >
+                  <FileDown size={17} />
+                  Export as PDF
+                </button>
+              </div>
+            </PopoverPanel>
+          </Transition>
+        </Popover>
         <DeleteChat redirect chatId={chatId!} chats={chats} setChats={setChats} />
       </div>
     </div>
diff --git a/src/lib/config.ts b/src/lib/config.ts
index 2831214..78ad09c 100644
--- a/src/lib/config.ts
+++ b/src/lib/config.ts
@@ -1,7 +1,14 @@
-import fs from 'fs';
-import path from 'path';
 import toml from '@iarna/toml';
 
+// Use dynamic imports for Node.js modules to prevent client-side errors
+let fs: any;
+let path: any;
+if (typeof window === 'undefined') {
+  // We're on the server
+  fs = require('fs');
+  path = require('path');
+}
+
 const configFileName = 'config.toml';
 
 interface Config {
@@ -28,6 +35,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  LM_STUDIO: {
+    API_URL: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;
@@ -43,10 +53,17 @@ type RecursivePartial<T> = {
   [P in keyof T]?: RecursivePartial<T[P]>;
 };
 
-const loadConfig = () =>
-  toml.parse(
-    fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
-  ) as any as Config;
+const loadConfig = () => {
+  // Server-side only
+  if (typeof window === 'undefined') {
+    return toml.parse(
+      fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
+    ) as any as Config;
+  }
+
+  // Client-side fallback - settings will be loaded via API
+  return {} as Config;
+};
 
 export const getSimilarityMeasure = () =>
   loadConfig().GENERAL.SIMILARITY_MEASURE;
@@ -77,6 +94,9 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
+export const getLMStudioApiEndpoint = () =>
+  loadConfig().MODELS.LM_STUDIO.API_URL;
+
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
     return current;
@@ -109,10 +129,13 @@ const mergeConfigs = (current: any, update: any): any => {
 };
 
 export const updateConfig = (config: RecursivePartial<Config>) => {
-  const currentConfig = loadConfig();
-  const mergedConfig = mergeConfigs(currentConfig, config);
-  fs.writeFileSync(
-    path.join(path.join(process.cwd(), `${configFileName}`)),
-    toml.stringify(mergedConfig),
-  );
+  // Server-side only
+  if (typeof window === 'undefined') {
+    const currentConfig = loadConfig();
+    const mergedConfig = mergeConfigs(currentConfig, config);
+    fs.writeFileSync(
+      path.join(path.join(process.cwd(), `${configFileName}`)),
+      toml.stringify(mergedConfig),
+    );
+  }
 };
diff --git a/src/lib/db/index.ts b/src/lib/db/index.ts
index 9b761d4..515cdb3 100644
--- a/src/lib/db/index.ts
+++ b/src/lib/db/index.ts
@@ -3,7 +3,8 @@ import Database from 'better-sqlite3';
 import * as schema from './schema';
 import path from 'path';
 
-const sqlite = new Database(path.join(process.cwd(), 'data/db.sqlite'));
+const DATA_DIR = process.env.DATA_DIR || process.cwd();
+const sqlite = new Database(path.join(DATA_DIR, './data/db.sqlite'));
 const db = drizzle(sqlite, {
   schema: schema,
 });
diff --git a/src/lib/db/migrate.ts b/src/lib/db/migrate.ts
new file mode 100644
index 0000000..c3ebff6
--- /dev/null
+++ b/src/lib/db/migrate.ts
@@ -0,0 +1,5 @@
+import db from './';
+import { migrate } from 'drizzle-orm/better-sqlite3/migrator';
+import path from 'path';
+
+migrate(db, { migrationsFolder: path.join(process.cwd(), 'drizzle') });
diff --git a/src/lib/providers/anthropic.ts b/src/lib/providers/anthropic.ts
index 7ecde4b..2b0f2cc 100644
--- a/src/lib/providers/anthropic.ts
+++ b/src/lib/providers/anthropic.ts
@@ -1,6 +1,11 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import { ChatModel } from '.';
 import { getAnthropicApiKey } from '../config';
+
+export const PROVIDER_INFO = {
+  key: 'anthropic',
+  displayName: 'Anthropic',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 const anthropicChatModels: Record<string, string>[] = [
diff --git a/src/lib/providers/deepseek.ts b/src/lib/providers/deepseek.ts
index 88f02ec..46f2398 100644
--- a/src/lib/providers/deepseek.ts
+++ b/src/lib/providers/deepseek.ts
@@ -3,6 +3,11 @@ import { getDeepseekApiKey } from '../config';
 import { ChatModel } from '.';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
+export const PROVIDER_INFO = {
+  key: 'deepseek',
+  displayName: 'Deepseek AI',
+};
+
 const deepseekChatModels: Record<string, string>[] = [
   {
     displayName: 'Deepseek Chat (Deepseek V3)',
diff --git a/src/lib/providers/gemini.ts b/src/lib/providers/gemini.ts
index 2a88015..6cf2243 100644
--- a/src/lib/providers/gemini.ts
+++ b/src/lib/providers/gemini.ts
@@ -4,6 +4,11 @@
 } from '@langchain/google-genai';
 import { getGeminiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'gemini',
+  displayName: 'Google Gemini',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts
index 85c75f4..5435de4 100644
--- a/src/lib/providers/groq.ts
+++ b/src/lib/providers/groq.ts
@@ -1,101 +1,36 @@
 import { ChatOpenAI } from '@langchain/openai';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-
-const groqChatModels: Record<string, string>[] = [
-  {
-    displayName: 'Gemma2 9B IT',
-    key: 'gemma2-9b-it',
-  },
-  {
-    displayName: 'Llama 3.3 70B Versatile',
-    key: 'llama-3.3-70b-versatile',
-  },
-  {
-    displayName: 'Llama 3.1 8B Instant',
-    key: 'llama-3.1-8b-instant',
-  },
-  {
-    displayName: 'Llama3 70B 8192',
-    key: 'llama3-70b-8192',
-  },
-  {
-    displayName: 'Llama3 8B 8192',
-    key: 'llama3-8b-8192',
-  },
-  {
-    displayName: 'Mixtral 8x7B 32768',
-    key: 'mixtral-8x7b-32768',
-  },
-  {
-    displayName: 'Qwen QWQ 32B (Preview)',
-    key: 'qwen-qwq-32b',
-  },
-  {
-    displayName: 'Mistral Saba 24B (Preview)',
-    key: 'mistral-saba-24b',
-  },
-  {
-    displayName: 'Qwen 2.5 Coder 32B (Preview)',
-    key: 'qwen-2.5-coder-32b',
-  },
-  {
-    displayName: 'Qwen 2.5 32B (Preview)',
-    key: 'qwen-2.5-32b',
-  },
-  {
-    displayName: 'DeepSeek R1 Distill Qwen 32B (Preview)',
-    key: 'deepseek-r1-distill-qwen-32b',
-  },
-  {
-    displayName: 'DeepSeek R1 Distill Llama 70B (Preview)',
-    key: 'deepseek-r1-distill-llama-70b',
-  },
-  {
-    displayName: 'Llama 3.3 70B SpecDec (Preview)',
-    key: 'llama-3.3-70b-specdec',
-  },
-  {
-    displayName: 'Llama 3.2 1B Preview (Preview)',
-    key: 'llama-3.2-1b-preview',
-  },
-  {
-    displayName: 'Llama 3.2 3B Preview (Preview)',
-    key: 'llama-3.2-3b-preview',
-  },
-  {
-    displayName: 'Llama 3.2 11B Vision Preview (Preview)',
-    key: 'llama-3.2-11b-vision-preview',
-  },
-  {
-    displayName: 'Llama 3.2 90B Vision Preview (Preview)',
-    key: 'llama-3.2-90b-vision-preview',
-  },
-  /* {
-    displayName: 'Llama 4 Maverick 17B 128E Instruct (Preview)',
-    key: 'meta-llama/llama-4-maverick-17b-128e-instruct',
-  }, */
-  {
-    displayName: 'Llama 4 Scout 17B 16E Instruct (Preview)',
-    key: 'meta-llama/llama-4-scout-17b-16e-instruct',
-  },
-];
+export const PROVIDER_INFO = {
+  key: 'groq',
+  displayName: 'Groq',
+};
+
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 
 export const loadGroqChatModels = async () => {
   const groqApiKey = getGroqApiKey();
-
   if (!groqApiKey) return {};
 
   try {
+    const res = await fetch('https://api.groq.com/openai/v1/models', {
+      method: 'GET',
+      headers: {
+        Authorization: `bearer ${groqApiKey}`,
+        'Content-Type': 'application/json',
+      },
+    });
+
+    const groqChatModels = (await res.json()).data;
+
     const chatModels: Record<string, ChatModel> = {};
 
-    groqChatModels.forEach((model) => {
-      chatModels[model.key] = {
-        displayName: model.displayName,
+    groqChatModels.forEach((model: any) => {
+      chatModels[model.id] = {
+        displayName: model.id,
         model: new ChatOpenAI({
           openAIApiKey: groqApiKey,
-          modelName: model.key,
+          modelName: model.id,
           temperature: 0.7,
           configuration: {
             baseURL: 'https://api.groq.com/openai/v1',
diff --git a/src/lib/providers/index.ts b/src/lib/providers/index.ts
index eef212f..e536431 100644
--- a/src/lib/providers/index.ts
+++ b/src/lib/providers/index.ts
@@ -1,18 +1,60 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingModels } from './openai';
+import {
+  loadOpenAIChatModels,
+  loadOpenAIEmbeddingModels,
+  PROVIDER_INFO as OpenAIInfo,
+  PROVIDER_INFO,
+} from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import { loadOllamaChatModels, loadOllamaEmbeddingModels } from './ollama';
-import { loadGroqChatModels } from './groq';
-import { loadAnthropicChatModels } from './anthropic';
-import { loadGeminiChatModels, loadGeminiEmbeddingModels } from './gemini';
-import { loadTransformersEmbeddingsModels } from './transformers';
-import { loadDeepseekChatModels } from './deepseek';
+import {
+  loadOllamaChatModels,
+  loadOllamaEmbeddingModels,
+  PROVIDER_INFO as OllamaInfo,
+} from './ollama';
+import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
+import {
+  loadAnthropicChatModels,
+  PROVIDER_INFO as AnthropicInfo,
+} from './anthropic';
+import {
+  loadGeminiChatModels,
+  loadGeminiEmbeddingModels,
+  PROVIDER_INFO as GeminiInfo,
+} from './gemini';
+import {
+  loadTransformersEmbeddingsModels,
+  PROVIDER_INFO as TransformersInfo,
+} from './transformers';
+import {
+  loadDeepseekChatModels,
+  PROVIDER_INFO as DeepseekInfo,
+} from './deepseek';
+import {
+  loadLMStudioChatModels,
+  loadLMStudioEmbeddingsModels,
+  PROVIDER_INFO as LMStudioInfo,
+} from './lmstudio';
+
+export const PROVIDER_METADATA = {
+  openai: OpenAIInfo,
+  ollama: OllamaInfo,
+  groq: GroqInfo,
+  anthropic: AnthropicInfo,
+  gemini: GeminiInfo,
+  transformers: TransformersInfo,
+  deepseek: DeepseekInfo,
+  lmstudio: LMStudioInfo,
+  custom_openai: {
+    key: 'custom_openai',
+    displayName: 'Custom OpenAI',
+  },
+};
 
 export interface ChatModel {
   displayName: string;
@@ -34,6 +76,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  lmstudio: loadLMStudioChatModels,
 };
 
 export const embeddingModelProviders: Record<
@@ -44,6 +87,7 @@
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  lmstudio: loadLMStudioEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
diff --git a/src/lib/providers/lmstudio.ts b/src/lib/providers/lmstudio.ts
new file mode 100644
index 0000000..811208f
--- /dev/null
+++ b/src/lib/providers/lmstudio.ts
@@ -0,0 +1,100 @@
+import { getKeepAlive, getLMStudioApiEndpoint } from '../config';
+import axios from 'axios';
+import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'lmstudio',
+  displayName: 'LM Studio',
+};
+import { ChatOpenAI } from '@langchain/openai';
+import { OpenAIEmbeddings } from '@langchain/openai';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+
+interface LMStudioModel {
+  id: string;
+  name?: string;
+}
+
+const ensureV1Endpoint = (endpoint: string): string =>
+  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
+
+const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
+  try {
+    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+    return true;
+  } catch {
+    return false;
+  }
+};
+
+export const loadLMStudioChatModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      chatModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new ChatOpenAI({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+          temperature: 0.7,
+          streaming: true,
+          maxRetries: 3,
+        }) as unknown as BaseChatModel,
+      };
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio models: ${err}`);
+    return {};
+  }
+};
+
+export const loadLMStudioEmbeddingsModels = async () => {
+  const endpoint = getLMStudioApiEndpoint();
+
+  if (!endpoint) return {};
+  if (!(await checkServerAvailability(endpoint))) return {};
+
+  try {
+    const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      headers: { 'Content-Type': 'application/json' },
+    });
+
+    const embeddingsModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: LMStudioModel) => {
+      embeddingsModels[model.id] = {
+        displayName: model.name || model.id,
+        model: new OpenAIEmbeddings({
+          openAIApiKey: 'lm-studio',
+          configuration: {
+            baseURL: ensureV1Endpoint(endpoint),
+          },
+          modelName: model.id,
+        }) as unknown as Embeddings,
+      };
+    });
+
+    return embeddingsModels;
+  } catch (err) {
+    console.error(`Error loading LM Studio embeddings model: ${err}`);
+    return {};
+  }
+};
diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts
index 92e98e4..cca2142 100644
--- a/src/lib/providers/ollama.ts
+++ b/src/lib/providers/ollama.ts
@@ -1,6 +1,11 @@
 import axios from 'axios';
 import { getKeepAlive, getOllamaApiEndpoint } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'ollama',
+  displayName: 'Ollama',
+};
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
 
diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts
index 01bacc6..e68e574 100644
--- a/src/lib/providers/openai.ts
+++ b/src/lib/providers/openai.ts
@@ -1,6 +1,11 @@
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { getOpenaiApiKey } from '../config';
 import { ChatModel, EmbeddingModel } from '.';
+
+export const PROVIDER_INFO = {
+  key: 'openai',
+  displayName: 'OpenAI',
+};
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
 
@@ -25,6 +30,18 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
+  {
+    displayName: 'GPT 4.1 nano',
+    key: 'gpt-4.1-nano',
+  },
+  {
+    displayName: 'GPT 4.1 mini',
+    key: 'gpt-4.1-mini',
+  },
+  {
+    displayName: 'GPT 4.1',
+    key: 'gpt-4.1',
+  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [
diff --git a/src/lib/providers/transformers.ts b/src/lib/providers/transformers.ts
index a06dd12..3098d9f 100644
--- a/src/lib/providers/transformers.ts
+++ b/src/lib/providers/transformers.ts
@@ -1,5 +1,10 @@
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
+export const PROVIDER_INFO = {
+  key: 'transformers',
+  displayName: 'Hugging Face',
+};
+
 export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {
diff --git a/src/lib/utils/documents.ts b/src/lib/utils/documents.ts
index ad64180..93eb451 100644
--- a/src/lib/utils/documents.ts
+++ b/src/lib/utils/documents.ts
@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
       const splittedText = await splitter.splitText(parsedText);
       const title = res.data
         .toString('utf8')
-        .match(/<title>(.*?)<\/title>/)?.[1];
+        .match(/<title.*>(.*?)<\/title>/)?.[1];
 
       const linkDocs = splittedText.map((text) => {
         return new Document({