Merge remote-tracking branch 'upstream/master' into refac/improve-searxng-queries

Commit 5885cf6d98 by Dave, 2025-08-17 15:30:48 +02:00
44 changed files with 999 additions and 628 deletions

.assets/manifest.json (new file)

.gitignore (vendored)

@@ -37,3 +37,5 @@ Thumbs.db
 # Db
 db.sqlite
 /searxng
+certificates

(file name not shown)

@@ -16,7 +16,7 @@
 <hr/>

-[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
+[![Discord](https://dcbadge.limes.pink/api/server/26aArMy8tT?style=flat)](https://discord.gg/26aArMy8tT)

 ![preview](.assets/perplexica-screenshot.png?)
@@ -90,6 +90,9 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 - `OLLAMA`: Your Ollama API URL. You should enter it as `http://host.docker.internal:PORT_NUMBER`. If you installed Ollama on port 11434, use `http://host.docker.internal:11434`. For other ports, adjust accordingly. **You need to fill this if you wish to use Ollama's models instead of OpenAI's**.
 - `GROQ`: Your Groq API key. **You only need to fill this if you wish to use Groq's hosted models**.
 - `ANTHROPIC`: Your Anthropic API key. **You only need to fill this if you wish to use Anthropic models**.
+- `GEMINI`: Your Gemini API key. **You only need to fill this if you wish to use Google's models**.
+- `DEEPSEEK`: Your Deepseek API key. **Only needed if you want Deepseek models.**
+- `AIMLAPI`: Your AI/ML API key. **Only needed if you want to use AI/ML API models and embeddings.**

 **Note**: You can change these after starting Perplexica from the settings dialog.
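For reference, these keys live in `config.toml`; a minimal sketch of the relevant sections. Only `[MODELS.DEEPSEEK]` and `[MODELS.AIMLAPI]` appear verbatim in this diff (see the `sample.config.toml` hunk further down), so the `[MODELS.GEMINI]` section name is an assumption based on the same pattern.

```toml
# Sketch of config.toml entries for the newly documented keys.
# DEEPSEEK and AIMLAPI are taken from sample.config.toml in this diff;
# the GEMINI section name is assumed to follow the same pattern.
[MODELS.GEMINI]
API_KEY = ""

[MODELS.DEEPSEEK]
API_KEY = ""

[MODELS.AIMLAPI]
API_KEY = "" # Required to use AI/ML API chat and embedding models
```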
@@ -111,7 +114,7 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
 2. Clone the repository and rename the `sample.config.toml` file to `config.toml` in the root directory. Ensure you complete all required fields in this file.
 3. After populating the configuration run `npm i`.
 4. Install the dependencies and then execute `npm run build`.
-5. Finally, start the app by running `npm rum start`
+5. Finally, start the app by running `npm run start`

 **Note**: Using Docker is recommended as it simplifies the setup process, especially for managing environment variables and dependencies.
@@ -132,7 +135,7 @@ If you're encountering an Ollama connection error, it is likely due to the backend
 3. **Linux Users - Expose Ollama to Network:**

-   - Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0"`. Then restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)
+   - Inside `/etc/systemd/system/ollama.service`, you need to add `Environment="OLLAMA_HOST=0.0.0.0:11434"`. (Change the port number if you are using a different one.) Then reload the systemd manager configuration with `systemctl daemon-reload`, and restart Ollama by `systemctl restart ollama`. For more information see [Ollama docs](https://github.com/ollama/ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux)

 - Ensure that the port (default is 11434) is not blocked by your firewall.
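A minimal sketch of what the edited unit looks like in practice, assuming the default unit path and port named in the step above:

```ini
# /etc/systemd/system/ollama.service (sketch; only the relevant lines shown)
[Service]
Environment="OLLAMA_HOST=0.0.0.0:11434"

# Then apply the change:
#   systemctl daemon-reload
#   systemctl restart ollama
```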

(file name not shown)

@@ -41,6 +41,6 @@ To update Perplexica to the latest version, follow these steps:
 3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
 4. After populating the configuration run `npm i`.
 5. Install the dependencies and then execute `npm run build`.
-6. Finally, start the app by running `npm rum start`
+6. Finally, start the app by running `npm run start`

 ---

(file name not shown)

@@ -1,6 +1,6 @@
 {
   "name": "perplexica-frontend",
-  "version": "1.11.0-rc1",
+  "version": "1.11.0-rc2",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
@@ -15,11 +15,13 @@
     "@headlessui/react": "^2.2.0",
     "@iarna/toml": "^2.2.5",
     "@icons-pack/react-simple-icons": "^12.3.0",
-    "@langchain/anthropic": "^0.3.15",
-    "@langchain/community": "^0.3.36",
-    "@langchain/core": "^0.3.42",
-    "@langchain/google-genai": "^0.1.12",
-    "@langchain/openai": "^0.0.25",
+    "@langchain/anthropic": "^0.3.24",
+    "@langchain/community": "^0.3.49",
+    "@langchain/core": "^0.3.66",
+    "@langchain/google-genai": "^0.2.15",
+    "@langchain/groq": "^0.2.3",
+    "@langchain/ollama": "^0.2.3",
+    "@langchain/openai": "^0.6.2",
     "@langchain/textsplitters": "^0.1.0",
     "@tailwindcss/typography": "^0.5.12",
     "@xenova/transformers": "^2.17.2",
@@ -31,7 +33,7 @@
     "drizzle-orm": "^0.40.1",
     "html-to-text": "^9.0.5",
     "jspdf": "^3.0.1",
-    "langchain": "^0.1.30",
+    "langchain": "^0.3.30",
     "lucide-react": "^0.363.0",
     "mammoth": "^1.9.1",
     "markdown-to-jsx": "^7.7.2",

New binary files:

  public/icon-100.png (916 B)
  public/icon-50.png (515 B)
  public/icon.png (30 KiB)
  public/screenshots/p1.png (183 KiB)
  (file name not shown, 130 KiB)
  public/screenshots/p2.png (627 KiB)
  (file name not shown, 202 KiB)

(file name not shown)

@@ -25,6 +25,9 @@ API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 [MODELS.DEEPSEEK]
 API_KEY = ""

+[MODELS.AIMLAPI]
+API_KEY = "" # Required to use AI/ML API chat and embedding models
+
 [MODELS.LM_STUDIO]
 API_URL = "" # LM Studio API URL - http://host.docker.internal:1234

(file name not shown)

@@ -223,7 +223,7 @@ export const POST = async (req: Request) => {
     if (body.chatModel?.provider === 'custom_openai') {
       llm = new ChatOpenAI({
-        openAIApiKey: getCustomOpenaiApiKey(),
+        apiKey: getCustomOpenaiApiKey(),
         modelName: getCustomOpenaiModelName(),
         temperature: 0.7,
         configuration: {

(file name not shown)

@@ -8,6 +8,7 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
+  getAimlApiKey,
   getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
@@ -57,6 +58,7 @@ export const GET = async (req: Request) => {
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['deepseekApiKey'] = getDeepseekApiKey();
+    config['aimlApiKey'] = getAimlApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
     config['customOpenaiModelName'] = getCustomOpenaiModelName();
@@ -95,6 +97,9 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
+      AIMLAPI: {
+        API_KEY: config.aimlApiKey,
+      },
       LM_STUDIO: {
         API_URL: config.lmStudioApiUrl,
       },

(file name not shown)

@@ -1,55 +1,79 @@
 import { searchSearxng } from '@/lib/searxng';

-const articleWebsites = [
-  'yahoo.com',
-  'www.exchangewire.com',
-  'businessinsider.com',
-  /* 'wired.com',
-  'mashable.com',
-  'theverge.com',
-  'gizmodo.com',
-  'cnet.com',
-  'venturebeat.com', */
-];
+const websitesForTopic = {
+  tech: {
+    query: ['technology news', 'latest tech', 'AI', 'science and innovation'],
+    links: ['techcrunch.com', 'wired.com', 'theverge.com'],
+  },
+  finance: {
+    query: ['finance news', 'economy', 'stock market', 'investing'],
+    links: ['bloomberg.com', 'cnbc.com', 'marketwatch.com'],
+  },
+  art: {
+    query: ['art news', 'culture', 'modern art', 'cultural events'],
+    links: ['artnews.com', 'hyperallergic.com', 'theartnewspaper.com'],
+  },
+  sports: {
+    query: ['sports news', 'latest sports', 'cricket football tennis'],
+    links: ['espn.com', 'bbc.com/sport', 'skysports.com'],
+  },
+  entertainment: {
+    query: ['entertainment news', 'movies', 'TV shows', 'celebrities'],
+    links: ['hollywoodreporter.com', 'variety.com', 'deadline.com'],
+  },
+};

-const topics = ['AI', 'tech']; /* TODO: Add UI to customize this */
+type Topic = keyof typeof websitesForTopic;

 export const GET = async (req: Request) => {
   try {
     const params = new URL(req.url).searchParams;

     const mode: 'normal' | 'preview' =
       (params.get('mode') as 'normal' | 'preview') || 'normal';
+    const topic: Topic = (params.get('topic') as Topic) || 'tech';
+    const selectedTopic = websitesForTopic[topic];

     let data = [];

     if (mode === 'normal') {
+      const seenUrls = new Set();
+
       data = (
-        await Promise.all([
-          ...new Array(articleWebsites.length * topics.length)
-            .fill(0)
-            .map(async (_, i) => {
+        await Promise.all(
+          selectedTopic.links.flatMap((link) =>
+            selectedTopic.query.map(async (query) => {
               return (
-                await searchSearxng(
-                  `site:${articleWebsites[i % articleWebsites.length]} ${topics[i % topics.length]
-                  }`,
-                  {
-                    categories: ['news'],
-                    time_range: ['month'],
-                    pageno: 1,
-                  },
-                )
+                await searchSearxng(`site:${link} ${query}`, {
+                  categories: ['news'],
+                  time_range: ['month'],
+                  language: 'en',
+                  pageno: 1,
+                })
               ).results;
             }),
-        ])
+          ),
+        )
       )
-        .map((result) => result)
         .flat()
+        .filter((item) => {
+          const url = item.url?.toLowerCase().trim();
+          if (seenUrls.has(url)) return false;
+          seenUrls.add(url);
+          return true;
+        })
        .sort(() => Math.random() - 0.5);
     } else {
       data = (
         await searchSearxng(
-          `site:${articleWebsites[Math.floor(Math.random() * articleWebsites.length)]} ${topics[Math.floor(Math.random() * topics.length)]}`,
-          { categories: ['news'], time_range: ['month'], pageno: 1 },
+          `site:${selectedTopic.links[Math.floor(Math.random() * selectedTopic.links.length)]} ${selectedTopic.query[Math.floor(Math.random() * selectedTopic.query.length)]}`,
+          {
+            categories: ['news'],
+            time_range: ['month'],
+            language: 'en',
+            pageno: 1,
+          },
         )
       ).results;
     }
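A minimal client-side sketch of calling the refactored route; the `topic` value must be one of the keys of `websitesForTopic` above, and the `blogs` field matches what the Discover page (further down in this diff) reads from the response:

```ts
// Sketch: fetch deduplicated, shuffled finance headlines from the new route.
const res = await fetch('/api/discover?topic=finance');
const data = await res.json();
const articles = data.blogs; // SearxNG results, deduplicated by URL
```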

(file name not shown)

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
     if (body.chatModel?.provider === 'custom_openai') {
       llm = new ChatOpenAI({
-        openAIApiKey: getCustomOpenaiApiKey(),
+        apiKey: getCustomOpenaiApiKey(),
         modelName: getCustomOpenaiModelName(),
         temperature: 0.7,
         configuration: {

(file name not shown)

@@ -81,8 +81,7 @@ export const POST = async (req: Request) => {
     if (body.chatModel?.provider === 'custom_openai') {
       llm = new ChatOpenAI({
         modelName: body.chatModel?.name || getCustomOpenaiModelName(),
-        openAIApiKey:
-          body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
+        apiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
         temperature: 0.7,
         configuration: {
           baseURL:

(file name not shown)

@@ -48,7 +48,7 @@ export const POST = async (req: Request) => {
     if (body.chatModel?.provider === 'custom_openai') {
       llm = new ChatOpenAI({
-        openAIApiKey: getCustomOpenaiApiKey(),
+        apiKey: getCustomOpenaiApiKey(),
         modelName: getCustomOpenaiModelName(),
         temperature: 0.7,
         configuration: {

(file name not shown)

@@ -49,7 +49,7 @@ export const POST = async (req: Request) => {
     if (body.chatModel?.provider === 'custom_openai') {
       llm = new ChatOpenAI({
-        openAIApiKey: getCustomOpenaiApiKey(),
+        apiKey: getCustomOpenaiApiKey(),
         modelName: getCustomOpenaiModelName(),
         temperature: 0.7,
         configuration: {

(file name not shown)

@@ -1,6 +1,10 @@
 export const POST = async (req: Request) => {
   try {
-    const body: { lat: number; lng: number } = await req.json();
+    const body: {
+      lat: number;
+      lng: number;
+      measureUnit: 'Imperial' | 'Metric';
+    } = await req.json();

     if (!body.lat || !body.lng) {
       return Response.json(
@@ -12,7 +16,9 @@ export const POST = async (req: Request) => {
     }

     const res = await fetch(
-      `https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto`,
+      `https://api.open-meteo.com/v1/forecast?latitude=${body.lat}&longitude=${body.lng}&current=weather_code,temperature_2m,is_day,relative_humidity_2m,wind_speed_10m&timezone=auto${
+        body.measureUnit === 'Metric' ? '' : '&temperature_unit=fahrenheit'
+      }${body.measureUnit === 'Metric' ? '' : '&wind_speed_unit=mph'}`,
     );

     const data = await res.json();
@@ -33,12 +39,16 @@ export const POST = async (req: Request) => {
       humidity: number;
       windSpeed: number;
       icon: string;
+      temperatureUnit: 'C' | 'F';
+      windSpeedUnit: 'm/s' | 'mph';
     } = {
       temperature: data.current.temperature_2m,
       condition: '',
       humidity: data.current.relative_humidity_2m,
       windSpeed: data.current.wind_speed_10m,
       icon: '',
+      temperatureUnit: body.measureUnit === 'Metric' ? 'C' : 'F',
+      windSpeedUnit: body.measureUnit === 'Metric' ? 'm/s' : 'mph',
     };

     const code = data.current.weather_code;
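A request sketch against the updated endpoint, using the body type shown above. With `measureUnit: 'Imperial'` the route appends `&temperature_unit=fahrenheit&wind_speed_unit=mph` to the Open-Meteo URL and reports `F`/`mph` back; the coordinates here are arbitrary examples:

```ts
// Sketch: the POST body now carries the unit preference.
const res = await fetch('/api/weather', {
  method: 'POST',
  body: JSON.stringify({ lat: 52.52, lng: 13.41, measureUnit: 'Imperial' }),
});
const weather = await res.json();
// weather.temperatureUnit === 'F', weather.windSpeedUnit === 'mph'
```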

(file name not shown)

@@ -4,6 +4,7 @@ import { Search } from 'lucide-react';
 import { useEffect, useState } from 'react';
 import Link from 'next/link';
 import { toast } from 'sonner';
+import { cn } from '@/lib/utils';

 interface Discover {
   title: string;
@@ -12,60 +13,66 @@ interface Discover {
   thumbnail: string;
 }

+const topics: { key: string; display: string }[] = [
+  {
+    display: 'Tech & Science',
+    key: 'tech',
+  },
+  {
+    display: 'Finance',
+    key: 'finance',
+  },
+  {
+    display: 'Art & Culture',
+    key: 'art',
+  },
+  {
+    display: 'Sports',
+    key: 'sports',
+  },
+  {
+    display: 'Entertainment',
+    key: 'entertainment',
+  },
+];
+
 const Page = () => {
   const [discover, setDiscover] = useState<Discover[] | null>(null);
   const [loading, setLoading] = useState(true);
+  const [activeTopic, setActiveTopic] = useState<string>(topics[0].key);
+
+  const fetchArticles = async (topic: string) => {
+    setLoading(true);
+    try {
+      const res = await fetch(`/api/discover?topic=${topic}`, {
+        method: 'GET',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+      });
+
+      const data = await res.json();
+
+      if (!res.ok) {
+        throw new Error(data.message);
+      }
+
+      data.blogs = data.blogs.filter((blog: Discover) => blog.thumbnail);
+
+      setDiscover(data.blogs);
+    } catch (err: any) {
+      console.error('Error fetching data:', err.message);
+      toast.error('Error fetching data');
+    } finally {
+      setLoading(false);
+    }
+  };

   useEffect(() => {
-    const fetchData = async () => {
-      try {
-        const res = await fetch(`/api/discover`, {
-          method: 'GET',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-        });
-
-        const data = await res.json();
-
-        if (!res.ok) {
-          throw new Error(data.message);
-        }
-
-        data.blogs = data.blogs.filter((blog: Discover) => blog.thumbnail);
-
-        setDiscover(data.blogs);
-      } catch (err: any) {
-        console.error('Error fetching data:', err.message);
-        toast.error('Error fetching data');
-      } finally {
-        setLoading(false);
-      }
-    };
-
-    fetchData();
-  }, []);
+    fetchArticles(activeTopic);
+  }, [activeTopic]);

-  return loading ? (
-    <div className="flex flex-row items-center justify-center min-h-screen">
-      <svg
-        aria-hidden="true"
-        className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
-        viewBox="0 0 100 101"
-        fill="none"
-        xmlns="http://www.w3.org/2000/svg"
-      >
-        <path
-          d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
-          fill="currentColor"
-        />
-        <path
-          d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
-          fill="currentFill"
-        />
-      </svg>
-    </div>
-  ) : (
+  return (
     <>
       <div>
         <div className="flex flex-col pt-4">
@@ -76,35 +83,73 @@ const Page = () => {
           <hr className="border-t border-[#2B2C2C] my-4 w-full" />
         </div>

-        <div className="grid lg:grid-cols-3 sm:grid-cols-2 grid-cols-1 gap-4 pb-28 lg:pb-8 w-full justify-items-center lg:justify-items-start">
-          {discover &&
-            discover?.map((item, i) => (
-              <Link
-                href={`/?q=Summary: ${item.url}`}
-                key={i}
-                className="max-w-sm rounded-lg overflow-hidden bg-light-secondary dark:bg-dark-secondary hover:-translate-y-[1px] transition duration-200"
-                target="_blank"
-              >
-                <img
-                  className="object-cover w-full aspect-video"
-                  src={
-                    new URL(item.thumbnail).origin +
-                    new URL(item.thumbnail).pathname +
-                    `?id=${new URL(item.thumbnail).searchParams.get('id')}`
-                  }
-                  alt={item.title}
-                />
-                <div className="px-6 py-4">
-                  <div className="font-bold text-lg mb-2">
-                    {item.title.slice(0, 100)}...
-                  </div>
-                  <p className="text-black-70 dark:text-white/70 text-sm">
-                    {item.content.slice(0, 100)}...
-                  </p>
-                </div>
-              </Link>
-            ))}
+        <div className="flex flex-row items-center space-x-2 overflow-x-auto">
+          {topics.map((t, i) => (
+            <div
+              key={i}
+              className={cn(
+                'border-[0.1px] rounded-full text-sm px-3 py-1 text-nowrap transition duration-200 cursor-pointer',
+                activeTopic === t.key
+                  ? 'text-cyan-300 bg-cyan-300/30 border-cyan-300/60'
+                  : 'border-white/30 text-white/70 hover:text-white hover:border-white/40 hover:bg-white/5',
+              )}
+              onClick={() => setActiveTopic(t.key)}
+            >
+              <span>{t.display}</span>
+            </div>
+          ))}
         </div>
+
+        {loading ? (
+          <div className="flex flex-row items-center justify-center min-h-screen">
+            <svg
+              aria-hidden="true"
+              className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]"
+              viewBox="0 0 100 101"
+              fill="none"
+              xmlns="http://www.w3.org/2000/svg"
+            >
+              <path
+                d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z"
+                fill="currentColor"
+              />
+              <path
+                d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z"
+                fill="currentFill"
+              />
+            </svg>
+          </div>
+        ) : (
+          <div className="grid lg:grid-cols-3 sm:grid-cols-2 grid-cols-1 gap-4 pb-28 pt-5 lg:pb-8 w-full justify-items-center lg:justify-items-start">
+            {discover &&
+              discover?.map((item, i) => (
+                <Link
+                  href={`/?q=Summary: ${item.url}`}
+                  key={i}
+                  className="max-w-sm rounded-lg overflow-hidden bg-light-secondary dark:bg-dark-secondary hover:-translate-y-[1px] transition duration-200"
+                  target="_blank"
+                >
+                  <img
+                    className="object-cover w-full aspect-video"
+                    src={
+                      new URL(item.thumbnail).origin +
+                      new URL(item.thumbnail).pathname +
+                      `?id=${new URL(item.thumbnail).searchParams.get('id')}`
+                    }
+                    alt={item.title}
+                  />
+                  <div className="px-6 py-4">
+                    <div className="font-bold text-lg mb-2">
+                      {item.title.slice(0, 100)}...
+                    </div>
+                    <p className="text-black-70 dark:text-white/70 text-sm">
+                      {item.content.slice(0, 100)}...
+                    </p>
+                  </div>
+                </Link>
+              ))}
+          </div>
+        )}
       </div>
     </>
   );

(file name not shown)

@@ -11,3 +11,11 @@
     display: none;
   }
 }
+
+@media screen and (-webkit-min-device-pixel-ratio: 0) {
+  select,
+  textarea,
+  input {
+    font-size: 16px !important;
+  }
+}

src/app/manifest.ts (new file)

@@ -0,0 +1,54 @@
+import type { MetadataRoute } from 'next';
+
+export default function manifest(): MetadataRoute.Manifest {
+  return {
+    name: 'Perplexica - Chat with the internet',
+    short_name: 'Perplexica',
+    description:
+      'Perplexica is an AI powered chatbot that is connected to the internet.',
+    start_url: '/',
+    display: 'standalone',
+    background_color: '#0a0a0a',
+    theme_color: '#0a0a0a',
+    screenshots: [
+      {
+        src: '/screenshots/p1.png',
+        form_factor: 'wide',
+        sizes: '2560x1600',
+      },
+      {
+        src: '/screenshots/p2.png',
+        form_factor: 'wide',
+        sizes: '2560x1600',
+      },
+      {
+        src: '/screenshots/p1_small.png',
+        form_factor: 'narrow',
+        sizes: '828x1792',
+      },
+      {
+        src: '/screenshots/p2_small.png',
+        form_factor: 'narrow',
+        sizes: '828x1792',
+      },
+    ],
+    icons: [
+      {
+        src: '/icon-50.png',
+        sizes: '50x50',
+        type: 'image/png' as const,
+      },
+      {
+        src: '/icon-100.png',
+        sizes: '100x100',
+        type: 'image/png',
+      },
+      {
+        src: '/icon.png',
+        sizes: '440x440',
+        type: 'image/png',
+        purpose: 'any',
+      },
+    ],
+  };
+}

(file name not shown)

@@ -23,6 +23,7 @@ interface SettingsType {
   ollamaApiUrl: string;
   lmStudioApiUrl: string;
   deepseekApiKey: string;
+  aimlApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
@@ -147,6 +148,9 @@ const Page = () => {
   const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
   const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
   const [systemInstructions, setSystemInstructions] = useState<string>('');
+  const [measureUnit, setMeasureUnit] = useState<'Imperial' | 'Metric'>(
+    'Metric',
+  );
   const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});

   useEffect(() => {
@@ -209,6 +213,10 @@ const Page = () => {
       setSystemInstructions(localStorage.getItem('systemInstructions')!);

+      setMeasureUnit(
+        localStorage.getItem('measureUnit')! as 'Imperial' | 'Metric',
+      );
+
       setIsLoading(false);
     };
@@ -367,6 +375,8 @@ const Page = () => {
         localStorage.setItem('embeddingModel', value);
       } else if (key === 'systemInstructions') {
         localStorage.setItem('systemInstructions', value);
+      } else if (key === 'measureUnit') {
+        localStorage.setItem('measureUnit', value.toString());
       }
     } catch (err) {
       console.error('Failed to save:', err);
@@ -415,13 +425,35 @@ const Page = () => {
       ) : (
         config && (
           <div className="flex flex-col space-y-6 pb-28 lg:pb-8">
-            <SettingsSection title="Appearance">
+            <SettingsSection title="Preferences">
               <div className="flex flex-col space-y-1">
                 <p className="text-black/70 dark:text-white/70 text-sm">
                   Theme
                 </p>
                 <ThemeSwitcher />
               </div>
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  Measurement Units
+                </p>
+                <Select
+                  value={measureUnit ?? undefined}
+                  onChange={(e) => {
+                    setMeasureUnit(e.target.value as 'Imperial' | 'Metric');
+                    saveConfig('measureUnit', e.target.value);
+                  }}
+                  options={[
+                    {
+                      label: 'Metric',
+                      value: 'Metric',
+                    },
+                    {
+                      label: 'Imperial',
+                      value: 'Imperial',
+                    },
+                  ]}
+                />
+              </div>
             </SettingsSection>

             <SettingsSection title="Automatic Search">
@@ -515,7 +547,7 @@ const Page = () => {
             <SettingsSection title="System Instructions">
               <div className="flex flex-col space-y-4">
                 <Textarea
-                  value={systemInstructions}
+                  value={systemInstructions ?? undefined}
                   isSaving={savingStates['systemInstructions']}
                   onChange={(e) => {
                     setSystemInstructions(e.target.value);
@@ -862,6 +894,25 @@ const Page = () => {
               />
             </div>

+            <div className="flex flex-col space-y-1">
+              <p className="text-black/70 dark:text-white/70 text-sm">
+                AI/ML API Key
+              </p>
+              <Input
+                type="text"
+                placeholder="AI/ML API Key"
+                value={config.aimlApiKey}
+                isSaving={savingStates['aimlApiKey']}
+                onChange={(e) => {
+                  setConfig((prev) => ({
+                    ...prev!,
+                    aimlApiKey: e.target.value,
+                  }));
+                }}
+                onSave={(value) => saveConfig('aimlApiKey', value)}
+              />
+            </div>
+
             <div className="flex flex-col space-y-1">
               <p className="text-black/70 dark:text-white/70 text-sm">
                 LM Studio API URL

(file name not shown)

@@ -82,14 +82,29 @@ const checkConfig = async (
   ) {
     if (!chatModel || !chatModelProvider) {
       const chatModelProviders = providers.chatModelProviders;
+      const chatModelProvidersKeys = Object.keys(chatModelProviders);

-      chatModelProvider =
-        chatModelProvider || Object.keys(chatModelProviders)[0];
+      if (!chatModelProviders || chatModelProvidersKeys.length === 0) {
+        return toast.error('No chat models available');
+      } else {
+        chatModelProvider =
+          chatModelProvidersKeys.find(
+            (provider) =>
+              Object.keys(chatModelProviders[provider]).length > 0,
+          ) || chatModelProvidersKeys[0];
+      }
+
+      if (
+        chatModelProvider === 'custom_openai' &&
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0
+      ) {
+        toast.error(
+          "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+        );
+        return setHasError(true);
+      }

       chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
-
-      if (!chatModelProviders || Object.keys(chatModelProviders).length === 0)
-        return toast.error('No chat models available');
     }

     if (!embeddingModel || !embeddingModelProvider) {
@@ -117,7 +132,8 @@ const checkConfig = async (
     if (
       Object.keys(chatModelProviders).length > 0 &&
-      !chatModelProviders[chatModelProvider]
+      (!chatModelProviders[chatModelProvider] ||
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0)
     ) {
       const chatModelProvidersKeys = Object.keys(chatModelProviders);
       chatModelProvider =
@@ -132,6 +148,16 @@ const checkConfig = async (
       chatModelProvider &&
       !chatModelProviders[chatModelProvider][chatModel]
     ) {
+      if (
+        chatModelProvider === 'custom_openai' &&
+        Object.keys(chatModelProviders[chatModelProvider]).length === 0
+      ) {
+        toast.error(
+          "Looks like you haven't configured any chat model providers. Please configure them from the settings page or the config file.",
+        );
+        return setHasError(true);
+      }
+
       chatModel = Object.keys(
         chatModelProviders[
           Object.keys(chatModelProviders[chatModelProvider]).length > 0
@@ -139,6 +165,7 @@ const checkConfig = async (
             : Object.keys(chatModelProviders)[0]
         ],
       )[0];
+
       localStorage.setItem('chatModel', chatModel);
     }
@@ -327,7 +354,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
     }
   }, [isMessagesLoaded, isConfigReady]);

-  const sendMessage = async (message: string, messageId?: string) => {
+  const sendMessage = async (
+    message: string,
+    messageId?: string,
+    rewrite = false,
+  ) => {
     if (loading) return;
     if (!isConfigReady) {
       toast.error('Cannot send message before the configuration is ready');
@@ -455,6 +486,8 @@ const ChatWindow = ({ id }: { id?: string }) => {
       }
     };

+    const messageIndex = messages.findIndex((m) => m.messageId === messageId);
+
     const res = await fetch('/api/chat', {
       method: 'POST',
       headers: {
@@ -471,7 +504,9 @@ const ChatWindow = ({ id }: { id?: string }) => {
         files: fileIds,
         focusMode: focusMode,
         optimizationMode: optimizationMode,
-        history: chatHistory,
+        history: rewrite
+          ? chatHistory.slice(0, messageIndex === -1 ? undefined : messageIndex)
+          : chatHistory,
         chatModel: {
           name: chatModelProvider.name,
           provider: chatModelProvider.provider,
@@ -525,7 +560,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
       return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
     });

-    sendMessage(message.content, message.messageId);
+    sendMessage(message.content, message.messageId, true);
   };

   useEffect(() => {

(file name not shown)

@@ -51,10 +51,10 @@ const EmptyChat = ({
         />
       </div>
       <div className="flex flex-col w-full gap-4 mt-2 sm:flex-row sm:justify-center">
-        <div className="flex-1 max-w-xs">
+        <div className="flex-1 w-full">
           <WeatherWidget />
         </div>
-        <div className="flex-1 max-w-xs">
+        <div className="flex-1 w-full">
           <NewsArticleWidget />
         </div>
       </div>

(file name not shown)

@@ -21,8 +21,16 @@ import SearchVideos from './SearchVideos';
 import { useSpeech } from 'react-text-to-speech';
 import ThinkBox from './ThinkBox';

-const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => {
-  return <ThinkBox content={children as string} />;
+const ThinkTagProcessor = ({
+  children,
+  thinkingEnded,
+}: {
+  children: React.ReactNode;
+  thinkingEnded: boolean;
+}) => {
+  return (
+    <ThinkBox content={children as string} thinkingEnded={thinkingEnded} />
+  );
 };

 const MessageBox = ({
@@ -46,6 +54,7 @@ const MessageBox = ({
 }) => {
   const [parsedMessage, setParsedMessage] = useState(message.content);
   const [speechMessage, setSpeechMessage] = useState(message.content);
+  const [thinkingEnded, setThinkingEnded] = useState(false);

   useEffect(() => {
     const citationRegex = /\[([^\]]+)\]/g;
@@ -61,6 +70,10 @@ const MessageBox = ({
       }
     }

+    if (message.role === 'assistant' && message.content.includes('</think>')) {
+      setThinkingEnded(true);
+    }
+
     if (
       message.role === 'assistant' &&
       message?.sources &&
@@ -88,7 +101,7 @@ const MessageBox = ({
           if (url) {
             return `<a href="${url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${numStr}</a>`;
           } else {
-            return `[${numStr}]`;
+            return ``;
           }
         })
         .join('');
@@ -99,6 +112,14 @@ const MessageBox = ({
       );
       setSpeechMessage(message.content.replace(regex, ''));
       return;
+    } else if (
+      message.role === 'assistant' &&
+      message?.sources &&
+      message.sources.length === 0
+    ) {
+      setParsedMessage(processedMessage.replace(regex, ''));
+      setSpeechMessage(message.content.replace(regex, ''));
+      return;
     }

     setSpeechMessage(message.content.replace(regex, ''));
@@ -111,6 +132,9 @@ const MessageBox = ({
     overrides: {
       think: {
         component: ThinkTagProcessor,
+        props: {
+          thinkingEnded: thinkingEnded,
+        },
       },
     },
   };

(file name not shown)

@@ -1,15 +1,23 @@
 'use client';

-import { useState } from 'react';
+import { useEffect, useState } from 'react';
+import { cn } from '@/lib/utils';
 import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react';

 interface ThinkBoxProps {
   content: string;
+  thinkingEnded: boolean;
 }

-const ThinkBox = ({ content }: ThinkBoxProps) => {
-  const [isExpanded, setIsExpanded] = useState(false);
+const ThinkBox = ({ content, thinkingEnded }: ThinkBoxProps) => {
+  const [isExpanded, setIsExpanded] = useState(true);
+
+  useEffect(() => {
+    if (thinkingEnded) {
+      setIsExpanded(false);
+    } else {
+      setIsExpanded(true);
+    }
+  }, [thinkingEnded]);

   return (
     <div className="my-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200 overflow-hidden">

(file name not shown)

@@ -9,7 +9,10 @@ const WeatherWidget = () => {
     humidity: 0,
     windSpeed: 0,
     icon: '',
+    temperatureUnit: 'C',
+    windSpeedUnit: 'm/s',
   });
+
   const [loading, setLoading] = useState(true);

   useEffect(() => {
@@ -31,30 +34,40 @@ const WeatherWidget = () => {
       city: string;
     }) => void,
   ) => {
-    /*
-    // Geolocation doesn't give city so we'll country using ipapi for now
-    if (navigator.geolocation) {
-      const result = await navigator.permissions.query({
-        name: 'geolocation',
-      })
-
-      if (result.state === 'granted') {
-        navigator.geolocation.getCurrentPosition(position => {
-          callback({
-            latitude: position.coords.latitude,
-            longitude: position.coords.longitude,
-          })
-        })
-      } else if (result.state === 'prompt') {
-        callback(await getApproxLocation())
-        navigator.geolocation.getCurrentPosition(position => {})
-      } else if (result.state === 'denied') {
-        callback(await getApproxLocation())
-      }
-    } else {
-      callback(await getApproxLocation())
-    } */
-    callback(await getApproxLocation());
+    if (navigator.geolocation) {
+      const result = await navigator.permissions.query({
+        name: 'geolocation',
+      });
+
+      if (result.state === 'granted') {
+        navigator.geolocation.getCurrentPosition(async (position) => {
+          const res = await fetch(
+            `https://api-bdc.io/data/reverse-geocode-client?latitude=${position.coords.latitude}&longitude=${position.coords.longitude}&localityLanguage=en`,
+            {
+              method: 'GET',
+              headers: {
+                'Content-Type': 'application/json',
+              },
+            },
+          );
+
+          const data = await res.json();
+
+          callback({
+            latitude: position.coords.latitude,
+            longitude: position.coords.longitude,
+            city: data.locality,
+          });
+        });
+      } else if (result.state === 'prompt') {
+        callback(await getApproxLocation());
+        navigator.geolocation.getCurrentPosition((position) => {});
+      } else if (result.state === 'denied') {
+        callback(await getApproxLocation());
+      }
+    } else {
+      callback(await getApproxLocation());
+    }
   };

   getLocation(async (location) => {
@@ -63,6 +76,7 @@ const WeatherWidget = () => {
       body: JSON.stringify({
         lat: location.latitude,
         lng: location.longitude,
+        measureUnit: localStorage.getItem('measureUnit') ?? 'Metric',
       }),
     });
@@ -81,6 +95,8 @@ const WeatherWidget = () => {
         humidity: data.humidity,
         windSpeed: data.windSpeed,
         icon: data.icon,
+        temperatureUnit: data.temperatureUnit,
+        windSpeedUnit: data.windSpeedUnit,
       });
       setLoading(false);
     });
@@ -115,7 +131,7 @@ const WeatherWidget = () => {
           className="h-10 w-auto"
         />
         <span className="text-base font-semibold text-black dark:text-white">
-          {data.temperature}°C
+          {data.temperature}°{data.temperatureUnit}
         </span>
       </div>
       <div className="flex flex-col justify-between flex-1 h-full py-1">
@@ -125,7 +141,7 @@ const WeatherWidget = () => {
         </span>
         <span className="flex items-center text-xs text-black/60 dark:text-white/60">
           <Wind className="w-3 h-3 mr-1" />
-          {data.windSpeed} km/h
+          {data.windSpeed} {data.windSpeedUnit}
         </span>
       </div>
       <span className="text-xs text-black/60 dark:text-white/60 mt-1">

(file name not shown)

@@ -3,32 +3,18 @@ import {
   RunnableMap,
   RunnableLambda,
 } from '@langchain/core/runnables';
-import { PromptTemplate } from '@langchain/core/prompts';
+import { ChatPromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
 import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import LineOutputParser from '../outputParsers/lineOutputParser';

 const imageSearchChainPrompt = `
 You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
 You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
+Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
-
-Example:
-1. Follow up question: What is a cat?
-Rephrased: A cat
-
-2. Follow up question: What is a car? How does it works?
-Rephrased: Car working
-
-3. Follow up question: How does an AC work?
-Rephrased: AC working
-
-Conversation:
-{chat_history}
-
-Follow up question: {query}
-Rephrased question:
 `;

 type ImageSearchChainInput = {
@@ -54,12 +40,39 @@ const createImageSearchChain = (llm: BaseChatModel) => {
         return input.query;
       },
     }),
-    PromptTemplate.fromTemplate(imageSearchChainPrompt),
+    ChatPromptTemplate.fromMessages([
+      ['system', imageSearchChainPrompt],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nWhat is a cat?\n</follow_up>',
+      ],
+      ['assistant', '<query>A cat</query>'],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nWhat is a car? How does it work?\n</follow_up>',
+      ],
+      ['assistant', '<query>Car working</query>'],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
+      ],
+      ['assistant', '<query>AC working</query>'],
+      [
+        'user',
+        '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
+      ],
+    ]),
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
-      input = input.replace(/<think>.*?<\/think>/g, '');
+      const queryParser = new LineOutputParser({
+        key: 'query',
+      });
+      return await queryParser.parse(input);
+    }),
+    RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {
         engines: ['bing images', 'google images'],
       });
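The few-shot messages above teach the model to wrap its answer in a `<query>` tag, and the first lambda then extracts it before searching. A toy sketch of that step, assuming `LineOutputParser` (project code) returns the text inside the tag named by `key`:

```ts
// Sketch of the parsing step (LineOutputParser semantics assumed).
const queryParser = new LineOutputParser({ key: 'query' });
const query = await queryParser.parse('<query>A cat</query>');
// query === 'A cat', which is what gets passed on to searchSearxng()
```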

(file name not shown)

@@ -3,33 +3,19 @@ import {
   RunnableMap,
   RunnableLambda,
 } from '@langchain/core/runnables';
-import { PromptTemplate } from '@langchain/core/prompts';
+import { ChatPromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
 import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import LineOutputParser from '../outputParsers/lineOutputParser';

-const VideoSearchChainPrompt = `
+const videoSearchChainPrompt = `
 You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
 You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
+Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
-
-Example:
-1. Follow up question: How does a car work?
-Rephrased: How does a car work?
-
-2. Follow up question: What is the theory of relativity?
-Rephrased: What is theory of relativity
-
-3. Follow up question: How does an AC work?
-Rephrased: How does an AC work
-
-Conversation:
-{chat_history}
-
-Follow up question: {query}
-Rephrased question:
 `;

 type VideoSearchChainInput = {
   chat_history: BaseMessage[];
@@ -55,12 +41,37 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
         return input.query;
       },
     }),
-    PromptTemplate.fromTemplate(VideoSearchChainPrompt),
+    ChatPromptTemplate.fromMessages([
+      ['system', videoSearchChainPrompt],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nHow does a car work?\n</follow_up>',
+      ],
+      ['assistant', '<query>How does a car work?</query>'],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nWhat is the theory of relativity?\n</follow_up>',
+      ],
+      ['assistant', '<query>Theory of relativity</query>'],
+      [
+        'user',
+        '<conversation>\n</conversation>\n<follow_up>\nHow does an AC work?\n</follow_up>',
+      ],
+      ['assistant', '<query>AC working</query>'],
+      [
+        'user',
+        '<conversation>{chat_history}</conversation>\n<follow_up>\n{query}\n</follow_up>',
+      ],
+    ]),
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
-      input = input.replace(/<think>.*?<\/think>/g, '');
+      const queryParser = new LineOutputParser({
+        key: 'query',
+      });
+      return await queryParser.parse(input);
+    }),
+    RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {
         engines: ['youtube'],
       });
@@ -92,8 +103,8 @@ const handleVideoSearch = (
   input: VideoSearchChainInput,
   llm: BaseChatModel,
 ) => {
-  const VideoSearchChain = createVideoSearchChain(llm);
-  return VideoSearchChain.invoke(input);
+  const videoSearchChain = createVideoSearchChain(llm);
+  return videoSearchChain.invoke(input);
 };

 export default handleVideoSearch;

(file name not shown)

@@ -35,6 +35,9 @@ interface Config {
   DEEPSEEK: {
     API_KEY: string;
   };
+  AIMLAPI: {
+    API_KEY: string;
+  };
   LM_STUDIO: {
     API_URL: string;
   };
@@ -85,6 +88,8 @@ export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;

 export const getDeepseekApiKey = () => loadConfig().MODELS.DEEPSEEK.API_KEY;

+export const getAimlApiKey = () => loadConfig().MODELS.AIMLAPI.API_KEY;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;

(file name not shown; new file)

@@ -0,0 +1,94 @@
+import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
+import { getAimlApiKey } from '../config';
+import { ChatModel, EmbeddingModel } from '.';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { Embeddings } from '@langchain/core/embeddings';
+import axios from 'axios';
+
+export const PROVIDER_INFO = {
+  key: 'aimlapi',
+  displayName: 'AI/ML API',
+};
+
+interface AimlApiModel {
+  id: string;
+  name?: string;
+  type?: string;
+}
+
+const API_URL = 'https://api.aimlapi.com';
+
+export const loadAimlApiChatModels = async () => {
+  const apiKey = getAimlApiKey();
+
+  if (!apiKey) return {};
+
+  try {
+    const response = await axios.get(`${API_URL}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const chatModels: Record<string, ChatModel> = {};
+
+    response.data.data.forEach((model: AimlApiModel) => {
+      if (model.type === 'chat-completion') {
+        chatModels[model.id] = {
+          displayName: model.name || model.id,
+          model: new ChatOpenAI({
+            apiKey: apiKey,
+            modelName: model.id,
+            temperature: 0.7,
+            configuration: {
+              baseURL: API_URL,
+            },
+          }) as unknown as BaseChatModel,
+        };
+      }
+    });
+
+    return chatModels;
+  } catch (err) {
+    console.error(`Error loading AI/ML API models: ${err}`);
+    return {};
+  }
+};
+
+export const loadAimlApiEmbeddingModels = async () => {
+  const apiKey = getAimlApiKey();
+
+  if (!apiKey) return {};
+
+  try {
+    const response = await axios.get(`${API_URL}/models`, {
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+
+    const embeddingModels: Record<string, EmbeddingModel> = {};
+
+    response.data.data.forEach((model: AimlApiModel) => {
+      if (model.type === 'embedding') {
+        embeddingModels[model.id] = {
+          displayName: model.name || model.id,
+          model: new OpenAIEmbeddings({
+            apiKey: apiKey,
+            modelName: model.id,
+            configuration: {
+              baseURL: API_URL,
+            },
+          }) as unknown as Embeddings,
+        };
+      }
+    });
+
+    return embeddingModels;
+  } catch (err) {
+    console.error(`Error loading AI/ML API embeddings models: ${err}`);
+    return {};
+  }
+};

(file name not shown)

@@ -9,6 +9,18 @@ export const PROVIDER_INFO = {
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';

 const anthropicChatModels: Record<string, string>[] = [
+  {
+    displayName: 'Claude 4.1 Opus',
+    key: 'claude-opus-4-1-20250805',
+  },
+  {
+    displayName: 'Claude 4 Opus',
+    key: 'claude-opus-4-20250514',
+  },
+  {
+    displayName: 'Claude 4 Sonnet',
+    key: 'claude-sonnet-4-20250514',
+  },
   {
     displayName: 'Claude 3.7 Sonnet',
     key: 'claude-3-7-sonnet-20250219',

(file name not shown)

@@ -31,7 +31,7 @@ export const loadDeepseekChatModels = async () => {
       chatModels[model.key] = {
         displayName: model.displayName,
         model: new ChatOpenAI({
-          openAIApiKey: deepseekApiKey,
+          apiKey: deepseekApiKey,
           modelName: model.key,
           temperature: 0.7,
           configuration: {

(file name not shown)

@@ -14,8 +14,16 @@ import { Embeddings } from '@langchain/core/embeddings';

 const geminiChatModels: Record<string, string>[] = [
   {
-    displayName: 'Gemini 2.5 Pro Experimental',
-    key: 'gemini-2.5-pro-exp-03-25',
+    displayName: 'Gemini 2.5 Flash',
+    key: 'gemini-2.5-flash',
+  },
+  {
+    displayName: 'Gemini 2.5 Flash-Lite',
+    key: 'gemini-2.5-flash-lite',
+  },
+  {
+    displayName: 'Gemini 2.5 Pro',
+    key: 'gemini-2.5-pro',
   },
   {
     displayName: 'Gemini 2.0 Flash',
@@ -67,7 +75,7 @@ export const loadGeminiChatModels = async () => {
         displayName: model.displayName,
         model: new ChatGoogleGenerativeAI({
           apiKey: geminiApiKey,
-          modelName: model.key,
+          model: model.key,
           temperature: 0.7,
         }) as unknown as BaseChatModel,
       };
@@ -100,7 +108,7 @@ export const loadGeminiEmbeddingModels = async () => {
     return embeddingModels;
   } catch (err) {
-    console.error(`Error loading OpenAI embeddings models: ${err}`);
+    console.error(`Error loading Gemini embeddings models: ${err}`);
     return {};
   }
 };

(file name not shown)

@@ -1,4 +1,4 @@
-import { ChatOpenAI } from '@langchain/openai';
+import { ChatGroq } from '@langchain/groq';
 import { getGroqApiKey } from '../config';
 import { ChatModel } from '.';
@@ -28,13 +28,10 @@ export const loadGroqChatModels = async () => {
     groqChatModels.forEach((model: any) => {
       chatModels[model.id] = {
         displayName: model.id,
-        model: new ChatOpenAI({
-          openAIApiKey: groqApiKey,
-          modelName: model.id,
+        model: new ChatGroq({
+          apiKey: groqApiKey,
+          model: model.id,
           temperature: 0.7,
-          configuration: {
-            baseURL: 'https://api.groq.com/openai/v1',
-          },
         }) as unknown as BaseChatModel,
       };
     });

(file name not shown)

@@ -35,6 +35,11 @@ import {
   loadDeepseekChatModels,
   PROVIDER_INFO as DeepseekInfo,
 } from './deepseek';
+import {
+  loadAimlApiChatModels,
+  loadAimlApiEmbeddingModels,
+  PROVIDER_INFO as AimlApiInfo,
+} from './aimlapi';
 import {
   loadLMStudioChatModels,
   loadLMStudioEmbeddingsModels,
@@ -49,6 +54,7 @@ export const PROVIDER_METADATA = {
   gemini: GeminiInfo,
   transformers: TransformersInfo,
   deepseek: DeepseekInfo,
+  aimlapi: AimlApiInfo,
   lmstudio: LMStudioInfo,
   custom_openai: {
     key: 'custom_openai',
@@ -76,6 +82,7 @@ export const chatModelProviders: Record<
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
   deepseek: loadDeepseekChatModels,
+  aimlapi: loadAimlApiChatModels,
   lmstudio: loadLMStudioChatModels,
 };

@@ -87,6 +94,7 @@ export const embeddingModelProviders: Record<
   ollama: loadOllamaEmbeddingModels,
   gemini: loadGeminiEmbeddingModels,
   transformers: loadTransformersEmbeddingsModels,
+  aimlapi: loadAimlApiEmbeddingModels,
   lmstudio: loadLMStudioEmbeddingsModels,
 };

@@ -110,7 +118,7 @@ export const getAvailableChatModelProviders = async () => {
           [customOpenAiModelName]: {
             displayName: customOpenAiModelName,
             model: new ChatOpenAI({
-              openAIApiKey: customOpenAiApiKey,
+              apiKey: customOpenAiApiKey,
               modelName: customOpenAiModelName,
               temperature: 0.7,
               configuration: {

(file name not shown)

@@ -47,7 +47,7 @@ export const loadLMStudioChatModels = async () => {
       chatModels[model.id] = {
         displayName: model.name || model.id,
         model: new ChatOpenAI({
-          openAIApiKey: 'lm-studio',
+          apiKey: 'lm-studio',
           configuration: {
             baseURL: ensureV1Endpoint(endpoint),
           },
@@ -83,7 +83,7 @@ export const loadLMStudioEmbeddingsModels = async () => {
       embeddingsModels[model.id] = {
         displayName: model.name || model.id,
         model: new OpenAIEmbeddings({
-          openAIApiKey: 'lm-studio',
+          apiKey: 'lm-studio',
           configuration: {
             baseURL: ensureV1Endpoint(endpoint),
           },

(file name not shown)

@@ -6,8 +6,8 @@ export const PROVIDER_INFO = {
   key: 'ollama',
   displayName: 'Ollama',
 };
-import { ChatOllama } from '@langchain/community/chat_models/ollama';
-import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
+import { ChatOllama } from '@langchain/ollama';
+import { OllamaEmbeddings } from '@langchain/ollama';

 export const loadOllamaChatModels = async () => {
   const ollamaApiEndpoint = getOllamaApiEndpoint();

(file name not shown)

@@ -42,6 +42,18 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT 4.1',
     key: 'gpt-4.1',
   },
+  {
+    displayName: 'GPT 5 nano',
+    key: 'gpt-5-nano',
+  },
+  {
+    displayName: 'GPT 5 mini',
+    key: 'gpt-5-mini',
+  },
+  {
+    displayName: 'GPT 5',
+    key: 'gpt-5',
+  },
 ];

 const openaiEmbeddingModels: Record<string, string>[] = [
@@ -67,9 +79,9 @@ export const loadOpenAIChatModels = async () => {
       chatModels[model.key] = {
         displayName: model.displayName,
         model: new ChatOpenAI({
-          openAIApiKey: openaiApiKey,
+          apiKey: openaiApiKey,
           modelName: model.key,
-          temperature: 0.7,
+          temperature: model.key.includes('gpt-5') ? 1 : 0.7,
         }) as unknown as BaseChatModel,
       };
     });
@@ -93,7 +105,7 @@ export const loadOpenAIEmbeddingModels = async () => {
       embeddingModels[model.key] = {
         displayName: model.displayName,
         model: new OpenAIEmbeddings({
-          openAIApiKey: openaiApiKey,
+          apiKey: openaiApiKey,
           modelName: model.key,
         }) as unknown as Embeddings,
       };

(file name not shown)

@@ -1,8 +1,11 @@
-import { BaseMessage } from '@langchain/core/messages';
+import { BaseMessage, isAIMessage } from '@langchain/core/messages';

 const formatChatHistoryAsString = (history: BaseMessage[]) => {
   return history
-    .map((message) => `${message._getType()}: ${message.content}`)
+    .map(
+      (message) =>
+        `${isAIMessage(message) ? 'AI' : 'User'}: ${message.content}`,
+    )
     .join('\n');
 };
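With this change the serialized history uses `AI`/`User` labels instead of the internal `_getType()` names; a quick sketch of the resulting format:

```ts
import { AIMessage, HumanMessage } from '@langchain/core/messages';

// formatChatHistoryAsString([new HumanMessage('hi'), new AIMessage('hello')])
// now yields: 'User: hi\nAI: hello'
```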

yarn.lock (701 lines changed; diff suppressed because it is too large)