feat(UI): allow system prompts and persona prompts to be saved server side and individually included or excluded from messages

This commit is contained in:
Willie Zutz 2025-05-27 12:53:30 -06:00
parent 8e6934bb64
commit 011d10df29
27 changed files with 1345 additions and 132 deletions

View file

@ -220,6 +220,7 @@ This fork adds several enhancements to the original Perplexica project:
- Added autocomplete functionality proxied to SearxNG
- ✅ Enhanced Reddit focus mode to work around SearxNG limitations
- ✅ Adds Quality mode that uses the full content of web pages to answer queries
- See the [README.md](docs/architecture/README.md) in the docs architecture directory for more info
- Enhances Balanced mode which uses relevant excerpts of web content to answer queries
### AI Functionality
@ -230,6 +231,7 @@ This fork adds several enhancements to the original Perplexica project:
- ✅ User customizable context window for Ollama models
- ✅ Toggle for automatic suggestions
- ✅ Added support for latest Anthropic models
- ✅ Adds support for multiple user-customizable system prompt enhancements and personas so you can tailor output to your needs
### Bug Fixes

View file

@ -7,5 +7,16 @@ Perplexica's architecture consists of the following key components:
3. **SearXNG**: A metadata search engine used by Perplexica to search the web for sources.
4. **LLMs (Large Language Models)**: Utilized by agents and chains for tasks like understanding content, writing responses, and citing sources. Examples include Claude, GPTs, etc.
5. **Embedding Models**: To improve the accuracy of search results, embedding models re-rank the results using similarity search algorithms such as cosine similarity and dot product distance.
6. **Web Content**
- In Quality mode the application uses Crawlee, Playwright, and Chromium to load web content into a real full browser
- This significantly increases the size of the docker image and also means it can only run on x64 architectures
- The docker build has been updated to restrict images to linux/amd64 architecture
- In Balanced mode, the application uses jsdom and Mozilla's Readability to retrieve and rank relevant segments of web content
- This approach is less effective than Quality mode because it doesn't use a full web browser and can't load dynamic content
- It is also more prone to being blocked by ads or scraping detection
- Because it only uses segments of web content, it can be less accurate than Quality mode
- In Speed mode, the application only uses the preview content returned by SearXNG
- This content is provided by the search engines and contains minimal context from the actual web page
- This mode is the least accurate and is often prone to hallucination
For a more detailed explanation of how these components work together, see [WORKING.md](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/architecture/WORKING.md).

View file

@ -1,3 +1,4 @@
import { cleanupCancelToken, registerCancelToken } from '@/lib/cancel-tokens';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
@ -11,6 +12,7 @@ import {
} from '@/lib/providers';
import { searchHandlers } from '@/lib/search';
import { getFileDetails } from '@/lib/utils/files';
import { getSystemPrompts } from '@/lib/utils/prompts';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOllama } from '@langchain/ollama';
@ -18,7 +20,6 @@ import { ChatOpenAI } from '@langchain/openai';
import crypto from 'crypto';
import { and, eq, gt } from 'drizzle-orm';
import { EventEmitter } from 'stream';
import { registerCancelToken, cleanupCancelToken } from '@/lib/cancel-tokens';
export const runtime = 'nodejs';
export const dynamic = 'force-dynamic';
@ -49,6 +50,7 @@ type Body = {
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
systemInstructions: string;
selectedSystemPromptIds: string[];
};
type ModelStats = {
@ -256,7 +258,7 @@ export const POST = async (req: Request) => {
try {
const startTime = Date.now();
const body = (await req.json()) as Body;
const { message } = body;
const { message, selectedSystemPromptIds } = body;
if (message.content === '') {
return Response.json(
@ -349,6 +351,14 @@ export const POST = async (req: Request) => {
);
}
let systemInstructionsContent = '';
let personaInstructionsContent = '';
// Retrieve system prompts from database using shared utility
const promptData = await getSystemPrompts(selectedSystemPromptIds);
systemInstructionsContent = promptData.systemInstructions;
personaInstructionsContent = promptData.personaInstructions;
const responseStream = new TransformStream();
const writer = responseStream.writable.getWriter();
const encoder = new TextEncoder();
@ -378,8 +388,9 @@ export const POST = async (req: Request) => {
embedding,
body.optimizationMode,
body.files,
body.systemInstructions,
systemInstructionsContent,
abortController.signal,
personaInstructionsContent,
);
handleEmitterEvents(

View file

@ -5,6 +5,7 @@ import {
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { getSystemInstructionsOnly } from '@/lib/utils/prompts';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOllama } from '@langchain/ollama';
@ -20,6 +21,7 @@ interface ImageSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
selectedSystemPromptIds?: string[];
}
export const POST = async (req: Request) => {
@ -70,12 +72,29 @@ export const POST = async (req: Request) => {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
let systemInstructions = '';
if (
body.selectedSystemPromptIds &&
body.selectedSystemPromptIds.length > 0
) {
try {
const promptInstructions = await getSystemInstructionsOnly(
body.selectedSystemPromptIds,
);
systemInstructions = promptInstructions || systemInstructions;
} catch (error) {
console.error('Error fetching system prompts:', error);
// Continue with fallback systemInstructions
}
}
const images = await handleImageSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
systemInstructions,
);
return Response.json({ images }, { status: 200 });

View file

@ -13,6 +13,7 @@ import {
getCustomOpenaiModelName,
} from '@/lib/config';
import { searchHandlers } from '@/lib/search';
import { getSystemInstructionsOnly } from '@/lib/utils/prompts';
import { ChatOllama } from '@langchain/ollama';
interface chatModel {
@ -36,7 +37,7 @@ interface ChatRequestBody {
query: string;
history: Array<[string, string]>;
stream?: boolean;
systemInstructions?: string;
selectedSystemPromptIds?: string[];
}
export const POST = async (req: Request) => {
@ -127,6 +128,23 @@ export const POST = async (req: Request) => {
const abortController = new AbortController();
const { signal } = abortController;
// Process system prompts from database if provided, otherwise use direct instructions
let systemInstructions = '';
if (
body.selectedSystemPromptIds &&
body.selectedSystemPromptIds.length > 0
) {
try {
const promptInstructions = await getSystemInstructionsOnly(
body.selectedSystemPromptIds,
);
systemInstructions = promptInstructions || systemInstructions;
} catch (error) {
console.error('Error fetching system prompts:', error);
// Continue with fallback systemInstructions
}
}
const emitter = await searchHandler.searchAndAnswer(
body.query,
history,
@ -134,7 +152,7 @@ export const POST = async (req: Request) => {
embeddings,
body.optimizationMode,
[],
body.systemInstructions || '',
systemInstructions,
signal,
);

View file

@ -5,6 +5,7 @@ import {
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { getSystemInstructionsOnly } from '@/lib/utils/prompts';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
@ -19,6 +20,7 @@ interface ChatModel {
interface SuggestionsGenerationBody {
chatHistory: any[];
chatModel?: ChatModel;
selectedSystemPromptIds?: string[];
}
export const POST = async (req: Request) => {
@ -69,11 +71,28 @@ export const POST = async (req: Request) => {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
let systemInstructions = '';
if (
body.selectedSystemPromptIds &&
body.selectedSystemPromptIds.length > 0
) {
try {
const retrievedInstructions = await getSystemInstructionsOnly(
body.selectedSystemPromptIds,
);
systemInstructions = retrievedInstructions;
} catch (error) {
console.error('Error retrieving system prompts:', error);
// Continue with existing systemInstructions as fallback
}
}
const suggestions = await generateSuggestions(
{
chat_history: chatHistory,
},
llm,
systemInstructions,
);
return Response.json({ suggestions }, { status: 200 });

View file

@ -0,0 +1,76 @@
import db from '@/lib/db';
import { systemPrompts } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
import { NextResponse } from 'next/server';
/**
 * PUT /api/system-prompts/[id] — updates an existing prompt.
 *
 * Requires `name` and `content` in the JSON body (partial updates are not
 * supported); `type` is optional and, when present, must be 'system' or
 * 'persona'. Responds 400 on validation failure, 404 when no row matched,
 * 500 on unexpected errors.
 */
export async function PUT(
  req: Request,
  { params }: { params: Promise<{ id: string }> },
) {
  try {
    const { id } = await params;
    const { name, content, type } = await req.json();

    if (!name || !content) {
      return NextResponse.json(
        { error: 'Name and content are required' },
        { status: 400 },
      );
    }

    if (type && type !== 'system' && type !== 'persona') {
      return NextResponse.json(
        { error: 'Type must be either "system" or "persona"' },
        { status: 400 },
      );
    }

    // Precisely-typed update payload (was `any`); `type` is only overwritten
    // when the caller explicitly sent one.
    const updateData: {
      name: string;
      content: string;
      updatedAt: Date;
      type?: 'system' | 'persona';
    } = { name, content, updatedAt: new Date() };
    if (type) {
      updateData.type = type;
    }

    const updatedPrompt = await db
      .update(systemPrompts)
      .set(updateData)
      .where(eq(systemPrompts.id, id))
      .returning();

    // `.returning()` yields the affected rows; empty means no such id.
    if (updatedPrompt.length === 0) {
      return NextResponse.json(
        { error: 'System prompt not found' },
        { status: 404 },
      );
    }

    return NextResponse.json(updatedPrompt[0]);
  } catch (error) {
    console.error('Failed to update system prompt:', error);
    return NextResponse.json(
      { error: 'Failed to update system prompt' },
      { status: 500 },
    );
  }
}
/**
 * DELETE /api/system-prompts/[id] — removes a stored prompt.
 *
 * Responds 404 when no row matched the id, 500 on unexpected failure.
 */
export async function DELETE(
  req: Request,
  { params }: { params: Promise<{ id: string }> },
) {
  try {
    const { id } = await params;

    // `.returning()` reports which rows were actually deleted.
    const removedRows = await db
      .delete(systemPrompts)
      .where(eq(systemPrompts.id, id))
      .returning();

    if (!removedRows.length) {
      return NextResponse.json(
        { error: 'System prompt not found' },
        { status: 404 },
      );
    }

    return NextResponse.json({ message: 'System prompt deleted successfully' });
  } catch (error) {
    console.error('Failed to delete system prompt:', error);
    return NextResponse.json(
      { error: 'Failed to delete system prompt' },
      { status: 500 },
    );
  }
}

View file

@ -0,0 +1,69 @@
import db from '@/lib/db';
import { systemPrompts } from '@/lib/db/schema';
import { NextResponse } from 'next/server';
import { asc, eq } from 'drizzle-orm';
/**
 * GET /api/system-prompts — lists stored prompts, ordered by name.
 *
 * An optional `?type=system|persona` query parameter narrows the result;
 * any other value is ignored and every prompt is returned.
 */
export async function GET(req: Request) {
  try {
    const requestedType = new URL(req.url).searchParams.get('type');

    // Only apply the filter when a recognised type value was supplied.
    const typeFilter =
      requestedType === 'system' || requestedType === 'persona'
        ? eq(systemPrompts.type, requestedType)
        : undefined;

    const prompts = typeFilter
      ? await db
          .select()
          .from(systemPrompts)
          .where(typeFilter)
          .orderBy(asc(systemPrompts.name))
      : await db
          .select()
          .from(systemPrompts)
          .orderBy(asc(systemPrompts.name));

    return NextResponse.json(prompts);
  } catch (error) {
    console.error('Failed to fetch system prompts:', error);
    return NextResponse.json(
      { error: 'Failed to fetch system prompts' },
      { status: 500 },
    );
  }
}
/**
 * POST /api/system-prompts — creates a new prompt.
 *
 * Requires `name` and `content`; `type` defaults to 'system' and must be
 * 'system' or 'persona'. Responds 201 with the created row, 400 on
 * validation failure, 500 on unexpected errors.
 */
export async function POST(req: Request) {
  try {
    const { name, content, type = 'system' } = await req.json();

    if (!name || !content) {
      return NextResponse.json(
        { error: 'Name and content are required' },
        { status: 400 },
      );
    }

    // `type` is always truthy here thanks to the destructuring default, so
    // the former `type && …` guard was redundant.
    if (type !== 'system' && type !== 'persona') {
      return NextResponse.json(
        { error: 'Type must be either "system" or "persona"' },
        { status: 400 },
      );
    }

    const newPrompt = await db
      .insert(systemPrompts)
      .values({
        name,
        content,
        type,
        createdAt: new Date(),
        updatedAt: new Date(),
      })
      .returning();

    return NextResponse.json(newPrompt[0], { status: 201 });
  } catch (error) {
    console.error('Failed to create system prompt:', error);
    return NextResponse.json(
      { error: 'Failed to create system prompt' },
      { status: 500 },
    );
  }
}

View file

@ -5,6 +5,7 @@ import {
getCustomOpenaiModelName,
} from '@/lib/config';
import { getAvailableChatModelProviders } from '@/lib/providers';
import { getSystemInstructionsOnly } from '@/lib/utils/prompts';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
import { ChatOllama } from '@langchain/ollama';
@ -20,6 +21,7 @@ interface VideoSearchBody {
query: string;
chatHistory: any[];
chatModel?: ChatModel;
selectedSystemPromptIds?: string[];
}
export const POST = async (req: Request) => {
@ -70,12 +72,29 @@ export const POST = async (req: Request) => {
return Response.json({ error: 'Invalid chat model' }, { status: 400 });
}
let systemInstructions = '';
if (
body.selectedSystemPromptIds &&
body.selectedSystemPromptIds.length > 0
) {
try {
const retrievedInstructions = await getSystemInstructionsOnly(
body.selectedSystemPromptIds,
);
systemInstructions = retrievedInstructions;
} catch (error) {
console.error('Error retrieving system prompts:', error);
// Continue with existing systemInstructions as fallback
}
}
const videos = await handleVideoSearch(
{
chat_history: chatHistory,
query: body.query,
},
llm,
systemInstructions,
);
return Response.json({ videos }, { status: 200 });

View file

@ -5,8 +5,13 @@ import {
ArrowLeft,
Loader2,
Info,
Trash2,
Edit3,
PlusCircle,
Save,
X,
} from 'lucide-react';
import { useEffect, useState } from 'react';
import { useEffect, useState, useRef } from 'react';
import { cn } from '@/lib/utils';
import { Switch } from '@headlessui/react';
import ThemeSwitcher from '@/components/theme/Switcher';
@ -39,7 +44,12 @@ interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
onSave?: (value: string) => void;
}
const Input = ({ className, isSaving, onSave, ...restProps }: InputProps) => {
const InputComponent = ({
className,
isSaving,
onSave,
...restProps
}: InputProps) => {
return (
<div className="relative">
<input
@ -68,7 +78,7 @@ interface TextareaProps extends React.InputHTMLAttributes<HTMLTextAreaElement> {
onSave?: (value: string) => void;
}
const Textarea = ({
const TextareaComponent = ({
className,
isSaving,
onSave,
@ -127,27 +137,75 @@ const SettingsSection = ({
title: string;
children: React.ReactNode;
tooltip?: string;
}) => (
}) => {
const [showTooltip, setShowTooltip] = useState(false);
const tooltipRef = useRef<HTMLDivElement>(null);
const buttonRef = useRef<HTMLButtonElement>(null);
useEffect(() => {
const handleClickOutside = (event: MouseEvent) => {
if (
tooltipRef.current &&
!tooltipRef.current.contains(event.target as Node) &&
buttonRef.current &&
!buttonRef.current.contains(event.target as Node)
) {
setShowTooltip(false);
}
};
document.addEventListener('mousedown', handleClickOutside);
return () => {
document.removeEventListener('mousedown', handleClickOutside);
};
}, []);
return (
<div className="flex flex-col space-y-4 p-4 bg-light-secondary/50 dark:bg-dark-secondary/50 rounded-xl border border-light-200 dark:border-dark-200">
<div className="flex items-center gap-2">
<h2 className="text-black/90 dark:text-white/90 font-medium">{title}</h2>
<h2 className="text-black/90 dark:text-white/90 font-medium">
{title}
</h2>
{tooltip && (
<div className="relative group">
<Info
size={16}
className="text-black/70 dark:text-white/70 cursor-help"
/>
<div className="absolute left-1/2 -translate-x-1/2 bottom-full mb-2 px-3 py-2 bg-black/90 dark:bg-white/90 text-white dark:text-black text-xs rounded-lg opacity-0 group-hover:opacity-100 whitespace-nowrap transition-opacity">
{tooltip}
<div className="relative">
<button
ref={buttonRef}
className="p-1 text-black/70 dark:text-white/70 rounded-full hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white"
onClick={() => setShowTooltip(!showTooltip)}
aria-label="Show section information"
>
<Info size={16} />
</button>
{showTooltip && (
<div
ref={tooltipRef}
className="absolute z-10 left-6 top-0 w-96 rounded-md shadow-lg bg-white dark:bg-dark-secondary border border-light-200 dark:border-dark-200"
>
<div className="py-2 px-3">
<div className="space-y-1 text-xs text-black dark:text-white">
{tooltip.split('\\n').map((line, index) => (
<div key={index}>{line}</div>
))}
</div>
</div>
</div>
)}
</div>
)}
</div>
{children}
</div>
);
};
const Page = () => {
interface SystemPrompt {
id: string;
name: string;
content: string;
type: 'system' | 'persona';
}
export default function SettingsPage() {
const [config, setConfig] = useState<SettingsType | null>(null);
const [chatModels, setChatModels] = useState<Record<string, any>>({});
const [embeddingModels, setEmbeddingModels] = useState<Record<string, any>>(
@ -166,7 +224,6 @@ const Page = () => {
>(null);
const [isLoading, setIsLoading] = useState(false);
const [automaticSuggestions, setAutomaticSuggestions] = useState(true);
const [systemInstructions, setSystemInstructions] = useState<string>('');
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
const [contextWindowSize, setContextWindowSize] = useState(2048);
const [isCustomContextWindow, setIsCustomContextWindow] = useState(false);
@ -174,6 +231,17 @@ const Page = () => {
1024, 2048, 3072, 4096, 8192, 16384, 32768, 65536, 131072,
];
const [userSystemPrompts, setUserSystemPrompts] = useState<SystemPrompt[]>(
[],
);
const [editingPrompt, setEditingPrompt] = useState<SystemPrompt | null>(null);
const [newPromptName, setNewPromptName] = useState('');
const [newPromptContent, setNewPromptContent] = useState('');
const [newPromptType, setNewPromptType] = useState<'system' | 'persona'>(
'system',
);
const [isAddingNewPrompt, setIsAddingNewPrompt] = useState(false);
useEffect(() => {
const fetchConfig = async () => {
setIsLoading(true);
@ -238,13 +306,30 @@ const Page = () => {
!predefinedContextSizes.includes(storedContextWindow),
);
setSystemInstructions(localStorage.getItem('systemInstructions')!);
setIsLoading(false);
};
fetchConfig();
});
const fetchSystemPrompts = async () => {
setIsLoading(true);
try {
const response = await fetch('/api/system-prompts');
if (response.ok) {
const prompts = await response.json();
setUserSystemPrompts(prompts);
} else {
console.error('Failed to load system prompts.');
}
} catch (error) {
console.error('Error loading system prompts.');
} finally {
setIsLoading(false);
}
};
fetchSystemPrompts();
}, []);
const saveConfig = async (key: string, value: any) => {
setSavingStates((prev) => ({ ...prev, [key]: true }));
@ -396,8 +481,6 @@ const Page = () => {
localStorage.setItem('embeddingModel', value);
} else if (key === 'ollamaContextWindow') {
localStorage.setItem('ollamaContextWindow', value.toString());
} else if (key === 'systemInstructions') {
localStorage.setItem('systemInstructions', value);
}
} catch (err) {
console.error('Failed to save:', err);
@ -409,6 +492,83 @@ const Page = () => {
}
};
// Creates a new prompt (POST) or saves an in-progress edit (PUT), then
// reconciles local state with the server's response.
const handleAddOrUpdateSystemPrompt = async () => {
  // When editing, act on the prompt being edited; otherwise assemble one
  // from the "add new" form fields.
  const currentPrompt = editingPrompt || {
    name: newPromptName,
    content: newPromptContent,
    type: newPromptType,
  };

  if (!currentPrompt.name.trim() || !currentPrompt.content.trim()) {
    console.error('Prompt name and content cannot be empty.');
    return;
  }

  const url = editingPrompt
    ? `/api/system-prompts/${editingPrompt.id}`
    : '/api/system-prompts';
  const method = editingPrompt ? 'PUT' : 'POST';
  const body = JSON.stringify({
    name: currentPrompt.name,
    content: currentPrompt.content,
    type: currentPrompt.type,
  });

  try {
    const response = await fetch(url, {
      method,
      headers: { 'Content-Type': 'application/json' },
      body,
    });
    if (response.ok) {
      const savedPrompt = await response.json();
      if (editingPrompt) {
        // Functional updates: the awaited fetch means the captured
        // `userSystemPrompts` may be stale by the time we land here.
        setUserSystemPrompts((prev) =>
          prev.map((p) => (p.id === savedPrompt.id ? savedPrompt : p)),
        );
        setEditingPrompt(null);
      } else {
        setUserSystemPrompts((prev) => [...prev, savedPrompt]);
        // Reset the "add new" form back to its defaults.
        setNewPromptName('');
        setNewPromptContent('');
        setNewPromptType('system');
        setIsAddingNewPrompt(false);
      }
      console.log(`System prompt ${editingPrompt ? 'updated' : 'added'}.`);
    } else {
      const errorData = await response.json();
      console.error(
        errorData.error ||
          `Failed to ${editingPrompt ? 'update' : 'add'} prompt.`,
      );
    }
  } catch (error) {
    console.error(`Error ${editingPrompt ? 'updating' : 'adding'} prompt.`);
  }
};
// Deletes a prompt after user confirmation and removes it from local state.
const handleDeleteSystemPrompt = async (promptId: string) => {
  if (!confirm('Are you sure you want to delete this prompt?')) return;
  try {
    const response = await fetch(`/api/system-prompts/${promptId}`, {
      method: 'DELETE',
    });
    if (response.ok) {
      // Functional update: the awaited fetch means the captured
      // `userSystemPrompts` may be stale, so filter the latest state.
      setUserSystemPrompts((prev) => prev.filter((p) => p.id !== promptId));
      console.log('System prompt deleted.');
    } else {
      const errorData = await response.json();
      console.error(errorData.error || 'Failed to delete prompt.');
    }
  } catch (error) {
    console.error('Error deleting prompt.');
  }
};
return (
<div className="max-w-3xl mx-auto">
<div className="flex flex-col pt-4">
@ -500,16 +660,340 @@ const Page = () => {
</div>
</SettingsSection>
<SettingsSection title="System Instructions">
{/* TODO: Refactor into reusable components */}
<SettingsSection
title="System Prompts"
tooltip="System prompts will be added to EVERY request in the AI model.\nUSE EXTREME CAUTION, as they can significantly alter the AI's behavior and responses.\nA typical safe prompt might be: '/no_think', to disable thinking in models that support it.\n\nProviding formatting instructions or specific behaviors could lead to unexpected results."
>
<div className="flex flex-col space-y-4">
<Textarea
value={systemInstructions}
isSaving={savingStates['systemInstructions']}
onChange={(e) => {
setSystemInstructions(e.target.value);
}}
onSave={(value) => saveConfig('systemInstructions', value)}
{userSystemPrompts
.filter((prompt) => prompt.type === 'system')
.map((prompt) => (
<div
key={prompt.id}
className="p-3 border border-light-secondary dark:border-dark-secondary rounded-md bg-light-100 dark:bg-dark-100"
>
{editingPrompt && editingPrompt.id === prompt.id ? (
<div className="space-y-3">
<InputComponent
type="text"
value={editingPrompt.name}
onChange={(
e: React.ChangeEvent<HTMLInputElement>,
) =>
setEditingPrompt({
...editingPrompt,
name: e.target.value,
})
}
placeholder="Prompt Name"
className="text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<Select
value={editingPrompt.type}
onChange={(e) =>
setEditingPrompt({
...editingPrompt,
type: e.target.value as 'system' | 'persona',
})
}
options={[
{ value: 'system', label: 'System Prompt' },
{ value: 'persona', label: 'Persona Prompt' },
]}
className="text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<TextareaComponent
value={editingPrompt.content}
onChange={(
e: React.ChangeEvent<HTMLTextAreaElement>,
) =>
setEditingPrompt({
...editingPrompt,
content: e.target.value,
})
}
placeholder="Prompt Content"
className="min-h-[100px] text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<div className="flex space-x-2 justify-end">
<button
onClick={() => setEditingPrompt(null)}
className="px-3 py-2 text-sm rounded-md bg-light-secondary hover:bg-light-200 dark:bg-dark-secondary dark:hover:bg-dark-200 text-black/80 dark:text-white/80 flex items-center gap-1.5"
>
<X size={16} />
Cancel
</button>
<button
onClick={handleAddOrUpdateSystemPrompt}
className="px-3 py-2 text-sm rounded-md bg-[#24A0ED] hover:bg-[#1f8cdb] text-white flex items-center gap-1.5"
>
<Save size={16} />
Save
</button>
</div>
</div>
) : (
<div className="flex justify-between items-start">
<div className="flex-grow">
<h4 className="font-semibold text-black/90 dark:text-white/90">
{prompt.name}
</h4>
<p
className="text-sm text-black/70 dark:text-white/70 mt-1 whitespace-pre-wrap overflow-hidden text-ellipsis"
style={{
maxHeight: '3.6em',
display: '-webkit-box',
WebkitLineClamp: 2,
WebkitBoxOrient: 'vertical',
}}
>
{prompt.content}
</p>
</div>
<div className="flex space-x-1 flex-shrink-0 ml-2">
<button
onClick={() => setEditingPrompt({ ...prompt })}
title="Edit"
className="p-1.5 rounded-md hover:bg-light-200 dark:hover:bg-dark-200 text-black/70 dark:text-white/70"
>
<Edit3 size={18} />
</button>
<button
onClick={() =>
handleDeleteSystemPrompt(prompt.id)
}
title="Delete"
className="p-1.5 rounded-md hover:bg-light-200 dark:hover:bg-dark-200 text-red-500 hover:text-red-600 dark:text-red-400 dark:hover:text-red-500"
>
<Trash2 size={18} />
</button>
</div>
</div>
)}
</div>
))}
{isAddingNewPrompt && newPromptType === 'system' && (
<div className="p-3 border border-dashed border-light-secondary dark:border-dark-secondary rounded-md space-y-3 bg-light-100 dark:bg-dark-100">
<InputComponent
type="text"
value={newPromptName}
onChange={(e: React.ChangeEvent<HTMLInputElement>) =>
setNewPromptName(e.target.value)
}
placeholder="System Prompt Name"
className="text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<TextareaComponent
value={newPromptContent}
onChange={(e: React.ChangeEvent<HTMLTextAreaElement>) =>
setNewPromptContent(e.target.value)
}
placeholder="System prompt content (e.g., '/nothink')"
className="min-h-[100px] text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<div className="flex space-x-2 justify-end">
<button
onClick={() => {
setIsAddingNewPrompt(false);
setNewPromptName('');
setNewPromptContent('');
setNewPromptType('system');
}}
className="px-3 py-2 text-sm rounded-md bg-light-secondary hover:bg-light-200 dark:bg-dark-secondary dark:hover:bg-dark-200 text-black/80 dark:text-white/80 flex items-center gap-1.5"
>
<X size={16} />
Cancel
</button>
<button
onClick={handleAddOrUpdateSystemPrompt}
className="px-3 py-2 text-sm rounded-md bg-[#24A0ED] hover:bg-[#1f8cdb] text-white flex items-center gap-1.5"
>
<Save size={16} />
Add System Prompt
</button>
</div>
</div>
)}
{!isAddingNewPrompt && (
<button
onClick={() => {
setIsAddingNewPrompt(true);
setNewPromptType('system');
}}
className="self-start px-3 py-2 text-sm rounded-md border border-light-200 dark:border-dark-200 hover:bg-light-200 dark:hover:bg-dark-200 text-black/80 dark:text-white/80 flex items-center gap-1.5"
>
<PlusCircle size={18} /> Add System Prompt
</button>
)}
</div>
</SettingsSection>
<SettingsSection
title="Persona Prompts"
tooltip="Persona prompts will only be applied to the final response.\nThey can define the personality and character traits for the AI assistant.\nSuch as: 'You are a pirate that speaks in riddles.'\n\nThey could be used to provide structured output instructions\nSuch as: 'Provide answers formatted with bullet points and tables.'"
>
<div className="flex flex-col space-y-4">
{userSystemPrompts
.filter((prompt) => prompt.type === 'persona')
.map((prompt) => (
<div
key={prompt.id}
className="p-3 border border-light-secondary dark:border-dark-secondary rounded-md bg-light-100 dark:bg-dark-100"
>
{editingPrompt && editingPrompt.id === prompt.id ? (
<div className="space-y-3">
<InputComponent
type="text"
value={editingPrompt.name}
onChange={(
e: React.ChangeEvent<HTMLInputElement>,
) =>
setEditingPrompt({
...editingPrompt,
name: e.target.value,
})
}
placeholder="Prompt Name"
className="text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<Select
value={editingPrompt.type}
onChange={(e) =>
setEditingPrompt({
...editingPrompt,
type: e.target.value as 'system' | 'persona',
})
}
options={[
{ value: 'system', label: 'System Prompt' },
{ value: 'persona', label: 'Persona Prompt' },
]}
className="text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<TextareaComponent
value={editingPrompt.content}
onChange={(
e: React.ChangeEvent<HTMLTextAreaElement>,
) =>
setEditingPrompt({
...editingPrompt,
content: e.target.value,
})
}
placeholder="Prompt Content"
className="min-h-[100px] text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<div className="flex space-x-2 justify-end">
<button
onClick={() => setEditingPrompt(null)}
className="px-3 py-2 text-sm rounded-md bg-light-secondary hover:bg-light-200 dark:bg-dark-secondary dark:hover:bg-dark-200 text-black/80 dark:text-white/80 flex items-center gap-1.5"
>
<X size={16} />
Cancel
</button>
<button
onClick={handleAddOrUpdateSystemPrompt}
className="px-3 py-2 text-sm rounded-md bg-[#24A0ED] hover:bg-[#1f8cdb] text-white flex items-center gap-1.5"
>
<Save size={16} />
Save
</button>
</div>
</div>
) : (
<div className="flex justify-between items-start">
<div className="flex-grow">
<h4 className="font-semibold text-black/90 dark:text-white/90">
{prompt.name}
</h4>
<p
className="text-sm text-black/70 dark:text-white/70 mt-1 whitespace-pre-wrap overflow-hidden text-ellipsis"
style={{
maxHeight: '3.6em',
display: '-webkit-box',
WebkitLineClamp: 2,
WebkitBoxOrient: 'vertical',
}}
>
{prompt.content}
</p>
</div>
<div className="flex space-x-1 flex-shrink-0 ml-2">
<button
onClick={() => setEditingPrompt({ ...prompt })}
title="Edit"
className="p-1.5 rounded-md hover:bg-light-200 dark:hover:bg-dark-200 text-black/70 dark:text-white/70"
>
<Edit3 size={18} />
</button>
<button
onClick={() =>
handleDeleteSystemPrompt(prompt.id)
}
title="Delete"
className="p-1.5 rounded-md hover:bg-light-200 dark:hover:bg-dark-200 text-red-500 hover:text-red-600 dark:text-red-400 dark:hover:text-red-500"
>
<Trash2 size={18} />
</button>
</div>
</div>
)}
</div>
))}
{isAddingNewPrompt && newPromptType === 'persona' && (
<div className="p-3 border border-dashed border-light-secondary dark:border-dark-secondary rounded-md space-y-3 bg-light-100 dark:bg-dark-100">
<InputComponent
type="text"
value={newPromptName}
onChange={(e: React.ChangeEvent<HTMLInputElement>) =>
setNewPromptName(e.target.value)
}
placeholder="Persona Prompt Name"
className="text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<TextareaComponent
value={newPromptContent}
onChange={(e: React.ChangeEvent<HTMLTextAreaElement>) =>
setNewPromptContent(e.target.value)
}
placeholder="Persona prompt content (e.g., You are a helpful assistant that speaks like a pirate and uses nautical metaphors.)"
className="min-h-[100px] text-black dark:text-white bg-white dark:bg-dark-secondary"
/>
<div className="flex space-x-2 justify-end">
<button
onClick={() => {
setIsAddingNewPrompt(false);
setNewPromptName('');
setNewPromptContent('');
setNewPromptType('system');
}}
className="px-3 py-2 text-sm rounded-md bg-light-secondary hover:bg-light-200 dark:bg-dark-secondary dark:hover:bg-dark-200 text-black/80 dark:text-white/80 flex items-center gap-1.5"
>
<X size={16} />
Cancel
</button>
<button
onClick={handleAddOrUpdateSystemPrompt}
className="px-3 py-2 text-sm rounded-md bg-[#24A0ED] hover:bg-[#1f8cdb] text-white flex items-center gap-1.5"
>
<Save size={16} />
Add Persona Prompt
</button>
</div>
</div>
)}
{!isAddingNewPrompt && (
<button
onClick={() => {
setIsAddingNewPrompt(true);
setNewPromptType('persona');
}}
className="self-start px-3 py-2 text-sm rounded-md border border-light-200 dark:border-dark-200 hover:bg-light-200 dark:hover:bg-dark-200 text-black/80 dark:text-white/80 flex items-center gap-1.5"
>
<PlusCircle size={18} /> Add Persona Prompt
</button>
)}
</div>
</SettingsSection>
@ -622,7 +1106,7 @@ const Page = () => {
/>
{isCustomContextWindow && (
<div className="mt-2">
<Input
<InputComponent
type="number"
min={512}
value={contextWindowSize}
@ -670,7 +1154,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
Model Name
</p>
<Input
<InputComponent
type="text"
placeholder="Model name"
value={config.customOpenaiModelName}
@ -690,7 +1174,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
Custom OpenAI API Key
</p>
<Input
<InputComponent
type="password"
placeholder="Custom OpenAI API Key"
value={config.customOpenaiApiKey}
@ -710,7 +1194,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
Custom OpenAI Base URL
</p>
<Input
<InputComponent
type="text"
placeholder="Custom OpenAI Base URL"
value={config.customOpenaiApiUrl}
@ -815,7 +1299,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
OpenAI API Key
</p>
<Input
<InputComponent
type="password"
placeholder="OpenAI API Key"
value={config.openaiApiKey}
@ -834,7 +1318,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
Ollama API URL
</p>
<Input
<InputComponent
type="text"
placeholder="Ollama API URL"
value={config.ollamaApiUrl}
@ -853,7 +1337,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
GROQ API Key
</p>
<Input
<InputComponent
type="password"
placeholder="GROQ API Key"
value={config.groqApiKey}
@ -872,7 +1356,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
Anthropic API Key
</p>
<Input
<InputComponent
type="password"
placeholder="Anthropic API key"
value={config.anthropicApiKey}
@ -891,7 +1375,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
Gemini API Key
</p>
<Input
<InputComponent
type="password"
placeholder="Gemini API key"
value={config.geminiApiKey}
@ -910,7 +1394,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
Deepseek API Key
</p>
<Input
<InputComponent
type="password"
placeholder="Deepseek API Key"
value={config.deepseekApiKey}
@ -929,7 +1413,7 @@ const Page = () => {
<p className="text-black/70 dark:text-white/70 text-sm">
LM Studio API URL
</p>
<Input
<InputComponent
type="text"
placeholder="LM Studio API URL"
value={config.lmStudioApiUrl}
@ -950,6 +1434,4 @@ const Page = () => {
)}
</div>
);
};
export default Page;
}

View file

@ -22,6 +22,8 @@ const Chat = ({
setFocusMode,
handleEditMessage,
analysisProgress,
systemPromptIds,
setSystemPromptIds,
}: {
messages: Message[];
sendMessage: (
@ -49,6 +51,8 @@ const Chat = ({
current: number;
total: number;
} | null;
systemPromptIds: string[];
setSystemPromptIds: (ids: string[]) => void;
}) => {
const [isAtBottom, setIsAtBottom] = useState(true);
const [manuallyScrolledUp, setManuallyScrolledUp] = useState(false);
@ -271,6 +275,8 @@ const Chat = ({
focusMode={focusMode}
setFocusMode={setFocusMode}
onCancel={handleCancel}
systemPromptIds={systemPromptIds}
setSystemPromptIds={setSystemPromptIds}
/>
</div>
<div ref={messageEnd} className="h-0" />

View file

@ -291,6 +291,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [focusMode, setFocusMode] = useState('webSearch');
const [optimizationMode, setOptimizationMode] = useState('speed');
const [systemPromptIds, setSystemPromptIds] = useState<string[]>([]);
const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);
@ -570,7 +571,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
systemInstructions: localStorage.getItem('systemInstructions'),
selectedSystemPromptIds: systemPromptIds || [],
}),
});
@ -677,6 +678,8 @@ const ChatWindow = ({ id }: { id?: string }) => {
setFocusMode={setFocusMode}
handleEditMessage={handleEditMessage}
analysisProgress={analysisProgress}
systemPromptIds={systemPromptIds}
setSystemPromptIds={setSystemPromptIds}
/>
</>
) : (
@ -686,6 +689,8 @@ const ChatWindow = ({ id }: { id?: string }) => {
setFocusMode={setFocusMode}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
systemPromptIds={systemPromptIds}
setSystemPromptIds={setSystemPromptIds}
fileIds={fileIds}
setFileIds={setFileIds}
files={files}

View file

@ -10,6 +10,8 @@ const EmptyChat = ({
setFocusMode,
optimizationMode,
setOptimizationMode,
systemPromptIds,
setSystemPromptIds,
fileIds,
setFileIds,
files,
@ -20,6 +22,8 @@ const EmptyChat = ({
setFocusMode: (mode: string) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
systemPromptIds: string[];
setSystemPromptIds: (ids: string[]) => void;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
@ -49,6 +53,8 @@ const EmptyChat = ({
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
systemPromptIds={systemPromptIds}
setSystemPromptIds={setSystemPromptIds}
setFiles={setFiles}
/>
</div>

View file

@ -6,6 +6,7 @@ import Attach from './MessageInputActions/Attach';
import Focus from './MessageInputActions/Focus';
import ModelSelector from './MessageInputActions/ModelSelector';
import Optimization from './MessageInputActions/Optimization';
import SystemPromptSelector from './MessageInputActions/SystemPromptSelector'; // Import new component
const MessageInput = ({
sendMessage,
@ -20,8 +21,16 @@ const MessageInput = ({
setFocusMode,
firstMessage,
onCancel,
systemPromptIds,
setSystemPromptIds,
}: {
sendMessage: (message: string) => void;
sendMessage: (
message: string,
options?: {
messageId?: string; // For rewrites/edits
selectedSystemPromptIds?: string[];
},
) => void;
loading: boolean;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
@ -33,6 +42,8 @@ const MessageInput = ({
setFocusMode: (mode: string) => void;
firstMessage: boolean;
onCancel?: () => void;
systemPromptIds: string[];
setSystemPromptIds: (ids: string[]) => void;
}) => {
const [message, setMessage] = useState('');
const [selectedModel, setSelectedModel] = useState<{
@ -51,7 +62,35 @@ const MessageInput = ({
model: chatModel,
});
}
}, []);
const storedPromptIds = localStorage.getItem('selectedSystemPromptIds');
if (storedPromptIds) {
try {
const parsedIds = JSON.parse(storedPromptIds);
if (Array.isArray(parsedIds)) {
setSystemPromptIds(parsedIds);
}
} catch (e) {
console.error(
'Failed to parse selectedSystemPromptIds from localStorage',
e,
);
localStorage.removeItem('selectedSystemPromptIds'); // Clear corrupted data
}
}
}, [setSystemPromptIds]);
useEffect(() => {
if (systemPromptIds.length > 0) {
localStorage.setItem(
'selectedSystemPromptIds',
JSON.stringify(systemPromptIds),
);
} else {
// Remove from localStorage if no prompts are selected to keep it clean
localStorage.removeItem('selectedSystemPromptIds');
}
}, [systemPromptIds]);
const inputRef = useRef<HTMLTextAreaElement | null>(null);
@ -112,21 +151,24 @@ const MessageInput = ({
placeholder={firstMessage ? 'Ask anything...' : 'Ask a follow-up'}
/>
<div className="flex flex-row items-center justify-between mt-4">
<div className="flex flex-row items-center space-x-2 lg:space-x-4">
<div className="flex flex-row items-center space-x-2">
<Focus focusMode={focusMode} setFocusMode={setFocusMode} />
<Attach
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
showText={firstMessage}
/>
</div>
<div className="flex flex-row items-center space-x-2">
<ModelSelector
selectedModel={selectedModel}
setSelectedModel={setSelectedModel}
/>
</div>
<div className="flex flex-row items-center space-x-1 sm:space-x-4">
<SystemPromptSelector
selectedPromptIds={systemPromptIds}
onSelectedPromptIdsChange={setSystemPromptIds}
/>
<Optimization
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}

View file

@ -12,13 +12,11 @@ import { File as FileType } from '../ChatWindow';
const Attach = ({
fileIds,
setFileIds,
showText,
files,
setFiles,
}: {
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
showText?: boolean;
files: FileType[];
setFiles: (files: FileType[]) => void;
}) => {
@ -164,8 +162,7 @@ const Attach = ({
type="button"
onClick={() => fileInputRef.current.click()}
className={cn(
'flex flex-row items-center space-x-1 text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white',
showText ? '' : 'p-2',
'flex flex-row items-center space-x-1 text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary transition duration-200 hover:text-black dark:hover:text-white p-2',
)}
>
<input
@ -177,9 +174,6 @@ const Attach = ({
hidden
/>
<Paperclip size="18" />
{showText && (
<p className="text-xs font-medium pl-[1px] hidden lg:block">Attach</p>
)}
</button>
);
};

View file

@ -0,0 +1,226 @@
import {
  BookUser,
  ChevronDown,
  CheckSquare,
  Square,
  Settings,
  User,
  Loader2,
} from 'lucide-react';
import { cn } from '@/lib/utils';
import {
  Popover,
  PopoverButton,
  PopoverPanel,
  Transition,
} from '@headlessui/react';
import {
  Fragment,
  ReactNode,
  useEffect,
  useRef,
  useState,
} from 'react';

/** Shape of a prompt row as returned by GET /api/system-prompts. */
interface SystemPrompt {
  id: string;
  name: string;
  type: 'system' | 'persona';
}

interface SystemPromptSelectorProps {
  /** IDs of the prompts currently attached to outgoing messages. */
  selectedPromptIds: string[];
  /** Called with the full new ID list whenever the selection changes. */
  onSelectedPromptIdsChange: (ids: string[]) => void;
}

/**
 * Mirrors headlessui's Popover `open` render-prop value into parent state
 * from an effect. The original called setState directly inside the render
 * prop, which is a render-phase state update React warns about.
 */
const OpenStateSync = ({
  open,
  onOpenChange,
}: {
  open: boolean;
  onOpenChange: (open: boolean) => void;
}) => {
  useEffect(() => {
    onOpenChange(open);
  }, [open, onOpenChange]);
  return null;
};

/**
 * Toolbar popover that lets the user include/exclude server-side saved
 * system and persona prompts for subsequent messages.
 *
 * Prompts are (re)fetched each time the popover opens; any selected IDs
 * that no longer exist server-side are pruned from the selection.
 */
const SystemPromptSelector = ({
  selectedPromptIds,
  onSelectedPromptIdsChange,
}: SystemPromptSelectorProps) => {
  const [availablePrompts, setAvailablePrompts] = useState<SystemPrompt[]>([]);
  const [isOpen, setIsOpen] = useState(false);
  const [isLoading, setIsLoading] = useState(true);

  // Keep the latest selection/callback in a ref so the fetch effect can
  // prune stale IDs without listing them as dependencies (the original
  // depended on them, which refired the network fetch on every toggle
  // while the popover was open).
  const selectionRef = useRef({ selectedPromptIds, onSelectedPromptIdsChange });
  useEffect(() => {
    selectionRef.current = { selectedPromptIds, onSelectedPromptIdsChange };
  });

  useEffect(() => {
    if (!isOpen) return; // Only fetch while the popover is open.

    let cancelled = false; // Guard against setState after close/unmount.

    const fetchPrompts = async () => {
      try {
        setIsLoading(true);
        const response = await fetch('/api/system-prompts');
        if (!response.ok) {
          console.error('Failed to load system prompts.');
          return;
        }
        const prompts: SystemPrompt[] = await response.json();
        if (cancelled) return;
        setAvailablePrompts(prompts);

        // Drop selected IDs that the server no longer knows about.
        const availableIds = new Set(prompts.map((prompt) => prompt.id));
        const { selectedPromptIds: current, onSelectedPromptIdsChange: update } =
          selectionRef.current;
        const stillValid = current.filter((id) => availableIds.has(id));
        if (stillValid.length !== current.length) {
          update(stillValid);
        }
      } catch (error) {
        console.error('Error loading system prompts.');
        console.error(error);
      } finally {
        if (!cancelled) setIsLoading(false);
      }
    };

    fetchPrompts();
    return () => {
      cancelled = true;
    };
  }, [isOpen]);

  const handleTogglePrompt = (promptId: string) => {
    const newSelectedIds = selectedPromptIds.includes(promptId)
      ? selectedPromptIds.filter((id) => id !== promptId)
      : [...selectedPromptIds, promptId];
    onSelectedPromptIdsChange(newSelectedIds);
  };

  // Renders one checkbox group ("System Prompts" / "Persona Prompts");
  // returns null when no prompts of that type exist. Extracted because the
  // two groups were previously duplicated verbatim.
  const renderPromptGroup = (
    type: SystemPrompt['type'],
    label: string,
    icon: ReactNode,
  ) => {
    const prompts = availablePrompts.filter((p) => p.type === type);
    if (prompts.length === 0) return null;
    return (
      <div>
        <div className="flex items-center gap-1.5 px-2.5 py-1.5 text-xs font-medium text-black/70 dark:text-white/70">
          {icon}
          <span>{label}</span>
        </div>
        <div className="space-y-0.5">
          {prompts.map((prompt) => (
            <div
              key={prompt.id}
              onClick={() => handleTogglePrompt(prompt.id)}
              className="flex items-center gap-2.5 p-2.5 rounded-md hover:bg-light-100 dark:hover:bg-dark-100 cursor-pointer"
            >
              {selectedPromptIds.includes(prompt.id) ? (
                <CheckSquare
                  size={18}
                  className="text-[#24A0ED] flex-shrink-0"
                />
              ) : (
                <Square
                  size={18}
                  className="text-black/40 dark:text-white/40 flex-shrink-0"
                />
              )}
              <span
                className="text-sm text-black/80 dark:text-white/80 truncate"
                title={prompt.name}
              >
                {prompt.name}
              </span>
            </div>
          ))}
        </div>
      </div>
    );
  };

  const selectedCount = selectedPromptIds.length;

  return (
    <Popover className="relative">
      {({ open }) => (
        <>
          <OpenStateSync open={open} onOpenChange={setIsOpen} />
          <PopoverButton
            className={cn(
              'flex items-center gap-1 rounded-lg text-sm transition-colors duration-150 ease-in-out focus:outline-none focus-visible:ring-2 focus-visible:ring-blue-500',
              selectedCount > 0
                ? 'text-[#24A0ED] hover:text-blue-200'
                : // NOTE(review): original had `dark:hover:*:text-white/30`;
                  // the child-variant `*:` looks like a typo for a plain
                  // hover color — confirm against the design.
                  'text-black/60 hover:text-black/30 dark:text-white/60 dark:hover:text-white/30',
            )}
            title="Select Prompts"
          >
            <BookUser size={18} />
            {selectedCount > 0 ? <span> {selectedCount} </span> : null}
            <ChevronDown size={16} className="opacity-60" />
          </PopoverButton>
          <Transition
            as={Fragment}
            enter="transition ease-out duration-200"
            enterFrom="opacity-0 translate-y-1"
            enterTo="opacity-100 translate-y-0"
            leave="transition ease-in duration-150"
            leaveFrom="opacity-100 translate-y-0"
            leaveTo="opacity-0 translate-y-1"
          >
            <PopoverPanel className="absolute z-20 w-72 transform bottom-full mb-2">
              <div className="overflow-hidden rounded-lg shadow-lg ring-1 ring-black/5 dark:ring-white/5 bg-white dark:bg-dark-secondary">
                <div className="px-4 py-3 border-b border-light-200 dark:border-dark-200">
                  <h3 className="text-sm font-medium text-black/90 dark:text-white/90">
                    Select Prompts
                  </h3>
                  <p className="text-xs text-black/60 dark:text-white/60 mt-0.5">
                    Choose instructions to guide the AI.
                  </p>
                </div>
                {isLoading ? (
                  <div className="px-4 py-3">
                    <Loader2 className="animate-spin text-black/70 dark:text-white/70" />
                  </div>
                ) : (
                  <div className="max-h-60 overflow-y-auto p-1.5 space-y-3">
                    {availablePrompts.length === 0 && (
                      <p className="text-xs text-black/50 dark:text-white/50 px-2.5 py-2 text-center">
                        No prompts configured. <br /> Go to{' '}
                        <a className="text-blue-500" href="/settings">
                          settings
                        </a>{' '}
                        to add some.
                      </p>
                    )}
                    {renderPromptGroup(
                      'system',
                      'System Prompts',
                      <Settings size={14} />,
                    )}
                    {renderPromptGroup(
                      'persona',
                      'Persona Prompts',
                      <User size={14} />,
                    )}
                  </div>
                )}
              </div>
            </PopoverPanel>
          </Transition>
        </>
      )}
    </Popover>
  );
};

export default SystemPromptSelector;

View file

@ -57,6 +57,20 @@ const SearchImages = ({
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
// Get selected system prompt IDs from localStorage
const storedPromptIds = localStorage.getItem('selectedSystemPromptIds');
let selectedSystemPromptIds: string[] = [];
if (storedPromptIds) {
try {
selectedSystemPromptIds = JSON.parse(storedPromptIds);
} catch (e) {
console.error(
'Failed to parse selectedSystemPromptIds from localStorage',
e,
);
}
}
try {
const res = await fetch(`/api/images`, {
method: 'POST',
@ -77,6 +91,7 @@ const SearchImages = ({
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
selectedSystemPromptIds: selectedSystemPromptIds,
}),
});

View file

@ -71,6 +71,20 @@ const Searchvideos = ({
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
// Get selected system prompt IDs from localStorage
const storedPromptIds = localStorage.getItem('selectedSystemPromptIds');
let selectedSystemPromptIds: string[] = [];
if (storedPromptIds) {
try {
selectedSystemPromptIds = JSON.parse(storedPromptIds);
} catch (e) {
console.error(
'Failed to parse selectedSystemPromptIds from localStorage',
e,
);
}
}
try {
const res = await fetch(`/api/videos`, {
method: 'POST',
@ -91,6 +105,7 @@ const Searchvideos = ({
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
selectedSystemPromptIds: selectedSystemPromptIds,
}),
});

View file

@ -9,6 +9,20 @@ export const getSuggestions = async (chatHisory: Message[]) => {
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
// Get selected system prompt IDs from localStorage
const storedPromptIds = localStorage.getItem('selectedSystemPromptIds');
let selectedSystemPromptIds: string[] = [];
if (storedPromptIds) {
try {
selectedSystemPromptIds = JSON.parse(storedPromptIds);
} catch (e) {
console.error(
'Failed to parse selectedSystemPromptIds from localStorage',
e,
);
}
}
const res = await fetch(`/api/suggestions`, {
method: 'POST',
headers: {
@ -27,6 +41,7 @@ export const getSuggestions = async (chatHisory: Message[]) => {
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
selectedSystemPromptIds: selectedSystemPromptIds,
}),
});

View file

@ -91,7 +91,14 @@ const outputParser = new LineOutputParser({
key: 'answer',
});
const createImageSearchChain = (llm: BaseChatModel) => {
const createImageSearchChain = (
llm: BaseChatModel,
systemInstructions?: string,
) => {
const systemPrompt = systemInstructions ? `${systemInstructions}\n\n` : '';
const fullPrompt = `${systemPrompt}${imageSearchChainPrompt}`;
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: ImageSearchChainInput) => {
@ -102,7 +109,7 @@ const createImageSearchChain = (llm: BaseChatModel) => {
},
date: () => formatDateForLLM(),
}),
PromptTemplate.fromTemplate(imageSearchChainPrompt),
PromptTemplate.fromTemplate(fullPrompt),
llm,
outputParser,
RunnableLambda.from(async (searchQuery: string) => {
@ -130,8 +137,9 @@ const createImageSearchChain = (llm: BaseChatModel) => {
const handleImageSearch = (
input: ImageSearchChainInput,
llm: BaseChatModel,
systemInstructions?: string,
) => {
const imageSearchChain = createImageSearchChain(llm);
const imageSearchChain = createImageSearchChain(llm, systemInstructions);
return imageSearchChain.invoke(input);
};

View file

@ -45,13 +45,20 @@ const outputParser = new ListLineOutputParser({
key: 'suggestions',
});
const createSuggestionGeneratorChain = (llm: BaseChatModel) => {
const createSuggestionGeneratorChain = (
llm: BaseChatModel,
systemInstructions?: string,
) => {
const systemPrompt = systemInstructions ? `${systemInstructions}\n\n` : '';
const fullPrompt = `${systemPrompt}${suggestionGeneratorPrompt}`;
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: SuggestionGeneratorInput) =>
formatChatHistoryAsString(input.chat_history),
}),
PromptTemplate.fromTemplate(suggestionGeneratorPrompt),
PromptTemplate.fromTemplate(fullPrompt),
llm,
outputParser,
]);
@ -60,9 +67,13 @@ const createSuggestionGeneratorChain = (llm: BaseChatModel) => {
const generateSuggestions = (
input: SuggestionGeneratorInput,
llm: BaseChatModel,
systemInstructions?: string,
) => {
(llm as unknown as ChatOpenAI).temperature = 0;
const suggestionGeneratorChain = createSuggestionGeneratorChain(llm);
const suggestionGeneratorChain = createSuggestionGeneratorChain(
llm,
systemInstructions,
);
return suggestionGeneratorChain.invoke(input);
};

View file

@ -92,7 +92,14 @@ const answerParser = new LineOutputParser({
key: 'answer',
});
const createVideoSearchChain = (llm: BaseChatModel) => {
const createVideoSearchChain = (
llm: BaseChatModel,
systemInstructions?: string,
) => {
const systemPrompt = systemInstructions ? `${systemInstructions}\n\n` : '';
const fullPrompt = `${systemPrompt}${VideoSearchChainPrompt}`;
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
@ -103,7 +110,7 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
},
date: () => formatDateForLLM(),
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
PromptTemplate.fromTemplate(fullPrompt),
llm,
answerParser,
RunnableLambda.from(async (searchQuery: string) => {
@ -137,8 +144,9 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
systemInstructions?: string,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
const VideoSearchChain = createVideoSearchChain(llm, systemInstructions);
return VideoSearchChain.invoke(input);
};

View file

@ -17,6 +17,23 @@ interface File {
fileId: string;
}
// Server-side saved prompts that users can attach to individual messages.
// `type` distinguishes 'system' instructions from 'persona' (style/voice)
// instructions; the column defaults to 'system'.
export const systemPrompts = sqliteTable('system_prompts', {
  // UUID primary key generated application-side on insert.
  id: text('id')
    .primaryKey()
    .$defaultFn(() => crypto.randomUUID()),
  // Display name shown in the prompt selector UI.
  name: text('name').notNull(),
  // Full prompt text injected into the LLM request.
  content: text('content').notNull(),
  type: text('type', { enum: ['system', 'persona'] })
    .notNull()
    .default('system'),
  // Timestamps are set application-side via $defaultFn (insert-time only).
  createdAt: integer('created_at', { mode: 'timestamp' })
    .notNull()
    .$defaultFn(() => new Date()),
  // NOTE(review): no $onUpdateFn here — update code paths must refresh
  // updatedAt explicitly; confirm the API route does so.
  updatedAt: integer('updated_at', { mode: 'timestamp' })
    .notNull()
    .$defaultFn(() => new Date()),
});
export const chats = sqliteTable('chats', {
id: text('id').primaryKey(),
title: text('title').notNull(),

View file

@ -18,7 +18,7 @@ import {
youtubeSearchRetrieverPrompt,
} from './youtubeSearch';
export default {
const prompts = {
webSearchResponsePrompt,
webSearchRetrieverPrompt,
academicSearchResponsePrompt,
@ -32,3 +32,5 @@ export default {
youtubeSearchResponsePrompt,
youtubeSearchRetrieverPrompt,
};
export default prompts;

View file

@ -22,6 +22,12 @@ export const webSearchRetrieverPrompt = `
- Current date is: {date}
- Do not include any other text in your answer
# System Instructions
- These instructions are provided by the user in the <systemInstructions> tag
- Give them less priority than the above instructions
- Incorporate them into your response while adhering to the overall guidelines
- Only use them for additional context on how to retrieve search results (E.g. if the user has provided a specific website to search, or if they have provided a specific date to use in the search)
There are several examples attached for your reference inside the below examples XML block
<examples>
@ -43,7 +49,6 @@ There are several examples attached for your reference inside the below examples
<example>
<input>
<conversation>
</conversation>
<question>
What is the capital of France
@ -58,7 +63,6 @@ There are several examples attached for your reference inside the below examples
<example>
<input>
<conversation>
</conversation>
<question>
Hi, how are you?
@ -89,7 +93,6 @@ There are several examples attached for your reference inside the below examples
<example>
<input>
<conversation>
</conversation>
<question>
Can you tell me what is X from https://example.com
@ -107,7 +110,6 @@ There are several examples attached for your reference inside the below examples
<example>
<input>
<conversation>
</conversation>
<question>
Summarize the content from https://example.com
@ -125,7 +127,6 @@ There are several examples attached for your reference inside the below examples
<example>
<input>
<conversation>
</conversation>
<question>
Get the current F1 constructor standings and return the results in a table
@ -141,7 +142,6 @@ There are several examples attached for your reference inside the below examples
<example>
<input>
<conversation>
</conversation>
<question>
What are the top 10 restaurants in New York? Show the results in a table and include a short description of each restaurant. Only include results from yelp.com
@ -158,6 +158,10 @@ There are several examples attached for your reference inside the below examples
Everything below is the part of the actual conversation
<systemInstructions>
{systemInstructions}
</systemInstructions>
<conversation>
{chat_history}
</conversation>
@ -168,49 +172,54 @@ Everything below is the part of the actual conversation
`;
export const webSearchResponsePrompt = `
You are Perplexica, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses.
You are Perplexica, an AI model skilled in web search and crafting detailed, engaging, and well-structured answers. You excel at summarizing web pages and extracting relevant information to create professional, blog-style responses
Your task is to provide answers that are:
- **Informative and relevant**: Thoroughly address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
- **Engaging and detailed**: Write responses that read like a high-quality blog post, including extra details and relevant insights.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
- **Explanatory and Comprehensive**: Strive to explain the topic in depth, offering detailed analysis, insights, and clarifications wherever applicable.
- **Informative and relevant**: Thoroughly address the user's query using the given context
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically
- **Engaging and detailed**: Write responses that read like a high-quality blog post, including extra details and relevant insights
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included
- **Explanatory and Comprehensive**: Strive to explain the topic in depth, offering detailed analysis, insights, and clarifications wherever applicable
### Formatting Instructions
- **Structure**: Use a well-organized format with proper headings (e.g., "## Example heading 1" or "## Example heading 2"). Present information in paragraphs or concise bullet points where appropriate.
- **Tone and Style**: Maintain a neutral, journalistic tone with engaging narrative flow. Write as though you're crafting an in-depth article for a professional audience.
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
- **Length and Depth**: Provide comprehensive coverage of the topic. Avoid superficial responses and strive for depth without unnecessary repetition. Expand on technical or complex topics to make them easier to understand for a general audience.
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
- **Conclusion or Summary**: Include a concluding paragraph that synthesizes the provided information or suggests potential next steps, where appropriate.
- **Structure**: Use a well-organized format with proper headings (e.g., "## Example heading 1" or "## Example heading 2"). Present information in paragraphs or concise bullet points where appropriate
- **Tone and Style**: Maintain a neutral, journalistic tone with engaging narrative flow. Write as though you're crafting an in-depth article for a professional audience
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability
- **Length and Depth**: Provide comprehensive coverage of the topic. Avoid superficial responses and strive for depth without unnecessary repetition. Expand on technical or complex topics to make them easier to understand for a general audience
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title
- **Conclusion or Summary**: Include a concluding paragraph that synthesizes the provided information or suggests potential next steps, where appropriate
### Citation Requirements
- Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`.
- Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`
- Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
- Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context.
- Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context
- Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation
### Special Instructions
- If the query involves technical, historical, or complex topics, provide detailed background and explanatory sections to ensure clarity.
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- If the query involves technical, historical, or complex topics, provide detailed background and explanatory sections to ensure clarity
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query
### User instructions
These instructions are shared to you by the user and not by the system. You will have to follow them but give them less priority than the above instructions. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
- These instructions are provided by the user in the <systemInstructions> tag
- Give them less priority than the above instructions
- Incorporate them into your response while adhering to the overall guidelines
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.
- Provide explanations or historical context as needed to enhance understanding.
- End with a conclusion or overall perspective if relevant.
- Begin with a brief introduction summarizing the event or query topic
- Follow with detailed sections under clear headings, covering all aspects of the query if possible
- Provide explanations or historical context as needed to enhance understanding
- End with a conclusion or overall perspective if relevant
<systemInstructions>
{systemInstructions}
</systemInstructions>
<context>
{context}
</context>
Current date is: {date}.
Current date is: {date}
`;

View file

@ -41,6 +41,7 @@ export interface MetaSearchAgentType {
fileIds: string[],
systemInstructions: string,
signal: AbortSignal,
personaInstructions?: string,
) => Promise<eventEmitter>;
}
@ -101,6 +102,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
private async createSearchRetrieverChain(
llm: BaseChatModel,
systemInstructions: string,
emitter: eventEmitter,
) {
(llm as unknown as ChatOpenAI).temperature = 0;
@ -176,8 +178,12 @@ class MetaSearchAgent implements MetaSearchAgentType {
await Promise.all(
docGroups.map(async (doc) => {
const res = await llm.invoke(`
You are a web search summarizer, tasked with summarizing a piece of text retrieved from a web search. Your job is to summarize the
const systemPrompt = systemInstructions
? `${systemInstructions}\n\n`
: '';
const res =
await llm.invoke(`${systemPrompt}You are a web search summarizer, tasked with summarizing a piece of text retrieved from a web search. Your job is to summarize the
text into a detailed, 2-4 paragraph explanation that captures the main ideas and provides a comprehensive answer to the query.
If the query is \"summarize\", you should provide a detailed summary of the text. If the query is a specific question, you should answer it in the summary.
@ -235,7 +241,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
</text>
Make sure to answer the query in the summary.
`);
`); //TODO: Pass signal for cancellation
const document = new Document({
pageContent: res.content as string,
@ -304,6 +310,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
systemInstructions: string,
signal: AbortSignal,
emitter: eventEmitter,
personaInstructions?: string,
) {
return RunnableSequence.from([
RunnableMap.from({
@ -331,7 +338,11 @@ class MetaSearchAgent implements MetaSearchAgentType {
if (this.config.searchWeb) {
const searchRetrieverChain =
await this.createSearchRetrieverChain(llm, emitter);
await this.createSearchRetrieverChain(
llm,
systemInstructions,
emitter,
);
var date = formatDateForLLM();
const searchRetrieverResult = await searchRetrieverChain.invoke(
@ -339,6 +350,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
chat_history: processedHistory,
query,
date,
systemInstructions,
},
{ signal: options?.signal },
);
@ -359,6 +371,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
embeddings,
optimizationMode,
llm,
systemInstructions,
emitter,
signal,
);
@ -377,8 +390,14 @@ class MetaSearchAgent implements MetaSearchAgentType {
})
.pipe(this.processDocs),
}),
// TODO: this doesn't seem like a very good way to pass persona instructions. Should do this better.
ChatPromptTemplate.fromMessages([
['system', this.config.responsePrompt],
[
'system',
personaInstructions
? `${this.config.responsePrompt}\n\nAdditional formatting/style instructions:\n${personaInstructions}`
: this.config.responsePrompt,
],
new MessagesPlaceholder('chat_history'),
['user', '{query}'],
]),
@ -393,12 +412,15 @@ class MetaSearchAgent implements MetaSearchAgentType {
docs: Document[],
query: string,
llm: BaseChatModel,
systemInstructions: string,
signal: AbortSignal,
): Promise<boolean> {
const formattedDocs = this.processDocs(docs);
const systemPrompt = systemInstructions ? `${systemInstructions}\n\n` : '';
const response = await llm.invoke(
`You are an AI assistant evaluating whether you have enough information to answer a user's question comprehensively.
`${systemPrompt}You are an AI assistant evaluating whether you have enough information to answer a user's question comprehensively.
Based on the following sources, determine if you have sufficient information to provide a detailed, accurate answer to the query: "${query}"
@ -438,6 +460,7 @@ Output ONLY \`<answer>yes</answer>\` if you have enough information to answer co
query: string,
llm: BaseChatModel,
summaryParser: LineOutputParser,
systemInstructions: string,
signal: AbortSignal,
): Promise<Document | null> {
try {
@ -445,9 +468,12 @@ Output ONLY \`<answer>yes</answer>\` if you have enough information to answer co
const webContent = await getWebContent(url, true);
if (webContent) {
const systemPrompt = systemInstructions
? `${systemInstructions}\n\n`
: '';
const summary = await llm.invoke(
`
You are a web content summarizer, tasked with creating a detailed, accurate summary of content from a webpage
`${systemPrompt}You are a web content summarizer, tasked with creating a detailed, accurate summary of content from a webpage
# Instructions
- The response must answer the user's query
@ -505,6 +531,7 @@ ${webContent.metadata.html ? webContent.metadata.html : webContent.pageContent},
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
llm: BaseChatModel,
systemInstructions: string,
emitter: eventEmitter,
signal: AbortSignal,
): Promise<Document[]> {
@ -705,6 +732,7 @@ ${webContent.metadata.html ? webContent.metadata.html : webContent.pageContent},
query,
llm,
summaryParser,
systemInstructions,
signal,
);
@ -729,6 +757,7 @@ ${webContent.metadata.html ? webContent.metadata.html : webContent.pageContent},
enhancedDocs,
query,
llm,
systemInstructions,
signal,
);
if (hasEnoughInfo) {
@ -847,6 +876,7 @@ ${docs[index].metadata?.url.toLowerCase().includes('file') ? '' : '\n<url>' + do
fileIds: string[],
systemInstructions: string,
signal: AbortSignal,
personaInstructions?: string,
) {
const emitter = new eventEmitter();
@ -858,6 +888,7 @@ ${docs[index].metadata?.url.toLowerCase().includes('file') ? '' : '\n<url>' + do
systemInstructions,
signal,
emitter,
personaInstructions,
);
const stream = answeringChain.streamEvents(

77
src/lib/utils/prompts.ts Normal file
View file

@ -0,0 +1,77 @@
import db from '@/lib/db';
import { systemPrompts as systemPromptsTable } from '@/lib/db/schema';
import { inArray } from 'drizzle-orm';
/**
 * A single prompt row as stored in the `systemPrompts` table.
 */
export interface PromptData {
  // Raw prompt text that gets combined into the LLM instructions.
  content: string;
  // Discriminator for how the prompt is applied: 'system' prompts are
  // combined into systemInstructions, 'persona' prompts into
  // personaInstructions (see getSystemPrompts below).
  type: 'system' | 'persona';
}
/**
 * Aggregated prompt text produced by getSystemPrompts: the selected prompts
 * are partitioned by type and joined with newlines into one string each.
 * Either field may be an empty string when no prompt of that type was
 * selected (or when the database lookup failed).
 */
export interface RetrievedPrompts {
  // All selected 'system' prompts, joined with '\n'.
  systemInstructions: string;
  // All selected 'persona' prompts, joined with '\n'.
  personaInstructions: string;
}
/**
* Retrieves and processes system prompts from the database
* @param selectedSystemPromptIds Array of prompt IDs to retrieve
* @returns Object containing combined system and persona instructions
*/
export async function getSystemPrompts(
selectedSystemPromptIds: string[],
): Promise<RetrievedPrompts> {
let systemInstructionsContent = '';
let personaInstructionsContent = '';
if (
!selectedSystemPromptIds ||
!Array.isArray(selectedSystemPromptIds) ||
selectedSystemPromptIds.length === 0
) {
return {
systemInstructions: systemInstructionsContent,
personaInstructions: personaInstructionsContent,
};
}
try {
const promptsFromDb = await db
.select({
content: systemPromptsTable.content,
type: systemPromptsTable.type,
})
.from(systemPromptsTable)
.where(inArray(systemPromptsTable.id, selectedSystemPromptIds));
// Separate system and persona prompts
const systemPrompts = promptsFromDb.filter((p) => p.type === 'system');
const personaPrompts = promptsFromDb.filter((p) => p.type === 'persona');
systemInstructionsContent = systemPrompts.map((p) => p.content).join('\n');
personaInstructionsContent = personaPrompts
.map((p) => p.content)
.join('\n');
} catch (dbError) {
console.error('Error fetching system prompts from DB:', dbError);
// Return empty strings rather than throwing to allow graceful degradation
}
return {
systemInstructions: systemInstructionsContent,
personaInstructions: personaInstructionsContent,
};
}
/**
 * Convenience wrapper around getSystemPrompts that returns only the combined
 * `system` instructions, discarding any selected persona prompts.
 *
 * @param selectedSystemPromptIds Array of prompt IDs to retrieve
 * @returns Combined system instructions as a string (empty when none apply)
 */
export async function getSystemInstructionsOnly(
  selectedSystemPromptIds: string[],
): Promise<string> {
  const prompts = await getSystemPrompts(selectedSystemPromptIds);
  return prompts.systemInstructions;
}