From 011d10df29e1486d6b6477eb60ee64f65d80820a Mon Sep 17 00:00:00 2001 From: Willie Zutz Date: Tue, 27 May 2025 12:53:30 -0600 Subject: [PATCH] feat(UI): allow system prompts and persona prompts to be saved server side and individually included or excluded from messages --- README.md | 2 + docs/architecture/README.md | 11 + src/app/api/chat/route.ts | 17 +- src/app/api/images/route.ts | 19 + src/app/api/search/route.ts | 22 +- src/app/api/suggestions/route.ts | 19 + src/app/api/system-prompts/[id]/route.ts | 76 +++ src/app/api/system-prompts/route.ts | 69 +++ src/app/api/videos/route.ts | 19 + src/app/settings/page.tsx | 586 ++++++++++++++++-- src/components/Chat.tsx | 6 + src/components/ChatWindow.tsx | 7 +- src/components/EmptyChat.tsx | 6 + src/components/MessageInput.tsx | 54 +- src/components/MessageInputActions/Attach.tsx | 8 +- .../SystemPromptSelector.tsx | 226 +++++++ src/components/SearchImages.tsx | 15 + src/components/SearchVideos.tsx | 15 + src/lib/actions.ts | 15 + src/lib/chains/imageSearchAgent.ts | 14 +- src/lib/chains/suggestionGeneratorAgent.ts | 17 +- src/lib/chains/videoSearchAgent.ts | 14 +- src/lib/db/schema.ts | 17 + src/lib/prompts/index.ts | 4 +- src/lib/prompts/webSearch.ts | 95 +-- src/lib/search/metaSearchAgent.ts | 47 +- src/lib/utils/prompts.ts | 77 +++ 27 files changed, 1345 insertions(+), 132 deletions(-) create mode 100644 src/app/api/system-prompts/[id]/route.ts create mode 100644 src/app/api/system-prompts/route.ts create mode 100644 src/components/MessageInputActions/SystemPromptSelector.tsx create mode 100644 src/lib/utils/prompts.ts diff --git a/README.md b/README.md index e71ef64..75ca8d6 100644 --- a/README.md +++ b/README.md @@ -220,6 +220,7 @@ This fork adds several enhancements to the original Perplexica project: - Added autocomplete functionality proxied to SearxNG - ✅ Enhanced Reddit focus mode to work around SearxNG limitations - ✅ Adds Quality mode that uses the full content of web pages to answer queries + - See the 
[README.md](docs/architecture/README.md) in the docs architecture directory for more info - Enhances Balanced mode which uses relevant excerpts of web content to answer queries ### AI Functionality @@ -230,6 +231,7 @@ This fork adds several enhancements to the original Perplexica project: - ✅ User customizable context window for Ollama models - ✅ Toggle for automatic suggestions - ✅ Added support for latest Anthropic models +- ✅ Adds support for multiple user-customizable system prompt enhancement and personas so you can tailor output to your needs ### Bug Fixes diff --git a/docs/architecture/README.md b/docs/architecture/README.md index 5732471..12c445f 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -7,5 +7,16 @@ Perplexica's architecture consists of the following key components: 3. **SearXNG**: A metadata search engine used by Perplexica to search the web for sources. 4. **LLMs (Large Language Models)**: Utilized by agents and chains for tasks like understanding content, writing responses, and citing sources. Examples include Claude, GPTs, etc. 5. **Embedding Models**: To improve the accuracy of search results, embedding models re-rank the results using similarity search algorithms such as cosine similarity and dot product distance. +6. 
**Web Content** + - In Quality mode the application uses Crawlee, Playwright, and Chromium to load web content into a real full browser + - This significantly increases the size of the docker image and also means it can only run on x64 architectures + - The docker build has been updated to restrict images to linux/amd64 architecture + - In Balanced mode, the application uses JSDOM and Mozilla's Readability to retrieve and rank relevant segments of web content + - This approach is less successful than Quality as it doesn't use a full web browser and can't load dynamic content + - It is also more prone to being blocked by ads or scraping detection + - Because it only uses segments of web content, it can be less accurate than Quality mode + - In Speed mode, the application only uses the preview content returned by SearXNG + - This content is provided by the search engines and contains minimal context from the actual web page + - This mode is the least accurate and is often prone to hallucination For a more detailed explanation of how these components work together, see [WORKING.md](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/architecture/WORKING.md). 
diff --git a/src/app/api/chat/route.ts b/src/app/api/chat/route.ts index 115e020..808d7b9 100644 --- a/src/app/api/chat/route.ts +++ b/src/app/api/chat/route.ts @@ -1,3 +1,4 @@ +import { cleanupCancelToken, registerCancelToken } from '@/lib/cancel-tokens'; import { getCustomOpenaiApiKey, getCustomOpenaiApiUrl, @@ -11,6 +12,7 @@ import { } from '@/lib/providers'; import { searchHandlers } from '@/lib/search'; import { getFileDetails } from '@/lib/utils/files'; +import { getSystemPrompts } from '@/lib/utils/prompts'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'; import { ChatOllama } from '@langchain/ollama'; @@ -18,7 +20,6 @@ import { ChatOpenAI } from '@langchain/openai'; import crypto from 'crypto'; import { and, eq, gt } from 'drizzle-orm'; import { EventEmitter } from 'stream'; -import { registerCancelToken, cleanupCancelToken } from '@/lib/cancel-tokens'; export const runtime = 'nodejs'; export const dynamic = 'force-dynamic'; @@ -49,6 +50,7 @@ type Body = { chatModel: ChatModel; embeddingModel: EmbeddingModel; systemInstructions: string; + selectedSystemPromptIds: string[]; }; type ModelStats = { @@ -256,7 +258,7 @@ export const POST = async (req: Request) => { try { const startTime = Date.now(); const body = (await req.json()) as Body; - const { message } = body; + const { message, selectedSystemPromptIds } = body; if (message.content === '') { return Response.json( @@ -349,6 +351,14 @@ export const POST = async (req: Request) => { ); } + let systemInstructionsContent = ''; + let personaInstructionsContent = ''; + + // Retrieve system prompts from database using shared utility + const promptData = await getSystemPrompts(selectedSystemPromptIds); + systemInstructionsContent = promptData.systemInstructions; + personaInstructionsContent = promptData.personaInstructions; + const responseStream = new TransformStream(); const writer = 
responseStream.writable.getWriter(); const encoder = new TextEncoder(); @@ -378,8 +388,9 @@ export const POST = async (req: Request) => { embedding, body.optimizationMode, body.files, - body.systemInstructions, + systemInstructionsContent, abortController.signal, + personaInstructionsContent, ); handleEmitterEvents( diff --git a/src/app/api/images/route.ts b/src/app/api/images/route.ts index 83af845..05c87f7 100644 --- a/src/app/api/images/route.ts +++ b/src/app/api/images/route.ts @@ -5,6 +5,7 @@ import { getCustomOpenaiModelName, } from '@/lib/config'; import { getAvailableChatModelProviders } from '@/lib/providers'; +import { getSystemInstructionsOnly } from '@/lib/utils/prompts'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'; import { ChatOllama } from '@langchain/ollama'; @@ -20,6 +21,7 @@ interface ImageSearchBody { query: string; chatHistory: any[]; chatModel?: ChatModel; + selectedSystemPromptIds?: string[]; } export const POST = async (req: Request) => { @@ -70,12 +72,29 @@ export const POST = async (req: Request) => { return Response.json({ error: 'Invalid chat model' }, { status: 400 }); } + let systemInstructions = ''; + if ( + body.selectedSystemPromptIds && + body.selectedSystemPromptIds.length > 0 + ) { + try { + const promptInstructions = await getSystemInstructionsOnly( + body.selectedSystemPromptIds, + ); + systemInstructions = promptInstructions || systemInstructions; + } catch (error) { + console.error('Error fetching system prompts:', error); + // Continue with fallback systemInstructions + } + } + const images = await handleImageSearch( { chat_history: chatHistory, query: body.query, }, llm, + systemInstructions, ); return Response.json({ images }, { status: 200 }); diff --git a/src/app/api/search/route.ts b/src/app/api/search/route.ts index 96e9814..b046e18 100644 --- a/src/app/api/search/route.ts +++ b/src/app/api/search/route.ts @@ 
-13,6 +13,7 @@ import { getCustomOpenaiModelName, } from '@/lib/config'; import { searchHandlers } from '@/lib/search'; +import { getSystemInstructionsOnly } from '@/lib/utils/prompts'; import { ChatOllama } from '@langchain/ollama'; interface chatModel { @@ -36,7 +37,7 @@ interface ChatRequestBody { query: string; history: Array<[string, string]>; stream?: boolean; - systemInstructions?: string; + selectedSystemPromptIds?: string[]; } export const POST = async (req: Request) => { @@ -127,6 +128,23 @@ export const POST = async (req: Request) => { const abortController = new AbortController(); const { signal } = abortController; + // Process system prompts from database if provided, otherwise use direct instructions + let systemInstructions = ''; + if ( + body.selectedSystemPromptIds && + body.selectedSystemPromptIds.length > 0 + ) { + try { + const promptInstructions = await getSystemInstructionsOnly( + body.selectedSystemPromptIds, + ); + systemInstructions = promptInstructions || systemInstructions; + } catch (error) { + console.error('Error fetching system prompts:', error); + // Continue with fallback systemInstructions + } + } + const emitter = await searchHandler.searchAndAnswer( body.query, history, @@ -134,7 +152,7 @@ export const POST = async (req: Request) => { embeddings, body.optimizationMode, [], - body.systemInstructions || '', + systemInstructions, signal, ); diff --git a/src/app/api/suggestions/route.ts b/src/app/api/suggestions/route.ts index 63caff4..7ba6bd5 100644 --- a/src/app/api/suggestions/route.ts +++ b/src/app/api/suggestions/route.ts @@ -5,6 +5,7 @@ import { getCustomOpenaiModelName, } from '@/lib/config'; import { getAvailableChatModelProviders } from '@/lib/providers'; +import { getSystemInstructionsOnly } from '@/lib/utils/prompts'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'; import { ChatOpenAI } from '@langchain/openai'; 
@@ -19,6 +20,7 @@ interface ChatModel { interface SuggestionsGenerationBody { chatHistory: any[]; chatModel?: ChatModel; + selectedSystemPromptIds?: string[]; } export const POST = async (req: Request) => { @@ -69,11 +71,28 @@ export const POST = async (req: Request) => { return Response.json({ error: 'Invalid chat model' }, { status: 400 }); } + let systemInstructions = ''; + if ( + body.selectedSystemPromptIds && + body.selectedSystemPromptIds.length > 0 + ) { + try { + const retrievedInstructions = await getSystemInstructionsOnly( + body.selectedSystemPromptIds, + ); + systemInstructions = retrievedInstructions; + } catch (error) { + console.error('Error retrieving system prompts:', error); + // Continue with existing systemInstructions as fallback + } + } + const suggestions = await generateSuggestions( { chat_history: chatHistory, }, llm, + systemInstructions, ); return Response.json({ suggestions }, { status: 200 }); diff --git a/src/app/api/system-prompts/[id]/route.ts b/src/app/api/system-prompts/[id]/route.ts new file mode 100644 index 0000000..6357915 --- /dev/null +++ b/src/app/api/system-prompts/[id]/route.ts @@ -0,0 +1,76 @@ +import db from '@/lib/db'; +import { systemPrompts } from '@/lib/db/schema'; +import { eq } from 'drizzle-orm'; +import { NextResponse } from 'next/server'; + +export async function PUT( + req: Request, + { params }: { params: Promise<{ id: string }> }, +) { + try { + const { id } = await params; + const { name, content, type } = await req.json(); + if (!name || !content) { + return NextResponse.json( + { error: 'Name and content are required' }, + { status: 400 }, + ); + } + if (type && type !== 'system' && type !== 'persona') { + return NextResponse.json( + { error: 'Type must be either "system" or "persona"' }, + { status: 400 }, + ); + } + + const updateData: any = { name, content, updatedAt: new Date() }; + if (type) { + updateData.type = type; + } + + const updatedPrompt = await db + .update(systemPrompts) + .set(updateData) 
+ .where(eq(systemPrompts.id, id)) + .returning(); + if (updatedPrompt.length === 0) { + return NextResponse.json( + { error: 'System prompt not found' }, + { status: 404 }, + ); + } + return NextResponse.json(updatedPrompt[0]); + } catch (error) { + console.error('Failed to update system prompt:', error); + return NextResponse.json( + { error: 'Failed to update system prompt' }, + { status: 500 }, + ); + } +} + +export async function DELETE( + req: Request, + { params }: { params: Promise<{ id: string }> }, +) { + try { + const { id } = await params; + const deletedPrompt = await db + .delete(systemPrompts) + .where(eq(systemPrompts.id, id)) + .returning(); + if (deletedPrompt.length === 0) { + return NextResponse.json( + { error: 'System prompt not found' }, + { status: 404 }, + ); + } + return NextResponse.json({ message: 'System prompt deleted successfully' }); + } catch (error) { + console.error('Failed to delete system prompt:', error); + return NextResponse.json( + { error: 'Failed to delete system prompt' }, + { status: 500 }, + ); + } +} diff --git a/src/app/api/system-prompts/route.ts b/src/app/api/system-prompts/route.ts new file mode 100644 index 0000000..42c6b44 --- /dev/null +++ b/src/app/api/system-prompts/route.ts @@ -0,0 +1,69 @@ +import db from '@/lib/db'; +import { systemPrompts } from '@/lib/db/schema'; +import { NextResponse } from 'next/server'; +import { asc, eq } from 'drizzle-orm'; + +export async function GET(req: Request) { + try { + const { searchParams } = new URL(req.url); + const type = searchParams.get('type'); + + let prompts; + + if (type && (type === 'system' || type === 'persona')) { + prompts = await db + .select() + .from(systemPrompts) + .where(eq(systemPrompts.type, type)) + .orderBy(asc(systemPrompts.name)); + } else { + prompts = await db + .select() + .from(systemPrompts) + .orderBy(asc(systemPrompts.name)); + } + + return NextResponse.json(prompts); + } catch (error) { + console.error('Failed to fetch system prompts:', 
error); + return NextResponse.json( + { error: 'Failed to fetch system prompts' }, + { status: 500 }, + ); + } +} + +export async function POST(req: Request) { + try { + const { name, content, type = 'system' } = await req.json(); + if (!name || !content) { + return NextResponse.json( + { error: 'Name and content are required' }, + { status: 400 }, + ); + } + if (type && type !== 'system' && type !== 'persona') { + return NextResponse.json( + { error: 'Type must be either "system" or "persona"' }, + { status: 400 }, + ); + } + const newPrompt = await db + .insert(systemPrompts) + .values({ + name, + content, + type, + createdAt: new Date(), + updatedAt: new Date(), + }) + .returning(); + return NextResponse.json(newPrompt[0], { status: 201 }); + } catch (error) { + console.error('Failed to create system prompt:', error); + return NextResponse.json( + { error: 'Failed to create system prompt' }, + { status: 500 }, + ); + } +} diff --git a/src/app/api/videos/route.ts b/src/app/api/videos/route.ts index 64c6b7c..673d358 100644 --- a/src/app/api/videos/route.ts +++ b/src/app/api/videos/route.ts @@ -5,6 +5,7 @@ import { getCustomOpenaiModelName, } from '@/lib/config'; import { getAvailableChatModelProviders } from '@/lib/providers'; +import { getSystemInstructionsOnly } from '@/lib/utils/prompts'; import { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'; import { ChatOllama } from '@langchain/ollama'; @@ -20,6 +21,7 @@ interface VideoSearchBody { query: string; chatHistory: any[]; chatModel?: ChatModel; + selectedSystemPromptIds?: string[]; } export const POST = async (req: Request) => { @@ -70,12 +72,29 @@ export const POST = async (req: Request) => { return Response.json({ error: 'Invalid chat model' }, { status: 400 }); } + let systemInstructions = ''; + if ( + body.selectedSystemPromptIds && + body.selectedSystemPromptIds.length > 0 + ) { + try { + const 
retrievedInstructions = await getSystemInstructionsOnly( + body.selectedSystemPromptIds, + ); + systemInstructions = retrievedInstructions; + } catch (error) { + console.error('Error retrieving system prompts:', error); + // Continue with existing systemInstructions as fallback + } + } + const videos = await handleVideoSearch( { chat_history: chatHistory, query: body.query, }, llm, + systemInstructions, ); return Response.json({ videos }, { status: 200 }); diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx index 9bee38f..6611f39 100644 --- a/src/app/settings/page.tsx +++ b/src/app/settings/page.tsx @@ -5,8 +5,13 @@ import { ArrowLeft, Loader2, Info, + Trash2, + Edit3, + PlusCircle, + Save, + X, } from 'lucide-react'; -import { useEffect, useState } from 'react'; +import { useEffect, useState, useRef } from 'react'; import { cn } from '@/lib/utils'; import { Switch } from '@headlessui/react'; import ThemeSwitcher from '@/components/theme/Switcher'; @@ -39,7 +44,12 @@ interface InputProps extends React.InputHTMLAttributes { onSave?: (value: string) => void; } -const Input = ({ className, isSaving, onSave, ...restProps }: InputProps) => { +const InputComponent = ({ + className, + isSaving, + onSave, + ...restProps +}: InputProps) => { return (
{ onSave?: (value: string) => void; } -const Textarea = ({ +const TextareaComponent = ({ className, isSaving, onSave, @@ -127,27 +137,75 @@ const SettingsSection = ({ title: string; children: React.ReactNode; tooltip?: string; -}) => ( -
-
-

{title}

- {tooltip && ( -
- -
- {tooltip} -
-
- )} -
- {children} -
-); +}) => { + const [showTooltip, setShowTooltip] = useState(false); + const tooltipRef = useRef(null); + const buttonRef = useRef(null); -const Page = () => { + useEffect(() => { + const handleClickOutside = (event: MouseEvent) => { + if ( + tooltipRef.current && + !tooltipRef.current.contains(event.target as Node) && + buttonRef.current && + !buttonRef.current.contains(event.target as Node) + ) { + setShowTooltip(false); + } + }; + + document.addEventListener('mousedown', handleClickOutside); + return () => { + document.removeEventListener('mousedown', handleClickOutside); + }; + }, []); + + return ( +
+
+

+ {title} +

+ {tooltip && ( +
+ + {showTooltip && ( +
+
+
+ {tooltip.split('\\n').map((line, index) => ( +
{line}
+ ))} +
+
+
+ )} +
+ )} +
+ {children} +
+ ); +}; + +interface SystemPrompt { + id: string; + name: string; + content: string; + type: 'system' | 'persona'; +} + +export default function SettingsPage() { const [config, setConfig] = useState(null); const [chatModels, setChatModels] = useState>({}); const [embeddingModels, setEmbeddingModels] = useState>( @@ -166,7 +224,6 @@ const Page = () => { >(null); const [isLoading, setIsLoading] = useState(false); const [automaticSuggestions, setAutomaticSuggestions] = useState(true); - const [systemInstructions, setSystemInstructions] = useState(''); const [savingStates, setSavingStates] = useState>({}); const [contextWindowSize, setContextWindowSize] = useState(2048); const [isCustomContextWindow, setIsCustomContextWindow] = useState(false); @@ -174,6 +231,17 @@ const Page = () => { 1024, 2048, 3072, 4096, 8192, 16384, 32768, 65536, 131072, ]; + const [userSystemPrompts, setUserSystemPrompts] = useState( + [], + ); + const [editingPrompt, setEditingPrompt] = useState(null); + const [newPromptName, setNewPromptName] = useState(''); + const [newPromptContent, setNewPromptContent] = useState(''); + const [newPromptType, setNewPromptType] = useState<'system' | 'persona'>( + 'system', + ); + const [isAddingNewPrompt, setIsAddingNewPrompt] = useState(false); + useEffect(() => { const fetchConfig = async () => { setIsLoading(true); @@ -238,13 +306,30 @@ const Page = () => { !predefinedContextSizes.includes(storedContextWindow), ); - setSystemInstructions(localStorage.getItem('systemInstructions')!); - setIsLoading(false); }; fetchConfig(); - }); + + const fetchSystemPrompts = async () => { + setIsLoading(true); + try { + const response = await fetch('/api/system-prompts'); + if (response.ok) { + const prompts = await response.json(); + setUserSystemPrompts(prompts); + } else { + console.error('Failed to load system prompts.'); + } + } catch (error) { + console.error('Error loading system prompts.'); + } finally { + setIsLoading(false); + } + }; + + fetchSystemPrompts(); + 
}, []); const saveConfig = async (key: string, value: any) => { setSavingStates((prev) => ({ ...prev, [key]: true })); @@ -396,8 +481,6 @@ const Page = () => { localStorage.setItem('embeddingModel', value); } else if (key === 'ollamaContextWindow') { localStorage.setItem('ollamaContextWindow', value.toString()); - } else if (key === 'systemInstructions') { - localStorage.setItem('systemInstructions', value); } } catch (err) { console.error('Failed to save:', err); @@ -409,6 +492,83 @@ const Page = () => { } }; + const handleAddOrUpdateSystemPrompt = async () => { + const currentPrompt = editingPrompt || { + name: newPromptName, + content: newPromptContent, + type: newPromptType, + }; + if (!currentPrompt.name.trim() || !currentPrompt.content.trim()) { + console.error('Prompt name and content cannot be empty.'); + return; + } + + const url = editingPrompt + ? `/api/system-prompts/${editingPrompt.id}` + : '/api/system-prompts'; + const method = editingPrompt ? 'PUT' : 'POST'; + const body = JSON.stringify({ + name: currentPrompt.name, + content: currentPrompt.content, + type: currentPrompt.type, + }); + + try { + const response = await fetch(url, { + method, + headers: { 'Content-Type': 'application/json' }, + body, + }); + + if (response.ok) { + const savedPrompt = await response.json(); + if (editingPrompt) { + setUserSystemPrompts( + userSystemPrompts.map((p) => + p.id === savedPrompt.id ? savedPrompt : p, + ), + ); + setEditingPrompt(null); + } else { + setUserSystemPrompts([...userSystemPrompts, savedPrompt]); + setNewPromptName(''); + setNewPromptContent(''); + setNewPromptType('system'); + setIsAddingNewPrompt(false); + } + console.log(`System prompt ${editingPrompt ? 'updated' : 'added'}.`); + } else { + const errorData = await response.json(); + console.error( + errorData.error || + `Failed to ${editingPrompt ? 'update' : 'add'} prompt.`, + ); + } + } catch (error) { + console.error(`Error ${editingPrompt ? 
'updating' : 'adding'} prompt.`); + } + }; + + const handleDeleteSystemPrompt = async (promptId: string) => { + if (!confirm('Are you sure you want to delete this prompt?')) return; + try { + const response = await fetch(`/api/system-prompts/${promptId}`, { + method: 'DELETE', + }); + if (response.ok) { + setUserSystemPrompts( + userSystemPrompts.filter((p) => p.id !== promptId), + ); + console.log('System prompt deleted.'); + } else { + const errorData = await response.json(); + console.error(errorData.error || 'Failed to delete prompt.'); + } + } catch (error) { + console.error('Error deleting prompt.'); + } + }; + return (
@@ -500,16 +660,340 @@ const Page = () => {
- + {/* TODO: Refactor into reusable components */} +
-