feat(Focus): Enhance hover effects and update icon colors for better visibility

feat(Optimization): Update icon colors for consistency and improve hover styles
feat(SimplifiedAgent): Add messagesCount parameter to initializeAgent for adaptive prompts
Willie Zutz 2025-08-10 17:36:15 -06:00
parent 7b4a7a531e
commit 803fd5cc17
6 changed files with 61 additions and 37 deletions

View file

@@ -1,4 +1,3 @@
# Tracing LLM Calls in Perplexica
Perplexica supports tracing all LangChain and LangGraph LLM calls for debugging, analytics, and prompt transparency. You can use either Langfuse (self-hosted, private, or cloud) or LangSmith (cloud, by LangChain) for tracing.
@@ -10,21 +9,22 @@ Langfuse is an open-source, self-hostable observability platform for LLM applications
### Setup
1. **Deploy Langfuse**
- See: [Langfuse Self-Hosting Guide](https://langfuse.com/docs/self-hosting)
- You can also use the Langfuse Cloud if you prefer.
2. **Configure Environment Variables**
- Add the following to your environment variables in docker-compose or your deployment environment:
```env
LANGFUSE_PUBLIC_KEY=your-public-key
LANGFUSE_SECRET_KEY=your-secret-key
LANGFUSE_BASE_URL=https://your-langfuse-instance.com
```
- These are required for the tracing integration to work. If not set, tracing is disabled gracefully.
3. **Run Perplexica**
- All LLM and agent calls will be traced automatically. You can view traces in your Langfuse dashboard.
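The graceful fallback described in step 2 can be pictured with a small helper. The sketch below is illustrative only: it assumes the `CallbackHandler` exported by the `langfuse-langchain` package, and the helper name `getLangfuseCallbacks()` simply mirrors the call sites that appear later in this commit; the actual Perplexica implementation may differ.

```ts
import { CallbackHandler } from 'langfuse-langchain';

// Hypothetical sketch: returns `{ callbacks: [...] }` when Langfuse is configured,
// or `{}` otherwise, so callers can spread it into LangChain call options,
// e.g. `{ signal, ...getLangfuseCallbacks() }`.
export function getLangfuseCallbacks(): { callbacks?: CallbackHandler[] } {
  const publicKey = process.env.LANGFUSE_PUBLIC_KEY;
  const secretKey = process.env.LANGFUSE_SECRET_KEY;
  const baseUrl = process.env.LANGFUSE_BASE_URL;

  // Tracing is disabled gracefully when any variable is missing.
  if (!publicKey || !secretKey || !baseUrl) {
    return {};
  }

  return {
    callbacks: [new CallbackHandler({ publicKey, secretKey, baseUrl })],
  };
}
```

In a setup like this, leaving the variables unset simply yields no callbacks, which matches the "disabled gracefully" behaviour described above.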
## LangSmith Tracing (Cloud by LangChain)
@@ -38,5 +38,6 @@ Perplexica also supports tracing via [LangSmith](https://smith.langchain.com/),
---
For more details on tracing, see the respective documentation:
- [Langfuse Documentation](https://langfuse.com/docs)
- [LangSmith Observability](https://docs.smith.langchain.com/observability)

View file

@@ -42,7 +42,7 @@ const Focus = ({
);
return (
<div className="rounded-xl hover:bg-surface-2 transition duration-200">
<div className="rounded-xl transition duration-200">
<div className="flex flex-row items-center space-x-1">
<div className="relative">
<div className="flex items-center border border-surface-2 rounded-lg overflow-hidden">
@@ -52,7 +52,7 @@ const Focus = ({
'p-2 transition-all duration-200',
focusMode === 'webSearch'
? 'text-accent scale-105'
- : 'text-fg/70',
+ : 'text-fg/70 hover:bg-surface-2',
)}
onMouseEnter={() => setShowWebSearchTooltip(true)}
onMouseLeave={() => setShowWebSearchTooltip(false)}
@@ -72,8 +72,8 @@ const Focus = ({
className={cn(
'p-2 transition-all duration-200',
focusMode === 'chat'
- ? 'text-[#10B981] scale-105'
- : 'text-fg/70',
+ ? 'text-accent scale-105'
+ : 'text-fg/70 hover:bg-surface-2',
)}
onMouseEnter={() => setShowChatTooltip(true)}
onMouseLeave={() => setShowChatTooltip(false)}
@@ -93,8 +93,8 @@ const Focus = ({
className={cn(
'p-2 transition-all duration-200',
focusMode === 'localResearch'
- ? 'text-[#8B5CF6] scale-105'
- : 'text-fg/70',
+ ? 'text-accent scale-105'
+ : 'text-fg/70 hover:bg-surface-2',
)}
onMouseEnter={() => setShowLocalResearchTooltip(true)}
onMouseLeave={() => setShowLocalResearchTooltip(false)}
@@ -129,7 +129,7 @@ const Focus = ({
<div className="absolute z-20 bottom-[100%] mb-2 left-0 transform animate-in fade-in-0 duration-150">
<div className="bg-surface border rounded-lg border-surface-2 p-4 w-80 shadow-lg">
<div className="flex items-center space-x-2 mb-2">
- <MessageCircle size={16} className="text-[#10B981]" />
+ <MessageCircle size={16} className="text-accent" />
<h3 className="font-medium text-sm text-left">
{chatMode?.title}
</h3>
@@ -146,8 +146,8 @@ const Focus = ({
<div className="absolute z-20 bottom-[100%] mb-2 left-0 animate-in fade-in-0 duration-150">
<div className="bg-surface border rounded-lg border-surface-2 p-4 w-80 shadow-lg">
<div className="flex items-center space-x-2 mb-2">
- <Pencil size={16} className="text-[#8B5CF6]" />
- <h3 className="font-medium text-smtext-left">
+ <Pencil size={16} className="text-accent" />
+ <h3 className="font-medium text-sm text-left">
{localResearchMode?.title}
</h3>
</div>

View file

@@ -8,14 +8,14 @@ const OptimizationModes = [
title: 'Speed',
description:
'Prioritize speed and get the quickest possible answer. Uses only web search results - attached files will not be processed.',
- icon: <Zap size={20} className="text-[#FF9800]" />,
+ icon: <Zap size={20} className="text-accent" />,
},
{
key: 'agent',
title: 'Agent (Experimental)',
description:
'Use an agentic workflow to answer complex multi-part questions. This mode may take longer and is experimental. It uses large prompts and may not work with all models. Best with at least an 8b model that supports 32k context or more.',
- icon: <Bot size={20} className="text-[#9C27B0]" />,
+ icon: <Bot size={20} className="text-accent" />,
},
];
@@ -57,7 +57,7 @@ const Optimization = ({
className={cn(
'p-2 transition-all duration-200',
!isAgentMode
- ? 'bg-[#FF9800]/20 text-[#FF9800] scale-105'
+ ? 'bg-surface-2 text-accent scale-105'
: 'text-fg/30 hover:text-fg/50 hover:bg-surface-2/50',
)}
onMouseEnter={() => setShowSpeedTooltip(true)}
@@ -74,7 +74,7 @@ const Optimization = ({
className={cn(
'p-2 transition-all duration-200',
isAgentMode
- ? 'bg-[#9C27B0]/20 text-[#9C27B0] scale-105'
+ ? 'bg-surface-2 text-accent scale-105'
: 'text-fg/30 hover:text-fg/50 hover:bg-surface-2/50',
)}
onMouseEnter={() => setShowAgentTooltip(true)}
@@ -89,7 +89,7 @@ const Optimization = ({
<div className="absolute z-20 bottom-[100%] mb-2 right-0 animate-in fade-in-0 duration-150">
<div className="bg-surface border rounded-lg border-surface-2 p-4 w-80 shadow-lg">
<div className="flex items-center space-x-2 mb-2">
- <Zap size={16} className="text-[#FF9800]" />
+ <Zap size={16} className="text-accent" />
<h3 className="font-medium text-sm text-fg text-left">
{speedMode?.title}
</h3>
@@ -106,7 +106,7 @@ const Optimization = ({
<div className="absolute z-20 bottom-[100%] mb-2 right-0 animate-in fade-in-0 duration-150">
<div className="bg-surface border rounded-lg border-surface-2 p-4 w-80 shadow-lg">
<div className="flex items-center space-x-2 mb-2">
- <Bot size={16} className="text-[#9C27B0]" />
+ <Bot size={16} className="text-accent" />
<h3 className="font-medium text-sm text-fg text-left">
{agentMode?.title}
</h3>

View file

@@ -89,13 +89,18 @@ export class SimplifiedAgent {
/**
* Initialize the createReactAgent with tools and configuration
*/
- private initializeAgent(focusMode: string, fileIds: string[] = []) {
+ private initializeAgent(
+ focusMode: string,
+ fileIds: string[] = [],
+ messagesCount?: number,
+ ) {
// Select appropriate tools based on focus mode and available files
const tools = this.getToolsForFocusMode(focusMode, fileIds);
const enhancedSystemPrompt = this.createEnhancedSystemPrompt(
focusMode,
fileIds,
+ messagesCount,
);
try {
@@ -159,6 +164,7 @@ export class SimplifiedAgent {
private createEnhancedSystemPrompt(
focusMode: string,
fileIds: string[] = [],
+ messagesCount?: number,
): string {
const baseInstructions = this.systemInstructions || '';
const personaInstructions = this.personaInstructions || '';
@@ -172,6 +178,7 @@ export class SimplifiedAgent {
baseInstructions,
personaInstructions,
fileIds,
+ messagesCount,
);
case 'localResearch':
return this.createLocalResearchModePrompt(
@@ -186,6 +193,7 @@ export class SimplifiedAgent {
baseInstructions,
personaInstructions,
fileIds,
+ messagesCount,
);
}
}
@@ -252,7 +260,13 @@ Focus on providing engaging, helpful conversation while using task management to
baseInstructions: string,
personaInstructions: string,
fileIds: string[] = [],
+ messagesCount: number = 0,
): string {
+ // If the number of messages passed to the LLM is < 2 (i.e., first turn), enforce ALWAYS web search.
+ const alwaysSearchInstruction =
+ messagesCount < 2
+ ? '\n - **You must ALWAYS perform at least one web search on the first turn, regardless of prior knowledge or assumptions. Do not skip this.**'
+ : '';
return `${baseInstructions}
# Comprehensive Research Assistant
@@ -323,7 +337,7 @@ Your task is to provide answers that are:
- Give the web search tool a specific question you want answered that will help you gather relevant information
- This query will be passed directly to the search engine
- You will receive a list of relevant documents containing snippets of the web page, a URL, and the title of the web page
- - **Always perform at least one web search** unless the question can be definitively answered with previous conversation history or local file content. If you don't have conversation history or local files, **you must perform a web search**
+ ${alwaysSearchInstruction}
${
fileIds.length > 0
? `
@@ -368,7 +382,14 @@ ${
${personaInstructions ? `\n## User Formatting and Persona Instructions\n- Give these instructions more weight than the system formatting instructions\n${personaInstructions}` : ''}
- Use all available tools strategically to provide comprehensive, well-researched, formatted responses with proper citations`;
+ Use all available tools strategically to provide comprehensive, well-researched, formatted responses with proper citations.
+ ${
+ messagesCount < 2
+ ? `
+ **DO NOT SKIP WEB SEARCH**
+ `
+ : ''
+ }`;
}
/**
@@ -487,7 +508,9 @@ Use all available tools strategically to provide comprehensive, well-researched,
console.log(`SimplifiedAgent: File IDs: ${fileIds.join(', ')}`);
// Initialize agent with the provided focus mode and file context
- const agent = this.initializeAgent(focusMode, fileIds);
+ // Pass the number of messages that will be sent to the LLM so prompts can adapt.
+ const llmMessagesCount = [...history, new HumanMessage(query)].length;
+ const agent = this.initializeAgent(focusMode, fileIds, llmMessagesCount);
// Prepare initial state
const initialState = {
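As a standalone illustration of the first-turn check added in this file (not code from the commit; the sample query and empty history below are made up), the `< 2` threshold works out as follows:

```ts
import { BaseMessage, HumanMessage } from '@langchain/core/messages';

// First turn: no prior conversation, so only the new HumanMessage is counted.
const history: BaseMessage[] = [];
const query = 'What changed in the latest release?';

const llmMessagesCount = [...history, new HumanMessage(query)].length; // 1
const isFirstTurn = llmMessagesCount < 2; // true → the prompt appends the
// "ALWAYS perform at least one web search" instruction; on later turns the
// history already holds at least one exchange, the count is >= 2, and the
// stricter instruction is omitted.
```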

View file

@@ -104,7 +104,7 @@ class SpeedSearchAgent implements SpeedSearchAgentType {
this.emitProgress(emitter, 10, `Building search query`);
return RunnableSequence.from([
PromptTemplate.fromTemplate(this.config.queryGeneratorPrompt),
llm,
this.strParser,
@@ -237,7 +237,7 @@ class SpeedSearchAgent implements SpeedSearchAgentType {
Make sure to answer the query in the summary.
`,
{ signal, ...getLangfuseCallbacks() },
);
const document = new Document({
@@ -542,7 +542,7 @@ ${docs[index].metadata?.url.toLowerCase().includes('file') ? '' : '\n<url>' + do
personaInstructions,
);
const stream = answeringChain.streamEvents(
{
chat_history: history,
query: message,
@@ -550,8 +550,8 @@ ${docs[index].metadata?.url.toLowerCase().includes('file') ? '' : '\n<url>' + do
{
version: 'v1',
// Pass the abort signal to the LLM streaming chain
signal,
...getLangfuseCallbacks(),
},
);

View file

@@ -82,7 +82,7 @@ Snippet: ${content.snippet}
name: 'analyze_preview_content',
});
const analysisResult = await structuredLLM.invoke(
`You are a preview content analyzer, tasked with determining if search result snippets contain sufficient information to answer the Task Query.
# Instructions
@@ -119,7 +119,7 @@ ${taskQuery}
# Search Result Previews to Analyze:
${formattedPreviewContent}
`,
{ signal, ...getLangfuseCallbacks() },
);
if (!analysisResult) {