From cddc7939150f7d577bccb6a75f03e6743da61bb8 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Sat, 19 Jul 2025 17:52:14 +0530
Subject: [PATCH 01/38] feat(videoSearch): use XML parsing, use few shot prompting

---
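A quick usage sketch of the reworked chain (illustrative, not part of the diff below; the call signature is unchanged by this patch):

    // Sketch: exercising the updated chain; `llm` is any BaseChatModel.
    // The model is now expected to answer with '<query>...</query>', which
    // the LineOutputParser reduces to a plain string before the SearxNG call.
    const videos = await handleVideoSearch(
      { chat_history: [], query: 'How does a car work?' },
      llm,
    );
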
 src/lib/chains/videoSearchAgent.ts | 72 +++++++++++++++++++-----------
 1 file changed, 46 insertions(+), 26 deletions(-)

diff --git a/src/lib/chains/videoSearchAgent.ts b/src/lib/chains/videoSearchAgent.ts
index f7cb156..8e158f5 100644
--- a/src/lib/chains/videoSearchAgent.ts
+++ b/src/lib/chains/videoSearchAgent.ts
@@ -3,33 +3,19 @@ import {
   RunnableMap,
   RunnableLambda,
 } from '@langchain/core/runnables';
-import { PromptTemplate } from '@langchain/core/prompts';
+import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
 import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import LineOutputParser from '../outputParsers/lineOutputParser';
 
-const VideoSearchChainPrompt = `
-  You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
-  You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
-
-  Example:
-  1. Follow up question: How does a car work?
-  Rephrased: How does a car work?
-
-  2. Follow up question: What is the theory of relativity?
-  Rephrased: What is theory of relativity
-
-  3. Follow up question: How does an AC work?
-  Rephrased: How does an AC work
-
-  Conversation:
-  {chat_history}
-
-  Follow up question: {query}
-  Rephrased question:
-  `;
+const videoSearchChainPrompt = `
+You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search Youtube for videos.
+You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
+Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
+`;
 
 type VideoSearchChainInput = {
   chat_history: BaseMessage[];
@@ -55,12 +41,46 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
         return input.query;
       },
     }),
-    PromptTemplate.fromTemplate(VideoSearchChainPrompt),
+    ChatPromptTemplate.fromMessages([
+      ['system', videoSearchChainPrompt],
+      [
+        'user',
+        '<conversation>\n</conversation>\n\n<query>\nHow does a car work?\n</query>'
+      ],
+      [
+        'assistant',
+        '<query>How does a car work?</query>'
+      ],
+      [
+        'user',
+        '<conversation>\n</conversation>\n\n<query>\nWhat is the theory of relativity?\n</query>'
+      ],
+      [
+        'assistant',
+        '<query>Theory of relativity</query>'
+      ],
+      [
+        'user',
+        '<conversation>\n</conversation>\n\n<query>\nHow does an AC work?\n</query>'
+      ],
+      [
+        'assistant',
+        '<query>AC working</query>'
+      ],
+      [
+        'user',
+        '<conversation>{chat_history}</conversation>\n\n<query>{query}</query>\n'
+      ]
+    ]),
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
-      input = input.replace(/<think>.*?<\/think>/g, '');
-
+      const queryParser = new LineOutputParser({
+        key: 'query'
+      });
+      return (await queryParser.parse(input));
+    }),
+    RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {
         engines: ['youtube'],
       });
@@ -92,8 +112,8 @@ const handleVideoSearch = (
   input: VideoSearchChainInput,
   llm: BaseChatModel,
 ) => {
-  const VideoSearchChain = createVideoSearchChain(llm);
-  return VideoSearchChain.invoke(input);
+  const videoSearchChain = createVideoSearchChain(llm);
+  return videoSearchChain.invoke(input);
 };
 
 export default handleVideoSearch;

From b48b0eeb0e5cfea3a8de9f7d2f1052119f8af9ec Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Sat, 19 Jul 2025 17:52:30 +0530
Subject: [PATCH 02/38] feat(imageSearch): use XML parsing, implement few shot prompting

---
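LineOutputParser is reused from src/lib/outputParsers/lineOutputParser.ts; its implementation is not part of this series, so the sketch below only approximates the extraction behavior both chains assume:

    // Sketch only — assumed behavior of LineOutputParser({ key: 'query' }).
    const extractQuery = (output: string): string => {
      const match = output.match(/<query>([\s\S]*?)<\/query>/);
      return match ? match[1].trim() : output.trim();
    };

    extractQuery('<query>A cat</query>'); // => 'A cat'
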
 src/lib/chains/imageSearchAgent.ts | 60 ++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 19 deletions(-)

diff --git a/src/lib/chains/imageSearchAgent.ts b/src/lib/chains/imageSearchAgent.ts
index 4fd684f..993cba9 100644
--- a/src/lib/chains/imageSearchAgent.ts
+++ b/src/lib/chains/imageSearchAgent.ts
@@ -3,32 +3,18 @@ import {
   RunnableMap,
   RunnableLambda,
 } from '@langchain/core/runnables';
-import { PromptTemplate } from '@langchain/core/prompts';
+import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
 import { searchSearxng } from '../searxng';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import LineOutputParser from '../outputParsers/lineOutputParser';
 
 const imageSearchChainPrompt = `
 You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
 You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
-
-Example:
-1. Follow up question: What is a cat?
-Rephrased: A cat
-
-2. Follow up question: What is a car? How does it works?
-Rephrased: Car working
-
-3. Follow up question: How does an AC work?
-Rephrased: AC working
-
-Conversation:
-{chat_history}
-
-Follow up question: {query}
-Rephrased question:
+Output only the rephrased query wrapped in an XML <query> element. Do not include any explanation or additional text.
 `;
 
 type ImageSearchChainInput = {
@@ -54,12 +40,48 @@ const createImageSearchChain = (llm: BaseChatModel) => {
         return input.query;
       },
     }),
-    PromptTemplate.fromTemplate(imageSearchChainPrompt),
+    ChatPromptTemplate.fromMessages([
+      ['system', imageSearchChainPrompt],
+      [
+        "user",
+        "<conversation>\n</conversation>\n\n<query>\nWhat is a cat?\n</query>"
+      ],
+      [
+        "assistant",
+        "<query>A cat</query>"
+      ],
+
+      [
+        "user",
+        "<conversation>\n</conversation>\n\n<query>\nWhat is a car? How does it work?\n</query>"
+      ],
+      [
+        "assistant",
+        "<query>Car working</query>"
+      ],
+      [
+        "user",
+        "<conversation>\n</conversation>\n\n<query>\nHow does an AC work?\n</query>"
+      ],
+      [
+        "assistant",
+        "<query>AC working</query>"
+      ],
+      [
+        'user',
+        '<conversation>{chat_history}</conversation>\n\n<query>{query}</query>\n'
+      ]
+    ]),
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
-      input = input.replace(/<think>.*?<\/think>/g, '');
+      const queryParser = new LineOutputParser({
+        key: 'query'
+      })
 
+      return (await queryParser.parse(input))
+    }),
+    RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {
         engines: ['bing images', 'google images'],
       });

From 7c4aa683a244137626c7d820c322f50c87fb79f7 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Sat, 19 Jul 2025 17:57:32 +0530
Subject: [PATCH 03/38] feat(chains): remove unused imports

---
 src/lib/chains/imageSearchAgent.ts | 43 ++++++++++++------------------
 src/lib/chains/videoSearchAgent.ts | 31 ++++++++-------------
 2 files changed, 28 insertions(+), 46 deletions(-)

diff --git a/src/lib/chains/imageSearchAgent.ts b/src/lib/chains/imageSearchAgent.ts
index 993cba9..a91b7bb 100644
--- a/src/lib/chains/imageSearchAgent.ts
+++ b/src/lib/chains/imageSearchAgent.ts
@@ -3,7 +3,7 @@ import {
   RunnableMap,
   RunnableLambda,
 } from '@langchain/core/runnables';
-import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
+import { ChatPromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
@@ -43,43 +43,34 @@ const createImageSearchChain = (llm: BaseChatModel) => {
     ChatPromptTemplate.fromMessages([
       ['system', imageSearchChainPrompt],
       [
-        "user",
-        "<conversation>\n</conversation>\n\n<query>\nWhat is a cat?\n</query>"
-      ],
-      [
-        "assistant",
-        "<query>A cat</query>"
+        'user',
+        '<conversation>\n</conversation>\n\n<query>\nWhat is a cat?\n</query>',
       ],
+      ['assistant', '<query>A cat</query>'],
 
       [
-        "user",
-        "<conversation>\n</conversation>\n\n<query>\nWhat is a car? How does it work?\n</query>"
-      ],
-      [
-        "assistant",
-        "<query>Car working</query>"
-      ],
-      [
-        "user",
-        "<conversation>\n</conversation>\n\n<query>\nHow does an AC work?\n</query>"
-      ],
-      [
-        "assistant",
-        "<query>AC working</query>"
+        'user',
+        '<conversation>\n</conversation>\n\n<query>\nWhat is a car? How does it work?\n</query>',
       ],
+      ['assistant', '<query>Car working</query>'],
       [
         'user',
-        '<conversation>{chat_history}</conversation>\n\n<query>{query}</query>\n'
-      ]
+        '<conversation>\n</conversation>\n\n<query>\nHow does an AC work?\n</query>',
+      ],
+      ['assistant', '<query>AC working</query>'],
+      [
+        'user',
+        '<conversation>{chat_history}</conversation>\n\n<query>{query}</query>\n',
+      ],
     ]),
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
       const queryParser = new LineOutputParser({
-        key: 'query'
-      })
+        key: 'query',
+      });
 
-      return (await queryParser.parse(input))
+      return await queryParser.parse(input);
     }),
     RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {
diff --git a/src/lib/chains/videoSearchAgent.ts b/src/lib/chains/videoSearchAgent.ts
index 8e158f5..3f878a8 100644
--- a/src/lib/chains/videoSearchAgent.ts
+++ b/src/lib/chains/videoSearchAgent.ts
@@ -3,7 +3,7 @@ import {
   RunnableMap,
   RunnableLambda,
 } from '@langchain/core/runnables';
-import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
+import { ChatPromptTemplate } from '@langchain/core/prompts';
 import formatChatHistoryAsString from '../utils/formatHistory';
 import { BaseMessage } from '@langchain/core/messages';
 import { StringOutputParser } from '@langchain/core/output_parsers';
@@ -45,40 +45,31 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
       ['system', videoSearchChainPrompt],
       [
         'user',
-        '<conversation>\n</conversation>\n\n<query>\nHow does a car work?\n</query>'
-      ],
-      [
-        'assistant',
-        '<query>How does a car work?</query>'
+        '<conversation>\n</conversation>\n\n<query>\nHow does a car work?\n</query>',
       ],
+      ['assistant', '<query>How does a car work?</query>'],
       [
         'user',
-        '<conversation>\n</conversation>\n\n<query>\nWhat is the theory of relativity?\n</query>'
-      ],
-      [
-        'assistant',
-        '<query>Theory of relativity</query>'
+        '<conversation>\n</conversation>\n\n<query>\nWhat is the theory of relativity?\n</query>',
       ],
+      ['assistant', '<query>Theory of relativity</query>'],
       [
         'user',
-        '<conversation>\n</conversation>\n\n<query>\nHow does an AC work?\n</query>'
-      ],
-      [
-        'assistant',
-        '<query>AC working</query>'
+        '<conversation>\n</conversation>\n\n<query>\nHow does an AC work?\n</query>',
       ],
+      ['assistant', '<query>AC working</query>'],
       [
         'user',
-        '<conversation>{chat_history}</conversation>\n\n<query>{query}</query>\n'
-      ]
+        '<conversation>{chat_history}</conversation>\n\n<query>{query}</query>\n',
+      ],
     ]),
     llm,
     strParser,
     RunnableLambda.from(async (input: string) => {
       const queryParser = new LineOutputParser({
-        key: 'query'
+        key: 'query',
       });
-      return (await queryParser.parse(input));
+      return await queryParser.parse(input);
     }),
     RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {

From 1228beb59a4965683c24781640d7c6508437ddfd Mon Sep 17 00:00:00 2001
From: Willie Zutz
Date: Sat, 19 Jul 2025 08:23:06 -0600
Subject: [PATCH 04/38] feat(dashboard): Implement Widget Configuration and Display Components

- Added WidgetConfigModal for creating and editing widgets with fields for title, sources, prompt, provider, model, and refresh frequency.
- Integrated MarkdownRenderer for displaying widget content previews.
- Created WidgetDisplay component to show widget details, including loading states, error handling, and source information.
- Developed a reusable Card component structure for consistent UI presentation.
- Introduced useDashboard hook for managing widget state, including adding, updating, deleting, and refreshing widgets.
- Implemented local storage management for dashboard state and settings.
- Added types for widgets, dashboard configuration, and API requests/responses to improve type safety and clarity.
---
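The request/response shapes are defined in route.ts below; a sketch of a client call (provider and model values are illustrative and must match a configured chat provider):

    // Sketch only: calling the widget-processing endpoint added below.
    // {{source_content_1}} is replaced server-side with the fetched page text.
    const res = await fetch('/api/dashboard/process-widget', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        sources: [{ url: 'https://example.com', type: 'Web Page' }],
        prompt: 'Summarize this page:\n\n{{source_content_1}}',
        provider: 'openai', // illustrative
        model: 'gpt-4o-mini', // illustrative
      }),
    });
    const data = await res.json();
    // -> { content, success, sourcesFetched, totalSources, warnings? }
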
 src/app/api/dashboard/process-widget/route.ts | 216 +++++++++
 src/app/dashboard/page.tsx                    | 220 ++++++++++
 src/components/MarkdownRenderer.tsx           | 191 ++++++++
 src/components/MessageTabs.tsx                | 116 +----
 src/components/Sidebar.tsx                    |   8 +-
 src/components/ThinkBox.tsx                   |  17 +-
 .../dashboard/WidgetConfigModal.tsx           | 415 ++++++++++++++++++
 src/components/dashboard/WidgetDisplay.tsx    | 179 ++++++++
 src/components/ui/card.tsx                    | 108 +++++
 src/lib/hooks/useDashboard.ts                 | 414 +++++++++++++++++
 src/lib/types.ts                              |  83 ++++
 11 files changed, 1852 insertions(+), 115 deletions(-)
 create mode 100644 src/app/api/dashboard/process-widget/route.ts
 create mode 100644 src/app/dashboard/page.tsx
 create mode 100644 src/components/MarkdownRenderer.tsx
 create mode 100644 src/components/dashboard/WidgetConfigModal.tsx
 create mode 100644 src/components/dashboard/WidgetDisplay.tsx
 create mode 100644 src/components/ui/card.tsx
 create mode 100644 src/lib/hooks/useDashboard.ts
 create mode 100644 src/lib/types.ts

diff --git a/src/app/api/dashboard/process-widget/route.ts b/src/app/api/dashboard/process-widget/route.ts
new file mode 100644
index 0000000..5f42831
--- /dev/null
+++ b/src/app/api/dashboard/process-widget/route.ts
@@ -0,0 +1,216 @@
+import { NextRequest, NextResponse } from 'next/server';
+import { getWebContent, getWebContentLite } from '@/lib/utils/documents';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { ChatOpenAI } from '@langchain/openai';
+import { HumanMessage } from '@langchain/core/messages';
+import { getAvailableChatModelProviders } from '@/lib/providers';
+import {
+  getCustomOpenaiApiKey,
+  getCustomOpenaiApiUrl,
+  getCustomOpenaiModelName,
+} from '@/lib/config';
+import { ChatOllama } from '@langchain/ollama';
+
+interface Source {
+  url: string;
+  type: 'Web Page' | 'HTTP Data';
+}
+
+interface WidgetProcessRequest {
+  sources: Source[];
+  prompt: string;
+  provider: string;
+  model: string;
+}
+
+// Helper function to fetch content from a single source
+async function fetchSourceContent(source: Source): Promise<{ content: string; error?: string }> {
+  try {
+    let document;
+
+    if (source.type === 'Web Page') {
+      // Use headless browser for complex web pages
+      document = await getWebContent(source.url);
+    } else {
+      // Use faster fetch for HTTP data/APIs
+      document = await getWebContentLite(source.url);
+    }
+
+    if (!document) {
+      return {
+        content: '',
+        error: `Failed to fetch content from ${source.url}`
+      };
+    }
+
+    return { content: document.pageContent };
+  } catch (error) {
+    console.error(`Error fetching content from ${source.url}:`, error);
+    return {
+      content: '',
+      error: `Error fetching ${source.url}: ${error instanceof Error ? error.message : 'Unknown error'}`
+    };
+  }
+}
+
+// Helper function to replace variables in prompt
+function replacePromptVariables(prompt: string, sourceContents: string[], location?: string): string {
+  let processedPrompt = prompt;
+
+  // Replace source content variables
+  sourceContents.forEach((content, index) => {
+    const variable = `{{source_content_${index + 1}}}`;
+    processedPrompt = processedPrompt.replace(new RegExp(variable, 'g'), content);
+  });
+
+  // Replace location if provided
+  if (location) {
+    processedPrompt = processedPrompt.replace(/\{\{location\}\}/g, location);
+  }
+
+  return processedPrompt;
+}
+
+// Helper function to get LLM instance based on provider and model
+async function getLLMInstance(provider: string, model: string): Promise<BaseChatModel | null> {
+  try {
+    const chatModelProviders = await getAvailableChatModelProviders();
+
+    if (provider === 'custom_openai') {
+      return new ChatOpenAI({
+        modelName: model || getCustomOpenaiModelName(),
+        openAIApiKey: getCustomOpenaiApiKey(),
+        configuration: {
+          baseURL: getCustomOpenaiApiUrl(),
+        },
+      }) as unknown as BaseChatModel;
+    }
+
+    if (chatModelProviders[provider] && chatModelProviders[provider][model]) {
+      const llm = chatModelProviders[provider][model].model as BaseChatModel;
+
+      // Special handling for Ollama models
+      if (llm instanceof ChatOllama && provider === 'ollama') {
+        llm.numCtx = 2048; // Default context window
+      }
+
+      return llm;
+    }
+
+    return null;
+  } catch (error) {
+    console.error('Error getting LLM instance:', error);
+    return null;
+  }
+}
+
+// Helper function to process the prompt with LLM
+async function processWithLLM(prompt: string, provider: string, model: string): Promise<string> {
+  const llm = await getLLMInstance(provider, model);
+
+  if (!llm) {
+    throw new Error(`Invalid or unavailable model: ${provider}/${model}`);
+  }
+
+  const message = new HumanMessage({ content: prompt });
+  const response = await llm.invoke([message]);
+
+  return response.content as string;
+}
+
+export async function POST(request: NextRequest) {
+  try {
+    const body: WidgetProcessRequest = await request.json();
+
+    // Validate required fields
+    if (!body.sources || !body.prompt || !body.provider || !body.model) {
+      return NextResponse.json(
+        { error: 'Missing required fields: sources, prompt, provider, model' },
+        { status: 400 }
+      );
+    }
+
+    // Validate sources
+    if (!Array.isArray(body.sources) || body.sources.length === 0) {
+      return NextResponse.json(
+        { error: 'At least one source URL is required' },
+        { status: 400 }
+      );
+    }
+
+    // Fetch content from all sources
+    console.log(`Processing widget with ${body.sources.length} source(s)`);
+    const sourceResults = await Promise.all(
+      body.sources.map(source => fetchSourceContent(source))
+    );
+
+    // Check for fetch errors
+    const fetchErrors = sourceResults
+      .map((result, index) => result.error ? `Source ${index + 1}: ${result.error}` : null)
+      .filter(Boolean);
+
+    if (fetchErrors.length > 0) {
+      console.warn('Some sources failed to fetch:', fetchErrors);
+    }
+
+    // Extract successful content
+    const sourceContents = sourceResults.map(result => result.content);
+
+    // If all sources failed, return error
+    if (sourceContents.every(content => !content)) {
+      return NextResponse.json(
+        { error: 'Failed to fetch content from all sources' },
+        { status: 500 }
+      );
+    }
+
+    // Replace variables in prompt
+    const processedPrompt = replacePromptVariables(body.prompt, sourceContents);
+
+    console.log('Processed prompt:', processedPrompt.substring(0, 200) + '...');
+
+    // Process with LLM
+    try {
+      const llmResponse = await processWithLLM(processedPrompt, body.provider, body.model);
+
+      return NextResponse.json({
+        content: llmResponse,
+        success: true,
+        sourcesFetched: sourceContents.filter(content => content).length,
+        totalSources: body.sources.length,
+        warnings: fetchErrors.length > 0 ? fetchErrors : undefined
+      });
+    } catch (llmError) {
+      console.error('LLM processing failed:', llmError);
+
+      // Return diagnostic information if LLM fails
+      const diagnosticResponse = `# Widget Processing - LLM Error
+
+**Error:** ${llmError instanceof Error ? llmError.message : 'Unknown LLM error'}
+
+## Processed Prompt (for debugging)
+${processedPrompt}
+
+## Sources Successfully Fetched
+${sourceContents.filter(content => content).length} of ${body.sources.length} sources
+
+${fetchErrors.length > 0 ? `## Source Errors\n${fetchErrors.join('\n')}` : ''}`;
+
+      return NextResponse.json({
+        content: diagnosticResponse,
+        success: false,
+        error: llmError instanceof Error ? llmError.message : 'LLM processing failed',
+        sourcesFetched: sourceContents.filter(content => content).length,
+        totalSources: body.sources.length
+      });
+    }
+
+  } catch (error) {
+    console.error('Error processing widget:', error);
+    return NextResponse.json(
+      { error: 'Internal server error' },
+      { status: 500 }
+    );
+  }
+}
diff --git a/src/app/dashboard/page.tsx b/src/app/dashboard/page.tsx
new file mode 100644
index 0000000..cabac17
--- /dev/null
+++ b/src/app/dashboard/page.tsx
@@ -0,0 +1,220 @@
+'use client';
+
+import { Plus, RefreshCw, Download, Upload, LayoutDashboard, Layers, List } from 'lucide-react';
+import { useState } from 'react';
+import { Card, CardContent, CardDescription, CardFooter, CardHeader, CardTitle } from '@/components/ui/card';
+import WidgetConfigModal from '@/components/dashboard/WidgetConfigModal';
+import WidgetDisplay from '@/components/dashboard/WidgetDisplay';
+import { useDashboard } from '@/lib/hooks/useDashboard';
+import { Widget, WidgetConfig } from '@/lib/types';
+
+const DashboardPage = () => {
+  const {
+    widgets,
+    isLoading,
+    addWidget,
+    updateWidget,
+    deleteWidget,
+    refreshWidget,
+    refreshAllWidgets,
+    exportDashboard,
+    importDashboard,
+    settings,
+    updateSettings,
+  } = useDashboard();
+
+  const [showAddModal, setShowAddModal] = useState(false);
+  const [editingWidget, setEditingWidget] = useState<Widget | null>(null);
+
+  const handleAddWidget = () => {
+    setEditingWidget(null);
+    setShowAddModal(true);
+  };
+
+  const handleEditWidget = (widget: Widget) => {
+    setEditingWidget(widget);
+    setShowAddModal(true);
+  };
+
+  const handleSaveWidget = (widgetConfig: WidgetConfig) => {
+    if (editingWidget) {
+      // Update existing widget
+      updateWidget(editingWidget.id, widgetConfig);
+    } else {
+      // Add new widget
+      addWidget(widgetConfig);
+    }
+    setShowAddModal(false);
+    setEditingWidget(null);
+  };
+
+  const handleCloseModal = () => {
+    setShowAddModal(false);
+    setEditingWidget(null);
+  };
+
+  const handleDeleteWidget = (widgetId: string) => {
+    deleteWidget(widgetId);
+  };
+
+  const handleRefreshWidget = (widgetId: string) => {
+    refreshWidget(widgetId, true); // Force refresh when manually triggered
+  };
+
+  const handleRefreshAll = () => {
+    refreshAllWidgets();
+  };
+
+  const handleExport = async () => {
+    try {
+      const configJson = await exportDashboard();
+      await navigator.clipboard.writeText(configJson);
+      // TODO: Add toast notification for success
+      console.log('Dashboard configuration copied to clipboard');
+    } catch (error) {
+      console.error('Export failed:', error);
+      // TODO: Add toast notification for error
+    }
+  };
+
+  const handleImport = async () => {
+    try {
+      const configJson = await navigator.clipboard.readText();
+      await importDashboard(configJson);
+      // TODO: Add toast notification for success
+      console.log('Dashboard configuration imported successfully');
+    } catch (error) {
+      console.error('Import failed:', error);
+      // TODO: Add toast notification for error
+    }
+  };
+
+  const handleToggleProcessingMode = () => {
+    updateSettings({ parallelLoading: !settings.parallelLoading });
+  };
+
+  // Empty state component
+  const EmptyDashboard = () => (
+ + + Welcome to your Dashboard + + Create your first widget to get started with personalized information + + + + +

+ Widgets let you fetch content from any URL and process it with AI to show exactly what you need. +

+
+ + + + +
+
+ ); + + return ( +
+ {/* Header matching other pages */} +
+
+
+ +

Dashboard

+
+ +
+ + + + + + + + + +
+
+
+
+ + {/* Main content area */} +
+ {isLoading ? ( +
+
+
+

Loading dashboard...

+
+
+ ) : widgets.length === 0 ? ( + + ) : ( +
+ {widgets.map((widget) => ( + + ))} +
+ )} +
+ + {/* Widget Configuration Modal */} + +
+ ); +}; + +export default DashboardPage; diff --git a/src/components/MarkdownRenderer.tsx b/src/components/MarkdownRenderer.tsx new file mode 100644 index 0000000..88fd6a1 --- /dev/null +++ b/src/components/MarkdownRenderer.tsx @@ -0,0 +1,191 @@ +/* eslint-disable @next/next/no-img-element */ +'use client'; + +import { cn } from '@/lib/utils'; +import { CheckCheck, Copy as CopyIcon, Brain } from 'lucide-react'; +import Markdown, { MarkdownToJSX } from 'markdown-to-jsx'; +import { useState } from 'react'; +import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'; +import { oneDark } from 'react-syntax-highlighter/dist/cjs/styles/prism'; +import ThinkBox from './ThinkBox'; + +// Helper functions for think overlay +const extractThinkContent = (content: string): string | null => { + const thinkRegex = /([\s\S]*?)<\/think>/g; + const matches = content.match(thinkRegex); + if (!matches) return null; + + // Extract content between think tags and join if multiple + const extractedContent = matches + .map(match => match.replace(/<\/?think>/g, '')) + .join('\n\n'); + + // Return null if content is empty or only whitespace + return extractedContent.trim().length === 0 ? null : extractedContent; +}; + +const removeThinkTags = (content: string): string => { + return content.replace(/[\s\S]*?<\/think>/g, '').trim(); +}; + +const ThinkTagProcessor = ({ + children, + isOverlayMode = false +}: { + children: React.ReactNode; + isOverlayMode?: boolean; +}) => { + // In overlay mode, don't render anything (content will be handled by overlay) + if (isOverlayMode) { + return null; + } + return ; +}; + +const CodeBlock = ({ + className, + children, +}: { + className?: string; + children: React.ReactNode; +}) => { + // Extract language from className (format could be "language-javascript" or "lang-javascript") + let language = ''; + if (className) { + if (className.startsWith('language-')) { + language = className.replace('language-', ''); + } else if (className.startsWith('lang-')) { + language = className.replace('lang-', ''); + } + } + + const content = children as string; + const [isCopied, setIsCopied] = useState(false); + + const handleCopyCode = () => { + navigator.clipboard.writeText(content); + setIsCopied(true); + setTimeout(() => setIsCopied(false), 2000); + }; + + return ( +
+
+ {language} + +
+ 1} + useInlineStyles={true} + PreTag="div" + > + {content} + +
+ ); +}; + +interface MarkdownRendererProps { + content: string; + className?: string; + thinkOverlay?: boolean; +} + +const MarkdownRenderer = ({ content, className, thinkOverlay = false }: MarkdownRendererProps) => { + const [showThinkBox, setShowThinkBox] = useState(false); + + // Extract think content from the markdown + const thinkContent = thinkOverlay ? extractThinkContent(content) : null; + const contentWithoutThink = thinkOverlay ? removeThinkTags(content) : content; + // Markdown formatting options + const markdownOverrides: MarkdownToJSX.Options = { + overrides: { + think: { + component: ({ children }) => ( + + {children} + + ), + }, + code: { + component: ({ className, children }) => { + // Check if it's an inline code block or a fenced code block + if (className) { + // This is a fenced code block (```code```) + return {children}; + } + // This is an inline code block (`code`) + return ( + + {children} + + ); + }, + }, + pre: { + component: ({ children }) => children, + }, + }, + }; + + return ( +
+ {/* Think box when expanded - shows above markdown */} + {thinkOverlay && thinkContent && showThinkBox && ( +
+ setShowThinkBox(false)} + /> +
+ )} + + + {thinkOverlay ? contentWithoutThink : content} + + + {/* Overlay icon when think box is collapsed */} + {thinkOverlay && thinkContent && !showThinkBox && ( + + )} +
+ ); +}; + +export default MarkdownRenderer; diff --git a/src/components/MessageTabs.tsx b/src/components/MessageTabs.tsx index 0fe32be..c7a7028 100644 --- a/src/components/MessageTabs.tsx +++ b/src/components/MessageTabs.tsx @@ -5,8 +5,6 @@ import { getSuggestions } from '@/lib/actions'; import { cn } from '@/lib/utils'; import { BookCopy, - CheckCheck, - Copy as CopyIcon, Disc3, ImagesIcon, Layers3, @@ -16,86 +14,16 @@ import { VideoIcon, Volume2, } from 'lucide-react'; -import Markdown, { MarkdownToJSX } from 'markdown-to-jsx'; import { useCallback, useEffect, useState } from 'react'; -import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'; -import { oneDark } from 'react-syntax-highlighter/dist/cjs/styles/prism'; import { useSpeech } from 'react-text-to-speech'; import { Message } from './ChatWindow'; +import MarkdownRenderer from './MarkdownRenderer'; import Copy from './MessageActions/Copy'; import ModelInfoButton from './MessageActions/ModelInfo'; import Rewrite from './MessageActions/Rewrite'; import MessageSources from './MessageSources'; import SearchImages from './SearchImages'; import SearchVideos from './SearchVideos'; -import ThinkBox from './ThinkBox'; - -const ThinkTagProcessor = ({ children }: { children: React.ReactNode }) => { - return ; -}; - -const CodeBlock = ({ - className, - children, -}: { - className?: string; - children: React.ReactNode; -}) => { - // Extract language from className (format could be "language-javascript" or "lang-javascript") - let language = ''; - if (className) { - if (className.startsWith('language-')) { - language = className.replace('language-', ''); - } else if (className.startsWith('lang-')) { - language = className.replace('lang-', ''); - } - } - - const content = children as string; - const [isCopied, setIsCopied] = useState(false); - - const handleCopyCode = () => { - navigator.clipboard.writeText(content); - setIsCopied(true); - setTimeout(() => setIsCopied(false), 2000); - }; - - return ( -
-
- {language} - -
- 1} - useInlineStyles={true} - PreTag="div" - > - {content} - -
- ); -}; type TabType = 'text' | 'sources' | 'images' | 'videos'; @@ -236,33 +164,6 @@ const MessageTabs = ({ } }, [isLast, loading, message.role, handleLoadSuggestions]); - // Markdown formatting options - const markdownOverrides: MarkdownToJSX.Options = { - overrides: { - think: { - component: ThinkTagProcessor, - }, - code: { - component: ({ className, children }) => { - // Check if it's an inline code block or a fenced code block - if (className) { - // This is a fenced code block (```code```) - return {children}; - } - // This is an inline code block (`code`) - return ( - - {children} - - ); - }, - }, - pre: { - component: ({ children }) => children, - }, - }, - }; - return (
{/* Tabs */} @@ -372,17 +273,10 @@ const MessageTabs = ({ {/* Answer Tab */} {activeTab === 'text' && (
- - {parsedMessage} - + {loading && isLast ? null : (
diff --git a/src/components/Sidebar.tsx b/src/components/Sidebar.tsx index 5829c60..9d3044e 100644 --- a/src/components/Sidebar.tsx +++ b/src/components/Sidebar.tsx @@ -1,7 +1,7 @@ 'use client'; import { cn } from '@/lib/utils'; -import { BookOpenText, Home, Search, SquarePen, Settings } from 'lucide-react'; +import { BookOpenText, Home, Search, SquarePen, Settings, LayoutDashboard } from 'lucide-react'; import Link from 'next/link'; import { useSelectedLayoutSegments } from 'next/navigation'; import React, { useState, type ReactNode } from 'react'; @@ -23,6 +23,12 @@ const Sidebar = ({ children }: { children: React.ReactNode }) => { active: segments.length === 0 || segments.includes('c'), label: 'Home', }, + { + icon: LayoutDashboard, + href: '/dashboard', + active: segments.includes('dashboard'), + label: 'Dashboard', + }, { icon: Search, href: '/discover', diff --git a/src/components/ThinkBox.tsx b/src/components/ThinkBox.tsx index 9c6a576..defb5f1 100644 --- a/src/components/ThinkBox.tsx +++ b/src/components/ThinkBox.tsx @@ -6,15 +6,26 @@ import { ChevronDown, ChevronUp, BrainCircuit } from 'lucide-react'; interface ThinkBoxProps { content: string; + expanded?: boolean; + onToggle?: () => void; } -const ThinkBox = ({ content }: ThinkBoxProps) => { - const [isExpanded, setIsExpanded] = useState(false); +const ThinkBox = ({ content, expanded, onToggle }: ThinkBoxProps) => { + // Don't render anything if content is empty or only whitespace + if (!content || content.trim().length === 0) { + return null; + } + + const [internalExpanded, setInternalExpanded] = useState(false); + + // Use external expanded state if provided, otherwise use internal state + const isExpanded = expanded !== undefined ? expanded : internalExpanded; + const handleToggle = onToggle || (() => setInternalExpanded(!internalExpanded)); return (
+ + +
+ {/* Left Column - Configuration */} +
+ {/* Widget Title */} +
+ + setConfig(prev => ({ ...prev, title: e.target.value }))} + className="w-full px-3 py-2 border border-light-200 dark:border-dark-200 rounded-md bg-light-primary dark:bg-dark-primary text-black dark:text-white focus:outline-none focus:ring-2 focus:ring-blue-500" + placeholder="Enter widget title..." + /> +
+ + {/* Source URLs */} +
+ +
+ {config.sources.map((source, index) => ( +
+ updateSource(index, 'url', e.target.value)} + className="flex-1 px-3 py-2 border border-light-200 dark:border-dark-200 rounded-md bg-light-primary dark:bg-dark-primary text-black dark:text-white focus:outline-none focus:ring-2 focus:ring-blue-500" + placeholder="https://example.com" + /> + + {config.sources.length > 1 && ( + + )} +
+ ))} + +
+
+ + {/* LLM Prompt */} +
+ +