// Perplexica — src/routes/suggestions.ts
// POST /suggestions: generate follow-up suggestions for a chat history.
import express from "express";
import generateSuggestions from "../agents/suggestionGeneratorAgent";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { getAvailableChatModelProviders } from "../lib/providers";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import logger from "../utils/logger";
2024-05-09 20:42:03 +05:30
const router = express.Router();
2024-07-05 14:36:50 +08:00
router.post("/", async (req, res) => {
2024-05-09 20:42:03 +05:30
try {
const { chat_history: raw_chat_history, chat_model, chat_model_provider } = req.body;
2024-05-09 20:42:03 +05:30
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const chat_history = raw_chat_history.map((msg: any) => {
2024-07-05 14:36:50 +08:00
if (msg.role === "user") {
2024-05-09 20:42:03 +05:30
return new HumanMessage(msg.content);
2024-07-05 14:36:50 +08:00
} else if (msg.role === "assistant") {
2024-05-09 20:42:03 +05:30
return new AIMessage(msg.content);
}
});
const chatModels = await getAvailableChatModelProviders();
2024-05-22 10:45:16 +05:30
const provider = chat_model_provider ?? Object.keys(chatModels)[0];
const chatModel = chat_model ?? Object.keys(chatModels[provider])[0];
2024-05-09 20:42:03 +05:30
let llm: BaseChatModel | undefined;
if (chatModels[provider] && chatModels[provider][chatModel]) {
llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
}
if (!llm) {
2024-07-05 14:36:50 +08:00
res.status(500).json({ message: "Invalid LLM model selected" });
2024-05-09 20:42:03 +05:30
return;
}
const suggestions = await generateSuggestions({ chat_history }, llm);
res.status(200).json({ suggestions: suggestions });
} catch (err) {
2024-07-05 14:36:50 +08:00
res.status(500).json({ message: "An error has occurred." });
2024-05-09 20:42:03 +05:30
logger.error(`Error in generating suggestions: ${err.message}`);
}
});
export default router;