From 8796009141429ec2cb234553b0018e637a34819f Mon Sep 17 00:00:00 2001
From: Willie Zutz
Date: Tue, 6 May 2025 23:45:46 -0600
Subject: [PATCH] fix(api): History rewriting should delete the current message. fix(UI): Model changes shouldn't submit the form.

---
 .github/copilot-instructions.md | 94 +++++++++++++++++++
 src/app/api/chat/route.ts | 34 +++----
 .../MessageInputActions/ModelSelector.tsx | 4 +-
 3 files changed, 111 insertions(+), 21 deletions(-)
 create mode 100644 .github/copilot-instructions.md

diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
new file mode 100644
index 0000000..01a4628
--- /dev/null
+++ b/.github/copilot-instructions.md
@@ -0,0 +1,94 @@
+# GitHub Copilot Instructions for Perplexica
+
+This file provides context and guidance for GitHub Copilot when working with the Perplexica codebase.
+
+## Project Overview
+
+Perplexica is an open-source AI-powered search engine that uses advanced machine learning to provide intelligent search results. It combines web search capabilities with LLM-based processing to understand and answer user questions, similar to Perplexity AI but fully open source.
+
+## Key Components
+
+- **Frontend**: Next.js application with React components (in `/src/components` and `/src/app`)
+- **Backend Logic**: Node.js backend with API routes (in `/src/app/api`) and library code (in `/src/lib`)
+- **Search Engine**: Uses SearXNG as a metasearch engine
+- **LLM Integration**: Supports multiple providers, including OpenAI, Anthropic, Groq, and Ollama (local models)
+- **Database**: SQLite database managed with Drizzle ORM
+
+## Architecture
+
+The system works through these main steps:
+
+- User submits a query
+- The system determines if web search is needed
+- If needed, it searches the web using SearXNG
+- Results are ranked using embedding-based similarity search
+- LLMs are used to generate a comprehensive response with cited sources
+
+## Key Technologies
+
+- **Frontend**: React, Next.js, Tailwind CSS
+- **Backend**: Node.js
+- **Database**: SQLite with Drizzle ORM
+- **AI/ML**: LangChain for orchestration, various LLM providers
+- **Search**: SearXNG integration
+- **Embedding Models**: For re-ranking search results
+
+## Project Structure
+
+- `/src/app`: Next.js app directory with page components and API routes
+- `/src/components`: Reusable UI components
+- `/src/lib`: Backend functionality
+  - `/lib/search`: Search functionality and meta search agent
+  - `/lib/db`: Database schema and operations
+  - `/lib/providers`: LLM and embedding model integrations
+  - `/lib/prompts`: Prompt templates for LLMs
+  - `/lib/chains`: LangChain chains for various operations
+
+## Focus Modes
+
+Perplexica supports multiple specialized search modes:
+
+- All Mode: General web search
+- Local Research Mode: Research and interact with local files, with citations
+- Chat Mode: Have a creative conversation
+- Academic Search Mode: For academic research
+- YouTube Search Mode: For video content
+- Wolfram Alpha Search Mode: For calculations and data analysis
+- Reddit Search Mode: For community discussions
+
+## Development Workflow
+
+- Use `npm run dev` for local development
+- Format code with `npm run format:write` before committing
+- Database migrations: `npm run db:push`
+- Build for production: `npm run build`
+- Start production server: `npm run start`
+
+## Configuration
+
+The application uses a `config.toml` file (created from `sample.config.toml`) for configuration, including:
+
+- API keys for various LLM providers
+- Database settings
+- Search engine configuration
+- Similarity measure settings
+
+## Common Tasks
+
+When working on this codebase, you might need to:
+
+- Add new API endpoints in `/src/app/api`
+- Modify UI components in `/src/components`
+- Extend search functionality in `/src/lib/search`
+- Add new LLM providers in `/src/lib/providers`
+- Update database schema in `/src/lib/db/schema.ts`
+- Create new prompt templates in `/src/lib/prompts`
+- Build new chains in `/src/lib/chains`
+
+## AI Behavior
+
+- Avoid conciliatory language
+- It is not necessary to apologize
+- If you don't know the answer, ask for clarification
+- Do not add additional packages or dependencies unless explicitly requested
+- Only make changes to the code that are relevant to the task at hand
diff --git a/src/app/api/chat/route.ts b/src/app/api/chat/route.ts
index f21e278..f11324f 100644
--- a/src/app/api/chat/route.ts
+++ b/src/app/api/chat/route.ts
@@ -1,27 +1,23 @@
-import prompts from '@/lib/prompts';
-import MetaSearchAgent from '@/lib/search/metaSearchAgent';
-import crypto from 'crypto';
-import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
-import { EventEmitter } from 'stream';
-import {
-  chatModelProviders,
-  embeddingModelProviders,
-  getAvailableChatModelProviders,
-  getAvailableEmbeddingModelProviders,
-} from '@/lib/providers';
-import db from '@/lib/db';
-import { chats, messages as messagesSchema } from '@/lib/db/schema';
-import { and, eq, gt } from 'drizzle-orm';
-import { getFileDetails } from '@/lib/utils/files';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { ChatOpenAI } from '@langchain/openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '@/lib/config';
-import { ChatOllama } from '@langchain/ollama';
+import db from '@/lib/db';
+import { chats, messages as messagesSchema } from '@/lib/db/schema';
+import {
+  getAvailableChatModelProviders,
+  getAvailableEmbeddingModelProviders
+} from '@/lib/providers';
 import { searchHandlers } from '@/lib/search';
+import { getFileDetails } from '@/lib/utils/files';
+import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
+import { ChatOllama } from '@langchain/ollama';
+import { ChatOpenAI } from '@langchain/openai';
+import crypto from 'crypto';
+import { and, eq, gte } from 'drizzle-orm';
+import { EventEmitter } from 'stream';
 
 export const runtime = 'nodejs';
 export const dynamic = 'force-dynamic';
@@ -202,7 +198,7 @@ const handleHistorySave = async (
       .delete(messagesSchema)
       .where(
         and(
-          gt(messagesSchema.id, messageExists.id),
+          gte(messagesSchema.id, messageExists.id),
           eq(messagesSchema.chatId, message.chatId),
         ),
       )
diff --git a/src/components/MessageInputActions/ModelSelector.tsx b/src/components/MessageInputActions/ModelSelector.tsx
index 96e5da4..a17f7e1 100644
--- a/src/components/MessageInputActions/ModelSelector.tsx
+++ b/src/components/MessageInputActions/ModelSelector.tsx
@@ -254,7 +254,7 @@ const ModelSelector = ({
           {isExpanded && (
             {provider.models.map((modelOption) => (
-
+
             ))}
           )}
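
Why `gte` instead of `gt`: with `gte`, the delete in `handleHistorySave` removes the edited message itself as well as every later message in the chat, so the rewritten history is regenerated from a clean slate instead of leaving a stale copy of the current message behind. The sketch below isolates that query using the same Drizzle ORM helpers the route imports; it assumes `messages.id` is the auto-incrementing integer primary key (which is what makes "every later message" equivalent to "every larger id"), and the standalone helper name and its parameters are illustrative, not part of the patch.

```typescript
import { and, eq, gte } from 'drizzle-orm';

import db from '@/lib/db';
import { messages as messagesSchema } from '@/lib/db/schema';

// Illustrative helper (not part of the patch): when a message is edited,
// delete that message and everything after it in the same chat before the
// regenerated history is saved. `gte` keeps the edited message in the delete
// range; the previous `gt` left it behind.
const deleteMessageAndFollowing = async (chatId: string, messageId: number) => {
  await db
    .delete(messagesSchema)
    .where(
      and(
        gte(messagesSchema.id, messageId), // include the edited message itself
        eq(messagesSchema.chatId, chatId), // scope the delete to this chat only
      ),
    )
    .execute();
};
```

The fix(UI) change is the same class of problem on the client side: a `<button>` rendered inside a form defaults to `type="submit"`, so clicking a model option would also submit the chat form. As the commit subject describes, the usual remedy is an explicit non-submit type (e.g. `type="button"`) on the model option buttons so that changing the model never triggers the form's submit handler.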