diff --git a/docker-compose.yaml b/docker-compose.yaml
index b32e0a9..f511e94 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -15,6 +15,7 @@ services:
       context: .
       dockerfile: app.dockerfile
     environment:
+      - DOCKER=true
       - SEARXNG_API_URL=http://searxng:8080
       - DATA_DIR=/home/perplexica
     ports:
@@ -24,7 +25,6 @@ services:
     volumes:
       - backend-dbstore:/home/perplexica/data
       - uploads:/home/perplexica/uploads
-      - ./config.toml:/home/perplexica/config.toml
     restart: unless-stopped
     networks:
diff --git a/package.json b/package.json
index bf7538e..7100e3f 100644
--- a/package.json
+++ b/package.json
@@ -29,8 +29,8 @@
     "better-sqlite3": "^11.9.1",
     "clsx": "^2.1.0",
     "compute-cosine-similarity": "^1.1.0",
-    "compute-dot": "^1.1.0",
     "drizzle-orm": "^0.40.1",
+    "framer-motion": "^12.23.24",
     "html-to-text": "^9.0.5",
     "jspdf": "^3.0.1",
     "langchain": "^0.3.30",
diff --git a/sample.config.toml b/sample.config.toml
deleted file mode 100644
index 90c69e7..0000000
--- a/sample.config.toml
+++ /dev/null
@@ -1,39 +0,0 @@
-[GENERAL]
-SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
-KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
-
-[MODELS.OPENAI]
-API_KEY = ""
-
-[MODELS.GROQ]
-API_KEY = ""
-
-[MODELS.ANTHROPIC]
-API_KEY = ""
-
-[MODELS.GEMINI]
-API_KEY = ""
-
-[MODELS.CUSTOM_OPENAI]
-API_KEY = ""
-API_URL = ""
-MODEL_NAME = ""
-
-[MODELS.OLLAMA]
-API_URL = "" # Ollama API URL - http://host.docker.internal:11434
-
-[MODELS.DEEPSEEK]
-API_KEY = ""
-
-[MODELS.AIMLAPI]
-API_KEY = "" # Required to use AI/ML API chat and embedding models
-
-[MODELS.LM_STUDIO]
-API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
-
-[MODELS.LEMONADE]
-API_URL = "" # Lemonade API URL - http://host.docker.internal:8000
-API_KEY = "" # Optional API key for Lemonade
-
-[API_ENDPOINTS]
-SEARXNG = "" # SearxNG API URL - http://localhost:32768
diff --git a/src/app/api/chat/route.ts b/src/app/api/chat/route.ts
index 7329299..bab34fa 100644
--- a/src/app/api/chat/route.ts
+++ b/src/app/api/chat/route.ts
@@ -1,23 +1,14 @@
 import crypto from 'crypto';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
 import { EventEmitter } from 'stream';
-import {
-  getAvailableChatModelProviders,
-  getAvailableEmbeddingModelProviders,
-} from '@/lib/providers';
 import db from '@/lib/db';
 import { chats, messages as messagesSchema } from '@/lib/db/schema';
 import { and, eq, gt } from 'drizzle-orm';
 import { getFileDetails } from '@/lib/utils/files';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { ChatOpenAI } from '@langchain/openai';
-import {
-  getCustomOpenaiApiKey,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiModelName,
-} from '@/lib/config';
 import { searchHandlers } from '@/lib/search';
 import { z } from 'zod';
+import ModelRegistry from '@/lib/models/registry';
+import { ModelWithProvider } from '@/lib/models/types';
 
 export const runtime = 'nodejs';
 export const dynamic = 'force-dynamic';
@@ -28,14 +19,30 @@ const messageSchema = z.object({
   content: z.string().min(1, 'Message content is required'),
 });
 
-const chatModelSchema = z.object({
-  provider: z.string().optional(),
-  name: z.string().optional(),
+const chatModelSchema: z.ZodType<ModelWithProvider> = z.object({
+  providerId: z.string({
+    errorMap: () => ({
+      message: 'Chat model provider id must be provided',
+    }),
+  }),
+  key: z.string({
+    errorMap: () => ({
+      message: 'Chat model key must be provided',
+    }),
+  }),
 });
 
-const embeddingModelSchema = z.object({
-  provider: z.string().optional(),
-  name: z.string().optional(),
+const embeddingModelSchema: z.ZodType<ModelWithProvider> = z.object({
+  providerId: z.string({
+    errorMap: () => ({
+      message: 'Embedding model provider id must be provided',
+    }),
+  }),
+  key: z.string({
+    errorMap: () => ({
+      message: 'Embedding model key must be provided',
+    }),
+  }),
 });
 
 const bodySchema = z.object({
@@ -57,8 +64,8 @@
     .optional()
     .default([]),
   files: z.array(z.string()).optional().default([]),
-  chatModel: chatModelSchema.optional().default({}),
-  embeddingModel: embeddingModelSchema.optional().default({}),
+  chatModel: chatModelSchema,
+  embeddingModel: embeddingModelSchema,
   systemInstructions: z.string().nullable().optional().default(''),
 });
 
@@ -248,56 +255,16 @@ export const POST = async (req: Request) => {
       );
     }
 
-    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
-      getAvailableChatModelProviders(),
-      getAvailableEmbeddingModelProviders(),
+    const registry = new ModelRegistry();
+
+    const [llm, embedding] = await Promise.all([
+      registry.loadChatModel(body.chatModel.providerId, body.chatModel.key),
+      registry.loadEmbeddingModel(
+        body.embeddingModel.providerId,
+        body.embeddingModel.key,
+      ),
     ]);
 
-    const chatModelProvider =
-      chatModelProviders[
-        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
-      ];
-    const chatModel =
-      chatModelProvider[
-        body.chatModel?.name || Object.keys(chatModelProvider)[0]
-      ];
-
-    const embeddingProvider =
-      embeddingModelProviders[
-        body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]
-      ];
-    const embeddingModel =
-      embeddingProvider[
-        body.embeddingModel?.name || Object.keys(embeddingProvider)[0]
-      ];
-
-    let llm: BaseChatModel | undefined;
-    let embedding = embeddingModel.model;
-
-    if (body.chatModel?.provider === 'custom_openai') {
-      llm = new ChatOpenAI({
-        apiKey: getCustomOpenaiApiKey(),
-        modelName: getCustomOpenaiModelName(),
-        temperature: 0.7,
-        configuration: {
-          baseURL: getCustomOpenaiApiUrl(),
-        },
-      }) as unknown as BaseChatModel;
-    } else if (chatModelProvider && chatModel) {
-      llm = chatModel.model;
-    }
-
-    if (!llm) {
-      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
-    }
-
-    if (!embedding) {
-      return Response.json(
-        { error: 'Invalid embedding model' },
-        { status: 400 },
-      );
-    }
-
     const humanMessageId =
       message.messageId ?? crypto.randomBytes(7).toString('hex');
 
diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts
index 5f66fdf..1e36137 100644
--- a/src/app/api/config/route.ts
+++ b/src/app/api/config/route.ts
@@ -1,134 +1,76 @@
-import {
-  getAnthropicApiKey,
-  getCustomOpenaiApiKey,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiModelName,
-  getGeminiApiKey,
-  getGroqApiKey,
-  getOllamaApiEndpoint,
-  getOpenaiApiKey,
-  getDeepseekApiKey,
-  getAimlApiKey,
-  getLMStudioApiEndpoint,
-  getLemonadeApiEndpoint,
-  getLemonadeApiKey,
-  updateConfig,
-  getOllamaApiKey,
-} from '@/lib/config';
-import {
-  getAvailableChatModelProviders,
-  getAvailableEmbeddingModelProviders,
-} from '@/lib/providers';
+import configManager from '@/lib/config';
+import ModelRegistry from '@/lib/models/registry';
+import { NextRequest, NextResponse } from 'next/server';
+import { ConfigModelProvider } from '@/lib/config/types';
 
-export const GET = async (req: Request) => {
-  try {
-    const config: Record<string, any> = {};
-
-    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
-      getAvailableChatModelProviders(),
-      getAvailableEmbeddingModelProviders(),
-    ]);
-
-    config['chatModelProviders'] = {};
-    config['embeddingModelProviders'] = {};
-
-    for (const provider in chatModelProviders) {
-      config['chatModelProviders'][provider] = Object.keys(
-        chatModelProviders[provider],
-      ).map((model) => {
-        return {
-          name: model,
-          displayName: chatModelProviders[provider][model].displayName,
-        };
-      });
-    }
-
-    for (const provider in embeddingModelProviders) {
-      config['embeddingModelProviders'][provider] = Object.keys(
-        embeddingModelProviders[provider],
-      ).map((model) => {
-        return {
-          name: model,
-          displayName: embeddingModelProviders[provider][model].displayName,
-        };
-      });
-    }
-
-    config['openaiApiKey'] = getOpenaiApiKey();
-    config['ollamaApiUrl'] = getOllamaApiEndpoint();
-    config['ollamaApiKey'] = getOllamaApiKey();
-    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
-    config['lemonadeApiUrl'] = getLemonadeApiEndpoint();
-    config['lemonadeApiKey'] = getLemonadeApiKey();
-    config['anthropicApiKey'] = getAnthropicApiKey();
-    config['groqApiKey'] = getGroqApiKey();
-    config['geminiApiKey'] = getGeminiApiKey();
-    config['deepseekApiKey'] = getDeepseekApiKey();
-    config['aimlApiKey'] = getAimlApiKey();
-    config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
-    config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
-    config['customOpenaiModelName'] = getCustomOpenaiModelName();
-
-    return Response.json({ ...config }, { status: 200 });
-  } catch (err) {
-    console.error('An error occurred while getting config:', err);
-    return Response.json(
-      { message: 'An error occurred while getting config' },
-      { status: 500 },
-    );
-  }
+type SaveConfigBody = {
+  key: string;
+  value: string;
 };
 
-export const POST = async (req: Request) => {
+export const GET = async (req: NextRequest) => {
   try {
-    const config = await req.json();
+    const values = configManager.getCurrentConfig();
+    const fields = configManager.getUIConfigSections();
 
-    const updatedConfig = {
-      MODELS: {
-        OPENAI: {
-          API_KEY: config.openaiApiKey,
-        },
-        GROQ: {
-          API_KEY: config.groqApiKey,
-        },
-        ANTHROPIC: {
-          API_KEY: config.anthropicApiKey,
-        },
-        GEMINI: {
-          API_KEY: config.geminiApiKey,
-        },
-        OLLAMA: {
-          API_URL: config.ollamaApiUrl,
-          API_KEY: config.ollamaApiKey,
-        },
-        DEEPSEEK: {
-          API_KEY: config.deepseekApiKey,
-        },
-        AIMLAPI: {
-          API_KEY: config.aimlApiKey,
-        },
-        LM_STUDIO: {
-          API_URL: config.lmStudioApiUrl,
-        },
-        LEMONADE: {
-          API_URL: config.lemonadeApiUrl,
-          API_KEY: config.lemonadeApiKey,
-        },
-        CUSTOM_OPENAI: {
-          API_URL: config.customOpenaiApiUrl,
-          API_KEY: config.customOpenaiApiKey,
-          MODEL_NAME: config.customOpenaiModelName,
-        },
+    const modelRegistry = new ModelRegistry();
+    const modelProviders = await modelRegistry.getActiveProviders();
+
+    values.modelProviders = values.modelProviders.map(
+      (mp: ConfigModelProvider) => {
+        const activeProvider = modelProviders.find((p) => p.id === mp.id);
+
+        return {
+          ...mp,
+          chatModels: activeProvider?.chatModels ?? mp.chatModels,
+          embeddingModels:
+            activeProvider?.embeddingModels ?? mp.embeddingModels,
+        };
       },
-    };
+    );
 
-    updateConfig(updatedConfig);
-
-    return Response.json({ message: 'Config updated' }, { status: 200 });
+    return NextResponse.json({
+      values,
+      fields,
+    });
   } catch (err) {
-    console.error('An error occurred while updating config:', err);
+    console.error('Error in getting config: ', err);
     return Response.json(
-      { message: 'An error occurred while updating config' },
+      { message: 'An error has occurred.' },
+      { status: 500 },
+    );
+  }
+};
+
+export const POST = async (req: NextRequest) => {
+  try {
+    const body: SaveConfigBody = await req.json();
+
+    if (!body.key || !body.value) {
+      return Response.json(
+        {
+          message: 'Key and value are required.',
+        },
+        {
+          status: 400,
+        },
+      );
+    }
+
+    configManager.updateConfig(body.key, body.value);
+
+    return Response.json(
+      {
+        message: 'Config updated successfully.',
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err) {
+    console.error('Error in updating config: ', err);
+    return Response.json(
+      { message: 'An error has occurred.' },
       { status: 500 },
     );
   }
diff --git a/src/app/api/config/setup-complete/route.ts b/src/app/api/config/setup-complete/route.ts
new file mode 100644
index 0000000..0055fd3
--- /dev/null
+++ b/src/app/api/config/setup-complete/route.ts
@@ -0,0 +1,23 @@
+import configManager from '@/lib/config';
+import { NextRequest } from 'next/server';
+
+export const POST = async (req: NextRequest) => {
+  try {
+    configManager.markSetupComplete();
+
+    return Response.json(
+      {
+        message: 'Setup marked as complete.',
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err) {
+    console.error('Error marking setup as complete: ', err);
+    return Response.json(
+      { message: 'An error has occurred.' },
+      { status: 500 },
+    );
+  }
+};
diff --git a/src/app/api/images/route.ts b/src/app/api/images/route.ts
index e02854d..d3416ca 100644
--- a/src/app/api/images/route.ts
+++ b/src/app/api/images/route.ts
@@ -1,23 +1,12 @@
 import handleImageSearch from '@/lib/chains/imageSearchAgent';
-import {
-  getCustomOpenaiApiKey,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiModelName,
-} from '@/lib/config';
-import { getAvailableChatModelProviders } from '@/lib/providers';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import ModelRegistry from '@/lib/models/registry';
+import { ModelWithProvider } from '@/lib/models/types';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
-import { ChatOpenAI } from '@langchain/openai';
-
-interface ChatModel {
-  provider: string;
-  model: string;
-}
 
 interface ImageSearchBody {
   query: string;
   chatHistory: any[];
-  chatModel?: ChatModel;
+  chatModel: ModelWithProvider;
 }
 
 export const POST = async (req: Request) => {
@@ -34,35 +23,12 @@ export const POST = async (req: Request) => {
       })
       .filter((msg) => msg !== undefined) as BaseMessage[];
 
-    const chatModelProviders = await getAvailableChatModelProviders();
+    const registry = new ModelRegistry();
 
-    const chatModelProvider =
-      chatModelProviders[
-        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
-      ];
-    const chatModel =
-      chatModelProvider[
-        body.chatModel?.model || Object.keys(chatModelProvider)[0]
-      ];
-
-    let llm: BaseChatModel | undefined;
-
-    if (body.chatModel?.provider === 'custom_openai') {
-      llm = new ChatOpenAI({
-        apiKey: getCustomOpenaiApiKey(),
-        modelName: getCustomOpenaiModelName(),
-        temperature: 0.7,
-        configuration: {
-          baseURL: getCustomOpenaiApiUrl(),
-        },
-      }) as unknown as BaseChatModel;
-    } else if (chatModelProvider && chatModel) {
-      llm = chatModel.model;
-    }
-
-    if (!llm) {
-      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
-    }
+    const llm = await registry.loadChatModel(
+      body.chatModel.providerId,
+      body.chatModel.key,
+    );
 
     const images = await handleImageSearch(
       {
diff --git a/src/app/api/models/route.ts b/src/app/api/models/route.ts
deleted file mode 100644
index 04a6949..0000000
--- a/src/app/api/models/route.ts
+++ /dev/null
@@ -1,47 +0,0 @@
-import {
-  getAvailableChatModelProviders,
-  getAvailableEmbeddingModelProviders,
-} from '@/lib/providers';
-
-export const GET = async (req: Request) => {
-  try {
-    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
-      getAvailableChatModelProviders(),
-      getAvailableEmbeddingModelProviders(),
-    ]);
-
-    Object.keys(chatModelProviders).forEach((provider) => {
-      Object.keys(chatModelProviders[provider]).forEach((model) => {
-        delete (chatModelProviders[provider][model] as { model?: unknown })
-          .model;
-      });
-    });
-
-    Object.keys(embeddingModelProviders).forEach((provider) => {
-      Object.keys(embeddingModelProviders[provider]).forEach((model) => {
-        delete (embeddingModelProviders[provider][model] as { model?: unknown })
-          .model;
-      });
-    });
-
-    return Response.json(
-      {
-        chatModelProviders,
-        embeddingModelProviders,
-      },
-      {
-        status: 200,
-      },
-    );
-  } catch (err) {
-    console.error('An error occurred while fetching models', err);
-    return Response.json(
-      {
-        message: 'An error has occurred.',
-      },
-      {
-        status: 500,
-      },
-    );
-  }
-};
diff --git a/src/app/api/providers/[id]/models/route.ts b/src/app/api/providers/[id]/models/route.ts
new file mode 100644
index 0000000..5b4acc3
--- /dev/null
+++ b/src/app/api/providers/[id]/models/route.ts
@@ -0,0 +1,94 @@
+import ModelRegistry from '@/lib/models/registry';
+import { Model } from '@/lib/models/types';
+import { NextRequest } from 'next/server';
+
+export const POST = async (
+  req: NextRequest,
+  { params }: { params: Promise<{ id: string }> },
+) => {
+  try {
+    const { id } = await params;
+
+    const body: Partial<Model> & { type: 'embedding' | 'chat' } =
+      await req.json();
+
+    if (!body.key || !body.name) {
+      return Response.json(
+        {
+          message: 'Key and name must be provided',
+        },
+        {
+          status: 400,
+        },
+      );
+    }
+
+    const registry = new ModelRegistry();
+
+    await registry.addProviderModel(id, body.type, body);
+
+    return Response.json(
+      {
+        message: 'Model added successfully',
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err) {
+    console.error('An error occurred while adding provider model', err);
+    return Response.json(
+      {
+        message: 'An error has occurred.',
+      },
+      {
+        status: 500,
+      },
+    );
+  }
+};
+
+export const DELETE = async (
+  req: NextRequest,
+  { params }: { params: Promise<{ id: string }> },
+) => {
+  try {
+    const { id } = await params;
+
+    const body: { key: string; type: 'embedding' | 'chat' } = await req.json();
+
+    if (!body.key) {
+      return Response.json(
+        {
+          message: 'Key must be provided',
+        },
+        {
+          status: 400,
+        },
+      );
+    }
+
+    const registry = new ModelRegistry();
+
+    await registry.removeProviderModel(id, body.type, body.key);
+
+    return Response.json(
+      {
+        message: 'Model deleted successfully',
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err) {
+    console.error('An error occurred while deleting provider model', err);
+    return Response.json(
+      {
+        message: 'An error has occurred.',
+      },
+      {
+        status: 500,
+      },
+    );
+  }
+};
diff --git a/src/app/api/providers/[id]/route.ts b/src/app/api/providers/[id]/route.ts
new file mode 100644
index 0000000..489d73a
--- /dev/null
+++ b/src/app/api/providers/[id]/route.ts
@@ -0,0 +1,89 @@
+import ModelRegistry from '@/lib/models/registry';
+import { NextRequest } from 'next/server';
+
+export const DELETE = async (
+  req: NextRequest,
+  { params }: { params: Promise<{ id: string }> },
+) => {
+  try {
+    const { id } = await params;
+
+    if (!id) {
+      return Response.json(
+        {
+          message: 'Provider ID is required.',
+        },
+        {
+          status: 400,
+        },
+      );
+    }
+
+    const registry = new ModelRegistry();
+    await registry.removeProvider(id);
+
+    return Response.json(
+      {
+        message: 'Provider deleted successfully.',
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err: any) {
+    console.error('An error occurred while deleting provider', err.message);
+    return Response.json(
+      {
+        message: 'An error has occurred.',
+      },
+      {
+        status: 500,
+      },
+    );
+  }
+};
+
+export const PATCH = async (
+  req: NextRequest,
+  { params }: { params: Promise<{ id: string }> },
+) => {
+  try {
+    const body = await req.json();
+    const { name, config } = body;
+    const { id } = await params;
+
+    if (!id || !name || !config) {
+      return Response.json(
+        {
+          message: 'Missing required fields.',
+        },
+        {
+          status: 400,
+        },
+      );
+    }
+
+    const registry = new ModelRegistry();
+
+    const updatedProvider = await registry.updateProvider(id, name, config);
+
+    return Response.json(
+      {
+        provider: updatedProvider,
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err: any) {
+    console.error('An error occurred while updating provider', err.message);
+    return Response.json(
+      {
+        message: 'An error has occurred.',
+      },
+      {
+        status: 500,
+      },
+    );
+  }
+};
diff --git a/src/app/api/providers/route.ts b/src/app/api/providers/route.ts
new file mode 100644
index 0000000..53d6e60
--- /dev/null
+++ b/src/app/api/providers/route.ts
@@ -0,0 +1,74 @@
+import ModelRegistry from '@/lib/models/registry';
+import { NextRequest } from 'next/server';
+
+export const GET = async (req: Request) => {
+  try {
+    const registry = new ModelRegistry();
+
+    const activeProviders = await registry.getActiveProviders();
+
+    const filteredProviders = activeProviders.filter((p) => {
+      return !p.chatModels.some((m) => m.key === 'error');
+    });
+
+    return Response.json(
+      {
+        providers: filteredProviders,
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err) {
+    console.error('An error occurred while fetching providers', err);
+    return Response.json(
+      {
+        message: 'An error has occurred.',
+      },
+      {
+        status: 500,
+      },
+    );
+  }
+};
+
+export const POST = async (req: NextRequest) => {
+  try {
+    const body = await req.json();
+    const { type, name, config } = body;
+
+    if (!type || !name || !config) {
+      return Response.json(
+        {
+          message: 'Missing required fields.',
+        },
+        {
+          status: 400,
+        },
+      );
+    }
+
+    const registry = new ModelRegistry();
+
+    const newProvider = await registry.addProvider(type, name, config);
+
+    return Response.json(
+      {
+        provider: newProvider,
+      },
+      {
+        status: 200,
+      },
+    );
+  } catch (err) {
+    console.error('An error occurred while creating provider', err);
+    return Response.json(
+      {
+        message: 'An error has occurred.',
+      },
+      {
+        status: 500,
+      },
+    );
+  }
+};
diff --git a/src/app/api/search/route.ts b/src/app/api/search/route.ts
index 5f752ec..bc7255f 100644
--- a/src/app/api/search/route.ts
+++ b/src/app/api/search/route.ts
@@ -1,36 +1,14 @@
-import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import type { Embeddings } from '@langchain/core/embeddings';
-import { ChatOpenAI } from '@langchain/openai';
-import {
-  getAvailableChatModelProviders,
-  getAvailableEmbeddingModelProviders,
-} from '@/lib/providers';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
 import { MetaSearchAgentType } from '@/lib/search/metaSearchAgent';
-import {
-  getCustomOpenaiApiKey,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiModelName,
-} from '@/lib/config';
 import { searchHandlers } from '@/lib/search';
-
-interface chatModel {
-  provider: string;
-  name: string;
-  customOpenAIKey?: string;
-  customOpenAIBaseURL?: string;
-}
-
-interface embeddingModel {
-  provider: string;
-  name: string;
-}
+import ModelRegistry from '@/lib/models/registry';
+import { ModelWithProvider } from '@/lib/models/types';
 
 interface ChatRequestBody {
   optimizationMode: 'speed' | 'balanced';
   focusMode: string;
-  chatModel?: chatModel;
-  embeddingModel?: embeddingModel;
+  chatModel: ModelWithProvider;
+  embeddingModel: ModelWithProvider;
   query: string;
   history: Array<[string, string]>;
   stream?: boolean;
@@ -58,60 +36,16 @@ export const POST = async (req: Request) => {
         : new AIMessage({ content: msg[1] });
     });
 
-    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
-      getAvailableChatModelProviders(),
-      getAvailableEmbeddingModelProviders(),
+    const registry = new ModelRegistry();
+
+    const [llm, embeddings] = await Promise.all([
+      registry.loadChatModel(body.chatModel.providerId, body.chatModel.key),
+      registry.loadEmbeddingModel(
+        body.embeddingModel.providerId,
+        body.embeddingModel.key,
+      ),
     ]);
 
-    const chatModelProvider =
-      body.chatModel?.provider || Object.keys(chatModelProviders)[0];
-    const chatModel =
-      body.chatModel?.name ||
-      Object.keys(chatModelProviders[chatModelProvider])[0];
-
-    const embeddingModelProvider =
-      body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
-    const embeddingModel =
-      body.embeddingModel?.name ||
-      Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
-
-    let llm: BaseChatModel | undefined;
-    let embeddings: Embeddings | undefined;
-
-    if (body.chatModel?.provider === 'custom_openai') {
-      llm = new ChatOpenAI({
-        modelName: body.chatModel?.name || getCustomOpenaiModelName(),
-        apiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
-        temperature: 0.7,
-        configuration: {
-          baseURL:
-            body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
-        },
-      }) as unknown as BaseChatModel;
-    } else if (
-      chatModelProviders[chatModelProvider] &&
-      chatModelProviders[chatModelProvider][chatModel]
-    ) {
-      llm = chatModelProviders[chatModelProvider][chatModel]
-        .model as unknown as BaseChatModel | undefined;
-    }
-
-    if (
-      embeddingModelProviders[embeddingModelProvider] &&
-      embeddingModelProviders[embeddingModelProvider][embeddingModel]
-    ) {
-      embeddings = embeddingModelProviders[embeddingModelProvider][
-        embeddingModel
-      ].model as Embeddings | undefined;
-    }
-
-    if (!llm || !embeddings) {
-      return Response.json(
-        { message: 'Invalid model selected' },
-        { status: 400 },
-      );
-    }
-
     const searchHandler: MetaSearchAgentType = searchHandlers[body.focusMode];
 
     if (!searchHandler) {
diff --git a/src/app/api/suggestions/route.ts b/src/app/api/suggestions/route.ts
index 99179d2..d8312cf 100644
--- a/src/app/api/suggestions/route.ts
+++ b/src/app/api/suggestions/route.ts
@@ -1,22 +1,12 @@
 import generateSuggestions from '@/lib/chains/suggestionGeneratorAgent';
-import {
-  getCustomOpenaiApiKey,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiModelName,
-} from '@/lib/config';
-import { getAvailableChatModelProviders } from '@/lib/providers';
+import ModelRegistry from '@/lib/models/registry';
+import { ModelWithProvider } from '@/lib/models/types';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
-import { ChatOpenAI } from '@langchain/openai';
-
-interface ChatModel {
-  provider: string;
-  model: string;
-}
 
 interface SuggestionsGenerationBody {
   chatHistory: any[];
-  chatModel?: ChatModel;
+  chatModel: ModelWithProvider;
 }
 
 export const POST = async (req: Request) => {
@@ -33,35 +23,12 @@ export const POST = async (req: Request) => {
       })
       .filter((msg) => msg !== undefined) as BaseMessage[];
 
-    const chatModelProviders = await getAvailableChatModelProviders();
+    const registry = new ModelRegistry();
 
-    const chatModelProvider =
-      chatModelProviders[
-        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
-      ];
-    const chatModel =
-      chatModelProvider[
-        body.chatModel?.model || Object.keys(chatModelProvider)[0]
-      ];
-
-    let llm: BaseChatModel | undefined;
-
-    if (body.chatModel?.provider === 'custom_openai') {
-      llm = new ChatOpenAI({
-        apiKey: getCustomOpenaiApiKey(),
-        modelName: getCustomOpenaiModelName(),
-        temperature: 0.7,
-        configuration: {
-          baseURL: getCustomOpenaiApiUrl(),
-        },
-      }) as unknown as BaseChatModel;
-    } else if (chatModelProvider && chatModel) {
-      llm = chatModel.model;
-    }
-
-    if (!llm) {
-      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
-    }
+    const llm = await registry.loadChatModel(
+      body.chatModel.providerId,
+      body.chatModel.key,
+    );
 
     const suggestions = await generateSuggestions(
       {
diff --git a/src/app/api/uploads/route.ts b/src/app/api/uploads/route.ts
index 9fbaf2d..6db17dd 100644
--- a/src/app/api/uploads/route.ts
+++ b/src/app/api/uploads/route.ts
@@ -2,11 +2,11 @@ import { NextResponse } from 'next/server';
 import fs from 'fs';
 import path from 'path';
 import crypto from 'crypto';
-import { getAvailableEmbeddingModelProviders } from '@/lib/providers';
 import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf';
 import { DocxLoader } from '@langchain/community/document_loaders/fs/docx';
 import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';
 import { Document } from 'langchain/document';
+import ModelRegistry from '@/lib/models/registry';
 
 interface FileRes {
   fileName: string;
@@ -30,8 +30,8 @@ export async function POST(req: Request) {
     const formData = await req.formData();
 
     const files = formData.getAll('files') as File[];
-    const embedding_model = formData.get('embedding_model');
-    const embedding_model_provider = formData.get('embedding_model_provider');
+    const embedding_model = formData.get('embedding_model_key') as string;
+    const embedding_model_provider = formData.get('embedding_model_provider_id') as string;
 
     if (!embedding_model || !embedding_model_provider) {
       return NextResponse.json(
@@ -40,20 +40,9 @@
       );
     }
 
-    const embeddingModels = await getAvailableEmbeddingModelProviders();
-    const provider =
-      embedding_model_provider ?? Object.keys(embeddingModels)[0];
-    const embeddingModel =
-      embedding_model ?? Object.keys(embeddingModels[provider as string])[0];
+    const registry = new ModelRegistry();
 
-    let embeddingsModel =
-      embeddingModels[provider as string]?.[embeddingModel as string]?.model;
-    if (!embeddingsModel) {
-      return NextResponse.json(
-        { message: 'Invalid embedding model selected' },
-        { status: 400 },
-      );
-    }
+    const model = await registry.loadEmbeddingModel(embedding_model_provider, embedding_model);
 
     const processedFiles: FileRes[] = [];
 
@@ -98,7 +87,7 @@
         }),
       );
 
-      const embeddings = await embeddingsModel.embedDocuments(
+      const embeddings = await model.embedDocuments(
        splitted.map((doc) => doc.pageContent),
      );
      const embeddingsDataPath = filePath.replace(
diff --git a/src/app/api/videos/route.ts b/src/app/api/videos/route.ts
index 7e8288b..02e5909 100644
--- a/src/app/api/videos/route.ts
+++ b/src/app/api/videos/route.ts
@@ -1,23 +1,12 @@
 import handleVideoSearch from '@/lib/chains/videoSearchAgent';
-import {
-  getCustomOpenaiApiKey,
-  getCustomOpenaiApiUrl,
-  getCustomOpenaiModelName,
-} from '@/lib/config';
-import { getAvailableChatModelProviders } from '@/lib/providers';
-import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import ModelRegistry from '@/lib/models/registry';
+import { ModelWithProvider } from '@/lib/models/types';
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
-import { ChatOpenAI } from '@langchain/openai';
-
-interface ChatModel {
-  provider: string;
-  model: string;
-}
 
 interface VideoSearchBody {
   query: string;
   chatHistory: any[];
-  chatModel?: ChatModel;
+  chatModel: ModelWithProvider;
 }
 
 export const POST = async (req: Request) => {
@@ -34,35 +23,12 @@ export const POST = async (req: Request) => {
       })
       .filter((msg) => msg !== undefined) as BaseMessage[];
 
-    const chatModelProviders = await getAvailableChatModelProviders();
+    const registry = new ModelRegistry();
 
-    const chatModelProvider =
-      chatModelProviders[
-        body.chatModel?.provider || Object.keys(chatModelProviders)[0]
-      ];
-    const chatModel =
-      chatModelProvider[
-        body.chatModel?.model || Object.keys(chatModelProvider)[0]
-      ];
-
-    let llm: BaseChatModel | undefined;
-
-    if (body.chatModel?.provider === 'custom_openai') {
-      llm = new ChatOpenAI({
-        apiKey: getCustomOpenaiApiKey(),
-        modelName: getCustomOpenaiModelName(),
-        temperature: 0.7,
-        configuration: {
-          baseURL: getCustomOpenaiApiUrl(),
-        },
-      }) as unknown as BaseChatModel;
-    } else if (chatModelProvider && chatModel) {
-      llm = chatModel.model;
-    }
-
-    if (!llm) {
-      return Response.json({ error: 'Invalid chat model' }, { status: 400 });
-    }
+    const llm = await registry.loadChatModel(
+      body.chatModel.providerId,
+      body.chatModel.key,
+    );
 
     const videos = await handleVideoSearch(
       {
diff --git a/src/app/globals.css b/src/app/globals.css
index 639e515..3b95d06 100644
--- a/src/app/globals.css
+++ b/src/app/globals.css
@@ -5,7 +5,7 @@
 @font-face {
   font-family: 'PP Editorial';
   src: url('/fonts/pp-ed-ul.otf') format('opentype');
-  font-weight: 200;
+  font-weight: 300;
   font-style: normal;
   font-display: swap;
 }
@@ -18,6 +18,66 @@
   .overflow-hidden-scrollable::-webkit-scrollbar {
     display: none;
   }
+
+  * {
+    scrollbar-width: thin;
+    scrollbar-color: #e8edf1 transparent; /* light-200 */
+  }
+
+  *::-webkit-scrollbar {
+    width: 6px;
+    height: 6px;
+  }
+
+  *::-webkit-scrollbar-track {
+    background: transparent;
+  }
+
+  *::-webkit-scrollbar-thumb {
+    background: #e8edf1; /* light-200 */
+    border-radius: 3px;
+    transition: background 0.2s ease;
+  }
+
+  *::-webkit-scrollbar-thumb:hover {
+    background: #d0d7de; /* light-300 */
+  }
+
+  @media (prefers-color-scheme: dark) {
+    * {
+      scrollbar-color: #21262d transparent; /* dark-200 */
+    }
+
+    *::-webkit-scrollbar-thumb {
+      background: #21262d; /* dark-200 */
+    }
+
+    *::-webkit-scrollbar-thumb:hover {
+      background: #30363d; /* dark-300 */
+    }
+  }
+
+  :root.dark *,
+  html.dark *,
+  body.dark * {
+    scrollbar-color: #21262d transparent; /* dark-200 */
+  }
+
+  :root.dark *::-webkit-scrollbar-thumb,
+  html.dark *::-webkit-scrollbar-thumb,
+  body.dark *::-webkit-scrollbar-thumb {
+    background: #21262d; /* dark-200 */
+  }
+
+  :root.dark *::-webkit-scrollbar-thumb:hover,
+  html.dark *::-webkit-scrollbar-thumb:hover,
+  body.dark *::-webkit-scrollbar-thumb:hover {
+    background: #30363d; /* dark-300 */
+  }
+
+  html {
+    scroll-behavior: smooth;
+  }
 }
 
 @layer utilities {
@@ -25,6 +85,7 @@
     display: -webkit-box;
     -webkit-box-orient: vertical;
     -webkit-line-clamp: 2;
+    line-clamp: 2;
     overflow: hidden;
   }
 }
diff --git a/src/app/layout.tsx b/src/app/layout.tsx
index 684a99c..830d842 100644
--- a/src/app/layout.tsx
+++ b/src/app/layout.tsx
@@ -1,3 +1,5 @@
+export const dynamic = 'force-dynamic';
+
 import type { Metadata } from 'next';
 import { Montserrat } from 'next/font/google';
 import './globals.css';
@@ -5,6 +7,8 @@ import { cn } from '@/lib/utils';
 import Sidebar from '@/components/Sidebar';
 import { Toaster } from 'sonner';
 import ThemeProvider from '@/components/theme/Provider';
+import configManager from '@/lib/config';
+import SetupWizard from '@/components/Setup/SetupWizard';
 
 const montserrat = Montserrat({
   weight: ['300', '400', '500', '700'],
@@ -24,20 +28,29 @@ export default function RootLayout({
 }: Readonly<{
   children: React.ReactNode;
 }>) {
+  const setupComplete = configManager.isSetupComplete();
+  const configSections = configManager.getUIConfigSections();
+
   return (
     <html className="h-full" lang="en" suppressHydrationWarning>
       <body className={cn('h-full', montserrat.className)}>
         <ThemeProvider>
-          <Sidebar>{children}</Sidebar>
-          <Toaster
-            toastOptions={{
-              unstyled: true,
-              classNames: {
-                toast:
-                  'bg-light-primary dark:bg-dark-secondary dark:text-white/70 text-black-70 rounded-lg p-4 flex flex-row items-center space-x-2',
-              },
-            }}
-          />
+          {setupComplete ? (
+            <>
+              <Sidebar>{children}</Sidebar>
+              <Toaster
+                toastOptions={{
+                  unstyled: true,
+                  classNames: {
+                    toast:
+                      'bg-light-primary dark:bg-dark-secondary dark:text-white/70 text-black-70 rounded-lg p-4 flex flex-row items-center space-x-2',
+                  },
+                }}
+              />
+            </>
+          ) : (
+            <SetupWizard configSections={configSections} />
+          )}
         </ThemeProvider>
       </body>
     </html>
diff --git a/src/app/settings/page.tsx b/src/app/settings/page.tsx
deleted file mode 100644
index 1af53f9..0000000
--- a/src/app/settings/page.tsx
+++ /dev/null
@@ -1,1007 +0,0 @@
-'use client';
-
-import { Settings as SettingsIcon, ArrowLeft, Loader2 } from 'lucide-react';
-import { useEffect, useState } from 'react';
-import { cn } from '@/lib/utils';
-import { Switch } from '@headlessui/react';
-import ThemeSwitcher from '@/components/theme/Switcher';
-import { ImagesIcon, VideoIcon } from 'lucide-react';
-import Link from 'next/link';
-import { PROVIDER_METADATA } from '@/lib/providers';
-
-interface SettingsType {
-  chatModelProviders: {
-    [key: string]: [Record<string, any>];
-  };
-  embeddingModelProviders: {
-    [key: string]: [Record<string, any>];
-  };
-  openaiApiKey: string;
-  groqApiKey: string;
-  anthropicApiKey: string;
-  geminiApiKey: string;
-  ollamaApiUrl: string;
-  ollamaApiKey: string;
-  lmStudioApiUrl: string;
-  lemonadeApiUrl: string;
-  lemonadeApiKey: string;
-  deepseekApiKey: string;
-  aimlApiKey: string;
-  customOpenaiApiKey: string;
-  customOpenaiApiUrl: string;
-  customOpenaiModelName: string;
-}
-
-interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
-  isSaving?: boolean;
-  onSave?: (value: string) => void;
-}
-
-const Input = ({ className, isSaving, onSave, ...restProps }: InputProps) => {
-  return (
-    <div className="relative">
-      <input
-        {...restProps}
-        className={cn(
-          'bg-light-secondary dark:bg-dark-secondary w-full px-3 py-2 flex items-center overflow-hidden border border-light-200 dark:border-dark-200 dark:text-white rounded-lg text-sm',
-          className,
-        )}
-        onBlur={(e) => onSave?.(e.target.value)}
-      />
-      {isSaving && (
-        <div className="absolute right-3 top-1/2 -translate-y-1/2">
-          <Loader2
-            size={16}
-            className="animate-spin text-black/70 dark:text-white/70"
-          />
-        </div>
-      )}
-    </div>
-  );
-};
-
-interface TextareaProps extends React.InputHTMLAttributes<HTMLTextAreaElement> {
-  isSaving?: boolean;
-  onSave?: (value: string) => void;
-}
-
-const Textarea = ({
-  className,
-  isSaving,
-  onSave,
-  ...restProps
-}: TextareaProps) => {
-  return (
-    <div className="relative">
-      <textarea