haddadrm
2025-02-16 15:26:33 +04:00
committed by GitHub
10 changed files with 7174 additions and 43 deletions

View File

@@ -37,6 +37,7 @@ services:
       args:
         - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
         - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+      network: host
     image: itzcrazykns1337/perplexica-frontend:main
     depends_on:
       - perplexica-backend

package-lock.json (generated, new file, 6912 additions)

File diff suppressed because it is too large

View File

@@ -15,12 +15,16 @@ API_KEY = ""
 [MODELS.GEMINI]
 API_KEY = ""

-[MODELS.CUSTOM_OPENAI]
-API_KEY = ""
-API_URL = ""
-
 [MODELS.OLLAMA]
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434

+[MODELS.LMSTUDIO]
+API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234
+
+[MODELS.CUSTOM_OPENAI]
+API_KEY = ""
+API_URL = ""
+MODEL_NAME = ""
+
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
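
For reference, a hedged sketch of how the two new blocks might look once filled in; every value below is illustrative, not a shipped default:

[MODELS.LMSTUDIO]
API_URL = "http://host.docker.internal:1234" # local LM Studio server

[MODELS.CUSTOM_OPENAI]
API_KEY = "sk-example"               # whatever key your OpenAI-compatible server accepts
API_URL = "http://localhost:8000/v1" # illustrative endpoint
MODEL_NAME = "my-local-model"        # illustrative model id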

View File

@@ -26,6 +26,9 @@ interface Config {
   OLLAMA: {
     API_URL: string;
   };
+  LMSTUDIO: {
+    API_URL: string;
+  };
   CUSTOM_OPENAI: {
     API_URL: string;
     API_KEY: string;

@@ -66,6 +69,8 @@ export const getSearxngApiEndpoint = () =>
 export const getOllamaApiEndpoint = () => loadConfig().MODELS.OLLAMA.API_URL;

+export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LMSTUDIO.API_URL;
+
 export const getCustomOpenaiApiKey = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;

@@ -76,10 +81,6 @@ export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;

 const mergeConfigs = (current: any, update: any): any => {
-  if (update === null || update === undefined) {
-    return current;
-  }
-
   if (typeof current !== 'object' || current === null) {
     return update;
   }

src/lib/chat/service.ts (new file, 1 line)

View File

@@ -0,0 +1 @@

View File

@@ -4,6 +4,7 @@ import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
 import { loadAnthropicChatModels } from './anthropic';
 import { loadTransformersEmbeddingsModels } from './transformers';
 import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,

@@ -17,6 +18,7 @@ const chatModelProviders = {
   ollama: loadOllamaChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
+  lm_studio: loadLMStudioChatModels,
 };

@@ -24,6 +26,7 @@ const embeddingModelProviders = {
   local: loadTransformersEmbeddingsModels,
   ollama: loadOllamaEmbeddingsModels,
   gemini: loadGeminiEmbeddingsModels,
+  lm_studio: loadLMStudioEmbeddingsModels,
 };

 export const getAvailableChatModelProviders = async () => {
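
The body of getAvailableChatModelProviders is not part of this diff. As a hedged sketch, an aggregator over a registry like chatModelProviders typically awaits each loader and drops providers that return no models; the helper name below is hypothetical:

// Hypothetical sketch, not the project's actual implementation.
const collectProviders = async (
  registry: Record<string, () => Promise<Record<string, unknown>>>,
) => {
  const out: Record<string, Record<string, unknown>> = {};
  for (const [name, load] of Object.entries(registry)) {
    const models = await load();
    // Loaders like the LM Studio one below return {} when their backend
    // is unconfigured or unreachable, so those providers are skipped.
    if (Object.keys(models).length > 0) out[name] = models;
  }
  return out;
};

// e.g. const chatProviders = await collectProviders(chatModelProviders);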

View File

@@ -0,0 +1,125 @@
import { OpenAIEmbeddings } from '@langchain/openai';
import { ChatOpenAI } from '@langchain/openai';
import { getKeepAlive, getLMStudioApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import axios from 'axios';

const ensureV1Endpoint = (endpoint: string): string => {
  return endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
};

interface LMStudioModel {
  id: string;
  // add other properties if LM Studio API provides them
}

interface ChatModelConfig {
  displayName: string;
  model: ChatOpenAI;
}

const checkLMStudioAvailability = async (endpoint: string): Promise<boolean> => {
  const v1Endpoint = ensureV1Endpoint(endpoint);
  try {
    await axios.get(`${v1Endpoint}/models`, {
      timeout: 1000, // 1 second timeout
      headers: {
        'Content-Type': 'application/json',
      },
    });
    return true;
  } catch (err) {
    logger.debug(`LM Studio server not available at ${endpoint}`);
    return false;
  }
};

export const loadLMStudioChatModels = async (): Promise<Record<string, ChatModelConfig>> => {
  const lmStudioEndpoint = getLMStudioApiEndpoint();

  if (!lmStudioEndpoint) {
    logger.debug('LM Studio endpoint not configured, skipping');
    return {};
  }

  // Check if server is available before attempting to load models
  const isAvailable = await checkLMStudioAvailability(lmStudioEndpoint);
  if (!isAvailable) {
    return {};
  }

  try {
    const v1Endpoint = ensureV1Endpoint(lmStudioEndpoint);
    const response = await axios.get<{ data: LMStudioModel[] }>(`${v1Endpoint}/models`, {
      timeout: 5000, // 5 second timeout for model loading
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const lmStudioModels = response.data.data;

    const chatModels = lmStudioModels.reduce<Record<string, ChatModelConfig>>((acc, model) => {
      acc[model.id] = {
        displayName: model.id,
        model: new ChatOpenAI({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: ensureV1Endpoint(lmStudioEndpoint),
          },
          modelName: model.id,
          temperature: 0.7,
        }),
      };
      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    logger.error(`Error loading LM Studio models: ${err}`);
    return {};
  }
};

export const loadLMStudioEmbeddingsModels = async () => {
  const lmStudioEndpoint = getLMStudioApiEndpoint();

  if (!lmStudioEndpoint) return {};

  // Check if server is available before attempting to load models
  const isAvailable = await checkLMStudioAvailability(lmStudioEndpoint);
  if (!isAvailable) {
    return {};
  }

  try {
    const v1Endpoint = ensureV1Endpoint(lmStudioEndpoint);
    const response = await axios.get(`${v1Endpoint}/models`, {
      timeout: 5000, // 5 second timeout for model loading
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const lmStudioModels = response.data.data;

    const embeddingsModels = lmStudioModels.reduce((acc, model) => {
      acc[model.id] = {
        displayName: model.id,
        model: new OpenAIEmbeddings({
          openAIApiKey: 'lm-studio', // Dummy key required by LangChain
          configuration: {
            baseURL: ensureV1Endpoint(lmStudioEndpoint),
          },
          modelName: model.id,
        }),
      };
      return acc;
    }, {});

    return embeddingsModels;
  } catch (err) {
    logger.error(`Error loading LM Studio embeddings model: ${err}`);
    return {};
  }
};
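
As a quick, hedged illustration of how the loader's output can be consumed (the record shape comes from the code above; the demo function and prompt are made up):

import { loadLMStudioChatModels } from './lmstudio';

const demo = async () => {
  const models = await loadLMStudioChatModels();
  const first = Object.values(models)[0];
  if (!first) return; // no LM Studio server reachable, or no models loaded

  // Each entry wraps a ChatOpenAI client pointed at LM Studio's
  // OpenAI-compatible /v1 endpoint, so it is invoked like any chat model.
  const reply = await first.model.invoke('Say hello in one sentence.');
  console.log(first.displayName, reply.content);
};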

View File

@@ -6,6 +6,7 @@ import {
 import {
   getGroqApiKey,
   getOllamaApiEndpoint,
+  getLMStudioApiEndpoint,
   getAnthropicApiKey,
   getGeminiApiKey,
   getOpenaiApiKey,

@@ -54,12 +55,13 @@ router.get('/', async (_, res) => {
     config['openaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
+    config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
     config['geminiApiKey'] = getGeminiApiKey();
     config['customOpenaiApiUrl'] = getCustomOpenaiApiUrl();
     config['customOpenaiApiKey'] = getCustomOpenaiApiKey();
-    config['customOpenaiModelName'] = getCustomOpenaiModelName();
+    config['customOpenaiModelName'] = getCustomOpenaiModelName()

     res.status(200).json(config);
   } catch (err: any) {

@@ -88,6 +90,9 @@ router.post('/', async (req, res) => {
       OLLAMA: {
         API_URL: config.ollamaApiUrl,
       },
+      LMSTUDIO: {
+        API_URL: config.lmStudioApiUrl,
+      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,
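
Together, the GET and POST hunks round-trip the LM Studio URL through the settings API. A hedged sketch of a client call follows; the field names come from the route code above, while the mount path and all values are assumptions:

// Assumed mount path and illustrative values.
const saveSettings = async () => {
  await fetch('http://127.0.0.1:3001/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      ollamaApiUrl: 'http://host.docker.internal:11434',
      lmStudioApiUrl: 'http://host.docker.internal:1234', // new field
      customOpenaiApiUrl: '',
      customOpenaiApiKey: '',
      customOpenaiModelName: '',
    }),
  });
};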

View File

@@ -11,6 +11,8 @@ import {
   StopCircle,
   Layers3,
   Plus,
+  Brain,
+  ChevronDown,
 } from 'lucide-react';
 import Markdown from 'markdown-to-jsx';
 import Copy from './MessageActions/Copy';
@@ -41,26 +43,58 @@ const MessageBox = ({
 }) => {
   const [parsedMessage, setParsedMessage] = useState(message.content);
   const [speechMessage, setSpeechMessage] = useState(message.content);
+  const [thinking, setThinking] = useState<string>('');
+  const [answer, setAnswer] = useState<string>('');
+  const [isThinkingExpanded, setIsThinkingExpanded] = useState(false);

   useEffect(() => {
     const regex = /\[(\d+)\]/g;
+    const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;

-    if (
-      message.role === 'assistant' &&
-      message?.sources &&
-      message.sources.length > 0
-    ) {
-      return setParsedMessage(
-        message.content.replace(
-          regex,
-          (_, number) =>
-            `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
-        ),
-      );
-    }
-
-    setSpeechMessage(message.content.replace(regex, ''));
-    setParsedMessage(message.content);
+    // Check for thinking content, including partial tags
+    const match = message.content.match(thinkRegex);
+    if (match) {
+      const [_, thinkingContent, answerContent] = match;
+
+      // Set thinking content even if </think> hasn't appeared yet
+      if (thinkingContent) {
+        setThinking(thinkingContent.trim());
+        setIsThinkingExpanded(true); // Auto-expand when thinking starts
+      }
+
+      // Only set answer content if we have it (after </think>)
+      if (answerContent) {
+        setAnswer(answerContent.trim());
+
+        // Process the answer part for sources if needed
+        if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
+          setParsedMessage(
+            answerContent.trim().replace(
+              regex,
+              (_, number) =>
+                `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+            ),
+          );
+        } else {
+          setParsedMessage(answerContent.trim());
+        }
+        setSpeechMessage(answerContent.trim().replace(regex, ''));
+      }
+    } else {
+      // No thinking content - process as before
+      if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
+        setParsedMessage(
+          message.content.replace(
+            regex,
+            (_, number) =>
+              `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+          ),
+        );
+      } else {
+        setParsedMessage(message.content);
+      }
+      setSpeechMessage(message.content.replace(regex, ''));
+    }
   }, [message.content, message.sources, message.role]);

   const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
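
A quick illustration of how the thinkRegex above behaves on streamed content (the sample strings are made up):

const thinkRegex = /<think>(.*?)(?:<\/think>|$)(.*)/s;

// Mid-stream, before </think> arrives: group 1 captures everything after
// <think> (the 's' flag lets '.' span newlines), group 2 stays empty,
// so only the Reasoning pane updates.
console.log('<think>Compare both options'.match(thinkRegex)?.slice(1));
// -> ['Compare both options', '']

// Once the closing tag and the answer stream in, both groups are populated.
console.log('<think>Compare both options</think>Use A.'.match(thinkRegex)?.slice(1));
// -> ['Compare both options', 'Use A.']
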
@@ -92,27 +126,71 @@
           <MessageSources sources={message.sources} />
         </div>
       )}
-      <div className="flex flex-col space-y-2">
-        <div className="flex flex-row items-center space-x-2">
-          <Disc3
-            className={cn(
-              'text-black dark:text-white',
-              isLast && loading ? 'animate-spin' : 'animate-none',
-            )}
-            size={20}
-          />
-          <h3 className="text-black dark:text-white font-medium text-xl">
-            Answer
-          </h3>
-        </div>
-        <Markdown
-          className={cn(
-            'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
-            'max-w-none break-words text-black dark:text-white',
-          )}
-        >
-          {parsedMessage}
-        </Markdown>
+      <div className="flex flex-col space-y-4">
+        {thinking && (
+          <div className="flex flex-col space-y-2 mb-4">
+            <button
+              onClick={() => setIsThinkingExpanded(!isThinkingExpanded)}
+              className="flex flex-row items-center space-x-2 group text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white transition duration-200"
+            >
+              <Brain size={20} />
+              <h3 className="font-medium text-xl">Reasoning</h3>
+              <ChevronDown
+                size={16}
+                className={cn(
+                  "transition-transform duration-200",
+                  isThinkingExpanded ? "rotate-180" : ""
+                )}
+              />
+            </button>
+
+            {isThinkingExpanded && (
+              <div className="rounded-lg bg-light-secondary/50 dark:bg-dark-secondary/50 p-4">
+                {thinking.split('\n\n').map((paragraph, index) => {
+                  if (!paragraph.trim()) return null;
+
+                  const content = paragraph.replace(/^[•\-\d.]\s*/, '');
+
+                  return (
+                    <div key={index} className="mb-2 last:mb-0">
+                      <details className="group [&_summary::-webkit-details-marker]:hidden">
+                        <summary className="flex items-center cursor-pointer list-none text-sm text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white">
+                          <span className="arrow mr-2 inline-block transition-transform duration-200 group-open:rotate-90 group-open:self-start group-open:mt-1"></span>
+                          <p className="relative whitespace-normal line-clamp-1 group-open:line-clamp-none after:content-['...'] after:inline group-open:after:hidden transition-all duration-200 text-ellipsis overflow-hidden group-open:overflow-visible">
+                            {content}
+                          </p>
+                        </summary>
+                      </details>
+                    </div>
+                  );
+                })}
+              </div>
+            )}
+          </div>
+        )}
+
+        <div className="flex flex-col space-y-2">
+          <div className="flex flex-row items-center space-x-2">
+            <Disc3
+              className={cn(
+                'text-black dark:text-white',
+                isLast && loading ? 'animate-spin' : 'animate-none',
+              )}
+              size={20}
+            />
+            <h3 className="text-black dark:text-white font-medium text-xl">
+              Answer
+            </h3>
+          </div>
+          <Markdown
+            className={cn(
+              'prose prose-h1:mb-3 prose-h2:mb-2 prose-h2:mt-6 prose-h2:font-[800] prose-h3:mt-4 prose-h3:mb-1.5 prose-h3:font-[600] dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 font-[400]',
              'max-w-none break-words text-black dark:text-white',
+            )}
+          >
+            {parsedMessage}
+          </Markdown>
+        </div>
       {loading && isLast ? null : (
         <div className="flex flex-row items-center justify-between w-full text-black dark:text-white py-4 -mx-2">
           <div className="flex flex-row items-center space-x-1">

View File

@@ -1,5 +1,6 @@
 {
   "compilerOptions": {
+    "target": "es2018",
     "lib": ["dom", "dom.iterable", "esnext"],
     "allowJs": true,
     "skipLibCheck": true,