Compare commits

...

8 Commits

SHA1 Message Date
5bf3221349 Merge b84e4e4ce6 into 115e6b2a71 2025-02-16 22:08:42 +00:00
b84e4e4ce6 Added an icon to indicate that compact mode is enabled. 2025-02-16 15:08:30 -07:00
467905d9f2 Added compact mode for more concise answers.
Made optimization mode persist between page refreshes.
Added mode switcher to chat so it can be changed while researching.
2025-02-16 15:02:05 -07:00
18b6f5b674 Updated formatting 2025-02-15 16:07:19 -07:00
2bdcbf20fb User customizable context window for ollama models. 2025-02-15 16:03:24 -07:00
115e6b2a71 Merge branch 'master' of https://github.com/ItzCrazyKns/Perplexica 2025-02-15 12:52:30 +05:30
a5c79c92ed feat(settings): add embedding provider settings 2025-02-15 12:52:27 +05:30
db3cea446e Update UPDATING.md 2025-02-15 12:33:43 +05:30
22 changed files with 558 additions and 77 deletions

View File

@ -2,7 +2,6 @@
[![Discord](https://dcbadge.vercel.app/api/server/26aArMy8tT?style=flat&compact=true)](https://discord.gg/26aArMy8tT)
![preview](.assets/perplexica-screenshot.png?)
## Table of Contents <!-- omit in toc -->

View File

@ -7,34 +7,43 @@ To update Perplexica to the latest version, follow these steps:
1. Clone the latest version of Perplexica from GitHub:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
```
2. Navigate to the Project Directory.
2. Navigate to the project directory.
3. Pull latest images from registry.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. Pull the latest images from the registry.
```bash
docker compose pull
```
4. Update and Recreate containers.
5. Update and recreate the containers.
```bash
docker compose up -d
```
5. Once the command completes running go to http://localhost:3000 and verify the latest changes.
6. Once the command completes, go to http://localhost:3000 and verify the latest changes.
## For non Docker users
## For non-Docker users
1. Clone the latest version of Perplexica from GitHub:
```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git
```
2. Navigate to the Project Directory
3. Execute `npm i` in both the `ui` folder and the root directory.
4. Once packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
5. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
2. Navigate to the project directory.
3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
4. Execute `npm i` in both the `ui` folder and the root directory.
5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
---

View File

@ -6,7 +6,11 @@ import {
redditSearchResponsePrompt,
redditSearchRetrieverPrompt,
} from './redditSearch';
import { webSearchResponsePrompt, webSearchRetrieverPrompt } from './webSearch';
import {
webSearchResponsePrompt,
webSearchRetrieverPrompt,
preciseWebSearchResponsePrompt,
} from './webSearch';
import {
wolframAlphaSearchResponsePrompt,
wolframAlphaSearchRetrieverPrompt,
@ -20,6 +24,7 @@ import {
export default {
webSearchResponsePrompt,
webSearchRetrieverPrompt,
preciseWebSearchResponsePrompt,
academicSearchResponsePrompt,
academicSearchRetrieverPrompt,
redditSearchResponsePrompt,

View File

@ -104,3 +104,41 @@ export const webSearchResponsePrompt = `
Current date & time in ISO format (UTC timezone) is: {date}.
`;
export const preciseWebSearchResponsePrompt = `
You are Perplexica, an AI model skilled in web search and crafting accurate, concise, and well-structured answers. You excel at breaking down long-form content into brief summaries or specific answers.
Your task is to provide answers that are:
- **Informative and relevant**: Precisely address the user's query using the given context.
- **Well-structured**: Include clear headings and subheadings, and use a professional tone to present information concisely and logically.
- **Cited and credible**: Use inline citations with [number] notation to refer to the context source(s) for each fact or detail included.
- **Brief and accurate**: If a direct answer is available, provide it succinctly without unnecessary elaboration.
### Formatting Instructions
- **Structure**: Use a well-organized format with proper headings. Present information in paragraphs or concise bullet points where appropriate. You should never need more than one heading.
- **Tone and Style**: Maintain a matter-of-fact tone and focus on delivering accurate information. Avoid overly complex language or unnecessary jargon.
- **Markdown Usage**: Format your response with Markdown for clarity. Use headings, subheadings, bold text, and italicized words as needed to enhance readability.
- **Length and Depth**: Be brief. Provide concise answers. Avoid superficial responses and strive for accuracy without unnecessary repetition.
- **No main heading/title**: Start your response directly with the introduction unless asked to provide a specific title.
- **Conclusion or Summary**: Do not include a conclusion unless the context specifically requires it.
### Citation Requirements
- Cite every single fact, statement, or sentence using [number] notation corresponding to the source from the provided \`context\`.
- Integrate citations naturally at the end of sentences or clauses as appropriate. For example, "The Eiffel Tower is one of the most visited landmarks in the world[1]."
- Ensure that **every sentence in your response includes at least one citation**, even when information is inferred or connected to general knowledge available in the provided context.
- Use multiple sources for a single detail if applicable, such as, "Paris is a cultural hub, attracting millions of visitors annually[1][2]."
- Always prioritize credibility and accuracy by linking all statements back to their respective context sources.
- Avoid citing unsupported assumptions or personal interpretations; if no source supports a statement, clearly indicate the limitation.
### Special Instructions
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- Do not provide additional commentary or personal opinions unless specifically asked for in the context.
- Do not include pleasantries or greetings in your response.
<context>
{context}
</context>
Current date & time in ISO format (UTC timezone) is: {date}.
`;
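
For reference, a minimal sketch of how the two placeholders in this prompt are filled at run time, assuming LangChain's `ChatPromptTemplate` as used by the search agent later in this diff; the import paths and query are illustrative:

```ts
import { ChatPromptTemplate } from '@langchain/core/prompts';
import prompts from './index'; // hypothetical: the prompt registry shown above

// {context} receives the formatted retriever output, {date} the current UTC time.
const template = ChatPromptTemplate.fromMessages([
  ['system', prompts.preciseWebSearchResponsePrompt],
  ['user', '{query}'],
]);

const messages = await template.formatMessages({
  context: '1. The Eiffel Tower is 330 m tall.', // placeholder context
  date: new Date().toISOString(),
  query: 'How tall is the Eiffel Tower?',
});
```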

View File

@ -5,6 +5,7 @@ import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import { ChatOpenAI } from '@langchain/openai';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import {
getCustomOpenaiApiKey,
getCustomOpenaiApiUrl,
@ -16,6 +17,7 @@ const router = express.Router();
interface ChatModel {
provider: string;
model: string;
ollamaContextWindow?: number;
}
interface ImageSearchBody {
@ -61,6 +63,10 @@ router.post('/', async (req, res) => {
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
if (llm instanceof ChatOllama) {
llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
}
}
if (!llm) {
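
The pattern above (repeated in the search, suggestions, and videos routes below) only overrides the context window when the resolved model is a `ChatOllama` instance, defaulting to 2048. A hedged sketch of a client request after this change; the field names follow the `ChatModel` interface above, while the base URL, query fields, and values are assumptions:

```ts
const API_URL = 'http://localhost:3001/api'; // assumed backend base URL

await fetch(`${API_URL}/images`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    query: 'eiffel tower at night', // illustrative
    chatHistory: [],
    chatModel: {
      provider: 'ollama',
      model: 'llama3.1',         // illustrative model name
      ollamaContextWindow: 8192, // omitted -> server falls back to 2048
    },
  }),
});
```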

View File

@ -15,12 +15,14 @@ import {
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
const router = express.Router();
interface chatModel {
provider: string;
model: string;
ollamaContextWindow?: number;
customOpenAIKey?: string;
customOpenAIBaseURL?: string;
}
@ -78,6 +80,7 @@ router.post('/', async (req, res) => {
const embeddingModel =
body.embeddingModel?.model ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
const ollamaContextWindow = body.chatModel?.ollamaContextWindow || 2048;
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
@ -85,10 +88,12 @@ router.post('/', async (req, res) => {
if (body.chatModel?.provider === 'custom_openai') {
llm = new ChatOpenAI({
modelName: body.chatModel?.model || getCustomOpenaiModelName(),
openAIApiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
openAIApiKey:
body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
temperature: 0.7,
configuration: {
baseURL: body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
baseURL:
body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
},
}) as unknown as BaseChatModel;
} else if (
@ -97,6 +102,9 @@ router.post('/', async (req, res) => {
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
if (llm instanceof ChatOllama) {
llm.numCtx = ollamaContextWindow;
}
}
if (
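
For readers unfamiliar with `numCtx`: on `ChatOllama` it is forwarded to Ollama as the `num_ctx` option, i.e. the context window size in tokens. A standalone sketch, with host and model name as placeholders:

```ts
import { ChatOllama } from '@langchain/community/chat_models/ollama';

// numCtx maps to Ollama's num_ctx; history beyond this window is silently
// truncated by Ollama, and larger windows cost more memory.
const llm = new ChatOllama({
  baseUrl: 'http://localhost:11434', // Ollama's default address
  model: 'llama3.1',
  numCtx: 8192,
});

const reply = await llm.invoke('Summarize this change in one sentence.');
```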

View File

@ -10,12 +10,14 @@ import {
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
ollamaContextWindow?: number;
}
interface SuggestionsBody {
@ -60,6 +62,9 @@ router.post('/', async (req, res) => {
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
if (llm instanceof ChatOllama) {
llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
}
}
if (!llm) {

View File

@ -10,12 +10,14 @@ import {
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
const router = express.Router();
interface ChatModel {
provider: string;
model: string;
ollamaContextWindow?: number;
}
interface VideoSearchBody {
@ -61,6 +63,10 @@ router.post('/', async (req, res) => {
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
if (llm instanceof ChatOllama) {
llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
}
}
if (!llm) {

View File

@ -34,6 +34,7 @@ export interface MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
isCompact?: boolean,
) => Promise<eventEmitter>;
}
@ -44,6 +45,7 @@ interface Config {
rerankThreshold: number;
queryGeneratorPrompt: string;
responsePrompt: string;
preciseResponsePrompt: string;
activeEngines: string[];
}
@ -235,6 +237,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds: string[],
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
isCompact?: boolean,
) {
return RunnableSequence.from([
RunnableMap.from({
@ -278,7 +281,12 @@ class MetaSearchAgent implements MetaSearchAgentType {
.pipe(this.processDocs),
}),
ChatPromptTemplate.fromMessages([
['system', this.config.responsePrompt],
[
'system',
isCompact
? this.config.preciseResponsePrompt
: this.config.responsePrompt,
],
new MessagesPlaceholder('chat_history'),
['user', '{query}'],
]),
@ -465,6 +473,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
isCompact?: boolean,
) {
const emitter = new eventEmitter();
@ -473,6 +482,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds,
embeddings,
optimizationMode,
isCompact,
);
const stream = answeringChain.streamEvents(
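
Putting the new flag together, a hypothetical call site; the leading message and history parameters are assumed from the WebSocket handler shown below, and the values are illustrative:

```ts
// `handler` is a MetaSearchAgentType instance, e.g. one of the searchHandlers
// below. When isCompact is true, createAnsweringChain selects
// preciseResponsePrompt instead of responsePrompt for the system message.
const emitter = await handler.searchAndAnswer(
  'how tall is the eiffel tower?',
  chatHistory,
  llm,
  embeddings,
  'balanced', // optimizationMode
  [],         // fileIds
  true,       // isCompact
);
emitter.on('data', (data) => console.log(data)); // JSON-encoded stream events
```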

View File

@ -14,6 +14,7 @@ import {
getCustomOpenaiApiUrl,
getCustomOpenaiModelName,
} from '../config';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
export const handleConnection = async (
ws: WebSocket,
@ -42,6 +43,8 @@ export const handleConnection = async (
searchParams.get('embeddingModel') ||
Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
const ollamaContextWindow = searchParams.get('ollamaContextWindow');
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
@ -52,6 +55,9 @@ export const handleConnection = async (
) {
llm = chatModelProviders[chatModelProvider][chatModel]
.model as unknown as BaseChatModel | undefined;
if (llm instanceof ChatOllama) {
llm.numCtx = ollamaContextWindow ? parseInt(ollamaContextWindow) : 2048;
}
} else if (chatModelProvider == 'custom_openai') {
const customOpenaiApiKey = getCustomOpenaiApiKey();
const customOpenaiApiUrl = getCustomOpenaiApiUrl();
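
Since the WebSocket handshake carries model settings in the query string, the context window arrives as a string and must be parsed here. An illustrative connection URL; host, port, and model names are placeholders:

```ts
// The UI appends ollamaContextWindow alongside the other model parameters
// (see the useSocket change further down in this diff).
const url =
  'ws://localhost:3001?chatModel=llama3.1&chatModelProvider=ollama' +
  '&embeddingModel=bge-m3&embeddingModelProvider=ollama' +
  '&ollamaContextWindow=8192';
const ws = new WebSocket(url);
```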

View File

@ -26,6 +26,7 @@ type WSMessage = {
focusMode: string;
history: Array<[string, string]>;
files: Array<string>;
isCompact?: boolean;
};
export const searchHandlers = {
@ -33,6 +34,7 @@ export const searchHandlers = {
activeEngines: [],
queryGeneratorPrompt: prompts.webSearchRetrieverPrompt,
responsePrompt: prompts.webSearchResponsePrompt,
preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
@ -42,6 +44,7 @@ export const searchHandlers = {
activeEngines: ['arxiv', 'google scholar', 'pubmed'],
queryGeneratorPrompt: prompts.academicSearchRetrieverPrompt,
responsePrompt: prompts.academicSearchResponsePrompt,
preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: true,
@ -51,6 +54,7 @@ export const searchHandlers = {
activeEngines: [],
queryGeneratorPrompt: '',
responsePrompt: prompts.writingAssistantPrompt,
preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
rerank: true,
rerankThreshold: 0,
searchWeb: false,
@ -60,6 +64,7 @@ export const searchHandlers = {
activeEngines: ['wolframalpha'],
queryGeneratorPrompt: prompts.wolframAlphaSearchRetrieverPrompt,
responsePrompt: prompts.wolframAlphaSearchResponsePrompt,
preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
rerank: false,
rerankThreshold: 0,
searchWeb: true,
@ -69,6 +74,7 @@ export const searchHandlers = {
activeEngines: ['youtube'],
queryGeneratorPrompt: prompts.youtubeSearchRetrieverPrompt,
responsePrompt: prompts.youtubeSearchResponsePrompt,
preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
@ -78,6 +84,7 @@ export const searchHandlers = {
activeEngines: ['reddit'],
queryGeneratorPrompt: prompts.redditSearchRetrieverPrompt,
responsePrompt: prompts.redditSearchResponsePrompt,
preciseResponsePrompt: prompts.preciseWebSearchResponsePrompt,
rerank: true,
rerankThreshold: 0.3,
searchWeb: true,
@ -116,6 +123,7 @@ const handleEmitterEvents = (
sources = parsedData.data;
}
});
emitter.on('end', () => {
ws.send(JSON.stringify({ type: 'messageEnd', messageId: messageId }));
@ -132,6 +140,7 @@ const handleEmitterEvents = (
})
.execute();
});
emitter.on('error', (data) => {
const parsedData = JSON.parse(data);
ws.send(
@ -197,6 +206,7 @@ export const handleMessage = async (
embeddings,
parsedWSMessage.optimizationMode,
parsedWSMessage.files,
parsedWSMessage.isCompact,
);
handleEmitterEvents(emitter, ws, aiMessageId, parsedMessage.chatId);

View File

@ -23,6 +23,7 @@ interface SettingsType {
customOpenaiApiKey: string;
customOpenaiApiUrl: string;
customOpenaiModelName: string;
ollamaContextWindow: number;
}
interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
@ -112,6 +113,11 @@ const Page = () => {
const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
const [contextWindowSize, setContextWindowSize] = useState(2048);
const [isCustomContextWindow, setIsCustomContextWindow] = useState(false);
const predefinedContextSizes = [
1024, 2048, 3072, 4096, 8192, 16384, 32768, 65536, 131072,
];
useEffect(() => {
const fetchConfig = async () => {
@ -123,6 +129,7 @@ const Page = () => {
});
const data = (await res.json()) as SettingsType;
setConfig(data);
const chatModelProvidersKeys = Object.keys(data.chatModelProviders || {});
@ -171,6 +178,13 @@ const Page = () => {
setAutomaticVideoSearch(
localStorage.getItem('autoVideoSearch') === 'true',
);
const storedContextWindow = parseInt(
localStorage.getItem('ollamaContextWindow') ?? '2048',
);
setContextWindowSize(storedContextWindow);
setIsCustomContextWindow(
!predefinedContextSizes.includes(storedContextWindow),
);
setIsLoading(false);
};
@ -223,11 +237,11 @@ const Page = () => {
setChatModels(data.chatModelProviders || {});
setEmbeddingModels(data.embeddingModelProviders || {});
const currentProvider = selectedChatModelProvider;
const newProviders = Object.keys(data.chatModelProviders || {});
const currentChatProvider = selectedChatModelProvider;
const newChatProviders = Object.keys(data.chatModelProviders || {});
if (!currentProvider && newProviders.length > 0) {
const firstProvider = newProviders[0];
if (!currentChatProvider && newChatProviders.length > 0) {
const firstProvider = newChatProviders[0];
const firstModel = data.chatModelProviders[firstProvider]?.[0]?.name;
if (firstModel) {
@ -237,11 +251,11 @@ const Page = () => {
localStorage.setItem('chatModel', firstModel);
}
} else if (
currentProvider &&
currentChatProvider &&
(!data.chatModelProviders ||
!data.chatModelProviders[currentProvider] ||
!Array.isArray(data.chatModelProviders[currentProvider]) ||
data.chatModelProviders[currentProvider].length === 0)
!data.chatModelProviders[currentChatProvider] ||
!Array.isArray(data.chatModelProviders[currentChatProvider]) ||
data.chatModelProviders[currentChatProvider].length === 0)
) {
const firstValidProvider = Object.entries(
data.chatModelProviders || {},
@ -267,6 +281,55 @@ const Page = () => {
}
}
const currentEmbeddingProvider = selectedEmbeddingModelProvider;
const newEmbeddingProviders = Object.keys(
data.embeddingModelProviders || {},
);
if (!currentEmbeddingProvider && newEmbeddingProviders.length > 0) {
const firstProvider = newEmbeddingProviders[0];
const firstModel =
data.embeddingModelProviders[firstProvider]?.[0]?.name;
if (firstModel) {
setSelectedEmbeddingModelProvider(firstProvider);
setSelectedEmbeddingModel(firstModel);
localStorage.setItem('embeddingModelProvider', firstProvider);
localStorage.setItem('embeddingModel', firstModel);
}
} else if (
currentEmbeddingProvider &&
(!data.embeddingModelProviders ||
!data.embeddingModelProviders[currentEmbeddingProvider] ||
!Array.isArray(
data.embeddingModelProviders[currentEmbeddingProvider],
) ||
data.embeddingModelProviders[currentEmbeddingProvider].length === 0)
) {
const firstValidProvider = Object.entries(
data.embeddingModelProviders || {},
).find(
([_, models]) => Array.isArray(models) && models.length > 0,
)?.[0];
if (firstValidProvider) {
setSelectedEmbeddingModelProvider(firstValidProvider);
setSelectedEmbeddingModel(
data.embeddingModelProviders[firstValidProvider][0].name,
);
localStorage.setItem('embeddingModelProvider', firstValidProvider);
localStorage.setItem(
'embeddingModel',
data.embeddingModelProviders[firstValidProvider][0].name,
);
} else {
setSelectedEmbeddingModelProvider(null);
setSelectedEmbeddingModel(null);
localStorage.removeItem('embeddingModelProvider');
localStorage.removeItem('embeddingModel');
}
}
setConfig(data);
}
@ -278,6 +341,12 @@ const Page = () => {
localStorage.setItem('chatModelProvider', value);
} else if (key === 'chatModel') {
localStorage.setItem('chatModel', value);
} else if (key === 'embeddingModelProvider') {
localStorage.setItem('embeddingModelProvider', value);
} else if (key === 'embeddingModel') {
localStorage.setItem('embeddingModel', value);
} else if (key === 'ollamaContextWindow') {
localStorage.setItem('ollamaContextWindow', value.toString());
}
} catch (err) {
console.error('Failed to save:', err);
@ -436,7 +505,6 @@ const Page = () => {
const value = e.target.value;
setSelectedChatModelProvider(value);
saveConfig('chatModelProvider', value);
// Auto-select first model of new provider
const firstModel =
config.chatModelProviders[value]?.[0]?.name;
if (firstModel) {
@ -496,6 +564,78 @@ const Page = () => {
];
})()}
/>
{selectedChatModelProvider === 'ollama' && (
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Chat Context Window Size
</p>
<Select
value={
isCustomContextWindow
? 'custom'
: contextWindowSize.toString()
}
onChange={(e) => {
const value = e.target.value;
if (value === 'custom') {
setIsCustomContextWindow(true);
} else {
setIsCustomContextWindow(false);
const numValue = parseInt(value);
setContextWindowSize(numValue);
setConfig((prev) => ({
...prev!,
ollamaContextWindow: numValue,
}));
saveConfig('ollamaContextWindow', numValue);
}
}}
options={[
...predefinedContextSizes.map((size) => ({
value: size.toString(),
label: `${size.toLocaleString()} tokens`,
})),
{ value: 'custom', label: 'Custom...' },
]}
/>
{isCustomContextWindow && (
<div className="mt-2">
<Input
type="number"
min={512}
value={contextWindowSize}
placeholder="Custom context window size (minimum 512)"
isSaving={savingStates['ollamaContextWindow']}
onChange={(e) => {
// Allow any value to be typed
const value =
parseInt(e.target.value) ||
contextWindowSize;
setContextWindowSize(value);
}}
onSave={(value) => {
// Validate only when saving
const numValue = Math.max(
512,
parseInt(value) || 2048,
);
setContextWindowSize(numValue);
setConfig((prev) => ({
...prev!,
ollamaContextWindow: numValue,
}));
saveConfig('ollamaContextWindow', numValue);
}}
/>
</div>
)}
<p className="text-xs text-black/60 dark:text-white/60 mt-0.5">
{isCustomContextWindow
? 'Adjust the context window size for Ollama models (minimum 512 tokens)'
: 'Adjust the context window size for Ollama models'}
</p>
</div>
)}
</div>
)}
</div>
@ -554,6 +694,81 @@ const Page = () => {
</div>
</div>
)}
{config.embeddingModelProviders && (
<div className="flex flex-col space-y-4 mt-4 pt-4 border-t border-light-200 dark:border-dark-200">
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Embedding Model Provider
</p>
<Select
value={selectedEmbeddingModelProvider ?? undefined}
onChange={(e) => {
const value = e.target.value;
setSelectedEmbeddingModelProvider(value);
saveConfig('embeddingModelProvider', value);
const firstModel =
config.embeddingModelProviders[value]?.[0]?.name;
if (firstModel) {
setSelectedEmbeddingModel(firstModel);
saveConfig('embeddingModel', firstModel);
}
}}
options={Object.keys(config.embeddingModelProviders).map(
(provider) => ({
value: provider,
label:
provider.charAt(0).toUpperCase() +
provider.slice(1),
}),
)}
/>
</div>
{selectedEmbeddingModelProvider && (
<div className="flex flex-col space-y-1">
<p className="text-black/70 dark:text-white/70 text-sm">
Embedding Model
</p>
<Select
value={selectedEmbeddingModel ?? undefined}
onChange={(e) => {
const value = e.target.value;
setSelectedEmbeddingModel(value);
saveConfig('embeddingModel', value);
}}
options={(() => {
const embeddingModelProvider =
config.embeddingModelProviders[
selectedEmbeddingModelProvider
];
return embeddingModelProvider
? embeddingModelProvider.length > 0
? embeddingModelProvider.map((model) => ({
value: model.name,
label: model.displayName,
}))
: [
{
value: '',
label: 'No models available',
disabled: true,
},
]
: [
{
value: '',
label:
'Invalid provider, please check backend logs',
disabled: true,
},
];
})()}
/>
</div>
)}
</div>
)}
</SettingsSection>
<SettingsSection title="API Keys">

View File

@ -16,9 +16,17 @@ const Chat = ({
setFileIds,
files,
setFiles,
isCompact,
setIsCompact,
optimizationMode,
setOptimizationMode,
}: {
messages: Message[];
sendMessage: (message: string) => void;
sendMessage: (
message: string,
messageId?: string,
options?: { isCompact?: boolean },
) => void;
loading: boolean;
messageAppeared: boolean;
rewrite: (messageId: string) => void;
@ -26,6 +34,10 @@ const Chat = ({
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
isCompact: boolean;
setIsCompact: (isCompact: boolean) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
}) => {
const [dividerWidth, setDividerWidth] = useState(0);
const dividerRef = useRef<HTMLDivElement | null>(null);
@ -71,6 +83,7 @@ const Chat = ({
dividerRef={isLast ? dividerRef : undefined}
isLast={isLast}
rewrite={rewrite}
isCompact={isCompact}
sendMessage={sendMessage}
/>
{!isLast && msg.role === 'assistant' && (
@ -83,7 +96,7 @@ const Chat = ({
<div ref={messageEnd} className="h-0" />
{dividerWidth > 0 && (
<div
className="bottom-24 lg:bottom-10 fixed z-40"
className="bottom-24 lg:bottom-10 fixed"
style={{ width: dividerWidth }}
>
<MessageInput
@ -93,6 +106,10 @@ const Chat = ({
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
isCompact={isCompact}
setIsCompact={setIsCompact}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
/>
</div>
)}
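
The widened `sendMessage` signature lets any call site override compact mode for a single message, for example (illustrative):

```ts
// The third argument is optional; when omitted, ChatWindow falls back to
// its persisted isCompact state.
sendMessage('summarize the last answer', undefined, { isCompact: true });
```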

View File

@ -197,6 +197,11 @@ const useSocket = (
'openAIBaseURL',
localStorage.getItem('openAIBaseURL')!,
);
} else {
searchParams.append(
'ollamaContextWindow',
localStorage.getItem('ollamaContextWindow') || '2048',
);
}
searchParams.append('embeddingModel', embeddingModel!);
@ -394,6 +399,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [focusMode, setFocusMode] = useState('webSearch');
const [optimizationMode, setOptimizationMode] = useState('speed');
const [isCompact, setIsCompact] = useState(false);
const [isMessagesLoaded, setIsMessagesLoaded] = useState(false);
@ -401,6 +407,21 @@ const ChatWindow = ({ id }: { id?: string }) => {
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
useEffect(() => {
const savedCompactMode = localStorage.getItem('compactMode');
const savedOptimizationMode = localStorage.getItem('optimizationMode');
if (savedCompactMode !== null) {
setIsCompact(savedCompactMode === 'true');
}
if (savedOptimizationMode !== null) {
setOptimizationMode(savedOptimizationMode);
} else {
localStorage.setItem('optimizationMode', optimizationMode);
}
}, []);
useEffect(() => {
if (
chatId &&
@ -451,7 +472,11 @@ const ChatWindow = ({ id }: { id?: string }) => {
}
}, [isMessagesLoaded, isWSReady]);
const sendMessage = async (message: string, messageId?: string) => {
const sendMessage = async (
message: string,
messageId?: string,
options?: { isCompact?: boolean },
) => {
if (loading) return;
if (!ws || ws.readyState !== WebSocket.OPEN) {
toast.error('Cannot send message while disconnected');
@ -466,21 +491,21 @@ const ChatWindow = ({ id }: { id?: string }) => {
let added = false;
messageId = messageId ?? crypto.randomBytes(7).toString('hex');
ws.send(
JSON.stringify({
type: 'message',
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: [...chatHistory, ['human', message]],
}),
);
let messageData = {
type: 'message',
message: {
messageId: messageId,
chatId: chatId!,
content: message,
},
files: fileIds,
focusMode: focusMode,
optimizationMode: optimizationMode,
history: [...chatHistory, ['human', message]],
isCompact: options?.isCompact ?? isCompact,
};
//console.log('sending:', messageData, JSON.stringify(messageData));
ws.send(JSON.stringify(messageData));
setMessages((prevMessages) => [
...prevMessages,
@ -610,12 +635,12 @@ const ChatWindow = ({ id }: { id?: string }) => {
return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)];
});
sendMessage(message.content, message.messageId);
sendMessage(message.content, message.messageId, { isCompact });
};
useEffect(() => {
if (isReady && initialMessage && ws?.readyState === 1) {
sendMessage(initialMessage);
sendMessage(initialMessage, undefined, { isCompact });
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [ws?.readyState, isReady, initialMessage, isWSReady]);
@ -655,6 +680,10 @@ const ChatWindow = ({ id }: { id?: string }) => {
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
isCompact={isCompact}
setIsCompact={setIsCompact}
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
/>
</>
) : (
@ -668,6 +697,8 @@ const ChatWindow = ({ id }: { id?: string }) => {
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
isCompact={isCompact}
setIsCompact={setIsCompact}
/>
)}
</div>

View File

@ -14,6 +14,8 @@ const EmptyChat = ({
setFileIds,
files,
setFiles,
isCompact,
setIsCompact,
}: {
sendMessage: (message: string) => void;
focusMode: string;
@ -24,6 +26,8 @@ const EmptyChat = ({
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
isCompact: boolean;
setIsCompact: (isCompact: boolean) => void;
}) => {
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
@ -48,6 +52,8 @@ const EmptyChat = ({
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
isCompact={isCompact}
setIsCompact={setIsCompact}
/>
</div>
</div>

View File

@ -17,8 +17,14 @@ const EmptyChatMessageInput = ({
setFileIds,
files,
setFiles,
isCompact,
setIsCompact,
}: {
sendMessage: (message: string) => void;
sendMessage: (
message: string,
messageId?: string,
options?: { isCompact?: boolean },
) => void;
focusMode: string;
setFocusMode: (mode: string) => void;
optimizationMode: string;
@ -27,6 +33,8 @@ const EmptyChatMessageInput = ({
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
isCompact: boolean;
setIsCompact: (isCompact: boolean) => void;
}) => {
const [copilotEnabled, setCopilotEnabled] = useState(false);
const [message, setMessage] = useState('');
@ -61,13 +69,13 @@ const EmptyChatMessageInput = ({
<form
onSubmit={(e) => {
e.preventDefault();
sendMessage(message);
sendMessage(message, undefined, { isCompact });
setMessage('');
}}
onKeyDown={(e) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
sendMessage(message);
sendMessage(message, undefined, { isCompact });
setMessage('');
}
}}
@ -97,6 +105,8 @@ const EmptyChatMessageInput = ({
<Optimization
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
isCompact={isCompact}
setIsCompact={setIsCompact}
/>
<button
disabled={message.trim().length === 0}

View File

@ -28,6 +28,7 @@ const MessageBox = ({
dividerRef,
isLast,
rewrite,
isCompact,
sendMessage,
}: {
message: Message;
@ -37,7 +38,12 @@ const MessageBox = ({
dividerRef?: MutableRefObject<HTMLDivElement | null>;
isLast: boolean;
rewrite: (messageId: string) => void;
sendMessage: (message: string) => void;
isCompact: boolean;
sendMessage: (
message: string,
messageId?: string,
options?: { isCompact?: boolean },
) => void;
}) => {
const [parsedMessage, setParsedMessage] = useState(message.content);
const [speechMessage, setSpeechMessage] = useState(message.content);
@ -65,6 +71,10 @@ const MessageBox = ({
const { speechStatus, start, stop } = useSpeech({ text: speechMessage });
const handleSuggestionClick = (suggestion: string) => {
sendMessage(suggestion, undefined, { isCompact });
};
return (
<div>
{message.role === 'user' && (
@ -163,7 +173,7 @@ const MessageBox = ({
<div className="h-px w-full bg-light-secondary dark:bg-dark-secondary" />
<div
onClick={() => {
sendMessage(suggestion);
handleSuggestionClick(suggestion);
}}
className="cursor-pointer flex flex-row justify-between font-medium space-x-2 items-center"
>

View File

@ -4,6 +4,7 @@ import { useEffect, useRef, useState } from 'react';
import TextareaAutosize from 'react-textarea-autosize';
import Attach from './MessageInputActions/Attach';
import CopilotToggle from './MessageInputActions/Copilot';
import Optimization from './MessageInputActions/Optimization';
import { File } from './ChatWindow';
import AttachSmall from './MessageInputActions/AttachSmall';
@ -14,13 +15,25 @@ const MessageInput = ({
setFileIds,
files,
setFiles,
isCompact,
setIsCompact,
optimizationMode,
setOptimizationMode,
}: {
sendMessage: (message: string) => void;
sendMessage: (
message: string,
messageId?: string,
options?: { isCompact?: boolean },
) => void;
loading: boolean;
fileIds: string[];
setFileIds: (fileIds: string[]) => void;
files: File[];
setFiles: (files: File[]) => void;
isCompact: boolean;
setIsCompact: (isCompact: boolean) => void;
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
}) => {
const [copilotEnabled, setCopilotEnabled] = useState(false);
const [message, setMessage] = useState('');
@ -40,20 +53,16 @@ const MessageInput = ({
useEffect(() => {
const handleKeyDown = (e: KeyboardEvent) => {
const activeElement = document.activeElement;
const isInputFocused =
activeElement?.tagName === 'INPUT' ||
activeElement?.tagName === 'TEXTAREA' ||
activeElement?.hasAttribute('contenteditable');
if (e.key === '/' && !isInputFocused) {
e.preventDefault();
inputRef.current?.focus();
}
};
document.addEventListener('keydown', handleKeyDown);
return () => {
document.removeEventListener('keydown', handleKeyDown);
};
@ -64,28 +73,36 @@ const MessageInput = ({
onSubmit={(e) => {
if (loading) return;
e.preventDefault();
sendMessage(message);
sendMessage(message, undefined, { isCompact });
setMessage('');
}}
onKeyDown={(e) => {
if (e.key === 'Enter' && !e.shiftKey && !loading) {
e.preventDefault();
sendMessage(message);
sendMessage(message, undefined, { isCompact });
setMessage('');
}
}}
className={cn(
'bg-light-secondary dark:bg-dark-secondary p-4 flex items-center overflow-hidden border border-light-200 dark:border-dark-200',
'bg-light-secondary dark:bg-dark-secondary p-4 flex items-center border border-light-200 dark:border-dark-200',
mode === 'multi' ? 'flex-col rounded-lg' : 'flex-row rounded-full',
)}
>
{mode === 'single' && (
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<div className="flex flex-row items-center space-x-2">
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<Optimization
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
isCompact={isCompact}
setIsCompact={setIsCompact}
/>
</div>
)}
<TextareaAutosize
ref={inputRef}
@ -113,12 +130,20 @@ const MessageInput = ({
)}
{mode === 'multi' && (
<div className="flex flex-row items-center justify-between w-full pt-2">
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<div className="flex flex-row items-center space-x-2">
<AttachSmall
fileIds={fileIds}
setFileIds={setFileIds}
files={files}
setFiles={setFiles}
/>
<Optimization
optimizationMode={optimizationMode}
setOptimizationMode={setOptimizationMode}
isCompact={isCompact}
setIsCompact={setIsCompact}
/>
</div>
<div className="flex flex-row items-center space-x-4">
<CopilotToggle
copilotEnabled={copilotEnabled}

View File

@ -1,4 +1,4 @@
import { ChevronDown, Sliders, Star, Zap } from 'lucide-react';
import { ChevronDown, Minimize2, Sliders, Star, Zap } from 'lucide-react';
import { cn } from '@/lib/utils';
import {
Popover,
@ -6,8 +6,7 @@ import {
PopoverPanel,
Transition,
} from '@headlessui/react';
import { Fragment } from 'react';
import { Fragment, useEffect } from 'react';
const OptimizationModes = [
{
key: 'speed',
@ -37,10 +36,33 @@ const OptimizationModes = [
const Optimization = ({
optimizationMode,
setOptimizationMode,
isCompact,
setIsCompact,
}: {
optimizationMode: string;
setOptimizationMode: (mode: string) => void;
isCompact: boolean;
setIsCompact: (isCompact: boolean) => void;
}) => {
useEffect(() => {
const savedCompactMode = localStorage.getItem('compactMode');
if (savedCompactMode === null) {
localStorage.setItem('compactMode', String(isCompact));
} else {
setIsCompact(savedCompactMode === 'true');
}
}, [setIsCompact]);
const handleCompactChange = (checked: boolean) => {
setIsCompact(checked);
localStorage.setItem('compactMode', String(checked));
};
const handleOptimizationChange = (mode: string) => {
setOptimizationMode(mode);
localStorage.setItem('optimizationMode', mode);
};
return (
<Popover className="relative w-full max-w-[15rem] md:max-w-md lg:max-w-lg">
<PopoverButton
@ -48,6 +70,12 @@ const Optimization = ({
className="p-2 text-black/50 dark:text-white/50 rounded-xl hover:bg-light-secondary dark:hover:bg-dark-secondary active:scale-95 transition duration-200 hover:text-black dark:hover:text-white"
>
<div className="flex flex-row items-center space-x-1">
{isCompact && (
<Minimize2
size={16}
className="text-gray-600 dark:text-gray-400"
/>
)}
{
OptimizationModes.find((mode) => mode.key === optimizationMode)
?.icon
@ -70,11 +98,11 @@ const Optimization = ({
leaveFrom="opacity-100 translate-y-0"
leaveTo="opacity-0 translate-y-1"
>
<PopoverPanel className="absolute z-10 w-64 md:w-[250px] right-0">
<PopoverPanel className="absolute z-10 w-64 md:w-[250px] right-0 bottom-[100%] mb-2">
<div className="flex flex-col gap-2 bg-light-primary dark:bg-dark-primary border rounded-lg border-light-200 dark:border-dark-200 w-full p-4 max-h-[200px] md:max-h-none overflow-y-auto">
{OptimizationModes.map((mode, i) => (
<PopoverButton
onClick={() => setOptimizationMode(mode.key)}
onClick={() => handleOptimizationChange(mode.key)}
key={i}
disabled={mode.key === 'quality'}
className={cn(
@ -94,6 +122,30 @@ const Optimization = ({
</p>
</PopoverButton>
))}
<div className="border-t border-light-200 dark:border-dark-200 pt-2 mt-1">
<label className="flex items-center space-x-2 p-2 rounded-lg cursor-pointer hover:bg-light-secondary dark:hover:bg-dark-secondary">
<input
type="checkbox"
checked={isCompact}
onChange={(e) => handleCompactChange(e.target.checked)}
className="form-checkbox h-4 w-4 text-blue-600 transition duration-150 ease-in-out"
/>
<div className="flex items-center space-x-2">
<Minimize2
size={16}
className="text-gray-600 dark:text-gray-400"
/>
<div>
<p className="text-sm font-medium text-black dark:text-white">
Compact Mode
</p>
<p className="text-xs text-black/70 dark:text-white/70">
Generate more concise responses
</p>
</div>
</div>
</label>
</div>
</div>
</PopoverPanel>
</Transition>
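
Both toggles now persist across page refreshes via localStorage; the keys involved, with illustrative values:

```ts
// Written by handleCompactChange and handleOptimizationChange above, and
// read back on startup by this component and by ChatWindow's useEffect.
localStorage.setItem('compactMode', 'true');
localStorage.setItem('optimizationMode', 'speed');
```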

View File

@ -33,9 +33,10 @@ const SearchImages = ({
const chatModelProvider = localStorage.getItem('chatModelProvider');
const chatModel = localStorage.getItem('chatModel');
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/images`,
@ -54,6 +55,9 @@ const SearchImages = ({
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
...(chatModelProvider === 'ollama' && {
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
}),
},
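
The conditional spread used here (and in SearchVideos and getSuggestions below) works because spreading `false` into an object literal is a no-op, so `ollamaContextWindow` is only included for the Ollama provider. A minimal illustration:

```ts
const provider = 'ollama';
const chatModel = {
  provider,
  model: 'llama3.1', // illustrative
  // when the condition is false, `...(false)` contributes no properties
  ...(provider === 'ollama' && { ollamaContextWindow: 8192 }),
};
// -> { provider: 'ollama', model: 'llama3.1', ollamaContextWindow: 8192 }
```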

View File

@ -48,9 +48,10 @@ const Searchvideos = ({
const chatModelProvider = localStorage.getItem('chatModelProvider');
const chatModel = localStorage.getItem('chatModel');
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
const res = await fetch(
`${process.env.NEXT_PUBLIC_API_URL}/videos`,
@ -69,6 +70,9 @@ const Searchvideos = ({
customOpenAIBaseURL: customOpenAIBaseURL,
customOpenAIKey: customOpenAIKey,
}),
...(chatModelProvider === 'ollama' && {
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
}),
},

View File

@ -6,6 +6,8 @@ export const getSuggestions = async (chatHisory: Message[]) => {
const customOpenAIKey = localStorage.getItem('openAIApiKey');
const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
const ollamaContextWindow =
localStorage.getItem('ollamaContextWindow') || '2048';
const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/suggestions`, {
method: 'POST',
@ -21,6 +23,9 @@ export const getSuggestions = async (chatHisory: Message[]) => {
customOpenAIKey,
customOpenAIBaseURL,
}),
...(chatModelProvider === 'ollama' && {
ollamaContextWindow: parseInt(ollamaContextWindow),
}),
},
}),
});