Mirror of https://github.com/ItzCrazyKns/Perplexica.git
Synced 2025-09-16 06:11:32 +00:00

Compare commits: v1.10.0-rc...572f1311e9 (6 commits)

| SHA1 |
| --- |
| 572f1311e9 |
| 18b6f5b674 |
| 2bdcbf20fb |
| 115e6b2a71 |
| a5c79c92ed |
| db3cea446e |
````diff
@@ -7,34 +7,43 @@ To update Perplexica to the latest version, follow these steps:

 1. Clone the latest version of Perplexica from GitHub:

    ```bash
    git clone https://github.com/ItzCrazyKns/Perplexica.git
    ```

-2. Navigate to the Project Directory.
+2. Navigate to the project directory.

-3. Pull latest images from registry.
+3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
+
+4. Pull the latest images from the registry.

    ```bash
    docker compose pull
    ```

-4. Update and Recreate containers.
+5. Update and recreate the containers.

    ```bash
    docker compose up -d
    ```

-5. Once the command completes running go to http://localhost:3000 and verify the latest changes.
+6. Once the command completes, go to http://localhost:3000 and verify the latest changes.

-## For non Docker users
+## For non-Docker users

 1. Clone the latest version of Perplexica from GitHub:

    ```bash
    git clone https://github.com/ItzCrazyKns/Perplexica.git
    ```

-2. Navigate to the Project Directory
-3. Execute `npm i` in both the `ui` folder and the root directory.
-4. Once packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
-5. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.
+2. Navigate to the project directory.
+
+3. Check for changes in the configuration files. If the `sample.config.toml` file contains new fields, delete your existing `config.toml` file, rename `sample.config.toml` to `config.toml`, and update the configuration accordingly.
+
+4. Execute `npm i` in both the `ui` folder and the root directory.
+
+5. Once the packages are updated, execute `npm run build` in both the `ui` folder and the root directory.
+
+6. Finally, start both the frontend and the backend by running `npm run start` in both the `ui` folder and the root directory.

 ---
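Read end to end, the updated Docker flow boils down to a short shell session. A sketch, assuming the repository is cloned into a `Perplexica` directory and that `sample.config.toml` did gain new fields (otherwise skip the rename step):

```bash
# Clone (or pull) the latest version
git clone https://github.com/ItzCrazyKns/Perplexica.git
cd Perplexica

# If sample.config.toml gained new fields, start over from the sample
# and re-add your own values afterwards
mv sample.config.toml config.toml

# Pull the latest images and recreate the containers
docker compose pull
docker compose up -d

# Then verify the update at http://localhost:3000
```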
```diff
@@ -5,6 +5,7 @@ import { getAvailableChatModelProviders } from '../lib/providers';
 import { HumanMessage, AIMessage } from '@langchain/core/messages';
 import logger from '../utils/logger';
 import { ChatOpenAI } from '@langchain/openai';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
@@ -16,6 +17,7 @@ const router = express.Router();
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface ImageSearchBody {
@@ -61,6 +63,10 @@ router.post('/', async (req, res) => {
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+    }
   }
 
   if (!llm) {
```
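The same pattern recurs in each route below: once the chat model is resolved, and only when it turns out to be a `ChatOllama` instance, its context window (`numCtx`) is set from the request, falling back to 2048. A minimal standalone sketch of the idea; the helper name and the simplified selection type are ours, not the routes' actual code:

```typescript
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOllama } from '@langchain/community/chat_models/ollama';

// Simplified request-body shape, mirroring the ChatModel interfaces above.
interface ChatModelSelection {
  provider: string;
  model: string;
  ollamaContextWindow?: number; // only meaningful for Ollama models
}

// Apply the requested context window when (and only when) the model is Ollama;
// other providers pass through untouched.
function applyOllamaContextWindow(
  llm: BaseChatModel | undefined,
  selection?: ChatModelSelection,
): void {
  if (llm instanceof ChatOllama) {
    // Fall back to Ollama's conventional 2048-token default.
    llm.numCtx = selection?.ollamaContextWindow || 2048;
  }
}
```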
```diff
@@ -15,12 +15,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface chatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
   customOpenAIKey?: string;
   customOpenAIBaseURL?: string;
 }
@@ -78,6 +80,7 @@ router.post('/', async (req, res) => {
   const embeddingModel =
     body.embeddingModel?.model ||
     Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
+  const ollamaContextWindow = body.chatModel?.ollamaContextWindow || 2048;
 
   let llm: BaseChatModel | undefined;
   let embeddings: Embeddings | undefined;
@@ -85,10 +88,12 @@ router.post('/', async (req, res) => {
   if (body.chatModel?.provider === 'custom_openai') {
     llm = new ChatOpenAI({
       modelName: body.chatModel?.model || getCustomOpenaiModelName(),
-      openAIApiKey: body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
+      openAIApiKey:
+        body.chatModel?.customOpenAIKey || getCustomOpenaiApiKey(),
       temperature: 0.7,
       configuration: {
-        baseURL: body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
+        baseURL:
+          body.chatModel?.customOpenAIBaseURL || getCustomOpenaiApiUrl(),
       },
     }) as unknown as BaseChatModel;
   } else if (
@@ -97,6 +102,9 @@ router.post('/', async (req, res) => {
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = ollamaContextWindow;
+    }
   }
 
   if (
```
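On the wire, the new optional field simply rides along inside the `chatModel` object of the POST body. A hypothetical client call illustrating the shape; the endpoint URL, model names, and any fields other than `chatModel` are illustrative assumptions:

```typescript
// Hypothetical client call showing where ollamaContextWindow sits in the body.
async function search(query: string): Promise<unknown> {
  const res = await fetch('http://localhost:3001/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      query,
      chatModel: {
        provider: 'ollama',
        model: 'llama3', // assumed model name
        ollamaContextWindow: 4096, // optional; ignored for non-Ollama providers
      },
    }),
  });
  return res.json();
}
```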
```diff
@@ -10,12 +10,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface SuggestionsBody {
@@ -60,6 +62,9 @@ router.post('/', async (req, res) => {
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+    }
   }
 
   if (!llm) {
```
```diff
@@ -10,12 +10,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface VideoSearchBody {
@@ -61,6 +63,10 @@ router.post('/', async (req, res) => {
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+    }
   }
 
   if (!llm) {
```
```diff
@@ -14,6 +14,7 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 export const handleConnection = async (
   ws: WebSocket,
@@ -42,6 +43,8 @@ export const handleConnection = async (
     searchParams.get('embeddingModel') ||
     Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
 
+  const ollamaContextWindow = searchParams.get('ollamaContextWindow');
+
   let llm: BaseChatModel | undefined;
   let embeddings: Embeddings | undefined;
 
@@ -52,6 +55,9 @@ export const handleConnection = async (
   ) {
     llm = chatModelProviders[chatModelProvider][chatModel]
       .model as unknown as BaseChatModel | undefined;
+    if (llm instanceof ChatOllama) {
+      llm.numCtx = ollamaContextWindow ? parseInt(ollamaContextWindow) : 2048;
+    }
   } else if (chatModelProvider == 'custom_openai') {
     const customOpenaiApiKey = getCustomOpenaiApiKey();
     const customOpenaiApiUrl = getCustomOpenaiApiUrl();
```
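Over the WebSocket, the setting travels as a query parameter rather than a JSON field. A sketch of the client-side URL construction, inferred from this hunk and the `useSocket` change further down; the host, port, and model names are assumptions:

```typescript
// Build the WebSocket URL with the Ollama context window as a query parameter.
const searchParams = new URLSearchParams({
  chatModelProvider: 'ollama',
  chatModel: 'llama3', // assumed model name
  ollamaContextWindow: localStorage.getItem('ollamaContextWindow') || '2048',
  embeddingModel: 'nomic-embed-text', // assumed embedding model
});
const ws = new WebSocket(`ws://localhost:3001?${searchParams.toString()}`);

// Server side (handleConnection above): the string is read back with
// searchParams.get('ollamaContextWindow') and parseInt'd, defaulting to 2048.
```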
```diff
@@ -23,6 +23,7 @@ interface SettingsType {
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
   customOpenaiModelName: string;
+  ollamaContextWindow: number;
 }
 
 interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
@@ -112,6 +113,11 @@ const Page = () => {
   const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
   const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
   const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
+  const [contextWindowSize, setContextWindowSize] = useState(2048);
+  const [isCustomContextWindow, setIsCustomContextWindow] = useState(false);
+  const predefinedContextSizes = [
+    1024, 2048, 3072, 4096, 8192, 16384, 32768, 65536, 131072,
+  ];
 
   useEffect(() => {
     const fetchConfig = async () => {
@@ -123,6 +129,7 @@ const Page = () => {
       });
 
       const data = (await res.json()) as SettingsType;
+
       setConfig(data);
 
       const chatModelProvidersKeys = Object.keys(data.chatModelProviders || {});
@@ -171,6 +178,13 @@ const Page = () => {
       setAutomaticVideoSearch(
         localStorage.getItem('autoVideoSearch') === 'true',
       );
+      const storedContextWindow = parseInt(
+        localStorage.getItem('ollamaContextWindow') ?? '2048',
+      );
+      setContextWindowSize(storedContextWindow);
+      setIsCustomContextWindow(
+        !predefinedContextSizes.includes(storedContextWindow),
+      );
 
       setIsLoading(false);
     };
@@ -223,11 +237,11 @@ const Page = () => {
       setChatModels(data.chatModelProviders || {});
       setEmbeddingModels(data.embeddingModelProviders || {});
 
-      const currentProvider = selectedChatModelProvider;
-      const newProviders = Object.keys(data.chatModelProviders || {});
+      const currentChatProvider = selectedChatModelProvider;
+      const newChatProviders = Object.keys(data.chatModelProviders || {});
 
-      if (!currentProvider && newProviders.length > 0) {
-        const firstProvider = newProviders[0];
+      if (!currentChatProvider && newChatProviders.length > 0) {
+        const firstProvider = newChatProviders[0];
         const firstModel = data.chatModelProviders[firstProvider]?.[0]?.name;
 
         if (firstModel) {
@@ -237,11 +251,11 @@ const Page = () => {
           localStorage.setItem('chatModel', firstModel);
         }
       } else if (
-        currentProvider &&
+        currentChatProvider &&
         (!data.chatModelProviders ||
-          !data.chatModelProviders[currentProvider] ||
-          !Array.isArray(data.chatModelProviders[currentProvider]) ||
-          data.chatModelProviders[currentProvider].length === 0)
+          !data.chatModelProviders[currentChatProvider] ||
+          !Array.isArray(data.chatModelProviders[currentChatProvider]) ||
+          data.chatModelProviders[currentChatProvider].length === 0)
       ) {
         const firstValidProvider = Object.entries(
          data.chatModelProviders || {},
@@ -267,6 +281,55 @@ const Page = () => {
         }
       }
 
+      const currentEmbeddingProvider = selectedEmbeddingModelProvider;
+      const newEmbeddingProviders = Object.keys(
+        data.embeddingModelProviders || {},
+      );
+
+      if (!currentEmbeddingProvider && newEmbeddingProviders.length > 0) {
+        const firstProvider = newEmbeddingProviders[0];
+        const firstModel =
+          data.embeddingModelProviders[firstProvider]?.[0]?.name;
+
+        if (firstModel) {
+          setSelectedEmbeddingModelProvider(firstProvider);
+          setSelectedEmbeddingModel(firstModel);
+          localStorage.setItem('embeddingModelProvider', firstProvider);
+          localStorage.setItem('embeddingModel', firstModel);
+        }
+      } else if (
+        currentEmbeddingProvider &&
+        (!data.embeddingModelProviders ||
+          !data.embeddingModelProviders[currentEmbeddingProvider] ||
+          !Array.isArray(
+            data.embeddingModelProviders[currentEmbeddingProvider],
+          ) ||
+          data.embeddingModelProviders[currentEmbeddingProvider].length === 0)
+      ) {
+        const firstValidProvider = Object.entries(
+          data.embeddingModelProviders || {},
+        ).find(
+          ([_, models]) => Array.isArray(models) && models.length > 0,
+        )?.[0];
+
+        if (firstValidProvider) {
+          setSelectedEmbeddingModelProvider(firstValidProvider);
+          setSelectedEmbeddingModel(
+            data.embeddingModelProviders[firstValidProvider][0].name,
+          );
+          localStorage.setItem('embeddingModelProvider', firstValidProvider);
+          localStorage.setItem(
+            'embeddingModel',
+            data.embeddingModelProviders[firstValidProvider][0].name,
+          );
+        } else {
+          setSelectedEmbeddingModelProvider(null);
+          setSelectedEmbeddingModel(null);
+          localStorage.removeItem('embeddingModelProvider');
+          localStorage.removeItem('embeddingModel');
+        }
+      }
+
       setConfig(data);
     }
@@ -278,6 +341,12 @@ const Page = () => {
         localStorage.setItem('chatModelProvider', value);
       } else if (key === 'chatModel') {
         localStorage.setItem('chatModel', value);
+      } else if (key === 'embeddingModelProvider') {
+        localStorage.setItem('embeddingModelProvider', value);
+      } else if (key === 'embeddingModel') {
+        localStorage.setItem('embeddingModel', value);
+      } else if (key === 'ollamaContextWindow') {
+        localStorage.setItem('ollamaContextWindow', value.toString());
       }
     } catch (err) {
       console.error('Failed to save:', err);
@@ -436,7 +505,6 @@ const Page = () => {
                     const value = e.target.value;
                     setSelectedChatModelProvider(value);
                     saveConfig('chatModelProvider', value);
-                    // Auto-select first model of new provider
                     const firstModel =
                       config.chatModelProviders[value]?.[0]?.name;
                     if (firstModel) {
@@ -496,6 +564,78 @@ const Page = () => {
                       ];
                     })()}
                   />
+                  {selectedChatModelProvider === 'ollama' && (
+                    <div className="flex flex-col space-y-1">
+                      <p className="text-black/70 dark:text-white/70 text-sm">
+                        Chat Context Window Size
+                      </p>
+                      <Select
+                        value={
+                          isCustomContextWindow
+                            ? 'custom'
+                            : contextWindowSize.toString()
+                        }
+                        onChange={(e) => {
+                          const value = e.target.value;
+                          if (value === 'custom') {
+                            setIsCustomContextWindow(true);
+                          } else {
+                            setIsCustomContextWindow(false);
+                            const numValue = parseInt(value);
+                            setContextWindowSize(numValue);
+                            setConfig((prev) => ({
+                              ...prev!,
+                              ollamaContextWindow: numValue,
+                            }));
+                            saveConfig('ollamaContextWindow', numValue);
+                          }
+                        }}
+                        options={[
+                          ...predefinedContextSizes.map((size) => ({
+                            value: size.toString(),
+                            label: `${size.toLocaleString()} tokens`,
+                          })),
+                          { value: 'custom', label: 'Custom...' },
+                        ]}
+                      />
+                      {isCustomContextWindow && (
+                        <div className="mt-2">
+                          <Input
+                            type="number"
+                            min={512}
+                            value={contextWindowSize}
+                            placeholder="Custom context window size (minimum 512)"
+                            isSaving={savingStates['ollamaContextWindow']}
+                            onChange={(e) => {
+                              // Allow any value to be typed
+                              const value =
+                                parseInt(e.target.value) ||
+                                contextWindowSize;
+                              setContextWindowSize(value);
+                            }}
+                            onSave={(value) => {
+                              // Validate only when saving
+                              const numValue = Math.max(
+                                512,
+                                parseInt(value) || 2048,
+                              );
+                              setContextWindowSize(numValue);
+                              setConfig((prev) => ({
+                                ...prev!,
+                                ollamaContextWindow: numValue,
+                              }));
+                              saveConfig('ollamaContextWindow', numValue);
+                            }}
+                          />
+                        </div>
+                      )}
+                      <p className="text-xs text-black/60 dark:text-white/60 mt-0.5">
+                        {isCustomContextWindow
+                          ? 'Adjust the context window size for Ollama models (minimum 512 tokens)'
+                          : 'Adjust the context window size for Ollama models'}
+                      </p>
+                    </div>
+                  )}
                 </div>
               )}
             </div>
@@ -554,6 +694,81 @@ const Page = () => {
               </div>
             </div>
           )}
+
+          {config.embeddingModelProviders && (
+            <div className="flex flex-col space-y-4 mt-4 pt-4 border-t border-light-200 dark:border-dark-200">
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  Embedding Model Provider
+                </p>
+                <Select
+                  value={selectedEmbeddingModelProvider ?? undefined}
+                  onChange={(e) => {
+                    const value = e.target.value;
+                    setSelectedEmbeddingModelProvider(value);
+                    saveConfig('embeddingModelProvider', value);
+                    const firstModel =
+                      config.embeddingModelProviders[value]?.[0]?.name;
+                    if (firstModel) {
+                      setSelectedEmbeddingModel(firstModel);
+                      saveConfig('embeddingModel', firstModel);
+                    }
+                  }}
+                  options={Object.keys(config.embeddingModelProviders).map(
+                    (provider) => ({
+                      value: provider,
+                      label:
+                        provider.charAt(0).toUpperCase() +
+                        provider.slice(1),
+                    }),
+                  )}
+                />
+              </div>
+
+              {selectedEmbeddingModelProvider && (
+                <div className="flex flex-col space-y-1">
+                  <p className="text-black/70 dark:text-white/70 text-sm">
+                    Embedding Model
+                  </p>
+                  <Select
+                    value={selectedEmbeddingModel ?? undefined}
+                    onChange={(e) => {
+                      const value = e.target.value;
+                      setSelectedEmbeddingModel(value);
+                      saveConfig('embeddingModel', value);
+                    }}
+                    options={(() => {
+                      const embeddingModelProvider =
+                        config.embeddingModelProviders[
+                          selectedEmbeddingModelProvider
+                        ];
+                      return embeddingModelProvider
+                        ? embeddingModelProvider.length > 0
+                          ? embeddingModelProvider.map((model) => ({
+                              value: model.name,
+                              label: model.displayName,
+                            }))
+                          : [
+                              {
+                                value: '',
+                                label: 'No models available',
+                                disabled: true,
+                              },
+                            ]
+                        : [
+                            {
+                              value: '',
+                              label:
+                                'Invalid provider, please check backend logs',
+                              disabled: true,
+                            },
+                          ];
+                    })()}
+                  />
+                </div>
+              )}
+            </div>
+          )}
         </SettingsSection>
 
         <SettingsSection title="API Keys">
```
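The interesting detail in the settings UI is that typing is unconstrained while saving clamps the value: `onChange` accepts anything, and `onSave` enforces the 512-token floor. A standalone sketch of that validation rule; the function name is ours, not the page's:

```typescript
// Hypothetical helper mirroring the page's onSave logic: let the user type
// anything, but clamp to a sane context window when persisting.
function clampContextWindow(raw: string): number {
  const parsed = parseInt(raw) || 2048; // non-numeric input falls back to 2048
  return Math.max(512, parsed); // enforce the 512-token minimum
}

// Examples:
clampContextWindow('131072'); // 131072 (custom large window)
clampContextWindow('100');    // 512    (below the minimum, clamped up)
clampContextWindow('abc');    // 2048   (unparseable, default)
```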
```diff
@@ -197,6 +197,11 @@ const useSocket = (
         'openAIBaseURL',
         localStorage.getItem('openAIBaseURL')!,
       );
+    } else {
+      searchParams.append(
+        'ollamaContextWindow',
+        localStorage.getItem('ollamaContextWindow') || '2048',
+      );
     }
 
     searchParams.append('embeddingModel', embeddingModel!);
```
```diff
@@ -33,9 +33,10 @@ const SearchImages = ({
 
     const chatModelProvider = localStorage.getItem('chatModelProvider');
     const chatModel = localStorage.getItem('chatModel');
-
     const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
     const customOpenAIKey = localStorage.getItem('openAIApiKey');
+    const ollamaContextWindow =
+      localStorage.getItem('ollamaContextWindow') || '2048';
 
     const res = await fetch(
       `${process.env.NEXT_PUBLIC_API_URL}/images`,
@@ -54,6 +55,9 @@ const SearchImages = ({
             customOpenAIBaseURL: customOpenAIBaseURL,
             customOpenAIKey: customOpenAIKey,
           }),
+          ...(chatModelProvider === 'ollama' && {
+            ollamaContextWindow: parseInt(ollamaContextWindow),
+          }),
         },
       }),
     },
```
```diff
@@ -48,9 +48,10 @@ const Searchvideos = ({
 
     const chatModelProvider = localStorage.getItem('chatModelProvider');
     const chatModel = localStorage.getItem('chatModel');
-
     const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
     const customOpenAIKey = localStorage.getItem('openAIApiKey');
+    const ollamaContextWindow =
+      localStorage.getItem('ollamaContextWindow') || '2048';
 
     const res = await fetch(
       `${process.env.NEXT_PUBLIC_API_URL}/videos`,
@@ -69,6 +70,9 @@ const Searchvideos = ({
             customOpenAIBaseURL: customOpenAIBaseURL,
             customOpenAIKey: customOpenAIKey,
           }),
+          ...(chatModelProvider === 'ollama' && {
+            ollamaContextWindow: parseInt(ollamaContextWindow),
+          }),
         },
       }),
     },
```
```diff
@@ -6,6 +6,8 @@ export const getSuggestions = async (chatHisory: Message[]) => {
 
   const customOpenAIKey = localStorage.getItem('openAIApiKey');
   const customOpenAIBaseURL = localStorage.getItem('openAIBaseURL');
+  const ollamaContextWindow =
+    localStorage.getItem('ollamaContextWindow') || '2048';
 
   const res = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/suggestions`, {
     method: 'POST',
@@ -21,6 +23,9 @@ export const getSuggestions = async (chatHisory: Message[]) => {
       customOpenAIKey,
       customOpenAIBaseURL,
     }),
+    ...(chatModelProvider === 'ollama' && {
+      ollamaContextWindow: parseInt(ollamaContextWindow),
+    }),
     },
   }),
 });
```
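All three frontend call sites use the same trick to keep the field provider-specific: a conditional object spread that contributes `ollamaContextWindow` only when the provider is Ollama. A minimal sketch of the pattern in isolation; the helper function is hypothetical:

```typescript
// Conditional spread: `false && {...}` evaluates to false, and spreading a
// falsy value into an object literal adds nothing, so the key is simply
// absent from the payload for non-Ollama providers.
function buildChatModelPayload(provider: string, model: string) {
  const ollamaContextWindow =
    localStorage.getItem('ollamaContextWindow') || '2048';

  return {
    provider,
    model,
    ...(provider === 'ollama' && {
      ollamaContextWindow: parseInt(ollamaContextWindow),
    }),
  };
}

buildChatModelPayload('ollama', 'llama3');
// -> { provider: 'ollama', model: 'llama3', ollamaContextWindow: 2048 }
buildChatModelPayload('openai', 'gpt-4o-mini');
// -> { provider: 'openai', model: 'gpt-4o-mini' }
```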