User-customizable context window for Ollama models.
@@ -5,6 +5,7 @@ import { getAvailableChatModelProviders } from '../lib/providers';
 import { HumanMessage, AIMessage } from '@langchain/core/messages';
 import logger from '../utils/logger';
 import { ChatOpenAI } from '@langchain/openai';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
@@ -16,6 +17,7 @@ const router = express.Router();
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface ImageSearchBody {
@@ -61,6 +63,10 @@ router.post('/', async (req, res) => {
     ) {
       llm = chatModelProviders[chatModelProvider][chatModel]
         .model as unknown as BaseChatModel | undefined;
+
+      if (llm instanceof ChatOllama) {
+        llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+      }
     }
 
     if (!llm) {
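Each route guards the new assignment with an instanceof check, so only Ollama-backed models get a numCtx. A minimal sketch of how a client might exercise the field on the route above; the endpoint path, port, model name, and the body fields other than chatModel are assumptions, not confirmed by this diff:

// Hypothetical client call; path, port, and field values are assumptions.
const res = await fetch('http://localhost:3001/api/images', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    query: 'aurora borealis',
    chatHistory: [],
    chatModel: {
      provider: 'ollama',
      model: 'llama3',
      ollamaContextWindow: 4096, // omit to fall back to the 2048 default
    },
  }),
});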
@@ -15,12 +15,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface chatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
   customOpenAIKey?: string;
   customOpenAIBaseURL?: string;
 }
@@ -78,6 +80,7 @@ router.post('/', async (req, res) => {
     const embeddingModel =
       body.embeddingModel?.model ||
       Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
+    const ollamaContextWindow = body.chatModel?.ollamaContextWindow || 2048;
 
     let llm: BaseChatModel | undefined;
     let embeddings: Embeddings | undefined;
@@ -99,6 +102,9 @@ router.post('/', async (req, res) => {
     ) {
       llm = chatModelProviders[chatModelProvider][chatModel]
         .model as unknown as BaseChatModel | undefined;
+      if (llm instanceof ChatOllama) {
+        llm.numCtx = ollamaContextWindow;
+      }
     }
 
     if (
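This route hoists the 2048 fallback into a const before the provider branches. What the instanceof branch ultimately does can be shown standalone; a sketch assuming a local Ollama server and the same @langchain/community package the diff imports from (baseUrl and model are assumed values):

import { ChatOllama } from '@langchain/community/chat_models/ollama';

// numCtx is the same field the diff sets on the resolved model.
const llm = new ChatOllama({
  baseUrl: 'http://localhost:11434', // default Ollama address (assumption)
  model: 'llama3',                   // hypothetical model name
  numCtx: 4096,                      // context window size in tokens
});

const reply = await llm.invoke('Say hello in five words.');
console.log(reply.content);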
@@ -10,12 +10,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface SuggestionsBody {
@@ -60,6 +62,9 @@ router.post('/', async (req, res) => {
     ) {
       llm = chatModelProviders[chatModelProvider][chatModel]
         .model as unknown as BaseChatModel | undefined;
+      if (llm instanceof ChatOllama) {
+        llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+      }
     }
 
     if (!llm) {
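The suggestions route repeats the same guard-and-assign pattern. A helper like the following could consolidate it across routes; this is purely illustrative and not part of the commit:

import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ChatOllama } from '@langchain/community/chat_models/ollama';

// Hypothetical helper: the instanceof check narrows the union type,
// so numCtx can be assigned without a cast.
function applyOllamaContextWindow(
  llm: BaseChatModel | undefined,
  contextWindow?: number,
): void {
  if (llm instanceof ChatOllama) {
    llm.numCtx = contextWindow || 2048;
  }
}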
@@ -10,12 +10,14 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 const router = express.Router();
 
 interface ChatModel {
   provider: string;
   model: string;
+  ollamaContextWindow?: number;
 }
 
 interface VideoSearchBody {
@@ -61,6 +63,10 @@ router.post('/', async (req, res) => {
     ) {
       llm = chatModelProviders[chatModelProvider][chatModel]
         .model as unknown as BaseChatModel | undefined;
+
+      if (llm instanceof ChatOllama) {
+        llm.numCtx = body.chatModel?.ollamaContextWindow || 2048;
+      }
     }
 
     if (!llm) {
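One nuance of the "|| 2048" fallback used throughout: it treats any falsy value, including an explicit 0, as unset. If an explicit 0 should be preserved rather than defaulted, nullish coalescing would be the stricter choice:

// ?? only falls back on null/undefined, not on 0.
const ctx = body.chatModel?.ollamaContextWindow ?? 2048;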
@@ -14,6 +14,7 @@ import {
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
+import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 export const handleConnection = async (
   ws: WebSocket,
@@ -42,6 +43,8 @@ export const handleConnection = async (
       searchParams.get('embeddingModel') ||
       Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
 
+    const ollamaContextWindow = searchParams.get('ollamaContextWindow');
+
     let llm: BaseChatModel | undefined;
     let embeddings: Embeddings | undefined;
 
@@ -52,6 +55,9 @@ export const handleConnection = async (
     ) {
       llm = chatModelProviders[chatModelProvider][chatModel]
         .model as unknown as BaseChatModel | undefined;
+      if (llm instanceof ChatOllama) {
+        llm.numCtx = ollamaContextWindow ? parseInt(ollamaContextWindow) : 2048;
+      }
     } else if (chatModelProvider == 'custom_openai') {
      const customOpenaiApiKey = getCustomOpenaiApiKey();
      const customOpenaiApiUrl = getCustomOpenaiApiUrl();
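On the WebSocket side the setting arrives as a URL query parameter rather than a JSON body, which is why the handler parses it with parseInt. A sketch of the client end; the parameter name follows searchParams.get above, while the port, other parameter names, and model name are assumptions:

// Query-string values are always strings, hence parseInt server-side.
const params = new URLSearchParams({
  chatModelProvider: 'ollama',   // assumed parameter, mirroring the routes
  chatModel: 'llama3',           // hypothetical model name
  ollamaContextWindow: '4096',   // name taken from the diff above
});
const ws = new WebSocket(`ws://localhost:3001?${params.toString()}`);
ws.onopen = () => console.log('connected');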