Mirror of https://github.com/ItzCrazyKns/Perplexica.git
Synced 2025-09-16 14:21:32 +00:00

Compare commits: 8d6243aaa7 ... 1dcab6225e (1 commit)

Commit 1dcab6225e
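In short: the provider previously normalized the configured LM Studio URL with an ensureV1Endpoint helper and probed the server with checkLMStudioAvailability before loading models. This commit removes both helpers along with the request timeouts, uses the configured endpoint verbatim, and updates the sample config so the /v1 suffix is written directly into API_URL.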
@@ -19,7 +19,7 @@ API_KEY = ""
 API_URL = "" # Ollama API URL - http://host.docker.internal:11434
 
 [MODELS.LMSTUDIO]
-API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234
+API_URL = "" # LM STUDIO API URL - http://host.docker.internal:1234/v1
 
 [MODELS.CUSTOM_OPENAI]
 API_KEY = ""
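The /v1 suffix in the example URL is now load-bearing: after this commit the provider no longer appends it, so whatever is configured is exactly what gets requested. A minimal sketch of the resulting request flow, assuming getLMStudioApiEndpoint() simply returns the configured API_URL string (the real accessor lives in '../../config'):

import axios from 'axios';

// Assumption for this sketch: the accessor just returns the configured value.
const getLMStudioApiEndpoint = (): string =>
  'http://host.docker.internal:1234/v1';

const listModels = async () => {
  // The endpoint is used as-is; without the /v1 suffix this would hit
  // http://host.docker.internal:1234/models, which LM Studio's
  // OpenAI-compatible server does not expose.
  const { data } = await axios.get(`${getLMStudioApiEndpoint()}/models`, {
    headers: { 'Content-Type': 'application/json' },
  });
  return data;
};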
@@ -4,10 +4,6 @@ import { getKeepAlive, getLMStudioApiEndpoint } from '../../config';
 import logger from '../../utils/logger';
 import axios from 'axios';
 
-const ensureV1Endpoint = (endpoint: string): string => {
-  return endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
-};
-
 interface LMStudioModel {
   id: string;
   // add other properties if LM Studio API provides them
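For reference, the deleted helper was a pure path normalizer; its full behavior:

// The removed helper, restated for illustration:
const ensureV1Endpoint = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

ensureV1Endpoint('http://host.docker.internal:1234');    // => 'http://host.docker.internal:1234/v1'
ensureV1Endpoint('http://host.docker.internal:1234/v1'); // => 'http://host.docker.internal:1234/v1'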
@@ -18,22 +14,6 @@ interface ChatModelConfig {
   model: ChatOpenAI;
 }
 
-const checkLMStudioAvailability = async (endpoint: string): Promise<boolean> => {
-  const v1Endpoint = ensureV1Endpoint(endpoint);
-  try {
-    await axios.get(`${v1Endpoint}/models`, {
-      timeout: 1000, // 1 second timeout
-      headers: {
-        'Content-Type': 'application/json',
-      },
-    });
-    return true;
-  } catch (err) {
-    logger.debug(`LM Studio server not available at ${endpoint}`);
-    return false;
-  }
-};
-
 export const loadLMStudioChatModels = async (): Promise<Record<string, ChatModelConfig>> => {
   const lmStudioEndpoint = getLMStudioApiEndpoint();
 
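Dropping checkLMStudioAvailability removes the cheap one-second pre-flight probe; an unreachable server now surfaces only as an error from the model-listing request itself. If a caller still wants a guard, something like the following sketch approximates the old behavior (not part of this commit; isLMStudioUp is a hypothetical name, and the endpoint is assumed to already include /v1 per the new config convention):

import axios from 'axios';

// Hypothetical re-creation of the removed probe, for illustration only.
const isLMStudioUp = async (endpoint: string): Promise<boolean> => {
  try {
    await axios.get(`${endpoint}/models`, { timeout: 1000 });
    return true;
  } catch {
    return false;
  }
};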
@@ -42,16 +22,8 @@ export const loadLMStudioChatModels = async (): Promise<Record<string, ChatModel
     return {};
   }
 
-  // Check if server is available before attempting to load models
-  const isAvailable = await checkLMStudioAvailability(lmStudioEndpoint);
-  if (!isAvailable) {
-    return {};
-  }
-
   try {
-    const v1Endpoint = ensureV1Endpoint(lmStudioEndpoint);
-    const response = await axios.get<{ data: LMStudioModel[] }>(`${v1Endpoint}/models`, {
-      timeout: 5000, // 5 second timeout for model loading
+    const response = await axios.get<{ data: LMStudioModel[] }>(`${lmStudioEndpoint}/models`, {
       headers: {
         'Content-Type': 'application/json',
       },
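Note that the 5-second timeout goes away along with the v1Endpoint indirection. Axios applies no timeout by default, so a hung (but reachable) server can now block model loading until the underlying connection gives up.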
@@ -65,7 +37,7 @@ export const loadLMStudioChatModels = async (): Promise<Record<string, ChatModel
         model: new ChatOpenAI({
           openAIApiKey: 'lm-studio',
           configuration: {
-            baseURL: ensureV1Endpoint(lmStudioEndpoint),
+            baseURL: lmStudioEndpoint,
           },
           modelName: model.id,
           temperature: 0.7,
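Because baseURL is now the raw configured value, the ChatOpenAI client composes OpenAI-style paths directly onto it; with the sample config above, chat completions should go to http://host.docker.internal:1234/v1/chat/completions. The 'lm-studio' key is a placeholder, since LangChain's OpenAI wrappers require some API key even for local servers. A usage sketch, assuming a LangChain version whose chat models expose .invoke and that at least one model is loaded:

const models = await loadLMStudioChatModels();
const anyModel = Object.values(models)[0]?.model;
// With API_URL = "http://host.docker.internal:1234/v1", this request goes
// to the LM Studio server's OpenAI-compatible chat completions route.
const reply = await anyModel?.invoke('Hello from Perplexica!');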
@@ -86,16 +58,8 @@ export const loadLMStudioEmbeddingsModels = async () => {
 
   if (!lmStudioEndpoint) return {};
 
-  // Check if server is available before attempting to load models
-  const isAvailable = await checkLMStudioAvailability(lmStudioEndpoint);
-  if (!isAvailable) {
-    return {};
-  }
-
   try {
-    const v1Endpoint = ensureV1Endpoint(lmStudioEndpoint);
-    const response = await axios.get(`${v1Endpoint}/models`, {
-      timeout: 5000, // 5 second timeout for model loading
+    const response = await axios.get(`${lmStudioEndpoint}/models`, {
       headers: {
         'Content-Type': 'application/json',
       },
@@ -109,7 +73,7 @@ export const loadLMStudioEmbeddingsModels = async () => {
         model: new OpenAIEmbeddings({
           openAIApiKey: 'lm-studio', // Dummy key required by LangChain
           configuration: {
-            baseURL: ensureV1Endpoint(lmStudioEndpoint),
+            baseURL: lmStudioEndpoint,
           },
           modelName: model.id,
         }),
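The embeddings loader mirrors the chat loader change exactly: same verbatim baseURL, same removed probe and timeout. Both code paths now share a single assumption, namely that the configured API_URL already points at the OpenAI-compatible /v1 root.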
@@ -122,4 +86,4 @@ export const loadLMStudioEmbeddingsModels = async () => {
     logger.error(`Error loading LM Studio embeddings model: ${err}`);
     return {};
   }
 };