Mirror of https://github.com/ItzCrazyKns/Perplexica.git, synced 2025-06-06 18:08:38 +00:00
LM Studio Integration:
- Added LM Studio provider with OpenAI-compatible API support
- Dynamic model discovery via the /v1/models endpoint (sketched below)
- Support for both chat and embeddings models
- Docker-compatible networking configuration

Thinking Model Panel:
- Added collapsible UI panel for the model's chain of thought
- Parses responses with <think> tags to separate reasoning (see the parsing sketch below)
- Maintains backward compatibility with regular responses
- Styled consistently with the app theme for light/dark modes
- Preserves all existing message functionality (sources, markdown, etc.)

These improvements enhance the app's compatibility with local LLMs and provide better visibility into model reasoning processes while maintaining existing functionality.
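Below is a minimal sketch of the dynamic model discovery described above, assuming LM Studio exposes the standard OpenAI-compatible GET /v1/models endpoint and that fetch is available (Node 18+). The function name loadLMStudioModels and the relative import path are illustrative, not the commit's actual code; getLMStudioApiEndpoint is the real getter from the config module shown further down.

import { getLMStudioApiEndpoint } from './config';

interface LMStudioModel {
  id: string;
  object: string;
}

// Ask LM Studio which models are currently available. The { data: [...] }
// response shape follows the OpenAI /v1/models specification.
const loadLMStudioModels = async (): Promise<LMStudioModel[]> => {
  const endpoint = getLMStudioApiEndpoint(); // e.g. http://host.docker.internal:1234 from inside Docker (illustrative)
  if (!endpoint) return [];

  try {
    const res = await fetch(`${endpoint}/v1/models`);
    if (!res.ok) throw new Error(`LM Studio returned HTTP ${res.status}`);
    const body = (await res.json()) as { data: LMStudioModel[] };
    return body.data;
  } catch (err) {
    console.error(`Error loading LM Studio models: ${err}`);
    return [];
  }
};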
122 lines
2.9 KiB
TypeScript
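And a minimal sketch of the <think>-tag separation the panel relies on, assuming the model's response arrives as a single string; parseThinkingResponse and its return shape are hypothetical names for illustration only.

interface ParsedResponse {
  reasoning: string | null; // contents of the <think> block, if present
  answer: string; // the response with the <think> block removed
}

// Split a model response into its chain of thought and the final answer.
// Responses without <think> tags pass through unchanged, which is how the
// backward compatibility mentioned in the commit message can be preserved.
const parseThinkingResponse = (response: string): ParsedResponse => {
  const match = response.match(/<think>([\s\S]*?)<\/think>/);

  if (!match) {
    return { reasoning: null, answer: response };
  }

  return {
    reasoning: match[1].trim(),
    answer: response.replace(match[0], '').trim(),
  };
};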
import fs from 'fs';
import path from 'path';
import toml from '@iarna/toml';

const configFileName = 'config.toml';

interface Config {
  GENERAL: {
    PORT: number;
    SIMILARITY_MEASURE: string;
    KEEP_ALIVE: string;
  };
  MODELS: {
    OPENAI: {
      API_KEY: string;
    };
    GROQ: {
      API_KEY: string;
    };
    ANTHROPIC: {
      API_KEY: string;
    };
    GEMINI: {
      API_KEY: string;
    };
    OLLAMA: {
      API_URL: string;
    };
    CUSTOM_OPENAI: {
      API_URL: string;
      API_KEY: string;
      MODEL_NAME: string;
    };
  };
  API_ENDPOINTS: {
    OLLAMA: string;
    LMSTUDIO: string;
    SEARXNG: string;
  };
}

type RecursivePartial<T> = {
  [P in keyof T]?: RecursivePartial<T[P]>;
};

const loadConfig = () =>
  toml.parse(
    fs.readFileSync(path.join(__dirname, `../${configFileName}`), 'utf-8'),
  ) as any as Config;

export const getPort = () => loadConfig().GENERAL.PORT;

export const getSimilarityMeasure = () =>
  loadConfig().GENERAL.SIMILARITY_MEASURE;

export const getKeepAlive = () => loadConfig().GENERAL.KEEP_ALIVE;

export const getOpenaiApiKey = () => loadConfig().MODELS.OPENAI.API_KEY;

export const getGroqApiKey = () => loadConfig().MODELS.GROQ.API_KEY;

export const getAnthropicApiKey = () => loadConfig().MODELS.ANTHROPIC.API_KEY;

export const getGeminiApiKey = () => loadConfig().MODELS.GEMINI.API_KEY;

export const getSearxngApiEndpoint = () =>
  process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;

export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;

export const getLMStudioApiEndpoint = () => loadConfig().API_ENDPOINTS.LMSTUDIO;

export const getCustomOpenaiApiKey = () =>
  loadConfig().MODELS.CUSTOM_OPENAI.API_KEY;

export const getCustomOpenaiApiUrl = () =>
  loadConfig().MODELS.CUSTOM_OPENAI.API_URL;

export const getCustomOpenaiModelName = () =>
  loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;

// Recursively merge `update` into `current`: nested objects are merged
// key by key, defined leaves in `update` win, and everything absent from
// `update` is preserved.
const mergeConfigs = (current: any, update: any): any => {
  if (update === null || update === undefined) {
    return current;
  }

  if (typeof current !== 'object' || current === null) {
    return update;
  }

  const result = { ...current };

  for (const key in update) {
    if (Object.prototype.hasOwnProperty.call(update, key)) {
      const updateValue = update[key];

      if (
        typeof updateValue === 'object' &&
        updateValue !== null &&
        typeof result[key] === 'object' &&
        result[key] !== null
      ) {
        result[key] = mergeConfigs(result[key], updateValue);
      } else if (updateValue !== undefined) {
        result[key] = updateValue;
      }
    }
  }

  return result;
};

// Merge a partial config into the current one and write the result
// back to config.toml.
export const updateConfig = (config: RecursivePartial<Config>) => {
  const currentConfig = loadConfig();
  const mergedConfig = mergeConfigs(currentConfig, config);

  fs.writeFileSync(
    path.join(__dirname, `../${configFileName}`),
    toml.stringify(mergedConfig),
  );
};
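For reference, a hedged usage example of updateConfig; the endpoint value and import path are illustrative. Because mergeConfigs recurses into nested objects, only the leaves present in the partial update are overwritten.

import { updateConfig, getLMStudioApiEndpoint } from './config';

// Point the app at a local LM Studio instance without touching
// any other section of config.toml.
updateConfig({
  API_ENDPOINTS: {
    LMSTUDIO: 'http://localhost:1234', // hypothetical local endpoint
  },
});

console.log(getLMStudioApiEndpoint()); // -> 'http://localhost:1234'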