Compare commits


1 Commit

Author   SHA1          Message                             Date
Rami     45a02477a0    Merge 9a332e79e4 into 72450b9217    2025-04-11 16:08:01 +00:00
13 changed files with 36 additions and 98 deletions

View File

@@ -8,7 +8,6 @@ import {
   getOllamaApiEndpoint,
   getOpenaiApiKey,
   getDeepseekApiKey,
-  getLMStudioApiEndpoint,
   updateConfig,
 } from '@/lib/config';
 import {
@@ -52,7 +51,6 @@ export const GET = async (req: Request) => {
   config['openaiApiKey'] = getOpenaiApiKey();
   config['ollamaApiUrl'] = getOllamaApiEndpoint();
-  config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
   config['anthropicApiKey'] = getAnthropicApiKey();
   config['groqApiKey'] = getGroqApiKey();
   config['geminiApiKey'] = getGeminiApiKey();
@@ -95,9 +93,6 @@ export const POST = async (req: Request) => {
       DEEPSEEK: {
         API_KEY: config.deepseekApiKey,
       },
-      LM_STUDIO: {
-        API_URL: config.lmStudioApiUrl,
-      },
       CUSTOM_OPENAI: {
         API_URL: config.customOpenaiApiUrl,
         API_KEY: config.customOpenaiApiKey,
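For orientation: the POST handler above writes the flat keys sent by the settings UI into nested config sections. A minimal TypeScript sketch of that mapping, limited to the sections visible in this hunk (any shape outside the hunk is an assumption):

// Sketch only: maps the flat settings payload onto nested config sections,
// mirroring the DEEPSEEK / LM_STUDIO / CUSTOM_OPENAI shape shown above.
interface SettingsPayload {
  deepseekApiKey: string;
  lmStudioApiUrl: string;
  customOpenaiApiUrl: string;
  customOpenaiApiKey: string;
}

const toConfigSections = (config: SettingsPayload) => ({
  MODELS: {
    DEEPSEEK: { API_KEY: config.deepseekApiKey },
    LM_STUDIO: { API_URL: config.lmStudioApiUrl },
    CUSTOM_OPENAI: {
      API_URL: config.customOpenaiApiUrl,
      API_KEY: config.customOpenaiApiKey,
    },
  },
});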

View File

@@ -21,7 +21,6 @@ interface SettingsType {
   anthropicApiKey: string;
   geminiApiKey: string;
   ollamaApiUrl: string;
-  lmStudioApiUrl: string;
   deepseekApiKey: string;
   customOpenaiApiKey: string;
   customOpenaiApiUrl: string;
@@ -549,10 +548,8 @@ const Page = () => {
   options={Object.keys(config.chatModelProviders).map(
     (provider) => ({
       value: provider,
-      label:
-        (PROVIDER_METADATA as any)[provider]?.displayName ||
-        provider.charAt(0).toUpperCase() +
-          provider.slice(1),
+      label: (PROVIDER_METADATA as any)[provider]?.displayName ||
+        provider.charAt(0).toUpperCase() + provider.slice(1),
     }),
   )}
 />
@@ -692,10 +689,8 @@ const Page = () => {
   options={Object.keys(config.embeddingModelProviders).map(
     (provider) => ({
       value: provider,
-      label:
-        (PROVIDER_METADATA as any)[provider]?.displayName ||
-        provider.charAt(0).toUpperCase() +
-          provider.slice(1),
+      label: (PROVIDER_METADATA as any)[provider]?.displayName ||
+        provider.charAt(0).toUpperCase() + provider.slice(1),
     }),
   )}
 />
@@ -862,25 +857,6 @@ const Page = () => {
       onSave={(value) => saveConfig('deepseekApiKey', value)}
     />
   </div>
-  <div className="flex flex-col space-y-1">
-    <p className="text-black/70 dark:text-white/70 text-sm">
-      LM Studio API URL
-    </p>
-    <Input
-      type="text"
-      placeholder="LM Studio API URL"
-      value={config.lmStudioApiUrl}
-      isSaving={savingStates['lmStudioApiUrl']}
-      onChange={(e) => {
-        setConfig((prev) => ({
-          ...prev!,
-          lmStudioApiUrl: e.target.value,
-        }));
-      }}
-      onSave={(value) => saveConfig('lmStudioApiUrl', value)}
-    />
-  </div>
 </div>
 </SettingsSection>
 </div>
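Both dropdowns build their labels the same way: prefer the provider's registered displayName, otherwise capitalize the raw key. A standalone sketch of that fallback, assuming PROVIDER_METADATA maps provider keys to { key, displayName } records:

// Label fallback used by both provider dropdowns above.
const providerLabel = (
  metadata: Record<string, { displayName?: string }>,
  provider: string,
): string =>
  metadata[provider]?.displayName ||
  provider.charAt(0).toUpperCase() + provider.slice(1);

// providerLabel({ openai: { displayName: 'OpenAI' } }, 'openai') -> 'OpenAI'
// providerLabel({}, 'lmstudio') -> 'Lmstudio' (unregistered key, capitalized)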

View File

@@ -60,7 +60,7 @@ const loadConfig = () => {
       fs.readFileSync(path.join(process.cwd(), `${configFileName}`), 'utf-8'),
     ) as any as Config;
   }
 
-  // Client-side fallback - settings will be loaded via API
+
   return {} as Config;
 };
@@ -94,8 +94,7 @@ export const getCustomOpenaiApiUrl = () =>
 export const getCustomOpenaiModelName = () =>
   loadConfig().MODELS.CUSTOM_OPENAI.MODEL_NAME;
 
-export const getLMStudioApiEndpoint = () =>
-  loadConfig().MODELS.LM_STUDIO.API_URL;
+export const getLMStudioApiEndpoint = () => loadConfig().MODELS.LM_STUDIO.API_URL;
 
 const mergeConfigs = (current: any, update: any): any => {
   if (update === null || update === undefined) {
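The hunk cuts off at the opening guard of mergeConfigs. A hedged sketch of a recursive overlay consistent with that guard; everything past the first branch is an assumption, not the repository's code:

const mergeConfigs = (current: any, update: any): any => {
  if (update === null || update === undefined) {
    return current; // nothing to merge, keep the existing value
  }
  if (typeof current !== 'object' || current === null || typeof update !== 'object') {
    return update; // leaf value: the update wins
  }
  const result: any = { ...current };
  for (const key of Object.keys(update)) {
    result[key] = mergeConfigs(current[key], update[key]);
  }
  return result;
};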

View File

@@ -4,7 +4,7 @@ import { getAnthropicApiKey } from '../config';
 export const PROVIDER_INFO = {
   key: 'anthropic',
-  displayName: 'Anthropic',
+  displayName: 'Anthropic'
 };
 
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';

View File

@@ -5,7 +5,7 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 export const PROVIDER_INFO = {
   key: 'deepseek',
-  displayName: 'Deepseek AI',
+  displayName: 'Deepseek AI'
 };
 
 const deepseekChatModels: Record<string, string>[] = [

View File

@@ -7,7 +7,7 @@ import { ChatModel, EmbeddingModel } from '.';
 export const PROVIDER_INFO = {
   key: 'gemini',
-  displayName: 'Google Gemini',
+  displayName: 'Google Gemini'
 };
 
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';

View File

@@ -4,7 +4,7 @@ import { ChatModel } from '.';
 export const PROVIDER_INFO = {
   key: 'groq',
-  displayName: 'Groq',
+  displayName: 'Groq'
 };
 
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';

View File

@@ -1,45 +1,19 @@
 import { Embeddings } from '@langchain/core/embeddings';
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import {
-  loadOpenAIChatModels,
-  loadOpenAIEmbeddingModels,
-  PROVIDER_INFO as OpenAIInfo,
-  PROVIDER_INFO,
-} from './openai';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingModels, PROVIDER_INFO as OpenAIInfo, PROVIDER_INFO } from './openai';
 import {
   getCustomOpenaiApiKey,
   getCustomOpenaiApiUrl,
   getCustomOpenaiModelName,
 } from '../config';
 import { ChatOpenAI } from '@langchain/openai';
-import {
-  loadOllamaChatModels,
-  loadOllamaEmbeddingModels,
-  PROVIDER_INFO as OllamaInfo,
-} from './ollama';
+import { loadOllamaChatModels, loadOllamaEmbeddingModels, PROVIDER_INFO as OllamaInfo } from './ollama';
 import { loadGroqChatModels, PROVIDER_INFO as GroqInfo } from './groq';
-import {
-  loadAnthropicChatModels,
-  PROVIDER_INFO as AnthropicInfo,
-} from './anthropic';
-import {
-  loadGeminiChatModels,
-  loadGeminiEmbeddingModels,
-  PROVIDER_INFO as GeminiInfo,
-} from './gemini';
-import {
-  loadTransformersEmbeddingsModels,
-  PROVIDER_INFO as TransformersInfo,
-} from './transformers';
-import {
-  loadDeepseekChatModels,
-  PROVIDER_INFO as DeepseekInfo,
-} from './deepseek';
-import {
-  loadLMStudioChatModels,
-  loadLMStudioEmbeddingsModels,
-  PROVIDER_INFO as LMStudioInfo,
-} from './lmstudio';
+import { loadAnthropicChatModels, PROVIDER_INFO as AnthropicInfo } from './anthropic';
+import { loadGeminiChatModels, loadGeminiEmbeddingModels, PROVIDER_INFO as GeminiInfo } from './gemini';
+import { loadTransformersEmbeddingsModels, PROVIDER_INFO as TransformersInfo } from './transformers';
+import { loadDeepseekChatModels, PROVIDER_INFO as DeepseekInfo } from './deepseek';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels, PROVIDER_INFO as LMStudioInfo } from './lmstudio';
 
 export const PROVIDER_METADATA = {
   openai: OpenAIInfo,
@@ -52,8 +26,8 @@ export const PROVIDER_METADATA = {
   lmstudio: LMStudioInfo,
   custom_openai: {
     key: 'custom_openai',
-    displayName: 'Custom OpenAI',
-  },
+    displayName: 'Custom OpenAI'
+  }
 };
 
 export interface ChatModel {
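For context, every provider module exports a PROVIDER_INFO constant; this index aliases each one on import and collects them into the PROVIDER_METADATA registry keyed by provider id. A reduced sketch of the pattern, with two stand-in entries:

interface ProviderInfo {
  key: string;
  displayName: string;
}

// Stand-ins for the aliased imports above (OpenAIInfo, LMStudioInfo, ...).
const OpenAIInfo: ProviderInfo = { key: 'openai', displayName: 'OpenAI' };
const LMStudioInfo: ProviderInfo = { key: 'lmstudio', displayName: 'LM Studio' };

export const PROVIDER_METADATA: Record<string, ProviderInfo> = {
  openai: OpenAIInfo,
  lmstudio: LMStudioInfo,
  custom_openai: { key: 'custom_openai', displayName: 'Custom OpenAI' },
};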

View File

@@ -4,7 +4,7 @@ import { ChatModel, EmbeddingModel } from '.';
 export const PROVIDER_INFO = {
   key: 'lmstudio',
-  displayName: 'LM Studio',
+  displayName: 'LM Studio'
 };
 
 import { ChatOpenAI } from '@langchain/openai';
 import { OpenAIEmbeddings } from '@langchain/openai';
@@ -16,12 +16,14 @@ interface LMStudioModel {
   name?: string;
 }
 
-const ensureV1Endpoint = (endpoint: string): string =>
+const ensureV1Endpoint = (endpoint: string): string =>
   endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;
 
 const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
   try {
+    const keepAlive = getKeepAlive();
     await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
       headers: { 'Content-Type': 'application/json' },
     });
     return true;
@@ -32,12 +34,14 @@ const checkServerAvailability = async (endpoint: string): Promise<boolean> => {
 export const loadLMStudioChatModels = async () => {
   const endpoint = getLMStudioApiEndpoint();
+  const keepAlive = getKeepAlive();
 
   if (!endpoint) return {};
 
-  if (!(await checkServerAvailability(endpoint))) return {};
+  if (!await checkServerAvailability(endpoint)) return {};
 
   try {
     const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
       headers: { 'Content-Type': 'application/json' },
     });
@@ -54,7 +58,7 @@ export const loadLMStudioChatModels = async () => {
         modelName: model.id,
         temperature: 0.7,
         streaming: true,
-        maxRetries: 3,
+        maxRetries: 3
       }) as unknown as BaseChatModel,
     };
   });
@@ -68,12 +72,14 @@ export const loadLMStudioChatModels = async () => {
 export const loadLMStudioEmbeddingsModels = async () => {
   const endpoint = getLMStudioApiEndpoint();
+  const keepAlive = getKeepAlive();
 
   if (!endpoint) return {};
 
-  if (!(await checkServerAvailability(endpoint))) return {};
+  if (!await checkServerAvailability(endpoint)) return {};
 
   try {
     const response = await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
+      timeout: parseInt(keepAlive) * 1000 || 5000,
       headers: { 'Content-Type': 'application/json' },
     });
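Both loaders share the same guard sequence: normalize the endpoint to a /v1 base, probe GET /models, and bail out with an empty map if the server is unreachable. A self-contained sketch of the two helpers; passing keepAlive as a parameter instead of calling getKeepAlive() inside is a simplification of what the diff shows:

import axios from 'axios';

// Normalize a user-supplied endpoint so it always ends in /v1.
const ensureV1Endpoint = (endpoint: string): string =>
  endpoint.endsWith('/v1') ? endpoint : `${endpoint}/v1`;

// Probe GET {endpoint}/v1/models; any successful response means the server is up.
// keepAlive is assumed to be a seconds value taken from config.
const checkServerAvailability = async (
  endpoint: string,
  keepAlive: string,
): Promise<boolean> => {
  try {
    await axios.get(`${ensureV1Endpoint(endpoint)}/models`, {
      timeout: parseInt(keepAlive) * 1000 || 5000,
      headers: { 'Content-Type': 'application/json' },
    });
    return true;
  } catch {
    return false;
  }
};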

View File

@@ -4,7 +4,7 @@ import { ChatModel, EmbeddingModel } from '.';
 export const PROVIDER_INFO = {
   key: 'ollama',
-  displayName: 'Ollama',
+  displayName: 'Ollama'
 };
 
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';

View File

@@ -4,7 +4,7 @@ import { ChatModel, EmbeddingModel } from '.';
 export const PROVIDER_INFO = {
   key: 'openai',
-  displayName: 'OpenAI',
+  displayName: 'OpenAI'
 };
 
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { Embeddings } from '@langchain/core/embeddings';
@@ -30,18 +30,6 @@ const openaiChatModels: Record<string, string>[] = [
     displayName: 'GPT-4 omni mini',
     key: 'gpt-4o-mini',
   },
-  {
-    displayName: 'GPT 4.1 nano',
-    key: 'gpt-4.1-nano',
-  },
-  {
-    displayName: 'GPT 4.1 mini',
-    key: 'gpt-4.1-mini',
-  },
-  {
-    displayName: 'GPT 4.1',
-    key: 'gpt-4.1',
-  },
 ];
 
 const openaiEmbeddingModels: Record<string, string>[] = [

View File

@@ -2,7 +2,7 @@ import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 export const PROVIDER_INFO = {
   key: 'transformers',
-  displayName: 'Hugging Face',
+  displayName: 'Hugging Face'
 };
 
 export const loadTransformersEmbeddingsModels = async () => {

View File

@@ -64,7 +64,7 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
       const splittedText = await splitter.splitText(parsedText);
 
       const title = res.data
         .toString('utf8')
-        .match(/<title.*>(.*?)<\/title>/)?.[1];
+        .match(/<title>(.*?)<\/title>/)?.[1];
 
       const linkDocs = splittedText.map((text) => {
         return new Document({
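The two title regexes differ only on <title> tags that carry attributes; a quick illustration:

// The removed pattern tolerates attributes on the tag; the added one does not.
const html = '<title data-rh="true">Example Domain</title>';

html.match(/<title.*>(.*?)<\/title>/)?.[1]; // 'Example Domain'
html.match(/<title>(.*?)<\/title>/)?.[1];   // undefined: tag has attributes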