mirror of https://github.com/ItzCrazyKns/Perplexica.git
synced 2025-11-03 20:28:14 +00:00

	feat(providers): separate each provider

187  src/lib/providers.ts  (deleted)
@@ -1,187 +0,0 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { HuggingFaceTransformersEmbeddings } from './huggingfaceTransformer';
import {
  getGroqApiKey,
  getOllamaApiEndpoint,
  getOpenaiApiKey,
} from '../config';
import logger from '../utils/logger';

export const getAvailableChatModelProviders = async () => {
  const openAIApiKey = getOpenaiApiKey();
  const groqApiKey = getGroqApiKey();
  const ollamaEndpoint = getOllamaApiEndpoint();

  const models = {};

  if (openAIApiKey) {
    try {
      models['openai'] = {
        'GPT-3.5 turbo': new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-3.5-turbo',
          temperature: 0.7,
        }),
        'GPT-4': new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4',
          temperature: 0.7,
        }),
        'GPT-4 turbo': new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4-turbo',
          temperature: 0.7,
        }),
        'GPT-4 omni': new ChatOpenAI({
          openAIApiKey,
          modelName: 'gpt-4o',
          temperature: 0.7,
        }),
      };
    } catch (err) {
      logger.error(`Error loading OpenAI models: ${err}`);
    }
  }

  if (groqApiKey) {
    try {
      models['groq'] = {
        'LLaMA3 8b': new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama3-8b-8192',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
        'LLaMA3 70b': new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'llama3-70b-8192',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
        'Mixtral 8x7b': new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'mixtral-8x7b-32768',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
        'Gemma 7b': new ChatOpenAI(
          {
            openAIApiKey: groqApiKey,
            modelName: 'gemma-7b-it',
            temperature: 0.7,
          },
          {
            baseURL: 'https://api.groq.com/openai/v1',
          },
        ),
      };
    } catch (err) {
      logger.error(`Error loading Groq models: ${err}`);
    }
  }

  if (ollamaEndpoint) {
    try {
      const response = await fetch(`${ollamaEndpoint}/api/tags`, {
        headers: {
          'Content-Type': 'application/json',
        },
      });

      const { models: ollamaModels } = (await response.json()) as any;

      models['ollama'] = ollamaModels.reduce((acc, model) => {
        acc[model.model] = new ChatOllama({
          baseUrl: ollamaEndpoint,
          model: model.model,
          temperature: 0.7,
        });
        return acc;
      }, {});
    } catch (err) {
      logger.error(`Error loading Ollama models: ${err}`);
    }
  }

  models['custom_openai'] = {};

  return models;
};

export const getAvailableEmbeddingModelProviders = async () => {
  const openAIApiKey = getOpenaiApiKey();
  const ollamaEndpoint = getOllamaApiEndpoint();

  const models = {};

  if (openAIApiKey) {
    try {
      models['openai'] = {
        'Text embedding 3 small': new OpenAIEmbeddings({
          openAIApiKey,
          modelName: 'text-embedding-3-small',
        }),
        'Text embedding 3 large': new OpenAIEmbeddings({
          openAIApiKey,
          modelName: 'text-embedding-3-large',
        }),
      };
    } catch (err) {
      logger.error(`Error loading OpenAI embeddings: ${err}`);
    }
  }

  if (ollamaEndpoint) {
    try {
      const response = await fetch(`${ollamaEndpoint}/api/tags`, {
        headers: {
          'Content-Type': 'application/json',
        },
      });

      const { models: ollamaModels } = (await response.json()) as any;

      models['ollama'] = ollamaModels.reduce((acc, model) => {
        acc[model.model] = new OllamaEmbeddings({
          baseUrl: ollamaEndpoint,
          model: model.model,
        });
        return acc;
      }, {});
    } catch (err) {
      logger.error(`Error loading Ollama embeddings: ${err}`);
    }
  }

  try {
    models['local'] = {
      'BGE Small': new HuggingFaceTransformersEmbeddings({
        modelName: 'Xenova/bge-small-en-v1.5',
      }),
      'GTE Small': new HuggingFaceTransformersEmbeddings({
        modelName: 'Xenova/gte-small',
      }),
      'Bert Multilingual': new HuggingFaceTransformersEmbeddings({
        modelName: 'Xenova/bert-base-multilingual-uncased',
      }),
    };
  } catch (err) {
    logger.error(`Error loading local embeddings: ${err}`);
  }

  return models;
};
							
								
								
									
57  src/lib/providers/groq.ts  Normal file
@@ -0,0 +1,57 @@
import { ChatOpenAI } from '@langchain/openai';
import { getGroqApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadGroqChatModels = async () => {
  const groqApiKey = getGroqApiKey();

  try {
    const chatModels = {
      'LLaMA3 8b': new ChatOpenAI(
        {
          openAIApiKey: groqApiKey,
          modelName: 'llama3-8b-8192',
          temperature: 0.7,
        },
        {
          baseURL: 'https://api.groq.com/openai/v1',
        },
      ),
      'LLaMA3 70b': new ChatOpenAI(
        {
          openAIApiKey: groqApiKey,
          modelName: 'llama3-70b-8192',
          temperature: 0.7,
        },
        {
          baseURL: 'https://api.groq.com/openai/v1',
        },
      ),
      'Mixtral 8x7b': new ChatOpenAI(
        {
          openAIApiKey: groqApiKey,
          modelName: 'mixtral-8x7b-32768',
          temperature: 0.7,
        },
        {
          baseURL: 'https://api.groq.com/openai/v1',
        },
      ),
      'Gemma 7b': new ChatOpenAI(
        {
          openAIApiKey: groqApiKey,
          modelName: 'gemma-7b-it',
          temperature: 0.7,
        },
        {
          baseURL: 'https://api.groq.com/openai/v1',
        },
      ),
    };

    return chatModels;
  } catch (err) {
    logger.error(`Error loading Groq models: ${err}`);
    return {};
  }
};
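Worth noting about the new loader contract: the old monolith only attached a provider key when its config value was present, while loaders like `loadGroqChatModels` above always run and fall back to returning `{}` from the catch block. Assuming `ChatOpenAI` construction throws when no API key is available, a missing key now surfaces as an empty record rather than an absent key — a hypothetical sketch of the difference, not part of this commit:

import { getAvailableChatModelProviders } from './lib/providers';

// Old monolith: providers.groq === undefined when no Groq key is configured.
// New loaders: every registered provider is invoked, and a loader that fails
// returns an empty record, so providers.groq deep-equals {}.
const hasGroq = async () => {
  const providers = await getAvailableChatModelProviders();
  return Object.keys(providers['groq'] ?? {}).length > 0; // works for both shapes
};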
							
								
								
									
36  src/lib/providers/index.ts  Normal file
@@ -0,0 +1,36 @@
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModel } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModel } from './openai';
import { loadTransformersEmbeddingsModel } from './transformers';

const chatModelProviders = {
  openai: loadOpenAIChatModels,
  groq: loadGroqChatModels,
  ollama: loadOllamaChatModels,
};

const embeddingModelProviders = {
  openai: loadOpenAIEmbeddingsModel,
  local: loadTransformersEmbeddingsModel,
  ollama: loadOllamaEmbeddingsModel,
};

export const getAvailableChatModelProviders = async () => {
  const models = {};

  for (const provider in chatModelProviders) {
    models[provider] = await chatModelProviders[provider]();
  }

  return models;
};

export const getAvailableEmbeddingModelProviders = async () => {
  const models = {};

  for (const provider in embeddingModelProviders) {
    models[provider] = await embeddingModelProviders[provider]();
  }

  return models;
};
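The registry above keeps the public surface (`getAvailableChatModelProviders` / `getAvailableEmbeddingModelProviders`) identical to the deleted monolith while delegating to one loader per provider. A minimal consumer sketch — the selection logic and the `preferred` parameter are hypothetical, not part of this commit:

import { getAvailableChatModelProviders } from './lib/providers';

// Pick the first model offered by a preferred provider, falling back to
// whichever provider loaded anything at all.
const pickChatModel = async (preferred = 'openai') => {
  const providers = await getAvailableChatModelProviders();
  for (const provider of [preferred, ...Object.keys(providers)]) {
    const models = providers[provider] ?? {};
    const firstName = Object.keys(models)[0];
    if (firstName) return models[firstName];
  }
  return undefined; // no provider produced a usable model
};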
							
								
								
									
59  src/lib/providers/ollama.ts  Normal file
@@ -0,0 +1,59 @@
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { getOllamaApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import { ChatOllama } from '@langchain/community/chat_models/ollama';

export const loadOllamaChatModels = async () => {
  const ollamaEndpoint = getOllamaApiEndpoint();

  try {
    const response = await fetch(`${ollamaEndpoint}/api/tags`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const { models: ollamaModels } = (await response.json()) as any;

    const chatModels = ollamaModels.reduce((acc, model) => {
      acc[model.model] = new ChatOllama({
        baseUrl: ollamaEndpoint,
        model: model.model,
        temperature: 0.7,
      });
      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    logger.error(`Error loading Ollama models: ${err}`);
    return {};
  }
};

export const loadOllamaEmbeddingsModel = async () => {
  const ollamaEndpoint = getOllamaApiEndpoint();

  try {
    const response = await fetch(`${ollamaEndpoint}/api/tags`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const { models: ollamaModels } = (await response.json()) as any;

    const embeddingsModels = ollamaModels.reduce((acc, model) => {
      acc[model.model] = new OllamaEmbeddings({
        baseUrl: ollamaEndpoint,
        model: model.model,
      });
      return acc;
    }, {});

    return embeddingsModels;
  } catch (err) {
    logger.error(`Error loading Ollama embeddings model: ${err}`);
    return {};
  }
};
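Both Ollama loaders discover models at runtime from the endpoint's `/api/tags` route and key the result by each entry's `model` field. A sketch of the response shape the `reduce` calls rely on, abridged to the fields this code actually reads (an assumption based on Ollama's documented tags response):

// Assumed shape of GET {ollamaEndpoint}/api/tags, as consumed above.
interface OllamaTagsResponse {
  models: Array<{
    model: string; // identifier such as 'llama3:8b'; used as both key and model name
    name?: string; // also returned by the API, unused by these loaders
  }>;
}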
							
								
								
									
59  src/lib/providers/openai.ts  Normal file
@@ -0,0 +1,59 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import { getOpenaiApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadOpenAIChatModels = async () => {
  const openAIApiKey = getOpenaiApiKey();

  try {
    const chatModels = {
      'GPT-3.5 turbo': new ChatOpenAI({
        openAIApiKey,
        modelName: 'gpt-3.5-turbo',
        temperature: 0.7,
      }),
      'GPT-4': new ChatOpenAI({
        openAIApiKey,
        modelName: 'gpt-4',
        temperature: 0.7,
      }),
      'GPT-4 turbo': new ChatOpenAI({
        openAIApiKey,
        modelName: 'gpt-4-turbo',
        temperature: 0.7,
      }),
      'GPT-4 omni': new ChatOpenAI({
        openAIApiKey,
        modelName: 'gpt-4o',
        temperature: 0.7,
      }),
    };

    return chatModels;
  } catch (err) {
    logger.error(`Error loading OpenAI models: ${err}`);
    return {};
  }
};

export const loadOpenAIEmbeddingsModel = async () => {
  const openAIApiKey = getOpenaiApiKey();

  try {
    const embeddingModels = {
      'Text embedding 3 small': new OpenAIEmbeddings({
        openAIApiKey,
        modelName: 'text-embedding-3-small',
      }),
      'Text embedding 3 large': new OpenAIEmbeddings({
        openAIApiKey,
        modelName: 'text-embedding-3-large',
      }),
    };

    return embeddingModels;
  } catch (err) {
    logger.error(`Error loading OpenAI embeddings model: ${err}`);
    return {};
  }
};
							
								
								
									
23  src/lib/providers/transformers.ts  Normal file
@@ -0,0 +1,23 @@
import logger from '../../utils/logger';
import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';

export const loadTransformersEmbeddingsModel = async () => {
  try {
    const embeddingModels = {
      'BGE Small': new HuggingFaceTransformersEmbeddings({
        modelName: 'Xenova/bge-small-en-v1.5',
      }),
      'GTE Small': new HuggingFaceTransformersEmbeddings({
        modelName: 'Xenova/gte-small',
      }),
      'Bert Multilingual': new HuggingFaceTransformersEmbeddings({
        modelName: 'Xenova/bert-base-multilingual-uncased',
      }),
    };

    return embeddingModels;
  } catch (err) {
    logger.error(`Error loading Transformers embeddings model: ${err}`);
    return {};
  }
};
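The payoff of this split is that adding a provider no longer means editing a 187-line function: a new module exports a loader with the same `async () => record-of-models` shape and gets one registry entry in `index.ts`. A hypothetical sketch — neither `anthropic.ts` nor `loadAnthropicChatModels` exists in this commit:

// src/lib/providers/anthropic.ts (hypothetical)
import logger from '../../utils/logger';

export const loadAnthropicChatModels = async () => {
  try {
    // Construct and return a { 'Display name': chatModelInstance } record here.
    return {};
  } catch (err) {
    logger.error(`Error loading Anthropic models: ${err}`);
    return {};
  }
};

// src/lib/providers/index.ts — the registry then gains one entry:
//   const chatModelProviders = { ...existing, anthropic: loadAnthropicChatModels };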