mirror of https://github.com/ItzCrazyKns/Perplexica.git
synced 2025-11-04 04:38:15 +00:00
	feat(ws-managers): implement better error handling
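The diff below wraps the body of `handleConnection` in a try/catch: the model lookup and socket wiring move inside the `try`, and any error thrown during connection setup now sends an `INTERNAL_SERVER_ERROR` frame to the client, closes the socket, and logs the error instead of leaving the connection in a broken state. On the frontend, the `useSocket` hook switches its "no models available" checks from `console.error` to `toast.error`, so the failure is shown to the user rather than buried in the devtools console.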
@@ -14,74 +14,87 @@ export const handleConnection = async (
   ws: WebSocket,
   request: IncomingMessage,
 ) => {
-  const searchParams = new URL(request.url, `http://${request.headers.host}`)
-    .searchParams;
+  try {
+    const searchParams = new URL(request.url, `http://${request.headers.host}`)
+      .searchParams;
 
-  const [chatModelProviders, embeddingModelProviders] = await Promise.all([
-    getAvailableChatModelProviders(),
-    getAvailableEmbeddingModelProviders(),
-  ]);
+    const [chatModelProviders, embeddingModelProviders] = await Promise.all([
+      getAvailableChatModelProviders(),
+      getAvailableEmbeddingModelProviders(),
+    ]);
 
-  const chatModelProvider =
-    searchParams.get('chatModelProvider') || Object.keys(chatModelProviders)[0];
-  const chatModel =
-    searchParams.get('chatModel') ||
-    Object.keys(chatModelProviders[chatModelProvider])[0];
+    const chatModelProvider =
+      searchParams.get('chatModelProvider') ||
+      Object.keys(chatModelProviders)[0];
+    const chatModel =
+      searchParams.get('chatModel') ||
+      Object.keys(chatModelProviders[chatModelProvider])[0];
 
-  const embeddingModelProvider =
-    searchParams.get('embeddingModelProvider') ||
-    Object.keys(embeddingModelProviders)[0];
-  const embeddingModel =
-    searchParams.get('embeddingModel') ||
-    Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
+    const embeddingModelProvider =
+      searchParams.get('embeddingModelProvider') ||
+      Object.keys(embeddingModelProviders)[0];
+    const embeddingModel =
+      searchParams.get('embeddingModel') ||
+      Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
 
-  let llm: BaseChatModel | undefined;
-  let embeddings: Embeddings | undefined;
+    let llm: BaseChatModel | undefined;
+    let embeddings: Embeddings | undefined;
 
-  if (
-    chatModelProviders[chatModelProvider] &&
-    chatModelProviders[chatModelProvider][chatModel] &&
-    chatModelProvider != 'custom_openai'
-  ) {
-    llm = chatModelProviders[chatModelProvider][chatModel] as
-      | BaseChatModel
-      | undefined;
-  } else if (chatModelProvider == 'custom_openai') {
-    llm = new ChatOpenAI({
-      modelName: chatModel,
-      openAIApiKey: searchParams.get('openAIApiKey'),
-      temperature: 0.7,
-      configuration: {
-        baseURL: searchParams.get('openAIBaseURL'),
-      },
-    });
-  }
+    if (
+      chatModelProviders[chatModelProvider] &&
+      chatModelProviders[chatModelProvider][chatModel] &&
+      chatModelProvider != 'custom_openai'
+    ) {
+      llm = chatModelProviders[chatModelProvider][chatModel] as
+        | BaseChatModel
+        | undefined;
+    } else if (chatModelProvider == 'custom_openai') {
+      llm = new ChatOpenAI({
+        modelName: chatModel,
+        openAIApiKey: searchParams.get('openAIApiKey'),
+        temperature: 0.7,
+        configuration: {
+          baseURL: searchParams.get('openAIBaseURL'),
+        },
+      });
+    }
 
-  if (
-    embeddingModelProviders[embeddingModelProvider] &&
-    embeddingModelProviders[embeddingModelProvider][embeddingModel]
-  ) {
-    embeddings = embeddingModelProviders[embeddingModelProvider][
-      embeddingModel
-    ] as Embeddings | undefined;
-  }
+    if (
+      embeddingModelProviders[embeddingModelProvider] &&
+      embeddingModelProviders[embeddingModelProvider][embeddingModel]
+    ) {
+      embeddings = embeddingModelProviders[embeddingModelProvider][
+        embeddingModel
+      ] as Embeddings | undefined;
+    }
 
-  if (!llm || !embeddings) {
-    ws.send(
-      JSON.stringify({
-        type: 'error',
-        data: 'Invalid LLM or embeddings model selected, please refresh the page and try again.',
-        key: 'INVALID_MODEL_SELECTED',
-      }),
-    );
-    ws.close();
-  }
+    if (!llm || !embeddings) {
+      ws.send(
+        JSON.stringify({
+          type: 'error',
+          data: 'Invalid LLM or embeddings model selected, please refresh the page and try again.',
+          key: 'INVALID_MODEL_SELECTED',
+        }),
+      );
+      ws.close();
+    }
 
-  ws.on(
-    'message',
-    async (message) =>
-      await handleMessage(message.toString(), ws, llm, embeddings),
-  );
+    ws.on(
+      'message',
+      async (message) =>
+        await handleMessage(message.toString(), ws, llm, embeddings),
+    );
 
-  ws.on('close', () => logger.debug('Connection closed'));
+    ws.on('close', () => logger.debug('Connection closed'));
+  } catch (err) {
+    ws.send(
+      JSON.stringify({
+        type: 'error',
+        data: 'Internal server error.',
+        key: 'INTERNAL_SERVER_ERROR',
+      }),
+    );
+    ws.close();
+    logger.error(err);
+  }
 };
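Both failure paths now reach the browser as JSON error frames with a stable `key` field. A minimal sketch of a client reacting to them (the payload shape is the one sent above; the endpoint URL and the handler bodies are illustrative assumptions):

// Hypothetical consumer of the error frames emitted by handleConnection.
// The payload shape ({ type, data, key }) comes from the server code above;
// the URL and the reactions are assumptions for illustration.
const ws = new WebSocket('ws://localhost:3001/');

ws.addEventListener('message', (event) => {
  const msg = JSON.parse(event.data.toString());
  if (msg.type !== 'error') return;

  if (msg.key === 'INVALID_MODEL_SELECTED') {
    // Stale or invalid model selection: prompt the user to refresh the page.
    console.warn(msg.data);
  } else if (msg.key === 'INTERNAL_SERVER_ERROR') {
    // Unexpected server failure: the server closes the socket after sending this.
    console.error(msg.data);
  }
});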
@@ -50,13 +50,13 @@ const useSocket = (url: string) => {
             !chatModelProviders ||
             Object.keys(chatModelProviders).length === 0
           )
-            return console.error('No chat models available');
+            return toast.error('No chat models available');
 
           if (
             !embeddingModelProviders ||
             Object.keys(embeddingModelProviders).length === 0
           )
-            return console.error('No embedding models available');
+            return toast.error('No embedding models available');
 
           chatModelProvider = Object.keys(chatModelProviders)[0];
           chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
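Switching from `console.error` to `toast.error` surfaces a missing-provider configuration in the UI instead of only in the devtools console. A small sketch of the same early-return guard (assuming a sonner-style `toast` API; `ensureProviders` is a hypothetical helper, not part of this diff):

import { toast } from 'sonner'; // assumption: a sonner-style toast API

// Hypothetical helper mirroring the guards in useSocket: notify the user
// and bail out when a provider map is empty or missing.
const ensureProviders = (
  providers: Record<string, unknown> | undefined,
  label: string,
): boolean => {
  if (!providers || Object.keys(providers).length === 0) {
    toast.error(`No ${label} models available`);
    return false;
  }
  return true;
};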