Compare commits

...

4 Commits

Author SHA1 Message Date
ItzCrazyKns
2f20d845c8 feat(image-search): Use baseURL from env 2024-04-17 20:33:29 +05:30
ItzCrazyKns
7a7eafb8e7 Merge branch 'master' into feat/ollama-support 2024-04-17 19:58:53 +05:30
ItzCrazyKns
043f66b767 feat(readme): Update installation steps 2024-04-17 19:42:08 +05:30
ItzCrazyKns
811822c03b feat(agents): use ollama models 2024-04-17 10:22:20 +05:30
9 changed files with 69 additions and 43 deletions

View File

@@ -1,5 +1,5 @@
PORT=3001 PORT=3001
OPENAI_API_KEY= OLLAMA_URL=http://localhost:11434 # url of the ollama server
SIMILARITY_MEASURE=cosine # cosine or dot SIMILARITY_MEASURE=cosine # cosine or dot
SEARXNG_API_URL= # no need to fill this if using docker SEARXNG_API_URL= # no need to fill this if using docker
MODEL_NAME=gpt-3.5-turbo MODEL_NAME=llama2

View File

@@ -51,14 +51,15 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker.
2. Clone the Perplexica repository: 2. Clone the Perplexica repository:
```bash ```bash
git clone https://github.com/ItzCrazyKns/Perplexica.git git clone -b feat/ollama-support https://github.com/ItzCrazyKns/Perplexica.git
``` ```
3. After cloning, navigate to the directory containing the project files. 3. After cloning, navigate to the directory containing the project files.
4. Rename the `.env.example` file to `.env`. For Docker setups, you need only fill in the following fields: 4. Rename the `.env.example` file to `.env`. For Docker setups, you need only fill in the following fields:
- `OPENAI_API_KEY` - `OLLAMA_URL` (It should be the URL where Ollama is running; it is also filled by default but you need to replace it if your Ollama URL is different.)
- `MODEL_NAME` (This is filled by default; you can change it if you want to use a different model.)
- `SIMILARITY_MEASURE` (This is filled by default; you can leave it as is if you are unsure about it.) - `SIMILARITY_MEASURE` (This is filled by default; you can leave it as is if you are unsure about it.)
5. Ensure you are in the directory containing the `docker-compose.yaml` file and execute: 5. Ensure you are in the directory containing the `docker-compose.yaml` file and execute:

View File

@@ -9,7 +9,9 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const embeddings = new OpenAIEmbeddings({ const embeddings = new OllamaEmbeddings({
modelName: 'text-embedding-3-large', model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicAcademicSearchRetrieverPrompt = ` const basicAcademicSearchRetrieverPrompt = `

View File

@@ -4,15 +4,16 @@ import {
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts'; import { PromptTemplate } from '@langchain/core/prompts';
import { OpenAI } from '@langchain/openai'; import { Ollama } from '@langchain/community/llms/ollama';
import formatChatHistoryAsString from '../utils/formatHistory'; import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages'; import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const imageSearchChainPrompt = ` const imageSearchChainPrompt = `

View File

@@ -9,7 +9,9 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const embeddings = new OpenAIEmbeddings({ const embeddings = new OllamaEmbeddings({
modelName: 'text-embedding-3-large', model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicRedditSearchRetrieverPrompt = ` const basicRedditSearchRetrieverPrompt = `

View File

@@ -9,7 +9,9 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const embeddings = new OpenAIEmbeddings({ const embeddings = new OllamaEmbeddings({
modelName: 'text-embedding-3-large', model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicSearchRetrieverPrompt = ` const basicSearchRetrieverPrompt = `

View File

@@ -9,7 +9,8 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -17,14 +18,16 @@ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
import formatChatHistoryAsString from '../utils/formatHistory'; import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicWolframAlphaSearchRetrieverPrompt = ` const basicWolframAlphaSearchRetrieverPrompt = `

View File

@@ -4,13 +4,14 @@ import {
MessagesPlaceholder, MessagesPlaceholder,
} from '@langchain/core/prompts'; } from '@langchain/core/prompts';
import { RunnableSequence } from '@langchain/core/runnables'; import { RunnableSequence } from '@langchain/core/runnables';
import { ChatOpenAI } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import type { StreamEvent } from '@langchain/core/tracers/log_stream'; import type { StreamEvent } from '@langchain/core/tracers/log_stream';
import eventEmitter from 'events'; import eventEmitter from 'events';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });

View File

@@ -9,7 +9,9 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const embeddings = new OpenAIEmbeddings({ const embeddings = new OllamaEmbeddings({
modelName: 'text-embedding-3-large', model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicYoutubeSearchRetrieverPrompt = ` const basicYoutubeSearchRetrieverPrompt = `