Compare commits

...

15 Commits

23 changed files with 318 additions and 53 deletions

View File

@ -114,6 +114,11 @@ jobs:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract version from release tag
if: github.event_name == 'release'
id: version
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Create and push multi-arch manifest for main
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |

View File

@ -32,7 +32,8 @@ The API accepts a JSON object in the request body, where you define the focus mo
"history": [
["human", "Hi, how are you?"],
["assistant", "I am doing well, how can I help you today?"]
]
],
"stream": false
}
```
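For orientation, a rough TypeScript sketch of a non-streaming call follows. The `/api/search` path is inferred from the route file further down in this comparison, and the field values are illustrative only.

```typescript
// Hypothetical non-streaming call: resolves once the full answer is ready.
async function search(query: string) {
  const res = await fetch('/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      focusMode: 'webSearch', // illustrative focus mode
      query,
      history: [],
      stream: false, // the default; shown here for clarity
    }),
  });
  // Expected shape per the response section below: { message, sources }
  return res.json();
}
```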
@ -71,11 +72,13 @@ The API accepts a JSON object in the request body, where you define the focus mo
]
```
- **`stream`** (boolean, optional): When set to `true`, enables streaming responses. Default is `false`.
### Response
The response from the API includes both the final message and the sources used to generate that message.
#### Example Response
#### Standard Response (stream: false)
```json
{
@ -100,6 +103,28 @@ The response from the API includes both the final message and the sources used t
}
```
#### Streaming Response (stream: true)
When streaming is enabled, the API returns a stream of newline-delimited JSON objects; each line contains a complete, valid JSON object. The response is sent with a `Content-Type: text/event-stream` header.
Example of streamed response objects:
```
{"type":"init","data":"Stream connected"}
{"type":"sources","data":[{"pageContent":"...","metadata":{"title":"...","url":"..."}},...]}
{"type":"response","data":"Perplexica is an "}
{"type":"response","data":"innovative, open-source "}
{"type":"response","data":"AI-powered search engine..."}
{"type":"done"}
```
Clients should process each line as a separate JSON object. The different message types include:
- **`init`**: Initial connection message
- **`sources`**: All sources used for the response
- **`response`**: Chunks of the generated answer text
- **`done`**: Indicates the stream is complete
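To make the protocol concrete, here is a minimal TypeScript client sketch that reads the newline-delimited stream. The `/api/search` path and the `webSearch` focus mode are assumptions drawn from the code elsewhere in this comparison.

```typescript
// Minimal NDJSON reader sketch (browser or Node 18+ fetch).
async function streamSearch(query: string): Promise<string> {
  const res = await fetch('/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ focusMode: 'webSearch', query, stream: true }),
  });

  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  let answer = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep any partial line for the next chunk

    for (const line of lines) {
      if (!line.trim()) continue;
      const event = JSON.parse(line);
      if (event.type === 'response') answer += event.data;
      else if (event.type === 'sources') console.log('sources:', event.data);
      else if (event.type === 'done') return answer;
    }
  }
  return answer;
}
```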
### Fields in the Response
- **`message`** (string): The search result, generated based on the query and focus mode.

View File

@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
"version": "1.10.0",
"version": "1.10.1",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
@ -15,8 +15,10 @@
"@headlessui/react": "^2.2.0",
"@iarna/toml": "^2.2.5",
"@icons-pack/react-simple-icons": "^12.3.0",
"@langchain/anthropic": "^0.3.15",
"@langchain/community": "^0.3.36",
"@langchain/core": "^0.3.42",
"@langchain/google-genai": "^0.1.12",
"@langchain/openai": "^0.0.25",
"@langchain/textsplitters": "^0.1.0",
"@tailwindcss/typography": "^0.5.12",

View File

@ -49,6 +49,7 @@ type Body = {
files: Array<string>;
chatModel: ChatModel;
embeddingModel: EmbeddingModel;
systemInstructions: string;
};
const handleEmitterEvents = async (
@ -278,6 +279,7 @@ export const POST = async (req: Request) => {
embedding,
body.optimizationMode,
body.files,
body.systemInstructions,
);
const responseStream = new TransformStream();
@ -295,9 +297,9 @@ export const POST = async (req: Request) => {
},
});
} catch (err) {
console.error('An error ocurred while processing chat request:', err);
console.error('An error occurred while processing chat request:', err);
return Response.json(
{ message: 'An error ocurred while processing chat request' },
{ message: 'An error occurred while processing chat request' },
{ status: 500 },
);
}

View File

@ -59,9 +59,9 @@ export const GET = async (req: Request) => {
return Response.json({ ...config }, { status: 200 });
} catch (err) {
console.error('An error ocurred while getting config:', err);
console.error('An error occurred while getting config:', err);
return Response.json(
{ message: 'An error ocurred while getting config' },
{ message: 'An error occurred while getting config' },
{ status: 500 },
);
}
@ -100,9 +100,9 @@ export const POST = async (req: Request) => {
return Response.json({ message: 'Config updated' }, { status: 200 });
} catch (err) {
console.error('An error ocurred while updating config:', err);
console.error('An error occurred while updating config:', err);
return Response.json(
{ message: 'An error ocurred while updating config' },
{ message: 'An error occurred while updating config' },
{ status: 500 },
);
}

View File

@ -48,7 +48,7 @@ export const GET = async (req: Request) => {
},
);
} catch (err) {
console.error(`An error ocurred in discover route: ${err}`);
console.error(`An error occurred in discover route: ${err}`);
return Response.json(
{
message: 'An error has occurred',

View File

@ -74,9 +74,9 @@ export const POST = async (req: Request) => {
return Response.json({ images }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while searching images: ${err}`);
console.error(`An error occurred while searching images: ${err}`);
return Response.json(
{ message: 'An error ocurred while searching images' },
{ message: 'An error occurred while searching images' },
{ status: 500 },
);
}

View File

@ -34,7 +34,7 @@ export const GET = async (req: Request) => {
},
);
} catch (err) {
console.error('An error ocurred while fetching models', err);
console.error('An error occurred while fetching models', err);
return Response.json(
{
message: 'An error has occurred.',

View File

@ -33,6 +33,7 @@ interface ChatRequestBody {
embeddingModel?: embeddingModel;
query: string;
history: Array<[string, string]>;
stream?: boolean;
}
export const POST = async (req: Request) => {
@ -48,6 +49,7 @@ export const POST = async (req: Request) => {
body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
body.stream = body.stream || false;
const history: BaseMessage[] = body.history.map((msg) => {
return msg[0] === 'human'
@ -123,8 +125,10 @@ export const POST = async (req: Request) => {
embeddings,
body.optimizationMode,
[],
"",
);
if (!body.stream) {
return new Promise(
(
resolve: (value: Response) => void,
@ -133,7 +137,7 @@ export const POST = async (req: Request) => {
let message = '';
let sources: any[] = [];
emitter.on('data', (data) => {
emitter.on('data', (data: string) => {
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
@ -143,7 +147,10 @@ export const POST = async (req: Request) => {
}
} catch (error) {
reject(
Response.json({ message: 'Error parsing data' }, { status: 500 }),
Response.json(
{ message: 'Error parsing data' },
{ status: 500 },
),
);
}
});
@ -152,13 +159,106 @@ export const POST = async (req: Request) => {
resolve(Response.json({ message, sources }, { status: 200 }));
});
emitter.on('error', (error) => {
emitter.on('error', (error: any) => {
reject(
Response.json({ message: 'Search error', error }, { status: 500 }),
Response.json(
{ message: 'Search error', error },
{ status: 500 },
),
);
});
},
);
}
const encoder = new TextEncoder();
const abortController = new AbortController();
const { signal } = abortController;
const stream = new ReadableStream({
start(controller) {
let sources: any[] = [];
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'init',
data: 'Stream connected',
}) + '\n',
),
);
signal.addEventListener('abort', () => {
emitter.removeAllListeners();
try {
controller.close();
} catch (error) {}
});
emitter.on('data', (data: string) => {
if (signal.aborted) return;
try {
const parsedData = JSON.parse(data);
if (parsedData.type === 'response') {
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'response',
data: parsedData.data,
}) + '\n',
),
);
} else if (parsedData.type === 'sources') {
sources = parsedData.data;
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'sources',
data: sources,
}) + '\n',
),
);
}
} catch (error) {
controller.error(error);
}
});
emitter.on('end', () => {
if (signal.aborted) return;
controller.enqueue(
encoder.encode(
JSON.stringify({
type: 'done',
}) + '\n',
),
);
controller.close();
});
emitter.on('error', (error: any) => {
if (signal.aborted) return;
controller.error(error);
});
},
cancel() {
abortController.abort();
},
});
return new Response(stream, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache, no-transform',
Connection: 'keep-alive',
},
});
} catch (err: any) {
console.error(`Error in getting search results: ${err.message}`);
return Response.json(
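One design note on the `AbortController` wiring above: when the client drops the connection, the stream's `cancel()` callback fires, the route aborts its internal signal, and the emitter's listeners are removed so the backend stops emitting. A hedged client-side sketch of triggering that path (the `/api/search` path and body fields are assumptions, as before):

```typescript
// Start a streaming request, then cancel it from the client side.
async function searchBriefly(query: string) {
  const controller = new AbortController();

  const res = await fetch('/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ focusMode: 'webSearch', query, stream: true }),
    signal: controller.signal,
  });

  const reader = res.body!.getReader();
  // Stop after five seconds: aborting the fetch cancels the response stream,
  // which invokes the route's cancel() handler and removes its emitter listeners.
  setTimeout(() => controller.abort(), 5000);

  const decoder = new TextDecoder();
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      console.log(decoder.decode(value, { stream: true }));
    }
  } catch (err) {
    // An AbortError here is the expected outcome of the cancellation.
    console.log('stream cancelled');
  }
}
```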

View File

@ -72,9 +72,9 @@ export const POST = async (req: Request) => {
return Response.json({ suggestions }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while generating suggestions: ${err}`);
console.error(`An error occurred while generating suggestions: ${err}`);
return Response.json(
{ message: 'An error ocurred while generating suggestions' },
{ message: 'An error occurred while generating suggestions' },
{ status: 500 },
);
}

View File

@ -74,9 +74,9 @@ export const POST = async (req: Request) => {
return Response.json({ videos }, { status: 200 });
} catch (err) {
console.error(`An error ocurred while searching videos: ${err}`);
console.error(`An error occurred while searching videos: ${err}`);
return Response.json(
{ message: 'An error ocurred while searching videos' },
{ message: 'An error occurred while searching videos' },
{ status: 500 },
);
}

View File

@ -54,6 +54,38 @@ const Input = ({ className, isSaving, onSave, ...restProps }: InputProps) => {
);
};
interface TextareaProps extends React.InputHTMLAttributes<HTMLTextAreaElement> {
isSaving?: boolean;
onSave?: (value: string) => void;
}
const Textarea = ({
className,
isSaving,
onSave,
...restProps
}: TextareaProps) => {
return (
<div className="relative">
<textarea
placeholder="Any special instructions for the LLM"
className="placeholder:text-sm text-sm w-full flex items-center justify-between p-3 bg-light-secondary dark:bg-dark-secondary rounded-lg hover:bg-light-200 dark:hover:bg-dark-200 transition-colors"
rows={4}
onBlur={(e) => onSave?.(e.target.value)}
{...restProps}
/>
{isSaving && (
<div className="absolute right-3 top-3">
<Loader2
size={16}
className="animate-spin text-black/70 dark:text-white/70"
/>
</div>
)}
</div>
);
};
const Select = ({
className,
options,
@ -111,6 +143,7 @@ const Page = () => {
const [isLoading, setIsLoading] = useState(false);
const [automaticImageSearch, setAutomaticImageSearch] = useState(false);
const [automaticVideoSearch, setAutomaticVideoSearch] = useState(false);
const [systemInstructions, setSystemInstructions] = useState<string>('');
const [savingStates, setSavingStates] = useState<Record<string, boolean>>({});
useEffect(() => {
@ -172,6 +205,8 @@ const Page = () => {
localStorage.getItem('autoVideoSearch') === 'true',
);
setSystemInstructions(localStorage.getItem('systemInstructions')!);
setIsLoading(false);
};
@ -328,6 +363,8 @@ const Page = () => {
localStorage.setItem('embeddingModelProvider', value);
} else if (key === 'embeddingModel') {
localStorage.setItem('embeddingModel', value);
} else if (key === 'systemInstructions') {
localStorage.setItem('systemInstructions', value);
}
} catch (err) {
console.error('Failed to save:', err);
@ -473,6 +510,19 @@ const Page = () => {
</div>
</SettingsSection>
<SettingsSection title="System Instructions">
<div className="flex flex-col space-y-4">
<Textarea
value={systemInstructions}
isSaving={savingStates['systemInstructions']}
onChange={(e) => {
setSystemInstructions(e.target.value);
}}
onSave={(value) => saveConfig('systemInstructions', value)}
/>
</div>
</SettingsSection>
<SettingsSection title="Model Settings">
{config.chatModelProviders && (
<div className="flex flex-col space-y-4">

View File

@ -480,6 +480,7 @@ const ChatWindow = ({ id }: { id?: string }) => {
name: embeddingModelProvider.name,
provider: embeddingModelProvider.provider,
},
systemInstructions: localStorage.getItem('systemInstructions'),
}),
});

View File

@ -51,6 +51,10 @@ export const academicSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Academic', this means you will be searching for academic papers and articles on the web.
### User instructions
These instructions are provided by the user, not by the system. Follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -51,6 +51,10 @@ export const redditSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Reddit', this means you will be searching for information, opinions and discussions on the web using Reddit.
### User instructions
These instructions are provided by the user, not by the system. Follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -92,6 +92,10 @@ export const webSearchResponsePrompt = `
- If the user provides vague input or if relevant information is missing, explain what additional details might help refine the search.
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
### User instructions
These instructions are provided by the user, not by the system. Follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -51,6 +51,10 @@ export const wolframAlphaSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Wolfram Alpha', this means you will be searching for information on the web using Wolfram Alpha. It is a computational knowledge engine that can answer factual queries and perform computations.
### User instructions
These instructions are provided by the user, not by the system. Follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -7,6 +7,10 @@ You have to cite the answer using [number] notation. You must cite the sentences
Place these citations at the end of that particular sentence. You can cite the same sentence multiple times if it is relevant to the user's query like [number1][number2].
However you do not need to cite it using the same number. You can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
### User instructions
These instructions are provided by the user, not by the system. Follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
<context>
{context}
</context>

View File

@ -51,6 +51,10 @@ export const youtubeSearchResponsePrompt = `
- If no relevant information is found, say: "Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?" Be transparent about limitations and suggest alternatives or ways to reframe the query.
- You are set on focus mode 'Youtube', this means you will be searching for videos on the web using Youtube and providing information based on the video's transcript.
### User instructions
These instructions are provided by the user, not by the system. Follow them, but give them lower priority than the instructions above. If the user has provided specific instructions or preferences, incorporate them into your response while adhering to the overall guidelines.
{systemInstructions}
### Example Output
- Begin with a brief introduction summarizing the event or query topic.
- Follow with detailed sections under clear headings, covering all aspects of the query if possible.

View File

@ -1,4 +1,4 @@
import { ChatOpenAI } from '@langchain/openai';
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatModel } from '.';
import { getAnthropicApiKey } from '../config';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
@ -45,13 +45,10 @@ export const loadAnthropicChatModels = async () => {
anthropicChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: anthropicApiKey,
model: new ChatAnthropic({
apiKey: anthropicApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://api.anthropic.com/v1/',
},
}) as unknown as BaseChatModel,
};
});

View File

@ -1,10 +1,17 @@
import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
import {
ChatGoogleGenerativeAI,
GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../config';
import { ChatModel, EmbeddingModel } from '.';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { Embeddings } from '@langchain/core/embeddings';
const geminiChatModels: Record<string, string>[] = [
{
displayName: 'Gemini 2.5 Pro Experimental',
key: 'gemini-2.5-pro-exp-03-25',
},
{
displayName: 'Gemini 2.0 Flash',
key: 'gemini-2.0-flash',
@ -14,8 +21,8 @@ const geminiChatModels: Record<string, string>[] = [
key: 'gemini-2.0-flash-lite',
},
{
displayName: 'Gemini 2.0 Pro Experimental',
key: 'gemini-2.0-pro-exp-02-05',
displayName: 'Gemini 2.0 Flash Thinking Experimental',
key: 'gemini-2.0-flash-thinking-exp-01-21',
},
{
displayName: 'Gemini 1.5 Flash',
@ -49,13 +56,10 @@ export const loadGeminiChatModels = async () => {
geminiChatModels.forEach((model) => {
chatModels[model.key] = {
displayName: model.displayName,
model: new ChatOpenAI({
openAIApiKey: geminiApiKey,
model: new ChatGoogleGenerativeAI({
apiKey: geminiApiKey,
modelName: model.key,
temperature: 0.7,
configuration: {
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
},
}) as unknown as BaseChatModel,
};
});
@ -78,12 +82,9 @@ export const loadGeminiEmbeddingModels = async () => {
geminiEmbeddingModels.forEach((model) => {
embeddingModels[model.key] = {
displayName: model.displayName,
model: new OpenAIEmbeddings({
openAIApiKey: geminiApiKey,
model: new GoogleGenerativeAIEmbeddings({
apiKey: geminiApiKey,
modelName: model.key,
configuration: {
baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai/',
},
}) as unknown as Embeddings,
};
});

View File

@ -33,6 +33,7 @@ export interface MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) => Promise<eventEmitter>;
}
@ -236,9 +237,11 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds: string[],
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
systemInstructions: string,
) {
return RunnableSequence.from([
RunnableMap.from({
systemInstructions: () => systemInstructions,
query: (input: BasicChainInput) => input.query,
chat_history: (input: BasicChainInput) => input.chat_history,
date: () => new Date().toISOString(),
@ -468,6 +471,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
embeddings: Embeddings,
optimizationMode: 'speed' | 'balanced' | 'quality',
fileIds: string[],
systemInstructions: string,
) {
const emitter = new eventEmitter();
@ -476,6 +480,7 @@ class MetaSearchAgent implements MetaSearchAgentType {
fileIds,
embeddings,
optimizationMode,
systemInstructions,
);
const stream = answeringChain.streamEvents(

View File

@ -12,6 +12,19 @@
resolved "https://registry.yarnpkg.com/@alloc/quick-lru/-/quick-lru-5.2.0.tgz#7bf68b20c0a350f936915fcae06f58e32007ce30"
integrity sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==
"@anthropic-ai/sdk@^0.37.0":
version "0.37.0"
resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.37.0.tgz#0018127404ecb9b8a12968068e0c4b3e8bbd6386"
integrity sha512-tHjX2YbkUBwEgg0JZU3EFSSAQPoK4qQR/NFYa8Vtzd5UAyXzZksCw2In69Rml4R/TyHPBfRYaLK35XiOe33pjw==
dependencies:
"@types/node" "^18.11.18"
"@types/node-fetch" "^2.6.4"
abort-controller "^3.0.0"
agentkeepalive "^4.2.1"
form-data-encoder "1.7.2"
formdata-node "^4.3.2"
node-fetch "^2.6.7"
"@anthropic-ai/sdk@^0.9.1":
version "0.9.1"
resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.9.1.tgz#b2d2b7bf05c90dce502c9a2e869066870f69ba88"
@ -374,6 +387,11 @@
resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.2.8.tgz#21a907684723bbbaa5f0974cf7730bd797eb8e62"
integrity sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==
"@google/generative-ai@^0.24.0":
version "0.24.0"
resolved "https://registry.yarnpkg.com/@google/generative-ai/-/generative-ai-0.24.0.tgz#4d27af7d944c924a27a593c17ad1336535d53846"
integrity sha512-fnEITCGEB7NdX0BhoYZ/cq/7WPZ1QS5IzJJfC3Tg/OwkvBetMiVJciyaan297OvE4B9Jg1xvo0zIazX/9sGu1Q==
"@headlessui/react@^2.2.0":
version "2.2.0"
resolved "https://registry.yarnpkg.com/@headlessui/react/-/react-2.2.0.tgz#a8e32f0899862849a1ce1615fa280e7891431ab7"
@ -575,6 +593,16 @@
"@jridgewell/resolve-uri" "^3.1.0"
"@jridgewell/sourcemap-codec" "^1.4.14"
"@langchain/anthropic@^0.3.15":
version "0.3.15"
resolved "https://registry.yarnpkg.com/@langchain/anthropic/-/anthropic-0.3.15.tgz#0244cdb345cb492eb40aedd681881ebadfbb73f2"
integrity sha512-Ar2viYcZ64idgV7EtCBCb36tIkNtPAhQRxSaMTWPHGspFgMfvwRoleVri9e90sCpjpS9xhlHsIQ0LlUS/Atsrw==
dependencies:
"@anthropic-ai/sdk" "^0.37.0"
fast-xml-parser "^4.4.1"
zod "^3.22.4"
zod-to-json-schema "^3.22.4"
"@langchain/community@^0.3.36":
version "0.3.36"
resolved "https://registry.yarnpkg.com/@langchain/community/-/community-0.3.36.tgz#e4c13b8f928b17e0f9257395f43be2246dfada40"
@ -640,6 +668,14 @@
zod "^3.22.4"
zod-to-json-schema "^3.22.3"
"@langchain/google-genai@^0.1.12":
version "0.1.12"
resolved "https://registry.yarnpkg.com/@langchain/google-genai/-/google-genai-0.1.12.tgz#6727253bda6f0d87cd74cf0bb6b1e0f398f60f32"
integrity sha512-0Ea0E2g63ejCuormVxbuoyJQ5BYN53i2/fb6WP8bMKzyh+y43R13V8JqOtr3e/GmgNyv3ou/VeaZjx7KAvu/0g==
dependencies:
"@google/generative-ai" "^0.24.0"
zod-to-json-schema "^3.22.4"
"@langchain/openai@>=0.1.0 <0.5.0", "@langchain/openai@>=0.2.0 <0.5.0":
version "0.4.5"
resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.4.5.tgz#d18e207c3ec3f2ecaa4698a5a5888092f643da52"
@ -2369,6 +2405,13 @@ fast-levenshtein@^2.0.6:
resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==
fast-xml-parser@^4.4.1:
version "4.5.3"
resolved "https://registry.yarnpkg.com/fast-xml-parser/-/fast-xml-parser-4.5.3.tgz#c54d6b35aa0f23dc1ea60b6c884340c006dc6efb"
integrity sha512-RKihhV+SHsIUGXObeVy9AXiBbFwkVk7Syp8XgwN5U3JV416+Gwp/GO9i0JYKmikykgz/UHRrrV4ROuZEo/T0ig==
dependencies:
strnum "^1.1.1"
fastq@^1.6.0:
version "1.17.1"
resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47"
@ -4458,6 +4501,11 @@ strip-json-comments@~2.0.1:
resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==
strnum@^1.1.1:
version "1.1.2"
resolved "https://registry.yarnpkg.com/strnum/-/strnum-1.1.2.tgz#57bca4fbaa6f271081715dbc9ed7cee5493e28e4"
integrity sha512-vrN+B7DBIoTTZjnPNewwhx6cBA/H+IS7rfW68n7XxC1y7uoiGQBxaKzqucGUgavX15dJgiGztLJ8vxuEzwqBdA==
styled-jsx@5.1.6:
version "5.1.6"
resolved "https://registry.yarnpkg.com/styled-jsx/-/styled-jsx-5.1.6.tgz#83b90c077e6c6a80f7f5e8781d0f311b2fe41499"
@ -4955,6 +5003,11 @@ zod-to-json-schema@^3.22.3, zod-to-json-schema@^3.22.5:
resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.22.5.tgz#3646e81cfc318dbad2a22519e5ce661615418673"
integrity sha512-+akaPo6a0zpVCCseDed504KBJUQpEW5QZw7RMneNmKw+fGaML1Z9tUNLnHHAC8x6dzVRO1eB2oEMyZRnuBZg7Q==
zod-to-json-schema@^3.22.4:
version "3.24.5"
resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.24.5.tgz#d1095440b147fb7c2093812a53c54df8d5df50a3"
integrity sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==
zod@^3.22.3, zod@^3.22.4:
version "3.22.4"
resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.4.tgz#f31c3a9386f61b1f228af56faa9255e845cf3fff"