Compare commits

...

11 Commits

Author       SHA1        Message                                                    Date
ItzCrazyKns  8347b798f3  feat(app): lint & beautify                                 2026-01-03 23:12:19 +05:30
ItzCrazyKns  a16472bcf3  feat(actions): prevent double conversion to object array   2026-01-01 21:56:46 +05:30
ItzCrazyKns  3b8d8be676  feat(package): bump version                                2025-12-31 12:58:59 +05:30
ItzCrazyKns  b83f9bac78  feat(providers): extract/repair json before parsing        2025-12-31 12:58:24 +05:30
ItzCrazyKns  bd7c563137  feat(package): add json repair                             2025-12-31 12:57:59 +05:30
ItzCrazyKns  23b903db9a  Update searxng.ts                                          2025-12-30 22:16:06 +05:30
ItzCrazyKns  a98f0df83f  feat(app): lint & beautify                                 2025-12-29 22:02:21 +05:30
ItzCrazyKns  164d528761  feat(compose): add build context, remove uploads           2025-12-28 13:11:05 +05:30
ItzCrazyKns  af4ec17117  Update docker-compose.yaml                                 2025-12-28 12:49:25 +05:30
ItzCrazyKns  1622e0893a  feat(providers): add lm studio                             2025-12-28 11:29:34 +05:30
ItzCrazyKns  55a4b9d436  feat(openai-llm): use function call index instead of type  2025-12-28 01:21:33 +05:30
14 changed files with 186 additions and 21 deletions

docker-compose.yaml   View File

@@ -7,11 +7,8 @@ services:
       - '3000:3000'
     volumes:
       - data:/home/perplexica/data
-      - uploads:/home/perplexica/uploads
     restart: unless-stopped

 volumes:
   data:
-    name: 'perplexica-data'
-  uploads:
-    name: 'perplexica-uploads'
+    name: 'perplexica-data'

next-env.d.ts (vendored, 2 lines changed)   View File

@@ -1,6 +1,6 @@
 /// <reference types="next" />
 /// <reference types="next/image-types/global" />
-import "./.next/dev/types/routes.d.ts";
+import './.next/dev/types/routes.d.ts';

 // NOTE: This file should not be edited
 // see https://nextjs.org/docs/app/api-reference/config/typescript for more information.

package.json   View File

@@ -1,6 +1,6 @@
 {
   "name": "perplexica",
-  "version": "1.12.0",
+  "version": "1.12.1",
   "license": "MIT",
   "author": "ItzCrazyKns",
   "scripts": {
@@ -19,6 +19,7 @@
     "@phosphor-icons/react": "^2.1.10",
     "@radix-ui/react-tooltip": "^1.2.8",
     "@tailwindcss/typography": "^0.5.12",
+    "@toolsycc/json-repair": "^0.1.22",
     "axios": "^1.8.3",
     "better-sqlite3": "^11.9.1",
     "clsx": "^2.1.0",

View File

@@ -80,7 +80,10 @@ const Chat = () => {
         {loading && !messageAppeared && <MessageBoxLoading />}
         <div ref={messageEnd} className="h-0" />
         {dividerWidth > 0 && (
-          <div className="fixed z-40 bottom-24 lg:bottom-6" style={{ width: dividerWidth }}>
+          <div
+            className="fixed z-40 bottom-24 lg:bottom-6"
+            style={{ width: dividerWidth }}
+          >
             <div
               className="pointer-events-none absolute -bottom-6 left-0 right-0 h-[calc(100%+24px+24px)] dark:hidden"
               style={{

View File

@@ -1,12 +1,4 @@
 export const getSuggestions = async (chatHistory: [string, string][]) => {
-  const chatTurns = chatHistory.map(([role, content]) => {
-    if (role === 'human') {
-      return { role: 'user', content };
-    } else {
-      return { role: 'assistant', content };
-    }
-  });
-
   const chatModel = localStorage.getItem('chatModelKey');
   const chatModelProvider = localStorage.getItem('chatModelProviderId');

@@ -16,7 +8,7 @@ export const getSuggestions = async (chatHistory: [string, string][]) => {
       'Content-Type': 'application/json',
     },
     body: JSON.stringify({
-      chatHistory: chatTurns,
+      chatHistory,
       chatModel: {
         providerId: chatModelProvider,
         key: chatModel,
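
Aside, not part of the diff: the deleted `chatTurns` mapping is the client half of commit a16472bcf3, "prevent double conversion to object array". The `[role, content]` tuples were converted to `{ role, content }` objects here and then again on the server, so the client now sends them raw and the mapping happens once. A minimal sketch of that single conversion, assuming `ChatTurnMessage` is the `{ role, content }` shape used elsewhere in the repo and `toChatTurns` is a hypothetical helper name:

    import { ChatTurnMessage } from '@/lib/types';

    // Hypothetical helper: map raw [role, content] tuples to chat turns
    // exactly once, on the server, instead of in every caller.
    const toChatTurns = (history: [string, string][]): ChatTurnMessage[] =>
      history.map(([role, content]) => ({
        role: role === 'human' ? 'user' : 'assistant',
        content,
      }));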

View File

@@ -3,7 +3,6 @@ import { suggestionGeneratorPrompt } from '@/lib/prompts/suggestions';
 import { ChatTurnMessage } from '@/lib/types';
 import z from 'zod';
 import BaseLLM from '@/lib/models/base/llm';
-import { i } from 'mathjs';

 type SuggestionGeneratorInput = {
   chatHistory: ChatTurnMessage[];

View File

@@ -7,6 +7,7 @@ import TransformersProvider from './transformers';
 import GroqProvider from './groq';
 import LemonadeProvider from './lemonade';
 import AnthropicProvider from './anthropic';
+import LMStudioProvider from './lmstudio';

 export const providers: Record<string, ProviderConstructor<any>> = {
   openai: OpenAIProvider,
@@ -16,6 +17,7 @@ export const providers: Record<string, ProviderConstructor<any>> = {
   groq: GroqProvider,
   lemonade: LemonadeProvider,
   anthropic: AnthropicProvider,
+  lmstudio: LMStudioProvider,
 };

 export const getModelProvidersUIConfigSection =

lmstudio/index.ts (new file)   View File

@@ -0,0 +1,143 @@
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
import BaseModelProvider from '../../base/provider';
import { Model, ModelList, ProviderMetadata } from '../../types';
import LMStudioLLM from './lmstudioLLM';
import BaseLLM from '../../base/llm';
import BaseEmbedding from '../../base/embedding';
import LMStudioEmbedding from './lmstudioEmbedding';

interface LMStudioConfig {
  baseURL: string;
}

const providerConfigFields: UIConfigField[] = [
  {
    type: 'string',
    name: 'Base URL',
    key: 'baseURL',
    description: 'The base URL for LM Studio server',
    required: true,
    placeholder: 'http://localhost:1234',
    env: 'LM_STUDIO_BASE_URL',
    scope: 'server',
  },
];

class LMStudioProvider extends BaseModelProvider<LMStudioConfig> {
  constructor(id: string, name: string, config: LMStudioConfig) {
    super(id, name, config);
  }

  private normalizeBaseURL(url: string): string {
    const trimmed = url.trim().replace(/\/+$/, '');
    return trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
  }

  async getDefaultModels(): Promise<ModelList> {
    try {
      const baseURL = this.normalizeBaseURL(this.config.baseURL);

      const res = await fetch(`${baseURL}/models`, {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',
        },
      });

      const data = await res.json();

      const models: Model[] = data.data.map((m: any) => {
        return {
          name: m.id,
          key: m.id,
        };
      });

      return {
        embedding: models,
        chat: models,
      };
    } catch (err) {
      if (err instanceof TypeError) {
        throw new Error(
          'Error connecting to LM Studio. Please ensure the base URL is correct and the LM Studio server is running.',
        );
      }

      throw err;
    }
  }

  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id)!;

    return {
      embedding: [
        ...defaultModels.embedding,
        ...configProvider.embeddingModels,
      ],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.chat.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading LM Studio Chat Model. Invalid Model Selected',
      );
    }

    return new LMStudioLLM({
      apiKey: 'lm-studio',
      model: key,
      baseURL: this.normalizeBaseURL(this.config.baseURL),
    });
  }

  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    const modelList = await this.getModelList();
    const exists = modelList.embedding.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading LM Studio Embedding Model. Invalid Model Selected.',
      );
    }

    return new LMStudioEmbedding({
      apiKey: 'lm-studio',
      model: key,
      baseURL: this.normalizeBaseURL(this.config.baseURL),
    });
  }

  static parseAndValidate(raw: any): LMStudioConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.baseURL)
      throw new Error('Invalid config provided. Base URL must be provided');

    return {
      baseURL: String(raw.baseURL),
    };
  }

  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'lmstudio',
      name: 'LM Studio',
    };
  }
}

export default LMStudioProvider;
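
Usage sketch, not part of the diff (the provider id and model key are made-up examples). Note that `normalizeBaseURL` trims trailing slashes and appends `/v1` only when it is missing, so `http://localhost:1234` and `http://localhost:1234/v1/` normalize to the same endpoint:

    // Hypothetical wiring; in the app, the registry constructs providers
    // from saved config rather than by hand.
    const provider = new LMStudioProvider('lmstudio', 'LM Studio', {
      baseURL: 'http://localhost:1234',
    });

    // Throws if the key is not in the list reported by GET {baseURL}/v1/models.
    const chatModel = await provider.loadChatModel('qwen2.5-7b-instruct');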

lmstudioEmbedding.ts (new file)   View File

@@ -0,0 +1,5 @@
import OpenAIEmbedding from '../openai/openaiEmbedding';

class LMStudioEmbedding extends OpenAIEmbedding {}

export default LMStudioEmbedding;

lmstudioLLM.ts (new file)   View File

@@ -0,0 +1,5 @@
import OpenAILLM from '../openai/openaiLLM';

class LMStudioLLM extends OpenAILLM {}

export default LMStudioLLM;
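
Design note: both LM Studio wrappers are deliberately empty subclasses. LM Studio serves an OpenAI-compatible API, so the existing OpenAI LLM and embedding implementations work unchanged once pointed at the normalized `/v1` base URL with the placeholder `lm-studio` API key; the distinct classes appear to exist only to give the provider its own types.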

ollamaLLM.ts   View File

@@ -11,6 +11,7 @@ import { Ollama, Tool as OllamaTool, Message as OllamaMessage } from 'ollama';
 import { parse } from 'partial-json';
 import crypto from 'crypto';
 import { Message } from '@/lib/types';
+import { repairJson } from '@toolsycc/json-repair';

 type OllamaConfig = {
   baseURL: string;
@@ -205,7 +206,13 @@ class OllamaLLM extends BaseLLM<OllamaConfig> {
     });

     try {
-      return input.schema.parse(JSON.parse(response.message.content)) as T;
+      return input.schema.parse(
+        JSON.parse(
+          repairJson(response.message.content, {
+            extractJson: true,
+          }) as string,
+        ),
+      ) as T;
     } catch (err) {
       throw new Error(`Error parsing response from Ollama: ${err}`);
     }
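
The `repairJson` wrapper targets the two common failure modes when a local model is asked for JSON: prose around the payload and minor syntax damage such as trailing commas. A small sketch of the intended behavior, assuming `extractJson: true` pulls the JSON document out of surrounding text, as the option name and the commit message ("extract/repair json before parsing") suggest; the raw string is a made-up example:

    import { repairJson } from '@toolsycc/json-repair';

    // Made-up model output: chatty preamble plus a trailing comma.
    const raw = 'Sure! {"suggestions": ["alpha", "beta",]}';

    // Extract the JSON from the surrounding text and repair it, so that
    // JSON.parse receives a clean document.
    const fixed = repairJson(raw, { extractJson: true }) as string;
    const parsed = JSON.parse(fixed); // expected: { suggestions: ['alpha', 'beta'] }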

openaiLLM.ts   View File

@@ -18,6 +18,7 @@ import {
   ChatCompletionToolMessageParam,
 } from 'openai/resources/index.mjs';
 import { Message } from '@/lib/types';
+import { repairJson } from '@toolsycc/json-repair';

 type OpenAIConfig = {
   apiKey: string;
@@ -167,7 +168,7 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
             contentChunk: chunk.choices[0].delta.content || '',
             toolCallChunk:
               toolCalls?.map((tc) => {
-                if (tc.type === 'function') {
+                if (!recievedToolCalls[tc.index]) {
                   const call = {
                     name: tc.function?.name!,
                     id: tc.id!,
@@ -213,7 +214,13 @@ class OpenAILLM extends BaseLLM<OpenAIConfig> {
     if (response.choices && response.choices.length > 0) {
       try {
-        return input.schema.parse(response.choices[0].message.parsed) as T;
+        return input.schema.parse(
+          JSON.parse(
+            repairJson(response.choices[0].message.content!, {
+              extractJson: true,
+            }) as string,
+          ),
+        ) as T;
       } catch (err) {
         throw new Error(`Error parsing response from OpenAI: ${err}`);
       }
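
Why `tc.index` instead of `tc.type`: in the streamed Chat Completions API, each tool-call delta carries a stable `index`, while `id`, `name`, and `type` are only guaranteed on a call's first fragment; servers differ in whether `type` is repeated on later fragments, which is presumably why the check moved to the index. A reduced sketch of the accumulation pattern (the types and names here are illustrative, not the file's actual bookkeeping):

    // Minimal shape of a streamed tool-call delta (subset of the OpenAI type).
    type ToolCallDelta = {
      index: number;
      id?: string;
      type?: 'function';
      function?: { name?: string; arguments?: string };
    };

    type PartialCall = { id: string; name: string; args: string };

    // Fold stream fragments into complete calls, keyed by index.
    function accumulateToolCalls(deltas: ToolCallDelta[]) {
      const calls: Record<number, PartialCall> = {};
      for (const tc of deltas) {
        if (!calls[tc.index]) {
          // The first fragment for an index carries the id and function name.
          calls[tc.index] = {
            id: tc.id ?? '',
            name: tc.function?.name ?? '',
            args: '',
          };
        }
        // The arguments JSON arrives as string fragments across chunks.
        calls[tc.index].args += tc.function?.arguments ?? '';
      }
      return calls;
    }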

searxng.ts   View File

@@ -1,4 +1,3 @@
-import axios from 'axios';
 import { getSearxngURL } from './config/serverRegistry';

 interface SearxngSearchOptions {

yarn.lock   View File

@@ -1384,6 +1384,11 @@
   resolved "https://registry.yarnpkg.com/@tokenizer/token/-/token-0.3.0.tgz#fe98a93fe789247e998c75e74e9c7c63217aa276"
   integrity sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==

+"@toolsycc/json-repair@^0.1.22":
+  version "0.1.22"
+  resolved "https://registry.yarnpkg.com/@toolsycc/json-repair/-/json-repair-0.1.22.tgz#7ad0eb30c4ef1c4286ad3487dc1bbda562f09986"
+  integrity sha512-IMrsxovS9a5pWGRxMCDQDW8FKKEZI/yK/HMcyJlbnd/s+Mk0dRtGr1BFicL276gDsPvb/JfNHtHSi1oc0eY1jA==
+
 "@types/better-sqlite3@^7.6.12":
   version "7.6.12"
   resolved "https://registry.yarnpkg.com/@types/better-sqlite3/-/better-sqlite3-7.6.12.tgz#e5712d46d71097dcc2775c0b068072eadc15deb7"