feat(ollama-llm): implement function calling

Author: ItzCrazyKns
Date:   2025-12-05 21:17:28 +05:30
Parent: ee5d9172a4
Commit: 1c0e90c8e0

@@ -7,7 +7,7 @@ import {
   GenerateTextOutput,
   StreamTextOutput,
 } from '../../types';
-import { Ollama } from 'ollama';
+import { Ollama, Tool as OllamaTool } from 'ollama';
 import { parse } from 'partial-json';

 type OllamaConfig = {
@@ -36,9 +36,23 @@ class OllamaLLM extends BaseLLM<OllamaConfig> {
   }

   async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
+    const ollamaTools: OllamaTool[] = [];
+
+    input.tools?.forEach((tool) => {
+      ollamaTools.push({
+        type: 'function',
+        function: {
+          name: tool.name,
+          description: tool.description,
+          parameters: z.toJSONSchema(tool.schema).properties,
+        },
+      });
+    });
+
     const res = await this.ollamaClient.chat({
       model: this.config.model,
       messages: input.messages,
+      tools: ollamaTools.length > 0 ? ollamaTools : undefined,
       options: {
         top_p: input.options?.topP ?? this.config.options?.topP,
         temperature:
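
Note: `z.toJSONSchema()` (zod v4) converts each tool's zod schema to JSON Schema before it is handed to Ollama; the hunk above does not add a zod import, so `z` is presumably already imported in this file. Also worth flagging: this hunk passes only `.properties`, while streamText below passes the full schema object. Ollama's tools API expects the full shape ({ type, properties, required }), so the `.properties` form here looks inconsistent and may be unintended. A standalone sketch of the mapping, using a hypothetical get_weather tool that is not part of this commit:

import { z } from 'zod';
import type { Tool as OllamaTool } from 'ollama';

// Hypothetical tool definition, for illustration only.
const weatherSchema = z.object({
  city: z.string().describe('City to fetch the weather for'),
});

const weatherTool: OllamaTool = {
  type: 'function',
  function: {
    name: 'get_weather',
    description: 'Fetch the current weather for a city',
    // z.toJSONSchema() yields the full schema object:
    // { type: 'object', properties: { city: {...} }, required: ['city'] }
    parameters: z.toJSONSchema(weatherSchema) as any,
  },
};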
@@ -58,6 +72,11 @@ class OllamaLLM extends BaseLLM<OllamaConfig> {
     return {
       content: res.message.content,
+      toolCalls:
+        res.message.tool_calls?.map((tc) => ({
+          name: tc.function.name,
+          arguments: tc.function.arguments,
+        })) || [],
       additionalInfo: {
         reasoning: res.message.thinking,
       },
     };
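
Note: with this hunk, a non-streaming call surfaces any tool calls alongside the text. A hedged usage sketch; the `llm` instance, the message shape, and `weatherToolDef` are assumptions, not part of the diff:

const out = await llm.generateText({
  messages: [{ role: 'user', content: "What's the weather in Paris?" }],
  tools: [weatherToolDef], // app-level definition with name/description/schema
});

// toolCalls is always an array; it is empty when the model answered directly.
for (const call of out.toolCalls) {
  console.log(call.name, call.arguments); // arguments arrive already parsed by ollama-js
}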
@@ -67,10 +86,24 @@ class OllamaLLM extends BaseLLM<OllamaConfig> {
   async *streamText(
     input: GenerateTextInput,
   ): AsyncGenerator<StreamTextOutput> {
+    const ollamaTools: OllamaTool[] = [];
+
+    input.tools?.forEach((tool) => {
+      ollamaTools.push({
+        type: 'function',
+        function: {
+          name: tool.name,
+          description: tool.description,
+          parameters: z.toJSONSchema(tool.schema) as any,
+        },
+      });
+    });
+
     const stream = await this.ollamaClient.chat({
       model: this.config.model,
       messages: input.messages,
       stream: true,
+      tools: ollamaTools.length > 0 ? ollamaTools : undefined,
       options: {
         top_p: input.options?.topP ?? this.config.options?.topP,
         temperature:
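
Note: the tool-mapping block is duplicated verbatim between generateText and streamText, apart from the `.properties` / `as any` difference flagged above. A hypothetical helper, not part of the commit, could collapse both call sites; `AppTool` below stands in for this codebase's tool type:

import { z } from 'zod';
import type { Tool as OllamaTool } from 'ollama';

type AppTool = { name: string; description: string; schema: z.ZodType };

// Returns undefined when no tools are given, matching the
// `ollamaTools.length > 0 ? ollamaTools : undefined` behavior in the diff.
const toOllamaTools = (tools?: AppTool[]): OllamaTool[] | undefined =>
  tools && tools.length > 0
    ? tools.map((tool) => ({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: z.toJSONSchema(tool.schema) as any,
        },
      }))
    : undefined;

Both chat calls could then pass `tools: toOllamaTools(input.tools)` directly.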
@@ -91,6 +124,11 @@ class OllamaLLM extends BaseLLM<OllamaConfig> {
     for await (const chunk of stream) {
       yield {
         contentChunk: chunk.message.content,
+        toolCallChunk:
+          chunk.message.tool_calls?.map((tc) => ({
+            name: tc.function.name,
+            arguments: tc.function.arguments,
+          })) || [],
         done: chunk.done,
         additionalInfo: {
           reasoning: chunk.message.thinking,
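
Note: recent Ollama versions appear to emit tool calls as complete objects within stream chunks rather than as argument fragments, which is why `toolCallChunk` can map them directly without the partial-json parsing used elsewhere in this file. A hedged consumer sketch; names outside the diff are assumptions:

const calls: { name: string; arguments: Record<string, unknown> }[] = [];
let text = '';

for await (const chunk of llm.streamText({ messages, tools: [weatherToolDef] })) {
  text += chunk.contentChunk;
  calls.push(...chunk.toolCallChunk); // whole tool calls, not partial JSON
  if (chunk.done) break;
}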