ollama-ai-provider-v2 1.5.5 → 2.0.0-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +155 -0
- package/dist/index.js +54 -2
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +56 -4
- package/dist/index.mjs.map +1 -1
- package/package.json +7 -3
package/README.md
CHANGED
````diff
@@ -7,6 +7,20 @@ Use Ollama with the Vercel AI SDK, implementing the official Ollama API. This pr
 [](https://nodejs.org/)
 [](https://opensource.org/licenses/Apache-2.0)
 
+## 🎉 AI SDK 6 Beta Support
+
+This provider now supports **AI SDK 6 Beta** features including:
+
+- **🤖 Agent Abstraction** - Build complex agents with `ToolLoopAgent`
+- **🔐 Tool Approval** - Request user confirmation before executing tools
+- **📊 Structured Output** - Generate typed data alongside tool calling
+- **⚡ Enhanced Performance** - Optimized for the latest AI SDK features
+
+```bash
+# Install AI SDK 6 Beta + Ollama Provider
+npm install ai@beta ollama-ai-provider-v2
+```
+
 ## Why Choose Ollama Provider V2?
 
 - ✅ **Minimal Dependencies** - Lean codebase with just 2 core dependencies
````
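The snippet below is not part of the diff: it is a minimal sketch of how the freshly installed pair is typically exercised, assuming a local Ollama server with `llama3.2` pulled (see the setup commands in the next hunk). `generateText` is the standard AI SDK entry point.

```typescript
import { generateText } from 'ai';
import { ollama } from 'ollama-ai-provider-v2';

// One-shot generation against a locally served Ollama model.
const { text } = await generateText({
  model: ollama('llama3.2'),
  prompt: 'Say hello in one short sentence.',
});

console.log(text);
```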
````diff
@@ -192,6 +206,147 @@ ollama serve
 ollama pull llama3.2
 ```
 
+# AI SDK 6 Beta examples
+## Agent Abstraction
+
+AI SDK 6 introduces the `ToolLoopAgent` class for building agents with full control over execution flow.
+
+### Basic Agent
+
+```typescript
+import { ToolLoopAgent } from 'ai';
+import { ollama } from 'ollama-ai-provider-v2';
+
+const weatherAgent = new ToolLoopAgent({
+  model: ollama('llama3.3:70b'),
+  instructions: 'You are a helpful weather assistant.',
+  tools: {
+    weather: weatherTool,
+  },
+});
+
+const result = await weatherAgent.generate({
+  prompt: 'What is the weather in San Francisco?',
+});
+```
+
+### Agent with Call Options
+
+Use call options to pass runtime configuration to agents:
+
+```typescript
+import { ToolLoopAgent } from 'ai';
+import { ollama } from 'ollama-ai-provider-v2';
+import { z } from 'zod';
+
+const supportAgent = new ToolLoopAgent({
+  model: ollama('qwen2.5:32b'),
+  callOptionsSchema: z.object({
+    userId: z.string(),
+    accountType: z.enum(['free', 'pro', 'enterprise']),
+  }),
+  instructions: 'You are a helpful customer support agent.',
+  prepareCall: ({ options, ...settings }) => ({
+    ...settings,
+    instructions: `${settings.instructions}
+
+User context:
+- Account type: ${options.accountType}
+- User ID: ${options.userId}
+
+Adjust your response based on the user's account level.`,
+  }),
+});
+
+const result = await supportAgent.generate({
+  prompt: 'How do I upgrade my account?',
+  options: {
+    userId: 'user_123',
+    accountType: 'free',
+  },
+});
+```
+
+## Tool Execution Approval
+
+AI SDK 6 allows you to require user approval before executing tools.
+
+### Basic Tool Approval
+
+```typescript
+import { tool } from 'ai';
+import { z } from 'zod';
+
+export const weatherTool = tool({
+  description: 'Get the weather in a location',
+  inputSchema: z.object({
+    city: z.string(),
+  }),
+  needsApproval: true, // Always require approval
+  execute: async ({ city }) => {
+    const weather = await fetchWeather(city);
+    return weather;
+  },
+});
+```
+
+### Dynamic Approval
+
+Make approval decisions based on tool input:
+
+```typescript
+export const paymentTool = tool({
+  description: 'Process a payment',
+  inputSchema: z.object({
+    amount: z.number(),
+    recipient: z.string(),
+  }),
+  needsApproval: async ({ amount }) => amount > 1000, // Only large payments
+  execute: async ({ amount, recipient }) => {
+    return await processPayment(amount, recipient);
+  },
+});
+```
+
+## UI Integration
+
+### Server-side API Route
+
+```typescript
+import { createAgentUIStreamResponse } from 'ai';
+import { weatherAgent } from '@/lib/agents';
+
+export async function POST(request: Request) {
+  const { messages } = await request.json();
+
+  return createAgentUIStreamResponse({
+    agent: weatherAgent,
+    messages,
+  });
+}
+```
+
+### Client-side with Type Safety
+
+```typescript
+import { useChat } from '@ai-sdk/react';
+import { InferAgentUIMessage } from 'ai';
+import { weatherAgent } from '@/lib/agents';
+
+type WeatherAgentUIMessage = InferAgentUIMessage<typeof weatherAgent>;
+
+export function WeatherChat() {
+  const { messages, sendMessage } = useChat<WeatherAgentUIMessage>();
+
+  return (
+    <div>
+      {/* Your chat UI */}
+    </div>
+  );
+}
+```
+
+
 ## Contributing
 
 Contributions are welcome! Here's how to get started:
````
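The new examples call `fetchWeather` and `processPayment` without defining them. Hypothetical stand-ins such as the following make the snippets self-contained; the names, signatures, and return shapes are illustrative assumptions, not part of the package.

```typescript
// Hypothetical helpers assumed by the README examples above.
// Swap in real API calls; only the call signatures matter for the examples.
export async function fetchWeather(city: string): Promise<{ city: string; tempC: number }> {
  // A real implementation would query a weather service here.
  return { city, tempC: 18 };
}

export async function processPayment(amount: number, recipient: string): Promise<string> {
  // A real implementation would call a payment provider and return a confirmation id.
  return `payment-to-${recipient}-for-${amount}-confirmed`;
}
```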
package/dist/index.js
CHANGED
```diff
@@ -157,6 +157,32 @@ function getResponseMetadata({
 }
 
 // src/completion/ollama-completion-language-model.ts
+function createJsonStreamResponseHandler(schema) {
+  return async ({ response }) => {
+    if (!response.body) {
+      throw new Error("Response body is null");
+    }
+    const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new TransformStream({
+      transform(chunk, controller) {
+        const lines = chunk.split("\n");
+        for (const line of lines) {
+          if (line.trim()) {
+            const result = (0, import_provider_utils2.safeParseJSON)({ text: line.trim(), schema });
+            controller.enqueue(result);
+          }
+        }
+      }
+    }));
+    const responseHeaders = {};
+    response.headers.forEach((value, key) => {
+      responseHeaders[key] = value;
+    });
+    return {
+      value: stream,
+      responseHeaders
+    };
+  };
+}
 var ollamaCompletionProviderOptions = import_v42.z.object({
   think: import_v42.z.boolean().optional(),
   user: import_v42.z.string().optional(),
```
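The added `createJsonStreamResponseHandler` treats Ollama's streaming body as newline-delimited JSON (NDJSON): each decoded chunk is split on `"\n"` and every non-empty line is parsed against the response schema, with one parse result enqueued per line. Below is a standalone sketch of the same technique using only web-streams APIs; the payload and schema are invented for illustration.

```typescript
import { z } from 'zod';

// Invented stand-in for the provider's response schema.
const chunkSchema = z.object({ done: z.boolean(), response: z.string() });

// Simulated NDJSON body: one JSON document per line.
const body = new ReadableStream<Uint8Array>({
  start(controller) {
    const encode = (s: string) => new TextEncoder().encode(s);
    controller.enqueue(encode('{"done":false,"response":"Hel"}\n'));
    controller.enqueue(encode('{"done":true,"response":"lo"}\n'));
    controller.close();
  },
});

const parsed = body
  .pipeThrough(new TextDecoderStream())
  .pipeThrough(
    new TransformStream<string, unknown>({
      transform(chunk, controller) {
        // One JSON document per non-empty line.
        for (const line of chunk.split('\n')) {
          const trimmed = line.trim();
          if (trimmed) controller.enqueue(chunkSchema.parse(JSON.parse(trimmed)));
        }
      },
    }),
  );

// Drain the stream and print each parsed chunk.
const reader = parsed.getReader();
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(value);
}
```

Like the bundled handler, this sketch splits each chunk independently, which works when every JSON document arrives whole within one chunk; a fully general NDJSON parser would additionally buffer a trailing partial line across chunk boundaries.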
```diff
@@ -304,7 +330,7 @@ var OllamaCompletionLanguageModel = class {
   headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
   body,
   failedResponseHandler: ollamaFailedResponseHandler,
-  successfulResponseHandler:
+  successfulResponseHandler: createJsonStreamResponseHandler(
     baseOllamaResponseSchema
   ),
   abortSignal: options.abortSignal,
```
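The call-site change above relies on the handler contract visible in the previous hunk: the handler receives the raw `Response` and resolves to a stream of parsed values plus the response headers. A type-level sketch inferred from the diff; these names are not the package's exported types.

```typescript
// Shape inferred from createJsonStreamResponseHandler above; illustrative only.
type JsonStreamResponseHandler<T> = (args: { response: Response }) => Promise<{
  value: ReadableStream<T>; // one parsed result per NDJSON line
  responseHeaders: Record<string, string>;
}>;
```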
```diff
@@ -1236,6 +1262,32 @@ var OllamaStreamProcessor = class {
 };
 
 // src/responses/ollama-responses-language-model.ts
+function createJsonStreamResponseHandler2(schema) {
+  return async ({ response }) => {
+    if (!response.body) {
+      throw new Error("Response body is null");
+    }
+    const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new TransformStream({
+      transform(chunk, controller) {
+        const lines = chunk.split("\n");
+        for (const line of lines) {
+          if (line.trim()) {
+            const result = (0, import_provider_utils7.safeParseJSON)({ text: line.trim(), schema });
+            controller.enqueue(result);
+          }
+        }
+      }
+    }));
+    const responseHeaders = {};
+    response.headers.forEach((value, key) => {
+      responseHeaders[key] = value;
+    });
+    return {
+      value: stream,
+      responseHeaders
+    };
+  };
+}
 var OllamaResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
```
```diff
@@ -1293,7 +1345,7 @@ var OllamaResponsesLanguageModel = class {
   headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
   body: { ...body, stream: true },
   failedResponseHandler: ollamaFailedResponseHandler,
-  successfulResponseHandler: (
+  successfulResponseHandler: createJsonStreamResponseHandler2(baseOllamaResponseSchema2),
   abortSignal: options.abortSignal,
   fetch: this.config.fetch
 });
```