@kernl-sdk/ai 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +4 -0
- package/CHANGELOG.md +10 -0
- package/LICENSE +201 -0
- package/dist/__tests__/integration.test.d.ts +2 -0
- package/dist/__tests__/integration.test.d.ts.map +1 -0
- package/dist/__tests__/integration.test.js +388 -0
- package/dist/convert/__tests__/message.test.d.ts +2 -0
- package/dist/convert/__tests__/message.test.d.ts.map +1 -0
- package/dist/convert/__tests__/message.test.js +300 -0
- package/dist/convert/__tests__/response.test.d.ts +2 -0
- package/dist/convert/__tests__/response.test.d.ts.map +1 -0
- package/dist/convert/__tests__/response.test.js +49 -0
- package/dist/convert/__tests__/settings.test.d.ts +2 -0
- package/dist/convert/__tests__/settings.test.d.ts.map +1 -0
- package/dist/convert/__tests__/settings.test.js +144 -0
- package/dist/convert/__tests__/stream.test.d.ts +2 -0
- package/dist/convert/__tests__/stream.test.d.ts.map +1 -0
- package/dist/convert/__tests__/stream.test.js +389 -0
- package/dist/convert/__tests__/tools.test.d.ts +2 -0
- package/dist/convert/__tests__/tools.test.d.ts.map +1 -0
- package/dist/convert/__tests__/tools.test.js +152 -0
- package/dist/convert/message.d.ts +4 -0
- package/dist/convert/message.d.ts.map +1 -0
- package/dist/convert/message.js +122 -0
- package/dist/convert/messages.d.ts +4 -0
- package/dist/convert/messages.d.ts.map +1 -0
- package/dist/convert/messages.js +130 -0
- package/dist/convert/response.d.ts +15 -0
- package/dist/convert/response.d.ts.map +1 -0
- package/dist/convert/response.js +105 -0
- package/dist/convert/settings.d.ts +16 -0
- package/dist/convert/settings.d.ts.map +1 -0
- package/dist/convert/settings.js +36 -0
- package/dist/convert/stream.d.ts +11 -0
- package/dist/convert/stream.d.ts.map +1 -0
- package/dist/convert/stream.js +154 -0
- package/dist/convert/tools.d.ts +5 -0
- package/dist/convert/tools.d.ts.map +1 -0
- package/dist/convert/tools.js +42 -0
- package/dist/error.d.ts +8 -0
- package/dist/error.d.ts.map +1 -0
- package/dist/error.js +15 -0
- package/dist/index.d.ts +20 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +20 -0
- package/dist/language-model.d.ts +21 -0
- package/dist/language-model.d.ts.map +1 -0
- package/dist/language-model.js +60 -0
- package/dist/providers/anthropic.d.ts +14 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +17 -0
- package/dist/providers/google.d.ts +14 -0
- package/dist/providers/google.d.ts.map +1 -0
- package/dist/providers/google.js +17 -0
- package/dist/providers/openai.d.ts +14 -0
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/providers/openai.js +17 -0
- package/dist/types.d.ts +1 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +1 -0
- package/package.json +79 -0
- package/src/__tests__/integration.test.ts +447 -0
- package/src/convert/__tests__/message.test.ts +336 -0
- package/src/convert/__tests__/response.test.ts +63 -0
- package/src/convert/__tests__/settings.test.ts +188 -0
- package/src/convert/__tests__/stream.test.ts +460 -0
- package/src/convert/__tests__/tools.test.ts +179 -0
- package/src/convert/message.ts +150 -0
- package/src/convert/response.ts +144 -0
- package/src/convert/settings.ts +62 -0
- package/src/convert/stream.ts +181 -0
- package/src/convert/tools.ts +59 -0
- package/src/error.ts +16 -0
- package/src/index.ts +22 -0
- package/src/language-model.ts +77 -0
- package/src/providers/anthropic.ts +18 -0
- package/src/providers/google.ts +18 -0
- package/src/providers/openai.ts +18 -0
- package/src/types.ts +0 -0
- package/tsconfig.json +13 -0
- package/vitest.config.ts +14 -0
package/src/__tests__/integration.test.ts
@@ -0,0 +1,447 @@
+import { describe, it, expect, beforeAll } from "vitest";
+import { openai } from "@ai-sdk/openai";
+
+import { AISDKLanguageModel } from "../language-model";
+
+/**
+ * Integration tests for AISDKLanguageModel with real AI SDK providers.
+ *
+ * These tests require an OPENAI_API_KEY environment variable to be set.
+ * They will be skipped if the API key is not available.
+ *
+ * Run with: OPENAI_API_KEY=your-key pnpm test:run
+ */
+
+const SKIP_INTEGRATION_TESTS = !process.env.OPENAI_API_KEY;
+
+describe.skipIf(SKIP_INTEGRATION_TESTS)(
+  "AISDKLanguageModel integration",
+  () => {
+    let gpt4omini: AISDKLanguageModel;
+
+    beforeAll(() => {
+      gpt4omini = new AISDKLanguageModel(openai("gpt-4o-mini")); // gpt-4o-mini for fast, cheap testing
+    });
+
+    describe("generate", () => {
+      it("should generate a simple text response", async () => {
+        const response = await gpt4omini.generate({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [
+                { kind: "text", text: "Say 'Hello, World!' and nothing else." },
+              ],
+            },
+          ],
+          settings: {
+            maxTokens: 50,
+            temperature: 0,
+          },
+        });
+
+        expect(response.content).toBeDefined();
+        expect(response.content.length).toBeGreaterThan(0);
+        expect(response.usage).toBeDefined();
+        expect(response.usage.totalTokens).toBeGreaterThan(0);
+        expect(response.usage.inputTokens).toBeGreaterThan(0);
+        expect(response.usage.outputTokens).toBeGreaterThan(0);
+
+        // Should have at least one message
+        const messages = response.content.filter(
+          (item) => item.kind === "message",
+        );
+        expect(messages.length).toBeGreaterThan(0);
+      });
+
+      it("should handle system messages", async () => {
+        const response = await gpt4omini.generate({
+          input: [
+            {
+              kind: "message",
+              role: "system",
+              id: "msg-sys",
+              content: [
+                {
+                  kind: "text",
+                  text: "You are a helpful assistant that always responds with 'Acknowledged.'",
+                },
+              ],
+            },
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [{ kind: "text", text: "Hello" }],
+            },
+          ],
+          settings: {
+            maxTokens: 50,
+            temperature: 0,
+          },
+        });
+
+        expect(response.content).toBeDefined();
+        expect(response.usage.totalTokens).toBeGreaterThan(0);
+      });
+
+      it("should handle multi-turn conversations", async () => {
+        const response = await gpt4omini.generate({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [{ kind: "text", text: "My name is Alice." }],
+            },
+            {
+              kind: "message",
+              role: "assistant",
+              id: "msg-2",
+              content: [{ kind: "text", text: "Nice to meet you, Alice!" }],
+            },
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-3",
+              content: [{ kind: "text", text: "What is my name?" }],
+            },
+          ],
+          settings: {
+            maxTokens: 50,
+            temperature: 0,
+          },
+        });
+
+        expect(response.content).toBeDefined();
+        expect(response.usage.totalTokens).toBeGreaterThan(0);
+
+        // Check that it remembers the name (should mention Alice)
+        const assistantMessages = response.content.filter(
+          (item) => item.kind === "message" && item.role === "assistant",
+        );
+        expect(assistantMessages.length).toBeGreaterThan(0);
+      });
+
+      it("should respect temperature setting", async () => {
+        const response = await gpt4omini.generate({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [{ kind: "text", text: "Say hello" }],
+            },
+          ],
+          settings: {
+            maxTokens: 20,
+            temperature: 0, // Deterministic
+          },
+        });
+
+        expect(response.content).toBeDefined();
+        expect(response.usage.totalTokens).toBeGreaterThan(0);
+      });
+
+      it("should respect maxTokens setting", async () => {
+        const response = await gpt4omini.generate({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [{ kind: "text", text: "Count from 1 to 100" }],
+            },
+          ],
+          settings: {
+            maxTokens: 20, // Minimum is 16 for OpenAI
+            temperature: 0,
+          },
+        });
+
+        expect(response.content).toBeDefined();
+        expect(response.usage.outputTokens).toBeDefined();
+        expect(response.usage.outputTokens).toBeLessThanOrEqual(20);
+      });
+    });
+
+    describe("stream", () => {
+      it("should stream text responses", async () => {
+        const events = [];
+
+        for await (const event of gpt4omini.stream({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [{ kind: "text", text: "Count to 5" }],
+            },
+          ],
+          settings: {
+            maxTokens: 50,
+            temperature: 0,
+          },
+        })) {
+          events.push(event);
+        }
+
+        expect(events.length).toBeGreaterThan(0);
+
+        // Should have at least one finish event
+        const finishEvents = events.filter((e) => e.kind === "finish");
+        expect(finishEvents.length).toBe(1);
+
+        // Should have usage information
+        const finishEvent = finishEvents[0] as any;
+        expect(finishEvent.usage).toBeDefined();
+        expect(finishEvent.usage.totalTokens).toBeGreaterThan(0);
+      });
+
+      it("should stream text deltas", async () => {
+        const events = [];
+
+        for await (const event of gpt4omini.stream({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [{ kind: "text", text: "Say 'Hello World'" }],
+            },
+          ],
+          settings: {
+            maxTokens: 20,
+            temperature: 0,
+          },
+        })) {
+          events.push(event);
+        }
+
+        // Should have text-delta events
+        const textDeltas = events.filter((e) => e.kind === "text-delta");
+        expect(textDeltas.length).toBeGreaterThan(0);
+
+        // Each text-delta should have text
+        for (const delta of textDeltas) {
+          expect(delta.text).toBeDefined();
+          expect(typeof delta.text).toBe("string");
+        }
+      });
+
+      it("should handle limited token streams", async () => {
+        const events = [];
+
+        for await (const event of gpt4omini.stream({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [{ kind: "text", text: "Hi" }],
+            },
+          ],
+          settings: {
+            maxTokens: 16, // Minimum for OpenAI
+            temperature: 0,
+          },
+        })) {
+          events.push(event);
+        }
+
+        expect(events.length).toBeGreaterThan(0);
+
+        // Should still have a finish event
+        const finishEvents = events.filter((e) => e.kind === "finish");
+        expect(finishEvents.length).toBe(1);
+      });
+    });
+
+    describe("tools", () => {
+      it("should call tools when requested", async () => {
+        const response = await gpt4omini.generate({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [{ kind: "text", text: "What is 25 + 17?" }],
+            },
+          ],
+          tools: [
+            {
+              kind: "function",
+              name: "calculate",
+              description: "Perform a mathematical calculation",
+              parameters: {
+                type: "object",
+                properties: {
+                  expression: {
+                    type: "string",
+                    description: "The mathematical expression to evaluate",
+                  },
+                },
+                required: ["expression"],
+              },
+            },
+          ],
+          settings: {
+            maxTokens: 200,
+            temperature: 0,
+          },
+        });
+
+        expect(response.content).toBeDefined();
+
+        // Should have a tool call
+        const toolCalls = response.content.filter(
+          (item) => item.kind === "tool-call",
+        );
+        expect(toolCalls.length).toBeGreaterThan(0);
+
+        // Tool call should have proper structure
+        const toolCall = toolCalls[0] as any;
+        expect(toolCall.callId).toBeDefined();
+        expect(toolCall.toolId).toBe("calculate");
+        expect(toolCall.arguments).toBeDefined();
+      });
+
+      it("should handle tool choice setting", async () => {
+        const response = await gpt4omini.generate({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [
+                {
+                  kind: "text",
+                  text: "Use the get_weather tool for San Francisco",
+                },
+              ],
+            },
+          ],
+          tools: [
+            {
+              kind: "function",
+              name: "get_weather",
+              description: "Get weather for a city",
+              parameters: {
+                type: "object",
+                properties: {
+                  city: {
+                    type: "string",
+                    description: "The city name",
+                  },
+                },
+                required: ["city"],
+              },
+            },
+          ],
+          settings: {
+            maxTokens: 200,
+            temperature: 0,
+            toolChoice: { kind: "required" }, // Force tool use
+          },
+        });
+
+        expect(response.content).toBeDefined();
+
+        // Should have a tool call since it's required
+        const toolCalls = response.content.filter(
+          (item) => item.kind === "tool-call",
+        );
+        expect(toolCalls.length).toBeGreaterThan(0);
+      });
+
+      it("should handle multiple tool calls", async () => {
+        const response = await gpt4omini.generate({
+          input: [
+            {
+              kind: "message",
+              role: "user",
+              id: "msg-1",
+              content: [
+                {
+                  kind: "text",
+                  text: "Get the weather for both San Francisco and New York",
+                },
+              ],
+            },
+          ],
+          tools: [
+            {
+              kind: "function",
+              name: "get_weather",
+              description: "Get weather for a city",
+              parameters: {
+                type: "object",
+                properties: {
+                  city: {
+                    type: "string",
+                    description: "The city name",
+                  },
+                },
+                required: ["city"],
+              },
+            },
+          ],
+          settings: {
+            maxTokens: 300,
+            temperature: 0,
+          },
+        });
+
+        expect(response.content).toBeDefined();
+
+        // Should potentially have multiple tool calls
+        const toolCalls = response.content.filter(
+          (item) => item.kind === "tool-call",
+        );
+        expect(toolCalls.length).toBeGreaterThan(0);
+      });
+    });
+
+    describe("validation", () => {
+      it("should throw error for invalid maxTokens", async () => {
+        // AI SDK properly validates and throws errors for invalid values
+        await expect(
+          gpt4omini.generate({
+            input: [
+              {
+                kind: "message",
+                role: "user",
+                id: "msg-1",
+                content: [{ kind: "text", text: "Hello" }],
+              },
+            ],
+            settings: {
+              maxTokens: -1, // Invalid
+            },
+          }),
+        ).rejects.toThrow(/max_output_tokens/);
+      });
+
+      it("should throw error for below minimum maxTokens", async () => {
+        // OpenAI requires minimum 16 tokens
+        await expect(
+          gpt4omini.generate({
+            input: [
+              {
+                kind: "message",
+                role: "user",
+                id: "msg-1",
+                content: [{ kind: "text", text: "Hello" }],
+              },
+            ],
+            settings: {
+              maxTokens: 10, // Below minimum
+            },
+          }),
+        ).rejects.toThrow(/max_output_tokens/);
+      });
+    });
+  },
+);
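
Taken together, the tests above exercise a small surface: wrap an AI SDK model in `AISDKLanguageModel`, then call `generate` or iterate `stream`. The sketch below is a minimal illustration of that surface and is not taken from the package itself; it assumes the package's public entry point re-exports `AISDKLanguageModel` (the tests import it from `../language-model`, so the `@kernl-sdk/ai` import path is an assumption) and that the request, response, and stream-event shapes are exactly the ones the tests assert on.

```ts
// Minimal usage sketch based on the integration tests above. Assumptions:
// - the index re-exports AISDKLanguageModel (hypothetical import path);
// - request/response/stream-event shapes match what the tests assert.
import { openai } from "@ai-sdk/openai";
import { AISDKLanguageModel } from "@kernl-sdk/ai"; // assumed export; tests use "../language-model"

const model = new AISDKLanguageModel(openai("gpt-4o-mini"));

// One-shot generation, mirroring the "generate" tests.
const response = await model.generate({
  input: [
    {
      kind: "message",
      role: "user",
      id: "msg-1",
      content: [{ kind: "text", text: "Say 'Hello, World!' and nothing else." }],
    },
  ],
  settings: { maxTokens: 50, temperature: 0 },
});
console.log(response.usage.totalTokens, response.content);

// Streaming, mirroring the "stream" tests: events carry a `kind` discriminator.
// The `as any` casts mirror the tests, which also cast stream events.
for await (const event of model.stream({
  input: [
    {
      kind: "message",
      role: "user",
      id: "msg-2",
      content: [{ kind: "text", text: "Count to 5" }],
    },
  ],
  settings: { maxTokens: 50, temperature: 0 },
})) {
  if (event.kind === "text-delta") process.stdout.write((event as any).text);
  if (event.kind === "finish") console.log("\ntotal tokens:", (event as any).usage.totalTokens);
}
```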