@kernl-sdk/ai 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/.turbo/turbo-build.log +4 -0
  2. package/CHANGELOG.md +10 -0
  3. package/LICENSE +201 -0
  4. package/dist/__tests__/integration.test.d.ts +2 -0
  5. package/dist/__tests__/integration.test.d.ts.map +1 -0
  6. package/dist/__tests__/integration.test.js +388 -0
  7. package/dist/convert/__tests__/message.test.d.ts +2 -0
  8. package/dist/convert/__tests__/message.test.d.ts.map +1 -0
  9. package/dist/convert/__tests__/message.test.js +300 -0
  10. package/dist/convert/__tests__/response.test.d.ts +2 -0
  11. package/dist/convert/__tests__/response.test.d.ts.map +1 -0
  12. package/dist/convert/__tests__/response.test.js +49 -0
  13. package/dist/convert/__tests__/settings.test.d.ts +2 -0
  14. package/dist/convert/__tests__/settings.test.d.ts.map +1 -0
  15. package/dist/convert/__tests__/settings.test.js +144 -0
  16. package/dist/convert/__tests__/stream.test.d.ts +2 -0
  17. package/dist/convert/__tests__/stream.test.d.ts.map +1 -0
  18. package/dist/convert/__tests__/stream.test.js +389 -0
  19. package/dist/convert/__tests__/tools.test.d.ts +2 -0
  20. package/dist/convert/__tests__/tools.test.d.ts.map +1 -0
  21. package/dist/convert/__tests__/tools.test.js +152 -0
  22. package/dist/convert/message.d.ts +4 -0
  23. package/dist/convert/message.d.ts.map +1 -0
  24. package/dist/convert/message.js +122 -0
  25. package/dist/convert/messages.d.ts +4 -0
  26. package/dist/convert/messages.d.ts.map +1 -0
  27. package/dist/convert/messages.js +130 -0
  28. package/dist/convert/response.d.ts +15 -0
  29. package/dist/convert/response.d.ts.map +1 -0
  30. package/dist/convert/response.js +105 -0
  31. package/dist/convert/settings.d.ts +16 -0
  32. package/dist/convert/settings.d.ts.map +1 -0
  33. package/dist/convert/settings.js +36 -0
  34. package/dist/convert/stream.d.ts +11 -0
  35. package/dist/convert/stream.d.ts.map +1 -0
  36. package/dist/convert/stream.js +154 -0
  37. package/dist/convert/tools.d.ts +5 -0
  38. package/dist/convert/tools.d.ts.map +1 -0
  39. package/dist/convert/tools.js +42 -0
  40. package/dist/error.d.ts +8 -0
  41. package/dist/error.d.ts.map +1 -0
  42. package/dist/error.js +15 -0
  43. package/dist/index.d.ts +20 -0
  44. package/dist/index.d.ts.map +1 -0
  45. package/dist/index.js +20 -0
  46. package/dist/language-model.d.ts +21 -0
  47. package/dist/language-model.d.ts.map +1 -0
  48. package/dist/language-model.js +60 -0
  49. package/dist/providers/anthropic.d.ts +14 -0
  50. package/dist/providers/anthropic.d.ts.map +1 -0
  51. package/dist/providers/anthropic.js +17 -0
  52. package/dist/providers/google.d.ts +14 -0
  53. package/dist/providers/google.d.ts.map +1 -0
  54. package/dist/providers/google.js +17 -0
  55. package/dist/providers/openai.d.ts +14 -0
  56. package/dist/providers/openai.d.ts.map +1 -0
  57. package/dist/providers/openai.js +17 -0
  58. package/dist/types.d.ts +1 -0
  59. package/dist/types.d.ts.map +1 -0
  60. package/dist/types.js +1 -0
  61. package/package.json +79 -0
  62. package/src/__tests__/integration.test.ts +447 -0
  63. package/src/convert/__tests__/message.test.ts +336 -0
  64. package/src/convert/__tests__/response.test.ts +63 -0
  65. package/src/convert/__tests__/settings.test.ts +188 -0
  66. package/src/convert/__tests__/stream.test.ts +460 -0
  67. package/src/convert/__tests__/tools.test.ts +179 -0
  68. package/src/convert/message.ts +150 -0
  69. package/src/convert/response.ts +144 -0
  70. package/src/convert/settings.ts +62 -0
  71. package/src/convert/stream.ts +181 -0
  72. package/src/convert/tools.ts +59 -0
  73. package/src/error.ts +16 -0
  74. package/src/index.ts +22 -0
  75. package/src/language-model.ts +77 -0
  76. package/src/providers/anthropic.ts +18 -0
  77. package/src/providers/google.ts +18 -0
  78. package/src/providers/openai.ts +18 -0
  79. package/src/types.ts +0 -0
  80. package/tsconfig.json +13 -0
  81. package/vitest.config.ts +14 -0
@@ -0,0 +1,447 @@
1
+ import { describe, it, expect, beforeAll } from "vitest";
2
+ import { openai } from "@ai-sdk/openai";
3
+
4
+ import { AISDKLanguageModel } from "../language-model";
5
+
6
+ /**
7
+ * Integration tests for AISDKLanguageModel with real AI SDK providers.
8
+ *
9
+ * These tests require an OPENAI_API_KEY environment variable to be set.
10
+ * They will be skipped if the API key is not available.
11
+ *
12
+ * Run with: OPENAI_API_KEY=your-key pnpm test:run
13
+ */
14
+
15
+ const SKIP_INTEGRATION_TESTS = !process.env.OPENAI_API_KEY;
16
+
17
+ describe.skipIf(SKIP_INTEGRATION_TESTS)(
18
+ "AISDKLanguageModel integration",
19
+ () => {
20
+ let gpt4omini: AISDKLanguageModel;
21
+
22
+ beforeAll(() => {
23
+ gpt4omini = new AISDKLanguageModel(openai("gpt-4o-mini")); // gpt-4o-mini for fast, cheap testing
24
+ });
25
+
26
+ describe("generate", () => {
27
+ it("should generate a simple text response", async () => {
28
+ const response = await gpt4omini.generate({
29
+ input: [
30
+ {
31
+ kind: "message",
32
+ role: "user",
33
+ id: "msg-1",
34
+ content: [
35
+ { kind: "text", text: "Say 'Hello, World!' and nothing else." },
36
+ ],
37
+ },
38
+ ],
39
+ settings: {
40
+ maxTokens: 50,
41
+ temperature: 0,
42
+ },
43
+ });
44
+
45
+ expect(response.content).toBeDefined();
46
+ expect(response.content.length).toBeGreaterThan(0);
47
+ expect(response.usage).toBeDefined();
48
+ expect(response.usage.totalTokens).toBeGreaterThan(0);
49
+ expect(response.usage.inputTokens).toBeGreaterThan(0);
50
+ expect(response.usage.outputTokens).toBeGreaterThan(0);
51
+
52
+ // Should have at least one message
53
+ const messages = response.content.filter(
54
+ (item) => item.kind === "message",
55
+ );
56
+ expect(messages.length).toBeGreaterThan(0);
57
+ });
58
+
59
+ it("should handle system messages", async () => {
60
+ const response = await gpt4omini.generate({
61
+ input: [
62
+ {
63
+ kind: "message",
64
+ role: "system",
65
+ id: "msg-sys",
66
+ content: [
67
+ {
68
+ kind: "text",
69
+ text: "You are a helpful assistant that always responds with 'Acknowledged.'",
70
+ },
71
+ ],
72
+ },
73
+ {
74
+ kind: "message",
75
+ role: "user",
76
+ id: "msg-1",
77
+ content: [{ kind: "text", text: "Hello" }],
78
+ },
79
+ ],
80
+ settings: {
81
+ maxTokens: 50,
82
+ temperature: 0,
83
+ },
84
+ });
85
+
86
+ expect(response.content).toBeDefined();
87
+ expect(response.usage.totalTokens).toBeGreaterThan(0);
88
+ });
89
+
90
+ it("should handle multi-turn conversations", async () => {
91
+ const response = await gpt4omini.generate({
92
+ input: [
93
+ {
94
+ kind: "message",
95
+ role: "user",
96
+ id: "msg-1",
97
+ content: [{ kind: "text", text: "My name is Alice." }],
98
+ },
99
+ {
100
+ kind: "message",
101
+ role: "assistant",
102
+ id: "msg-2",
103
+ content: [{ kind: "text", text: "Nice to meet you, Alice!" }],
104
+ },
105
+ {
106
+ kind: "message",
107
+ role: "user",
108
+ id: "msg-3",
109
+ content: [{ kind: "text", text: "What is my name?" }],
110
+ },
111
+ ],
112
+ settings: {
113
+ maxTokens: 50,
114
+ temperature: 0,
115
+ },
116
+ });
117
+
118
+ expect(response.content).toBeDefined();
119
+ expect(response.usage.totalTokens).toBeGreaterThan(0);
120
+
121
+ // Check that it remembers the name (should mention Alice)
122
+ const assistantMessages = response.content.filter(
123
+ (item) => item.kind === "message" && item.role === "assistant",
124
+ );
125
+ expect(assistantMessages.length).toBeGreaterThan(0);
126
+ });
127
+
128
+ it("should respect temperature setting", async () => {
129
+ const response = await gpt4omini.generate({
130
+ input: [
131
+ {
132
+ kind: "message",
133
+ role: "user",
134
+ id: "msg-1",
135
+ content: [{ kind: "text", text: "Say hello" }],
136
+ },
137
+ ],
138
+ settings: {
139
+ maxTokens: 20,
140
+ temperature: 0, // Deterministic
141
+ },
142
+ });
143
+
144
+ expect(response.content).toBeDefined();
145
+ expect(response.usage.totalTokens).toBeGreaterThan(0);
146
+ });
147
+
148
+ it("should respect maxTokens setting", async () => {
149
+ const response = await gpt4omini.generate({
150
+ input: [
151
+ {
152
+ kind: "message",
153
+ role: "user",
154
+ id: "msg-1",
155
+ content: [{ kind: "text", text: "Count from 1 to 100" }],
156
+ },
157
+ ],
158
+ settings: {
159
+ maxTokens: 20, // Minimum is 16 for OpenAI
160
+ temperature: 0,
161
+ },
162
+ });
163
+
164
+ expect(response.content).toBeDefined();
165
+ expect(response.usage.outputTokens).toBeDefined();
166
+ expect(response.usage.outputTokens).toBeLessThanOrEqual(20);
167
+ });
168
+ });
169
+
170
+ describe("stream", () => {
171
+ it("should stream text responses", async () => {
172
+ const events = [];
173
+
174
+ for await (const event of gpt4omini.stream({
175
+ input: [
176
+ {
177
+ kind: "message",
178
+ role: "user",
179
+ id: "msg-1",
180
+ content: [{ kind: "text", text: "Count to 5" }],
181
+ },
182
+ ],
183
+ settings: {
184
+ maxTokens: 50,
185
+ temperature: 0,
186
+ },
187
+ })) {
188
+ events.push(event);
189
+ }
190
+
191
+ expect(events.length).toBeGreaterThan(0);
192
+
193
+ // Should have at least one finish event
194
+ const finishEvents = events.filter((e) => e.kind === "finish");
195
+ expect(finishEvents.length).toBe(1);
196
+
197
+ // Should have usage information
198
+ const finishEvent = finishEvents[0] as any;
199
+ expect(finishEvent.usage).toBeDefined();
200
+ expect(finishEvent.usage.totalTokens).toBeGreaterThan(0);
201
+ });
202
+
203
+ it("should stream text deltas", async () => {
204
+ const events = [];
205
+
206
+ for await (const event of gpt4omini.stream({
207
+ input: [
208
+ {
209
+ kind: "message",
210
+ role: "user",
211
+ id: "msg-1",
212
+ content: [{ kind: "text", text: "Say 'Hello World'" }],
213
+ },
214
+ ],
215
+ settings: {
216
+ maxTokens: 20,
217
+ temperature: 0,
218
+ },
219
+ })) {
220
+ events.push(event);
221
+ }
222
+
223
+ // Should have text-delta events
224
+ const textDeltas = events.filter((e) => e.kind === "text-delta");
225
+ expect(textDeltas.length).toBeGreaterThan(0);
226
+
227
+ // Each text-delta should have text
228
+ for (const delta of textDeltas) {
229
+ expect(delta.text).toBeDefined();
230
+ expect(typeof delta.text).toBe("string");
231
+ }
232
+ });
233
+
234
+ it("should handle limited token streams", async () => {
235
+ const events = [];
236
+
237
+ for await (const event of gpt4omini.stream({
238
+ input: [
239
+ {
240
+ kind: "message",
241
+ role: "user",
242
+ id: "msg-1",
243
+ content: [{ kind: "text", text: "Hi" }],
244
+ },
245
+ ],
246
+ settings: {
247
+ maxTokens: 16, // Minimum for OpenAI
248
+ temperature: 0,
249
+ },
250
+ })) {
251
+ events.push(event);
252
+ }
253
+
254
+ expect(events.length).toBeGreaterThan(0);
255
+
256
+ // Should still have a finish event
257
+ const finishEvents = events.filter((e) => e.kind === "finish");
258
+ expect(finishEvents.length).toBe(1);
259
+ });
260
+ });
261
+
262
+ describe("tools", () => {
263
+ it("should call tools when requested", async () => {
264
+ const response = await gpt4omini.generate({
265
+ input: [
266
+ {
267
+ kind: "message",
268
+ role: "user",
269
+ id: "msg-1",
270
+ content: [{ kind: "text", text: "What is 25 + 17?" }],
271
+ },
272
+ ],
273
+ tools: [
274
+ {
275
+ kind: "function",
276
+ name: "calculate",
277
+ description: "Perform a mathematical calculation",
278
+ parameters: {
279
+ type: "object",
280
+ properties: {
281
+ expression: {
282
+ type: "string",
283
+ description: "The mathematical expression to evaluate",
284
+ },
285
+ },
286
+ required: ["expression"],
287
+ },
288
+ },
289
+ ],
290
+ settings: {
291
+ maxTokens: 200,
292
+ temperature: 0,
293
+ },
294
+ });
295
+
296
+ expect(response.content).toBeDefined();
297
+
298
+ // Should have a tool call
299
+ const toolCalls = response.content.filter(
300
+ (item) => item.kind === "tool-call",
301
+ );
302
+ expect(toolCalls.length).toBeGreaterThan(0);
303
+
304
+ // Tool call should have proper structure
305
+ const toolCall = toolCalls[0] as any;
306
+ expect(toolCall.callId).toBeDefined();
307
+ expect(toolCall.toolId).toBe("calculate");
308
+ expect(toolCall.arguments).toBeDefined();
309
+ });
310
+
311
+ it("should handle tool choice setting", async () => {
312
+ const response = await gpt4omini.generate({
313
+ input: [
314
+ {
315
+ kind: "message",
316
+ role: "user",
317
+ id: "msg-1",
318
+ content: [
319
+ {
320
+ kind: "text",
321
+ text: "Use the get_weather tool for San Francisco",
322
+ },
323
+ ],
324
+ },
325
+ ],
326
+ tools: [
327
+ {
328
+ kind: "function",
329
+ name: "get_weather",
330
+ description: "Get weather for a city",
331
+ parameters: {
332
+ type: "object",
333
+ properties: {
334
+ city: {
335
+ type: "string",
336
+ description: "The city name",
337
+ },
338
+ },
339
+ required: ["city"],
340
+ },
341
+ },
342
+ ],
343
+ settings: {
344
+ maxTokens: 200,
345
+ temperature: 0,
346
+ toolChoice: { kind: "required" }, // Force tool use
347
+ },
348
+ });
349
+
350
+ expect(response.content).toBeDefined();
351
+
352
+ // Should have a tool call since it's required
353
+ const toolCalls = response.content.filter(
354
+ (item) => item.kind === "tool-call",
355
+ );
356
+ expect(toolCalls.length).toBeGreaterThan(0);
357
+ });
358
+
359
+ it("should handle multiple tool calls", async () => {
360
+ const response = await gpt4omini.generate({
361
+ input: [
362
+ {
363
+ kind: "message",
364
+ role: "user",
365
+ id: "msg-1",
366
+ content: [
367
+ {
368
+ kind: "text",
369
+ text: "Get the weather for both San Francisco and New York",
370
+ },
371
+ ],
372
+ },
373
+ ],
374
+ tools: [
375
+ {
376
+ kind: "function",
377
+ name: "get_weather",
378
+ description: "Get weather for a city",
379
+ parameters: {
380
+ type: "object",
381
+ properties: {
382
+ city: {
383
+ type: "string",
384
+ description: "The city name",
385
+ },
386
+ },
387
+ required: ["city"],
388
+ },
389
+ },
390
+ ],
391
+ settings: {
392
+ maxTokens: 300,
393
+ temperature: 0,
394
+ },
395
+ });
396
+
397
+ expect(response.content).toBeDefined();
398
+
399
+ // Should potentially have multiple tool calls
400
+ const toolCalls = response.content.filter(
401
+ (item) => item.kind === "tool-call",
402
+ );
403
+ expect(toolCalls.length).toBeGreaterThan(0);
404
+ });
405
+ });
406
+
407
+ describe("validation", () => {
408
+ it("should throw error for invalid maxTokens", async () => {
409
+ // AI SDK properly validates and throws errors for invalid values
410
+ await expect(
411
+ gpt4omini.generate({
412
+ input: [
413
+ {
414
+ kind: "message",
415
+ role: "user",
416
+ id: "msg-1",
417
+ content: [{ kind: "text", text: "Hello" }],
418
+ },
419
+ ],
420
+ settings: {
421
+ maxTokens: -1, // Invalid
422
+ },
423
+ }),
424
+ ).rejects.toThrow(/max_output_tokens/);
425
+ });
426
+
427
+ it("should throw error for below minimum maxTokens", async () => {
428
+ // OpenAI requires minimum 16 tokens
429
+ await expect(
430
+ gpt4omini.generate({
431
+ input: [
432
+ {
433
+ kind: "message",
434
+ role: "user",
435
+ id: "msg-1",
436
+ content: [{ kind: "text", text: "Hello" }],
437
+ },
438
+ ],
439
+ settings: {
440
+ maxTokens: 10, // Below minimum
441
+ },
442
+ }),
443
+ ).rejects.toThrow(/max_output_tokens/);
444
+ });
445
+ });
446
+ },
447
+ );