@ai-sdk/baseten 0.0.0-1c33ba03-20260114162300

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,688 @@
1
+ # @ai-sdk/baseten
2
+
3
+ ## 0.0.0-1c33ba03-20260114162300
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [261c011]
8
+ - @ai-sdk/provider-utils@0.0.0-1c33ba03-20260114162300
9
+ - @ai-sdk/openai-compatible@0.0.0-1c33ba03-20260114162300
10
+
11
+ ## 1.0.11
12
+
13
+ ### Patch Changes
14
+
15
+ - Updated dependencies [1b11dcb]
16
+ - @ai-sdk/provider-utils@4.0.6
17
+ - @ai-sdk/provider@3.0.3
18
+ - @ai-sdk/openai-compatible@2.0.10
19
+
20
+ ## 1.0.10
21
+
22
+ ### Patch Changes
23
+
24
+ - Updated dependencies [bc02a3c]
25
+ - @ai-sdk/openai-compatible@2.0.9
26
+
27
+ ## 1.0.9
28
+
29
+ ### Patch Changes
30
+
31
+ - Updated dependencies [78fcb18]
32
+ - @ai-sdk/openai-compatible@2.0.8
33
+
34
+ ## 1.0.8
35
+
36
+ ### Patch Changes
37
+
38
+ - Updated dependencies [cd7bb0e]
39
+ - @ai-sdk/openai-compatible@2.0.7
40
+
41
+ ## 1.0.7
42
+
43
+ ### Patch Changes
44
+
45
+ - Updated dependencies [34d1c8a]
46
+ - @ai-sdk/provider-utils@4.0.5
47
+ - @ai-sdk/openai-compatible@2.0.6
48
+
49
+ ## 1.0.6
50
+
51
+ ### Patch Changes
52
+
53
+ - Updated dependencies [d54c380]
54
+ - @ai-sdk/openai-compatible@2.0.5
55
+
56
+ ## 1.0.5
57
+
58
+ ### Patch Changes
59
+
60
+ - Updated dependencies [d937c8f]
61
+ - @ai-sdk/provider@3.0.2
62
+ - @ai-sdk/openai-compatible@2.0.4
63
+ - @ai-sdk/provider-utils@4.0.4
64
+
65
+ ## 1.0.4
66
+
67
+ ### Patch Changes
68
+
69
+ - Updated dependencies [0b429d4]
70
+ - @ai-sdk/provider-utils@4.0.3
71
+ - @ai-sdk/openai-compatible@2.0.3
72
+
73
+ ## 1.0.3
74
+
75
+ ### Patch Changes
76
+
77
+ - 863d34f: fix: trigger release to update `@latest`
78
+ - Updated dependencies [863d34f]
79
+ - @ai-sdk/openai-compatible@2.0.2
80
+ - @ai-sdk/provider@3.0.1
81
+ - @ai-sdk/provider-utils@4.0.2
82
+
83
+ ## 1.0.2
84
+
85
+ ### Patch Changes
86
+
87
+ - Updated dependencies [29264a3]
88
+ - @ai-sdk/provider-utils@4.0.1
89
+ - @ai-sdk/openai-compatible@2.0.1
90
+
91
+ ## 1.0.1
92
+
93
+ ### Patch Changes
94
+
95
+ - c0c8a0e: Add zai/glm-4.7 model support
96
+
97
+ ## 1.0.0
98
+
99
+ ### Major Changes
100
+
101
+ - dee8b05: ai SDK 6 beta
102
+ - 6cc9cd0: Added Baseten as a Provider for AI SDK
103
+
104
+ ### Patch Changes
105
+
106
+ - 0c3b58b: fix(provider): add specificationVersion to ProviderV3
107
+ - ab9af9c: add moonshotai/Kimi-K2-Thinking model ID for Baseten provider
108
+ - 8d9e8ad: chore(provider): remove generics from EmbeddingModelV3
109
+
110
+ Before
111
+
112
+ ```ts
113
+ model.textEmbeddingModel('my-model-id');
114
+ ```
115
+
116
+ After
117
+
118
+ ```ts
119
+ model.embeddingModel('my-model-id');
120
+ ```
121
+
122
+ - 95f65c2: chore: use import \* from zod/v4
123
+ - 0c4822d: feat: `EmbeddingModelV3`
124
+ - ed329cb: feat: `Provider-V3`
125
+ - 1cad0ab: feat: add provider version to user-agent header
126
+ - 916bc46: bumped performance client to 0.0.10
127
+ - 8dac895: feat: `LanguageModelV3`
128
+ - 366f50b: chore(provider): add deprecated textEmbeddingModel and textEmbedding aliases
129
+ - 4616b86: chore: update zod peer depenedency version
130
+ - Updated dependencies
131
+ - @ai-sdk/openai-compatible@2.0.0
132
+ - @ai-sdk/provider@3.0.0
133
+ - @ai-sdk/provider-utils@4.0.0
134
+
135
+ ## 1.0.0-beta.62
136
+
137
+ ### Patch Changes
138
+
139
+ - Updated dependencies [475189e]
140
+ - @ai-sdk/provider@3.0.0-beta.32
141
+ - @ai-sdk/openai-compatible@2.0.0-beta.60
142
+ - @ai-sdk/provider-utils@4.0.0-beta.59
143
+
144
+ ## 1.0.0-beta.61
145
+
146
+ ### Patch Changes
147
+
148
+ - Updated dependencies [2625a04]
149
+ - @ai-sdk/openai-compatible@2.0.0-beta.59
150
+ - @ai-sdk/provider@3.0.0-beta.31
151
+ - @ai-sdk/provider-utils@4.0.0-beta.58
152
+
153
+ ## 1.0.0-beta.60
154
+
155
+ ### Patch Changes
156
+
157
+ - Updated dependencies [cbf52cd]
158
+ - @ai-sdk/openai-compatible@2.0.0-beta.58
159
+ - @ai-sdk/provider@3.0.0-beta.30
160
+ - @ai-sdk/provider-utils@4.0.0-beta.57
161
+
162
+ ## 1.0.0-beta.59
163
+
164
+ ### Patch Changes
165
+
166
+ - Updated dependencies [9549c9e]
167
+ - @ai-sdk/provider@3.0.0-beta.29
168
+ - @ai-sdk/openai-compatible@2.0.0-beta.57
169
+ - @ai-sdk/provider-utils@4.0.0-beta.56
170
+
171
+ ## 1.0.0-beta.58
172
+
173
+ ### Patch Changes
174
+
175
+ - Updated dependencies [50b70d6]
176
+ - @ai-sdk/provider-utils@4.0.0-beta.55
177
+ - @ai-sdk/openai-compatible@2.0.0-beta.56
178
+
179
+ ## 1.0.0-beta.57
180
+
181
+ ### Patch Changes
182
+
183
+ - Updated dependencies [9061dc0]
184
+ - @ai-sdk/openai-compatible@2.0.0-beta.55
185
+ - @ai-sdk/provider-utils@4.0.0-beta.54
186
+ - @ai-sdk/provider@3.0.0-beta.28
187
+
188
+ ## 1.0.0-beta.56
189
+
190
+ ### Patch Changes
191
+
192
+ - 366f50b: chore(provider): add deprecated textEmbeddingModel and textEmbedding aliases
193
+ - Updated dependencies [366f50b]
194
+ - @ai-sdk/openai-compatible@2.0.0-beta.54
195
+ - @ai-sdk/provider@3.0.0-beta.27
196
+ - @ai-sdk/provider-utils@4.0.0-beta.53
197
+
198
+ ## 1.0.0-beta.55
199
+
200
+ ### Patch Changes
201
+
202
+ - Updated dependencies [763d04a]
203
+ - @ai-sdk/provider-utils@4.0.0-beta.52
204
+ - @ai-sdk/openai-compatible@2.0.0-beta.53
205
+
206
+ ## 1.0.0-beta.54
207
+
208
+ ### Patch Changes
209
+
210
+ - Updated dependencies [c1efac4]
211
+ - @ai-sdk/provider-utils@4.0.0-beta.51
212
+ - @ai-sdk/openai-compatible@2.0.0-beta.52
213
+
214
+ ## 1.0.0-beta.53
215
+
216
+ ### Patch Changes
217
+
218
+ - Updated dependencies [32223c8]
219
+ - @ai-sdk/provider-utils@4.0.0-beta.50
220
+ - @ai-sdk/openai-compatible@2.0.0-beta.51
221
+
222
+ ## 1.0.0-beta.52
223
+
224
+ ### Patch Changes
225
+
226
+ - Updated dependencies [83e5744]
227
+ - @ai-sdk/provider-utils@4.0.0-beta.49
228
+ - @ai-sdk/openai-compatible@2.0.0-beta.50
229
+
230
+ ## 1.0.0-beta.51
231
+
232
+ ### Patch Changes
233
+
234
+ - Updated dependencies [960ec8f]
235
+ - @ai-sdk/provider-utils@4.0.0-beta.48
236
+ - @ai-sdk/openai-compatible@2.0.0-beta.49
237
+
238
+ ## 1.0.0-beta.50
239
+
240
+ ### Patch Changes
241
+
242
+ - Updated dependencies [e9e157f]
243
+ - @ai-sdk/provider-utils@4.0.0-beta.47
244
+ - @ai-sdk/openai-compatible@2.0.0-beta.48
245
+
246
+ ## 1.0.0-beta.49
247
+
248
+ ### Patch Changes
249
+
250
+ - Updated dependencies [81e29ab]
251
+ - @ai-sdk/provider-utils@4.0.0-beta.46
252
+ - @ai-sdk/openai-compatible@2.0.0-beta.47
253
+
254
+ ## 1.0.0-beta.48
255
+
256
+ ### Patch Changes
257
+
258
+ - Updated dependencies [3bd2689]
259
+ - @ai-sdk/openai-compatible@2.0.0-beta.46
260
+ - @ai-sdk/provider@3.0.0-beta.26
261
+ - @ai-sdk/provider-utils@4.0.0-beta.45
262
+
263
+ ## 1.0.0-beta.47
264
+
265
+ ### Patch Changes
266
+
267
+ - Updated dependencies [53f3368]
268
+ - @ai-sdk/provider@3.0.0-beta.25
269
+ - @ai-sdk/openai-compatible@2.0.0-beta.45
270
+ - @ai-sdk/provider-utils@4.0.0-beta.44
271
+
272
+ ## 1.0.0-beta.46
273
+
274
+ ### Patch Changes
275
+
276
+ - Updated dependencies [dce03c4]
277
+ - @ai-sdk/provider-utils@4.0.0-beta.43
278
+ - @ai-sdk/provider@3.0.0-beta.24
279
+ - @ai-sdk/openai-compatible@2.0.0-beta.44
280
+
281
+ ## 1.0.0-beta.45
282
+
283
+ ### Patch Changes
284
+
285
+ - Updated dependencies [3ed5519]
286
+ - @ai-sdk/provider-utils@4.0.0-beta.42
287
+ - @ai-sdk/openai-compatible@2.0.0-beta.43
288
+
289
+ ## 1.0.0-beta.44
290
+
291
+ ### Patch Changes
292
+
293
+ - Updated dependencies [1bd7d32]
294
+ - @ai-sdk/openai-compatible@2.0.0-beta.42
295
+ - @ai-sdk/provider-utils@4.0.0-beta.41
296
+ - @ai-sdk/provider@3.0.0-beta.23
297
+
298
+ ## 1.0.0-beta.43
299
+
300
+ ### Patch Changes
301
+
302
+ - Updated dependencies [544d4e8]
303
+ - @ai-sdk/openai-compatible@2.0.0-beta.41
304
+ - @ai-sdk/provider-utils@4.0.0-beta.40
305
+ - @ai-sdk/provider@3.0.0-beta.22
306
+
307
+ ## 1.0.0-beta.42
308
+
309
+ ### Patch Changes
310
+
311
+ - Updated dependencies [954c356]
312
+ - @ai-sdk/provider-utils@4.0.0-beta.39
313
+ - @ai-sdk/provider@3.0.0-beta.21
314
+ - @ai-sdk/openai-compatible@2.0.0-beta.40
315
+
316
+ ## 1.0.0-beta.41
317
+
318
+ ### Patch Changes
319
+
320
+ - Updated dependencies [03849b0]
321
+ - @ai-sdk/provider-utils@4.0.0-beta.38
322
+ - @ai-sdk/openai-compatible@2.0.0-beta.39
323
+
324
+ ## 1.0.0-beta.40
325
+
326
+ ### Patch Changes
327
+
328
+ - Updated dependencies [457318b]
329
+ - @ai-sdk/openai-compatible@2.0.0-beta.38
330
+ - @ai-sdk/provider@3.0.0-beta.20
331
+ - @ai-sdk/provider-utils@4.0.0-beta.37
332
+
333
+ ## 1.0.0-beta.39
334
+
335
+ ### Patch Changes
336
+
337
+ - 8d9e8ad: chore(provider): remove generics from EmbeddingModelV3
338
+
339
+ Before
340
+
341
+ ```ts
342
+ model.textEmbeddingModel('my-model-id');
343
+ ```
344
+
345
+ After
346
+
347
+ ```ts
348
+ model.embeddingModel('my-model-id');
349
+ ```
350
+
351
+ - Updated dependencies [8d9e8ad]
352
+ - @ai-sdk/openai-compatible@2.0.0-beta.37
353
+ - @ai-sdk/provider@3.0.0-beta.19
354
+ - @ai-sdk/provider-utils@4.0.0-beta.36
355
+
356
+ ## 1.0.0-beta.38
357
+
358
+ ### Patch Changes
359
+
360
+ - Updated dependencies [10d819b]
361
+ - @ai-sdk/provider@3.0.0-beta.18
362
+ - @ai-sdk/openai-compatible@2.0.0-beta.36
363
+ - @ai-sdk/provider-utils@4.0.0-beta.35
364
+
365
+ ## 1.0.0-beta.37
366
+
367
+ ### Patch Changes
368
+
369
+ - Updated dependencies [db913bd]
370
+ - @ai-sdk/provider@3.0.0-beta.17
371
+ - @ai-sdk/openai-compatible@2.0.0-beta.35
372
+ - @ai-sdk/provider-utils@4.0.0-beta.34
373
+
374
+ ## 1.0.0-beta.36
375
+
376
+ ### Patch Changes
377
+
378
+ - ab9af9c: add moonshotai/Kimi-K2-Thinking model ID for Baseten provider
379
+
380
+ ## 1.0.0-beta.35
381
+
382
+ ### Patch Changes
383
+
384
+ - Updated dependencies [b681d7d]
385
+ - @ai-sdk/provider@3.0.0-beta.16
386
+ - @ai-sdk/openai-compatible@2.0.0-beta.34
387
+ - @ai-sdk/provider-utils@4.0.0-beta.33
388
+
389
+ ## 1.0.0-beta.34
390
+
391
+ ### Patch Changes
392
+
393
+ - Updated dependencies [32d8dbb]
394
+ - @ai-sdk/provider-utils@4.0.0-beta.32
395
+ - @ai-sdk/openai-compatible@2.0.0-beta.33
396
+
397
+ ## 1.0.0-beta.33
398
+
399
+ ### Patch Changes
400
+
401
+ - Updated dependencies [bb36798]
402
+ - @ai-sdk/provider@3.0.0-beta.15
403
+ - @ai-sdk/openai-compatible@2.0.0-beta.32
404
+ - @ai-sdk/provider-utils@4.0.0-beta.31
405
+
406
+ ## 1.0.0-beta.32
407
+
408
+ ### Patch Changes
409
+
410
+ - Updated dependencies [4f16c37]
411
+ - @ai-sdk/provider-utils@4.0.0-beta.30
412
+ - @ai-sdk/openai-compatible@2.0.0-beta.31
413
+
414
+ ## 1.0.0-beta.31
415
+
416
+ ### Patch Changes
417
+
418
+ - Updated dependencies [af3780b]
419
+ - @ai-sdk/provider@3.0.0-beta.14
420
+ - @ai-sdk/openai-compatible@2.0.0-beta.30
421
+ - @ai-sdk/provider-utils@4.0.0-beta.29
422
+
423
+ ## 1.0.0-beta.30
424
+
425
+ ### Patch Changes
426
+
427
+ - Updated dependencies [016b111]
428
+ - @ai-sdk/provider-utils@4.0.0-beta.28
429
+ - @ai-sdk/openai-compatible@2.0.0-beta.29
430
+
431
+ ## 1.0.0-beta.29
432
+
433
+ ### Patch Changes
434
+
435
+ - Updated dependencies [37c58a0]
436
+ - @ai-sdk/provider@3.0.0-beta.13
437
+ - @ai-sdk/openai-compatible@2.0.0-beta.28
438
+ - @ai-sdk/provider-utils@4.0.0-beta.27
439
+
440
+ ## 1.0.0-beta.28
441
+
442
+ ### Patch Changes
443
+
444
+ - Updated dependencies [d1bdadb]
445
+ - @ai-sdk/provider@3.0.0-beta.12
446
+ - @ai-sdk/openai-compatible@2.0.0-beta.27
447
+ - @ai-sdk/provider-utils@4.0.0-beta.26
448
+
449
+ ## 1.0.0-beta.27
450
+
451
+ ### Patch Changes
452
+
453
+ - Updated dependencies [4c44a5b]
454
+ - @ai-sdk/provider@3.0.0-beta.11
455
+ - @ai-sdk/openai-compatible@2.0.0-beta.26
456
+ - @ai-sdk/provider-utils@4.0.0-beta.25
457
+
458
+ ## 1.0.0-beta.26
459
+
460
+ ### Patch Changes
461
+
462
+ - 0c3b58b: fix(provider): add specificationVersion to ProviderV3
463
+ - Updated dependencies [0c3b58b]
464
+ - @ai-sdk/openai-compatible@2.0.0-beta.25
465
+ - @ai-sdk/provider@3.0.0-beta.10
466
+ - @ai-sdk/provider-utils@4.0.0-beta.24
467
+
468
+ ## 1.0.0-beta.25
469
+
470
+ ### Patch Changes
471
+
472
+ - Updated dependencies [a755db5]
473
+ - @ai-sdk/provider@3.0.0-beta.9
474
+ - @ai-sdk/openai-compatible@2.0.0-beta.24
475
+ - @ai-sdk/provider-utils@4.0.0-beta.23
476
+
477
+ ## 1.0.0-beta.24
478
+
479
+ ### Patch Changes
480
+
481
+ - Updated dependencies [58920e0]
482
+ - @ai-sdk/provider-utils@4.0.0-beta.22
483
+ - @ai-sdk/openai-compatible@2.0.0-beta.23
484
+
485
+ ## 1.0.0-beta.23
486
+
487
+ ### Patch Changes
488
+
489
+ - Updated dependencies [293a6b7]
490
+ - @ai-sdk/provider-utils@4.0.0-beta.21
491
+ - @ai-sdk/openai-compatible@2.0.0-beta.22
492
+
493
+ ## 1.0.0-beta.22
494
+
495
+ ### Patch Changes
496
+
497
+ - Updated dependencies [fca786b]
498
+ - @ai-sdk/provider-utils@4.0.0-beta.20
499
+ - @ai-sdk/openai-compatible@2.0.0-beta.21
500
+
501
+ ## 1.0.0-beta.21
502
+
503
+ ### Patch Changes
504
+
505
+ - Updated dependencies [3794514]
506
+ - @ai-sdk/provider-utils@4.0.0-beta.19
507
+ - @ai-sdk/provider@3.0.0-beta.8
508
+ - @ai-sdk/openai-compatible@2.0.0-beta.20
509
+
510
+ ## 1.0.0-beta.20
511
+
512
+ ### Patch Changes
513
+
514
+ - Updated dependencies [81d4308]
515
+ - @ai-sdk/provider@3.0.0-beta.7
516
+ - @ai-sdk/openai-compatible@2.0.0-beta.19
517
+ - @ai-sdk/provider-utils@4.0.0-beta.18
518
+
519
+ ## 1.0.0-beta.19
520
+
521
+ ### Patch Changes
522
+
523
+ - Updated dependencies [703459a]
524
+ - @ai-sdk/provider-utils@4.0.0-beta.17
525
+ - @ai-sdk/openai-compatible@2.0.0-beta.18
526
+
527
+ ## 1.0.0-beta.18
528
+
529
+ ### Patch Changes
530
+
531
+ - Updated dependencies [b689220]
532
+ - @ai-sdk/openai-compatible@2.0.0-beta.17
533
+
534
+ ## 1.0.0-beta.17
535
+
536
+ ### Patch Changes
537
+
538
+ - Updated dependencies [6306603]
539
+ - @ai-sdk/provider-utils@4.0.0-beta.16
540
+ - @ai-sdk/openai-compatible@2.0.0-beta.16
541
+
542
+ ## 1.0.0-beta.16
543
+
544
+ ### Patch Changes
545
+
546
+ - Updated dependencies [f0b2157]
547
+ - @ai-sdk/provider-utils@4.0.0-beta.15
548
+ - @ai-sdk/openai-compatible@2.0.0-beta.15
549
+
550
+ ## 1.0.0-beta.15
551
+
552
+ ### Patch Changes
553
+
554
+ - Updated dependencies [3b1d015]
555
+ - @ai-sdk/provider-utils@4.0.0-beta.14
556
+ - @ai-sdk/openai-compatible@2.0.0-beta.14
557
+
558
+ ## 1.0.0-beta.14
559
+
560
+ ### Patch Changes
561
+
562
+ - Updated dependencies [d116b4b]
563
+ - @ai-sdk/provider-utils@4.0.0-beta.13
564
+ - @ai-sdk/openai-compatible@2.0.0-beta.13
565
+
566
+ ## 1.0.0-beta.13
567
+
568
+ ### Patch Changes
569
+
570
+ - Updated dependencies [7e32fea]
571
+ - @ai-sdk/provider-utils@4.0.0-beta.12
572
+ - @ai-sdk/openai-compatible@2.0.0-beta.12
573
+
574
+ ## 1.0.0-beta.12
575
+
576
+ ### Patch Changes
577
+
578
+ - 95f65c2: chore: use import \* from zod/v4
579
+ - Updated dependencies
580
+ - @ai-sdk/openai-compatible@2.0.0-beta.11
581
+ - @ai-sdk/provider-utils@4.0.0-beta.11
582
+
583
+ ## 1.0.0-beta.11
584
+
585
+ ### Major Changes
586
+
587
+ - dee8b05: ai SDK 6 beta
588
+
589
+ ### Patch Changes
590
+
591
+ - Updated dependencies [dee8b05]
592
+ - @ai-sdk/openai-compatible@2.0.0-beta.10
593
+ - @ai-sdk/provider@3.0.0-beta.6
594
+ - @ai-sdk/provider-utils@4.0.0-beta.10
595
+
596
+ ## 1.0.0-beta.10
597
+
598
+ ### Patch Changes
599
+
600
+ - Updated dependencies [521c537]
601
+ - @ai-sdk/provider-utils@3.1.0-beta.9
602
+ - @ai-sdk/openai-compatible@1.1.0-beta.9
603
+
604
+ ## 1.0.0-beta.9
605
+
606
+ ### Patch Changes
607
+
608
+ - Updated dependencies [e06565c]
609
+ - @ai-sdk/provider-utils@3.1.0-beta.8
610
+ - @ai-sdk/openai-compatible@1.1.0-beta.8
611
+
612
+ ## 1.0.0-beta.8
613
+
614
+ ### Patch Changes
615
+
616
+ - Updated dependencies
617
+ - @ai-sdk/provider@2.1.0-beta.5
618
+ - @ai-sdk/openai-compatible@1.1.0-beta.7
619
+ - @ai-sdk/provider-utils@3.1.0-beta.7
620
+
621
+ ## 1.0.0-beta.7
622
+
623
+ ### Patch Changes
624
+
625
+ - Updated dependencies
626
+ - @ai-sdk/openai-compatible@1.1.0-beta.6
627
+ - @ai-sdk/provider-utils@3.1.0-beta.6
628
+ - @ai-sdk/provider@2.1.0-beta.4
629
+
630
+ ## 1.0.0-beta.6
631
+
632
+ ### Patch Changes
633
+
634
+ - 916bc46: bumped performance client to 0.0.10
635
+
636
+ ## 1.0.0-beta.5
637
+
638
+ ### Patch Changes
639
+
640
+ - 8dac895: feat: `LanguageModelV3`
641
+ - Updated dependencies
642
+ - @ai-sdk/openai-compatible@1.1.0-beta.5
643
+ - @ai-sdk/provider-utils@3.1.0-beta.5
644
+ - @ai-sdk/provider@2.1.0-beta.3
645
+
646
+ ## 1.0.0-beta.4
647
+
648
+ ### Patch Changes
649
+
650
+ - 4616b86: chore: update zod peer depenedency version
651
+ - Updated dependencies [4616b86]
652
+ - @ai-sdk/openai-compatible@1.1.0-beta.4
653
+ - @ai-sdk/provider-utils@3.1.0-beta.4
654
+
655
+ ## 1.0.0-beta.3
656
+
657
+ ### Patch Changes
658
+
659
+ - ed329cb: feat: `Provider-V3`
660
+ - Updated dependencies
661
+ - @ai-sdk/openai-compatible@1.1.0-beta.3
662
+ - @ai-sdk/provider@2.1.0-beta.2
663
+ - @ai-sdk/provider-utils@3.1.0-beta.3
664
+
665
+ ## 1.0.0-beta.2
666
+
667
+ ### Patch Changes
668
+
669
+ - 0c4822d: feat: `EmbeddingModelV3`
670
+ - 1cad0ab: feat: add provider version to user-agent header
671
+ - Updated dependencies [0c4822d]
672
+ - @ai-sdk/openai-compatible@1.1.0-beta.2
673
+ - @ai-sdk/provider@2.1.0-beta.1
674
+ - @ai-sdk/provider-utils@3.1.0-beta.2
675
+
676
+ ## 1.0.0-beta.1
677
+
678
+ ### Patch Changes
679
+
680
+ - Updated dependencies [cbb1d35]
681
+ - @ai-sdk/provider-utils@3.1.0-beta.1
682
+ - @ai-sdk/openai-compatible@1.1.0-beta.1
683
+
684
+ ## 1.0.0-beta.0
685
+
686
+ ### Major Changes
687
+
688
+ - 6cc9cd0: Added Baseten as a Provider for AI SDK
package/LICENSE ADDED
@@ -0,0 +1,13 @@
1
+ Copyright 2023 Vercel, Inc.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
package/README.md ADDED
@@ -0,0 +1,35 @@
1
+ # AI SDK - Baseten Provider
2
+
3
+ The **[Baseten provider](https://ai-sdk.dev/providers/ai-sdk-providers/baseten)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model and embedding model support for the [Baseten](https://baseten.co) platform.
4
+
5
+ ## Setup
6
+
7
+ The Baseten provider is available in the `@ai-sdk/baseten` module. You can install it with
8
+
9
+ ```bash
10
+ npm i @ai-sdk/baseten
11
+ ```
12
+
13
+ ## Provider Instance
14
+
15
+ You can import the default provider instance `baseten` from `@ai-sdk/baseten`:
16
+
17
+ ```ts
18
+ import { baseten } from '@ai-sdk/baseten';
19
+ ```
20
+
21
+ ## Language Model Example (Model APIs)
22
+
23
+ ```ts
24
+ import { baseten } from '@ai-sdk/baseten';
25
+ import { generateText } from 'ai';
26
+
27
+ const { text } = await generateText({
28
+ model: baseten('deepseek-ai/DeepSeek-V3-0324'),
29
+ prompt: 'What is the meaning of life?',
30
+ });
31
+ ```
32
+
33
+ ## Documentation
34
+
35
+ Please check out the **[Baseten provider](https://ai-sdk.dev/providers/ai-sdk-providers/baseten)** for more information.
@@ -0,0 +1,65 @@
1
+ import { ProviderV3, LanguageModelV3, EmbeddingModelV3 } from '@ai-sdk/provider';
2
+ import { FetchFunction } from '@ai-sdk/provider-utils';
3
+ import { z } from 'zod/v4';
4
+
5
+ type BasetenChatModelId = 'deepseek-ai/DeepSeek-R1-0528' | 'deepseek-ai/DeepSeek-V3-0324' | 'deepseek-ai/DeepSeek-V3.1' | 'moonshotai/Kimi-K2-Instruct-0905' | 'moonshotai/Kimi-K2-Thinking' | 'Qwen/Qwen3-235B-A22B-Instruct-2507' | 'Qwen/Qwen3-Coder-480B-A35B-Instruct' | 'openai/gpt-oss-120b' | 'zai-org/GLM-4.6' | 'zai-org/GLM-4.7' | (string & {});
6
+
7
+ type BasetenEmbeddingModelId = string & {};
8
+
9
+ type BasetenErrorData = z.infer<typeof basetenErrorSchema>;
10
+ declare const basetenErrorSchema: z.ZodObject<{
11
+ error: z.ZodString;
12
+ }, z.core.$strip>;
13
+ interface BasetenProviderSettings {
14
+ /**
15
+ * Baseten API key. Default value is taken from the `BASETEN_API_KEY`
16
+ * environment variable.
17
+ */
18
+ apiKey?: string;
19
+ /**
20
+ * Base URL for the Model APIs. Default: 'https://inference.baseten.co/v1'
21
+ */
22
+ baseURL?: string;
23
+ /**
24
+ * Model URL for custom models (chat or embeddings).
25
+ * If not supplied, the default Model APIs will be used.
26
+ */
27
+ modelURL?: string;
28
+ /**
29
+ * Custom headers to include in the requests.
30
+ */
31
+ headers?: Record<string, string>;
32
+ /**
33
+ * Custom fetch implementation. You can use it as a middleware to intercept requests,
34
+ * or to provide a custom fetch implementation for e.g. testing.
35
+ */
36
+ fetch?: FetchFunction;
37
+ }
38
+ interface BasetenProvider extends ProviderV3 {
39
+ /**
40
+ Creates a chat model for text generation.
41
+ */
42
+ (modelId?: BasetenChatModelId): LanguageModelV3;
43
+ /**
44
+ Creates a chat model for text generation.
45
+ */
46
+ chatModel(modelId?: BasetenChatModelId): LanguageModelV3;
47
+ /**
48
+ Creates a language model for text generation. Alias for chatModel.
49
+ */
50
+ languageModel(modelId?: BasetenChatModelId): LanguageModelV3;
51
+ /**
52
+ Creates a embedding model for text generation.
53
+ */
54
+ embeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV3;
55
+ /**
56
+ * @deprecated Use `embeddingModel` instead.
57
+ */
58
+ textEmbeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV3;
59
+ }
60
+ declare function createBaseten(options?: BasetenProviderSettings): BasetenProvider;
61
+ declare const baseten: BasetenProvider;
62
+
63
+ declare const VERSION: string;
64
+
65
+ export { type BasetenChatModelId, type BasetenErrorData, type BasetenProvider, type BasetenProviderSettings, VERSION, baseten, createBaseten };
@@ -0,0 +1,65 @@
1
+ import { ProviderV3, LanguageModelV3, EmbeddingModelV3 } from '@ai-sdk/provider';
2
+ import { FetchFunction } from '@ai-sdk/provider-utils';
3
+ import { z } from 'zod/v4';
4
+
5
+ type BasetenChatModelId = 'deepseek-ai/DeepSeek-R1-0528' | 'deepseek-ai/DeepSeek-V3-0324' | 'deepseek-ai/DeepSeek-V3.1' | 'moonshotai/Kimi-K2-Instruct-0905' | 'moonshotai/Kimi-K2-Thinking' | 'Qwen/Qwen3-235B-A22B-Instruct-2507' | 'Qwen/Qwen3-Coder-480B-A35B-Instruct' | 'openai/gpt-oss-120b' | 'zai-org/GLM-4.6' | 'zai-org/GLM-4.7' | (string & {});
6
+
7
+ type BasetenEmbeddingModelId = string & {};
8
+
9
+ type BasetenErrorData = z.infer<typeof basetenErrorSchema>;
10
+ declare const basetenErrorSchema: z.ZodObject<{
11
+ error: z.ZodString;
12
+ }, z.core.$strip>;
13
+ interface BasetenProviderSettings {
14
+ /**
15
+ * Baseten API key. Default value is taken from the `BASETEN_API_KEY`
16
+ * environment variable.
17
+ */
18
+ apiKey?: string;
19
+ /**
20
+ * Base URL for the Model APIs. Default: 'https://inference.baseten.co/v1'
21
+ */
22
+ baseURL?: string;
23
+ /**
24
+ * Model URL for custom models (chat or embeddings).
25
+ * If not supplied, the default Model APIs will be used.
26
+ */
27
+ modelURL?: string;
28
+ /**
29
+ * Custom headers to include in the requests.
30
+ */
31
+ headers?: Record<string, string>;
32
+ /**
33
+ * Custom fetch implementation. You can use it as a middleware to intercept requests,
34
+ * or to provide a custom fetch implementation for e.g. testing.
35
+ */
36
+ fetch?: FetchFunction;
37
+ }
38
+ interface BasetenProvider extends ProviderV3 {
39
+ /**
40
+ Creates a chat model for text generation.
41
+ */
42
+ (modelId?: BasetenChatModelId): LanguageModelV3;
43
+ /**
44
+ Creates a chat model for text generation.
45
+ */
46
+ chatModel(modelId?: BasetenChatModelId): LanguageModelV3;
47
+ /**
48
+ Creates a language model for text generation. Alias for chatModel.
49
+ */
50
+ languageModel(modelId?: BasetenChatModelId): LanguageModelV3;
51
+ /**
52
+ Creates a embedding model for text generation.
53
+ */
54
+ embeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV3;
55
+ /**
56
+ * @deprecated Use `embeddingModel` instead.
57
+ */
58
+ textEmbeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV3;
59
+ }
60
+ declare function createBaseten(options?: BasetenProviderSettings): BasetenProvider;
61
+ declare const baseten: BasetenProvider;
62
+
63
+ declare const VERSION: string;
64
+
65
+ export { type BasetenChatModelId, type BasetenErrorData, type BasetenProvider, type BasetenProviderSettings, VERSION, baseten, createBaseten };
package/dist/index.js ADDED
@@ -0,0 +1,160 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/index.ts
21
+ var src_exports = {};
22
+ __export(src_exports, {
23
+ VERSION: () => VERSION,
24
+ baseten: () => baseten,
25
+ createBaseten: () => createBaseten
26
+ });
27
+ module.exports = __toCommonJS(src_exports);
28
+
29
+ // src/baseten-provider.ts
30
+ var import_openai_compatible = require("@ai-sdk/openai-compatible");
31
+ var import_provider = require("@ai-sdk/provider");
32
+ var import_provider_utils = require("@ai-sdk/provider-utils");
33
+ var import_v4 = require("zod/v4");
34
+ var import_performance_client = require("@basetenlabs/performance-client");
35
+
36
+ // src/version.ts
37
+ var VERSION = true ? "0.0.0-1c33ba03-20260114162300" : "0.0.0-test";
38
+
39
+ // src/baseten-provider.ts
40
+ var basetenErrorSchema = import_v4.z.object({
41
+ error: import_v4.z.string()
42
+ });
43
+ var basetenErrorStructure = {
44
+ errorSchema: basetenErrorSchema,
45
+ errorToMessage: (data) => data.error
46
+ };
47
+ var defaultBaseURL = "https://inference.baseten.co/v1";
48
+ function createBaseten(options = {}) {
49
+ var _a;
50
+ const baseURL = (0, import_provider_utils.withoutTrailingSlash)((_a = options.baseURL) != null ? _a : defaultBaseURL);
51
+ const getHeaders = () => (0, import_provider_utils.withUserAgentSuffix)(
52
+ {
53
+ Authorization: `Bearer ${(0, import_provider_utils.loadApiKey)({
54
+ apiKey: options.apiKey,
55
+ environmentVariableName: "BASETEN_API_KEY",
56
+ description: "Baseten API key"
57
+ })}`,
58
+ ...options.headers
59
+ },
60
+ `ai-sdk/baseten/${VERSION}`
61
+ );
62
+ const getCommonModelConfig = (modelType, customURL) => ({
63
+ provider: `baseten.${modelType}`,
64
+ url: ({ path }) => {
65
+ if (modelType === "embedding" && (customURL == null ? void 0 : customURL.includes("/sync")) && !(customURL == null ? void 0 : customURL.includes("/sync/v1"))) {
66
+ return `${customURL}/v1${path}`;
67
+ }
68
+ return `${customURL || baseURL}${path}`;
69
+ },
70
+ headers: getHeaders,
71
+ fetch: options.fetch
72
+ });
73
+ const createChatModel = (modelId) => {
74
+ const customURL = options.modelURL;
75
+ if (customURL) {
76
+ const isOpenAICompatible = customURL.includes("/sync/v1");
77
+ if (isOpenAICompatible) {
78
+ return new import_openai_compatible.OpenAICompatibleChatLanguageModel(modelId != null ? modelId : "placeholder", {
79
+ ...getCommonModelConfig("chat", customURL),
80
+ errorStructure: basetenErrorStructure
81
+ });
82
+ } else if (customURL.includes("/predict")) {
83
+ throw new Error(
84
+ "Not supported. You must use a /sync/v1 endpoint for chat models."
85
+ );
86
+ }
87
+ }
88
+ return new import_openai_compatible.OpenAICompatibleChatLanguageModel(modelId != null ? modelId : "chat", {
89
+ ...getCommonModelConfig("chat"),
90
+ errorStructure: basetenErrorStructure
91
+ });
92
+ };
93
+ const createEmbeddingModel = (modelId) => {
94
+ const customURL = options.modelURL;
95
+ if (!customURL) {
96
+ throw new Error(
97
+ "No model URL provided for embeddings. Please set modelURL option for embeddings."
98
+ );
99
+ }
100
+ const isOpenAICompatible = customURL.includes("/sync");
101
+ if (isOpenAICompatible) {
102
+ const model = new import_openai_compatible.OpenAICompatibleEmbeddingModel(
103
+ modelId != null ? modelId : "embeddings",
104
+ {
105
+ ...getCommonModelConfig("embedding", customURL),
106
+ errorStructure: basetenErrorStructure
107
+ }
108
+ );
109
+ const performanceClientURL = customURL.replace("/sync/v1", "/sync");
110
+ const performanceClient = new import_performance_client.PerformanceClient(
111
+ performanceClientURL,
112
+ (0, import_provider_utils.loadApiKey)({
113
+ apiKey: options.apiKey,
114
+ environmentVariableName: "BASETEN_API_KEY",
115
+ description: "Baseten API key"
116
+ })
117
+ );
118
+ model.doEmbed = async (params) => {
119
+ if (!params.values || !Array.isArray(params.values)) {
120
+ throw new Error("params.values must be an array of strings");
121
+ }
122
+ const response = await performanceClient.embed(
123
+ params.values,
124
+ modelId != null ? modelId : "embeddings"
125
+ // model_id is for Model APIs, we don't use it here for dedicated
126
+ );
127
+ const embeddings = response.data.map((item) => item.embedding);
128
+ return {
129
+ embeddings,
130
+ usage: response.usage ? { tokens: response.usage.total_tokens } : void 0,
131
+ response: { headers: {}, body: response },
132
+ warnings: []
133
+ };
134
+ };
135
+ return model;
136
+ } else {
137
+ throw new Error(
138
+ "Not supported. You must use a /sync or /sync/v1 endpoint for embeddings."
139
+ );
140
+ }
141
+ };
142
+ const provider = (modelId) => createChatModel(modelId);
143
+ provider.specificationVersion = "v3";
144
+ provider.chatModel = createChatModel;
145
+ provider.languageModel = createChatModel;
146
+ provider.imageModel = (modelId) => {
147
+ throw new import_provider.NoSuchModelError({ modelId, modelType: "imageModel" });
148
+ };
149
+ provider.embeddingModel = createEmbeddingModel;
150
+ provider.textEmbeddingModel = createEmbeddingModel;
151
+ return provider;
152
+ }
153
+ var baseten = createBaseten();
154
+ // Annotate the CommonJS export names for ESM import in node:
155
+ 0 && (module.exports = {
156
+ VERSION,
157
+ baseten,
158
+ createBaseten
159
+ });
160
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts","../src/baseten-provider.ts","../src/version.ts"],"sourcesContent":["export type { BasetenChatModelId } from './baseten-chat-options';\nexport { baseten, createBaseten } from './baseten-provider';\nexport type {\n BasetenProvider,\n BasetenProviderSettings,\n BasetenErrorData,\n} from './baseten-provider';\nexport { VERSION } from './version';\n","import {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleEmbeddingModel,\n ProviderErrorStructure,\n} from '@ai-sdk/openai-compatible';\nimport {\n EmbeddingModelV3,\n LanguageModelV3,\n NoSuchModelError,\n ProviderV3,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n withUserAgentSuffix,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport { BasetenChatModelId } from './baseten-chat-options';\nimport { BasetenEmbeddingModelId } from './baseten-embedding-options';\nimport { PerformanceClient } from '@basetenlabs/performance-client';\nimport { VERSION } from './version';\n\nexport type BasetenErrorData = z.infer<typeof basetenErrorSchema>;\n\nconst basetenErrorSchema = z.object({\n error: z.string(),\n});\n\nconst basetenErrorStructure: ProviderErrorStructure<BasetenErrorData> = {\n errorSchema: basetenErrorSchema,\n errorToMessage: data => data.error,\n};\n\nexport interface BasetenProviderSettings {\n /**\n * Baseten API key. Default value is taken from the `BASETEN_API_KEY`\n * environment variable.\n */\n apiKey?: string;\n\n /**\n * Base URL for the Model APIs. Default: 'https://inference.baseten.co/v1'\n */\n baseURL?: string;\n\n /**\n * Model URL for custom models (chat or embeddings).\n * If not supplied, the default Model APIs will be used.\n */\n modelURL?: string;\n /**\n * Custom headers to include in the requests.\n */\n headers?: Record<string, string>;\n\n /**\n * Custom fetch implementation. 
You can use it as a middleware to intercept requests,\n * or to provide a custom fetch implementation for e.g. testing.\n */\n fetch?: FetchFunction;\n}\n\nexport interface BasetenProvider extends ProviderV3 {\n /**\nCreates a chat model for text generation.\n*/\n (modelId?: BasetenChatModelId): LanguageModelV3;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId?: BasetenChatModelId): LanguageModelV3;\n\n /**\nCreates a language model for text generation. Alias for chatModel.\n*/\n languageModel(modelId?: BasetenChatModelId): LanguageModelV3;\n\n /**\nCreates a embedding model for text generation.\n*/\n embeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV3;\n\n /**\n * @deprecated Use `embeddingModel` instead.\n */\n textEmbeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV3;\n}\n\n// by default, we use the Model APIs\nconst defaultBaseURL = 'https://inference.baseten.co/v1';\n\nexport function createBaseten(\n options: BasetenProviderSettings = {},\n): BasetenProvider {\n const baseURL = withoutTrailingSlash(options.baseURL ?? 
defaultBaseURL);\n const getHeaders = () =>\n withUserAgentSuffix(\n {\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'BASETEN_API_KEY',\n description: 'Baseten API key',\n })}`,\n ...options.headers,\n },\n `ai-sdk/baseten/${VERSION}`,\n );\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (\n modelType: string,\n customURL?: string,\n ): CommonModelConfig => ({\n provider: `baseten.${modelType}`,\n url: ({ path }) => {\n // For embeddings with /sync URLs (but not /sync/v1), we need to add /v1\n if (\n modelType === 'embedding' &&\n customURL?.includes('/sync') &&\n !customURL?.includes('/sync/v1')\n ) {\n return `${customURL}/v1${path}`;\n }\n return `${customURL || baseURL}${path}`;\n },\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId?: BasetenChatModelId) => {\n // Use modelURL if provided, otherwise use default Model APIs\n const customURL = options.modelURL;\n\n if (customURL) {\n // Check if this is a /sync/v1 endpoint (OpenAI-compatible) or /predict endpoint (custom)\n const isOpenAICompatible = customURL.includes('/sync/v1');\n\n if (isOpenAICompatible) {\n // For /sync/v1 endpoints, use standard OpenAI-compatible format\n return new OpenAICompatibleChatLanguageModel(modelId ?? 'placeholder', {\n ...getCommonModelConfig('chat', customURL),\n errorStructure: basetenErrorStructure,\n });\n } else if (customURL.includes('/predict')) {\n throw new Error(\n 'Not supported. You must use a /sync/v1 endpoint for chat models.',\n );\n }\n }\n\n // Use default OpenAI-compatible format for Model APIs\n return new OpenAICompatibleChatLanguageModel(modelId ?? 
'chat', {\n ...getCommonModelConfig('chat'),\n errorStructure: basetenErrorStructure,\n });\n };\n\n const createEmbeddingModel = (modelId?: BasetenEmbeddingModelId) => {\n // Use modelURL if provided\n const customURL = options.modelURL;\n if (!customURL) {\n throw new Error(\n 'No model URL provided for embeddings. Please set modelURL option for embeddings.',\n );\n }\n\n // Check if this is a /sync or /sync/v1 endpoint (OpenAI-compatible)\n // We support both /sync and /sync/v1, stripping /v1 before passing to Performance Client, as Performance Client adds /v1 itself\n const isOpenAICompatible = customURL.includes('/sync');\n\n if (isOpenAICompatible) {\n // Create the model using OpenAICompatibleEmbeddingModel and override doEmbed\n const model = new OpenAICompatibleEmbeddingModel(\n modelId ?? 'embeddings',\n {\n ...getCommonModelConfig('embedding', customURL),\n errorStructure: basetenErrorStructure,\n },\n );\n\n // Strip /v1 from URL if present before passing to Performance Client to avoid double /v1\n const performanceClientURL = customURL.replace('/sync/v1', '/sync');\n\n // Initialize the B10 Performance Client once for reuse\n const performanceClient = new PerformanceClient(\n performanceClientURL,\n loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'BASETEN_API_KEY',\n description: 'Baseten API key',\n }),\n );\n\n // Override the doEmbed method to use the pre-created Performance Client\n model.doEmbed = async params => {\n if (!params.values || !Array.isArray(params.values)) {\n throw new Error('params.values must be an array of strings');\n }\n\n // Performance Client handles batching internally, so we don't need to limit in 128 here\n const response = await performanceClient.embed(\n params.values,\n modelId ?? 
'embeddings', // model_id is for Model APIs, we don't use it here for dedicated\n );\n // Transform the response to match the expected format\n const embeddings = response.data.map((item: any) => item.embedding);\n\n return {\n embeddings,\n usage: response.usage\n ? { tokens: response.usage.total_tokens }\n : undefined,\n response: { headers: {}, body: response },\n warnings: [],\n };\n };\n\n return model;\n } else {\n throw new Error(\n 'Not supported. You must use a /sync or /sync/v1 endpoint for embeddings.',\n );\n }\n };\n\n const provider = (modelId?: BasetenChatModelId) => createChatModel(modelId);\n\n provider.specificationVersion = 'v3' as const;\n provider.chatModel = createChatModel;\n provider.languageModel = createChatModel;\n provider.imageModel = (modelId: string) => {\n throw new NoSuchModelError({ modelId, modelType: 'imageModel' });\n };\n provider.embeddingModel = createEmbeddingModel;\n provider.textEmbeddingModel = createEmbeddingModel;\n return provider;\n}\n\nexport const baseten = createBaseten();\n","// Version string of this package injected at build time.\ndeclare const __PACKAGE_VERSION__: string | undefined;\nexport const VERSION: string =\n typeof __PACKAGE_VERSION__ !== 'undefined'\n ? 
__PACKAGE_VERSION__\n : '0.0.0-test';\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,+BAIO;AACP,sBAKO;AACP,4BAKO;AACP,gBAAkB;AAGlB,gCAAkC;;;AClB3B,IAAM,UACX,OACI,kCACA;;;ADoBN,IAAM,qBAAqB,YAAE,OAAO;AAAA,EAClC,OAAO,YAAE,OAAO;AAClB,CAAC;AAED,IAAM,wBAAkE;AAAA,EACtE,aAAa;AAAA,EACb,gBAAgB,UAAQ,KAAK;AAC/B;AA2DA,IAAM,iBAAiB;AAEhB,SAAS,cACd,UAAmC,CAAC,GACnB;AA/FnB;AAgGE,QAAM,cAAU,6CAAqB,aAAQ,YAAR,YAAmB,cAAc;AACtE,QAAM,aAAa,UACjB;AAAA,IACE;AAAA,MACE,eAAe,cAAU,kCAAW;AAAA,QAClC,QAAQ,QAAQ;AAAA,QAChB,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC,CAAC;AAAA,MACF,GAAG,QAAQ;AAAA,IACb;AAAA,IACA,kBAAkB,OAAO;AAAA,EAC3B;AASF,QAAM,uBAAuB,CAC3B,WACA,eACuB;AAAA,IACvB,UAAU,WAAW,SAAS;AAAA,IAC9B,KAAK,CAAC,EAAE,KAAK,MAAM;AAEjB,UACE,cAAc,gBACd,uCAAW,SAAS,aACpB,EAAC,uCAAW,SAAS,cACrB;AACA,eAAO,GAAG,SAAS,MAAM,IAAI;AAAA,MAC/B;AACA,aAAO,GAAG,aAAa,OAAO,GAAG,IAAI;AAAA,IACvC;AAAA,IACA,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CAAC,YAAiC;AAExD,UAAM,YAAY,QAAQ;AAE1B,QAAI,WAAW;AAEb,YAAM,qBAAqB,UAAU,SAAS,UAAU;AAExD,UAAI,oBAAoB;AAEtB,eAAO,IAAI,2DAAkC,4BAAW,eAAe;AAAA,UACrE,GAAG,qBAAqB,QAAQ,SAAS;AAAA,UACzC,gBAAgB;AAAA,QAClB,CAAC;AAAA,MACH,WAAW,UAAU,SAAS,UAAU,GAAG;AACzC,cAAM,IAAI;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAGA,WAAO,IAAI,2DAAkC,4BAAW,QAAQ;AAAA,MAC9D,GAAG,qBAAqB,MAAM;AAAA,MAC9B,gBAAgB;AAAA,IAClB,CAAC;AAAA,EACH;AAEA,QAAM,uBAAuB,CAAC,YAAsC;AAElE,UAAM,YAAY,QAAQ;AAC1B,QAAI,CAAC,WAAW;AACd,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAIA,UAAM,qBAAqB,UAAU,SAAS,OAAO;AAErD,QAAI,oBAAoB;AAEtB,YAAM,QAAQ,IAAI;AAAA,QAChB,4BAAW;AAAA,QACX;AAAA,UACE,GAAG,qBAAqB,aAAa,SAAS;AAAA,UAC9C,gBAAgB;AAAA,QAClB;AAAA,MACF;AAGA,YAAM,uBAAuB,UAAU,QAAQ,YAAY,OAAO;AAGlE,YAAM,oBAAoB,IAAI;AAAA,QAC5B;AAAA,YACA,kCAAW;AAAA,UACT,QAAQ,QAAQ;AAAA,UAChB,yBAAyB;AAAA,UACzB,aAAa;AAAA,QACf,CAAC;AAAA,MACH;AAGA,YAAM,UAAU,OAAM,WAAU;AAC9B,YAAI,CAAC,OAAO,UAAU,CAAC,MAAM,QAAQ,OAAO,MAAM,GAAG;AACnD,gBAAM,IAAI,MAAM,2CAA2C;AAAA,QAC7D;AAGA,cAAM,WAAW,MAAM,kBAAkB;AAAA,UACvC,OAAO;AAAA,UACP,4BAAW;AAAA;AAAA,QACb;AAEA,cAAM,aAAa,SAAS,KAA
K,IAAI,CAAC,SAAc,KAAK,SAAS;AAElE,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS,QACZ,EAAE,QAAQ,SAAS,MAAM,aAAa,IACtC;AAAA,UACJ,UAAU,EAAE,SAAS,CAAC,GAAG,MAAM,SAAS;AAAA,UACxC,UAAU,CAAC;AAAA,QACb;AAAA,MACF;AAEA,aAAO;AAAA,IACT,OAAO;AACL,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,QAAM,WAAW,CAAC,YAAiC,gBAAgB,OAAO;AAE1E,WAAS,uBAAuB;AAChC,WAAS,YAAY;AACrB,WAAS,gBAAgB;AACzB,WAAS,aAAa,CAAC,YAAoB;AACzC,UAAM,IAAI,iCAAiB,EAAE,SAAS,WAAW,aAAa,CAAC;AAAA,EACjE;AACA,WAAS,iBAAiB;AAC1B,WAAS,qBAAqB;AAC9B,SAAO;AACT;AAEO,IAAM,UAAU,cAAc;","names":[]}
package/dist/index.mjs ADDED
@@ -0,0 +1,140 @@
1
+ // src/baseten-provider.ts
2
+ import {
3
+ OpenAICompatibleChatLanguageModel,
4
+ OpenAICompatibleEmbeddingModel
5
+ } from "@ai-sdk/openai-compatible";
6
+ import {
7
+ NoSuchModelError
8
+ } from "@ai-sdk/provider";
9
+ import {
10
+ loadApiKey,
11
+ withoutTrailingSlash,
12
+ withUserAgentSuffix
13
+ } from "@ai-sdk/provider-utils";
14
+ import { z } from "zod/v4";
15
+ import { PerformanceClient } from "@basetenlabs/performance-client";
16
+
17
// src/version.ts
// Package version injected at build time. The bundler constant-folded the
// `typeof __PACKAGE_VERSION__ !== 'undefined'` guard to `true`, leaving a
// dead `"0.0.0-test"` branch (`true ? A : B`); the ternary is removed here.
var VERSION = "0.0.0-1c33ba03-20260114162300";
19
+
20
// src/baseten-provider.ts
// Zod schema for Baseten's error payload: `{ error: string }`.
var basetenErrorSchema = z.object({ error: z.string() });

// Error-structure descriptor consumed by the OpenAI-compatible models:
// pairs the schema with a function extracting the human-readable message.
var basetenErrorStructure = {
  errorSchema: basetenErrorSchema,
  errorToMessage: (data) => {
    return data.error;
  }
};
28
// By default, we use Baseten's shared Model APIs endpoint
// (used whenever no `modelURL` is supplied in the provider settings).
var defaultBaseURL = "https://inference.baseten.co/v1";
29
// Creates a Baseten provider (ProviderV3). Settings:
//   apiKey   - Baseten API key; falls back to the BASETEN_API_KEY env var.
//   baseURL  - Model APIs root (default: https://inference.baseten.co/v1).
//   modelURL - dedicated-deployment URL for custom chat/embedding models.
//   headers  - extra request headers; fetch - custom fetch implementation.
function createBaseten(options = {}) {
  var _a;
  const baseURL = withoutTrailingSlash((_a = options.baseURL) != null ? _a : defaultBaseURL);
  // Builds auth headers lazily (API key resolved when headers() is invoked)
  // and appends the `ai-sdk/baseten/<VERSION>` user-agent suffix.
  const getHeaders = () => withUserAgentSuffix(
    {
      Authorization: `Bearer ${loadApiKey({
        apiKey: options.apiKey,
        environmentVariableName: "BASETEN_API_KEY",
        description: "Baseten API key"
      })}`,
      ...options.headers
    },
    `ai-sdk/baseten/${VERSION}`
  );
  // Shared model config: provider id, URL resolver, headers, fetch.
  const getCommonModelConfig = (modelType, customURL) => ({
    provider: `baseten.${modelType}`,
    url: ({ path }) => {
      // For embeddings with /sync URLs (but not /sync/v1), we need to add /v1.
      if (modelType === "embedding" && (customURL == null ? void 0 : customURL.includes("/sync")) && !(customURL == null ? void 0 : customURL.includes("/sync/v1"))) {
        return `${customURL}/v1${path}`;
      }
      return `${customURL || baseURL}${path}`;
    },
    headers: getHeaders,
    fetch: options.fetch
  });
  const createChatModel = (modelId) => {
    // Use modelURL if provided, otherwise use the default Model APIs.
    const customURL = options.modelURL;
    if (customURL) {
      // /sync/v1 endpoints are OpenAI-compatible; /predict endpoints are not.
      const isOpenAICompatible = customURL.includes("/sync/v1");
      if (isOpenAICompatible) {
        // For /sync/v1 endpoints, use the standard OpenAI-compatible format.
        return new OpenAICompatibleChatLanguageModel(modelId != null ? modelId : "placeholder", {
          ...getCommonModelConfig("chat", customURL),
          errorStructure: basetenErrorStructure
        });
      } else if (customURL.includes("/predict")) {
        throw new Error(
          "Not supported. You must use a /sync/v1 endpoint for chat models."
        );
      }
    }
    // Use the default OpenAI-compatible format for Model APIs.
    return new OpenAICompatibleChatLanguageModel(modelId != null ? modelId : "chat", {
      ...getCommonModelConfig("chat"),
      errorStructure: basetenErrorStructure
    });
  };
  const createEmbeddingModel = (modelId) => {
    // Embeddings require a dedicated deployment URL.
    const customURL = options.modelURL;
    if (!customURL) {
      throw new Error(
        "No model URL provided for embeddings. Please set modelURL option for embeddings."
      );
    }
    // Both /sync and /sync/v1 are accepted; /v1 is stripped below because the
    // Performance Client appends /v1 itself.
    const isOpenAICompatible = customURL.includes("/sync");
    if (isOpenAICompatible) {
      // Create the model via OpenAICompatibleEmbeddingModel, then override doEmbed.
      const model = new OpenAICompatibleEmbeddingModel(
        modelId != null ? modelId : "embeddings",
        {
          ...getCommonModelConfig("embedding", customURL),
          errorStructure: basetenErrorStructure
        }
      );
      // Strip /v1 if present to avoid a double /v1 in the Performance Client URL.
      const performanceClientURL = customURL.replace("/sync/v1", "/sync");
      // Initialize the Baseten Performance Client once, reused across doEmbed calls.
      const performanceClient = new PerformanceClient(
        performanceClientURL,
        loadApiKey({
          apiKey: options.apiKey,
          environmentVariableName: "BASETEN_API_KEY",
          description: "Baseten API key"
        })
      );
      // Override doEmbed to route through the pre-created Performance Client.
      // The client batches internally, so inputs are not chunked here.
      // NOTE(review): this override reads only params.values — params.headers
      // and params.abortSignal are not forwarded; confirm upstream intent.
      model.doEmbed = async (params) => {
        if (!params.values || !Array.isArray(params.values)) {
          throw new Error("params.values must be an array of strings");
        }
        const response = await performanceClient.embed(
          params.values,
          modelId != null ? modelId : "embeddings"
          // model_id is for Model APIs, we don't use it here for dedicated
        );
        // Transform the response to the EmbeddingModelV3 result shape.
        const embeddings = response.data.map((item) => item.embedding);
        return {
          embeddings,
          usage: response.usage ? { tokens: response.usage.total_tokens } : void 0,
          response: { headers: {}, body: response },
          warnings: []
        };
      };
      return model;
    } else {
      throw new Error(
        "Not supported. You must use a /sync or /sync/v1 endpoint for embeddings."
      );
    }
  };
  // The provider is callable (alias for chatModel) and also exposes the
  // ProviderV3 factory methods as properties.
  const provider = (modelId) => createChatModel(modelId);
  provider.specificationVersion = "v3";
  provider.chatModel = createChatModel;
  provider.languageModel = createChatModel;
  // Image models are not supported by this provider.
  provider.imageModel = (modelId) => {
    throw new NoSuchModelError({ modelId, modelType: "imageModel" });
  };
  provider.embeddingModel = createEmbeddingModel;
  // textEmbeddingModel is the deprecated alias for embeddingModel.
  provider.textEmbeddingModel = createEmbeddingModel;
  return provider;
}
134
// Default provider instance built with no settings; the API key is resolved
// from the BASETEN_API_KEY env var when request headers are constructed.
var baseten = createBaseten();
135
+ export {
136
+ VERSION,
137
+ baseten,
138
+ createBaseten
139
+ };
140
+ //# sourceMappingURL=index.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/baseten-provider.ts","../src/version.ts"],"sourcesContent":["import {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleEmbeddingModel,\n ProviderErrorStructure,\n} from '@ai-sdk/openai-compatible';\nimport {\n EmbeddingModelV3,\n LanguageModelV3,\n NoSuchModelError,\n ProviderV3,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n withUserAgentSuffix,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport { BasetenChatModelId } from './baseten-chat-options';\nimport { BasetenEmbeddingModelId } from './baseten-embedding-options';\nimport { PerformanceClient } from '@basetenlabs/performance-client';\nimport { VERSION } from './version';\n\nexport type BasetenErrorData = z.infer<typeof basetenErrorSchema>;\n\nconst basetenErrorSchema = z.object({\n error: z.string(),\n});\n\nconst basetenErrorStructure: ProviderErrorStructure<BasetenErrorData> = {\n errorSchema: basetenErrorSchema,\n errorToMessage: data => data.error,\n};\n\nexport interface BasetenProviderSettings {\n /**\n * Baseten API key. Default value is taken from the `BASETEN_API_KEY`\n * environment variable.\n */\n apiKey?: string;\n\n /**\n * Base URL for the Model APIs. Default: 'https://inference.baseten.co/v1'\n */\n baseURL?: string;\n\n /**\n * Model URL for custom models (chat or embeddings).\n * If not supplied, the default Model APIs will be used.\n */\n modelURL?: string;\n /**\n * Custom headers to include in the requests.\n */\n headers?: Record<string, string>;\n\n /**\n * Custom fetch implementation. You can use it as a middleware to intercept requests,\n * or to provide a custom fetch implementation for e.g. 
testing.\n */\n fetch?: FetchFunction;\n}\n\nexport interface BasetenProvider extends ProviderV3 {\n /**\nCreates a chat model for text generation.\n*/\n (modelId?: BasetenChatModelId): LanguageModelV3;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId?: BasetenChatModelId): LanguageModelV3;\n\n /**\nCreates a language model for text generation. Alias for chatModel.\n*/\n languageModel(modelId?: BasetenChatModelId): LanguageModelV3;\n\n /**\nCreates a embedding model for text generation.\n*/\n embeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV3;\n\n /**\n * @deprecated Use `embeddingModel` instead.\n */\n textEmbeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV3;\n}\n\n// by default, we use the Model APIs\nconst defaultBaseURL = 'https://inference.baseten.co/v1';\n\nexport function createBaseten(\n options: BasetenProviderSettings = {},\n): BasetenProvider {\n const baseURL = withoutTrailingSlash(options.baseURL ?? defaultBaseURL);\n const getHeaders = () =>\n withUserAgentSuffix(\n {\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'BASETEN_API_KEY',\n description: 'Baseten API key',\n })}`,\n ...options.headers,\n },\n `ai-sdk/baseten/${VERSION}`,\n );\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (\n modelType: string,\n customURL?: string,\n ): CommonModelConfig => ({\n provider: `baseten.${modelType}`,\n url: ({ path }) => {\n // For embeddings with /sync URLs (but not /sync/v1), we need to add /v1\n if (\n modelType === 'embedding' &&\n customURL?.includes('/sync') &&\n !customURL?.includes('/sync/v1')\n ) {\n return `${customURL}/v1${path}`;\n }\n return `${customURL || baseURL}${path}`;\n },\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId?: BasetenChatModelId) => 
{\n // Use modelURL if provided, otherwise use default Model APIs\n const customURL = options.modelURL;\n\n if (customURL) {\n // Check if this is a /sync/v1 endpoint (OpenAI-compatible) or /predict endpoint (custom)\n const isOpenAICompatible = customURL.includes('/sync/v1');\n\n if (isOpenAICompatible) {\n // For /sync/v1 endpoints, use standard OpenAI-compatible format\n return new OpenAICompatibleChatLanguageModel(modelId ?? 'placeholder', {\n ...getCommonModelConfig('chat', customURL),\n errorStructure: basetenErrorStructure,\n });\n } else if (customURL.includes('/predict')) {\n throw new Error(\n 'Not supported. You must use a /sync/v1 endpoint for chat models.',\n );\n }\n }\n\n // Use default OpenAI-compatible format for Model APIs\n return new OpenAICompatibleChatLanguageModel(modelId ?? 'chat', {\n ...getCommonModelConfig('chat'),\n errorStructure: basetenErrorStructure,\n });\n };\n\n const createEmbeddingModel = (modelId?: BasetenEmbeddingModelId) => {\n // Use modelURL if provided\n const customURL = options.modelURL;\n if (!customURL) {\n throw new Error(\n 'No model URL provided for embeddings. Please set modelURL option for embeddings.',\n );\n }\n\n // Check if this is a /sync or /sync/v1 endpoint (OpenAI-compatible)\n // We support both /sync and /sync/v1, stripping /v1 before passing to Performance Client, as Performance Client adds /v1 itself\n const isOpenAICompatible = customURL.includes('/sync');\n\n if (isOpenAICompatible) {\n // Create the model using OpenAICompatibleEmbeddingModel and override doEmbed\n const model = new OpenAICompatibleEmbeddingModel(\n modelId ?? 
'embeddings',\n {\n ...getCommonModelConfig('embedding', customURL),\n errorStructure: basetenErrorStructure,\n },\n );\n\n // Strip /v1 from URL if present before passing to Performance Client to avoid double /v1\n const performanceClientURL = customURL.replace('/sync/v1', '/sync');\n\n // Initialize the B10 Performance Client once for reuse\n const performanceClient = new PerformanceClient(\n performanceClientURL,\n loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'BASETEN_API_KEY',\n description: 'Baseten API key',\n }),\n );\n\n // Override the doEmbed method to use the pre-created Performance Client\n model.doEmbed = async params => {\n if (!params.values || !Array.isArray(params.values)) {\n throw new Error('params.values must be an array of strings');\n }\n\n // Performance Client handles batching internally, so we don't need to limit in 128 here\n const response = await performanceClient.embed(\n params.values,\n modelId ?? 'embeddings', // model_id is for Model APIs, we don't use it here for dedicated\n );\n // Transform the response to match the expected format\n const embeddings = response.data.map((item: any) => item.embedding);\n\n return {\n embeddings,\n usage: response.usage\n ? { tokens: response.usage.total_tokens }\n : undefined,\n response: { headers: {}, body: response },\n warnings: [],\n };\n };\n\n return model;\n } else {\n throw new Error(\n 'Not supported. 
You must use a /sync or /sync/v1 endpoint for embeddings.',\n );\n }\n };\n\n const provider = (modelId?: BasetenChatModelId) => createChatModel(modelId);\n\n provider.specificationVersion = 'v3' as const;\n provider.chatModel = createChatModel;\n provider.languageModel = createChatModel;\n provider.imageModel = (modelId: string) => {\n throw new NoSuchModelError({ modelId, modelType: 'imageModel' });\n };\n provider.embeddingModel = createEmbeddingModel;\n provider.textEmbeddingModel = createEmbeddingModel;\n return provider;\n}\n\nexport const baseten = createBaseten();\n","// Version string of this package injected at build time.\ndeclare const __PACKAGE_VERSION__: string | undefined;\nexport const VERSION: string =\n typeof __PACKAGE_VERSION__ !== 'undefined'\n ? __PACKAGE_VERSION__\n : '0.0.0-test';\n"],"mappings":";AAAA;AAAA,EACE;AAAA,EACA;AAAA,OAEK;AACP;AAAA,EAGE;AAAA,OAEK;AACP;AAAA,EAEE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP,SAAS,SAAS;AAGlB,SAAS,yBAAyB;;;AClB3B,IAAM,UACX,OACI,kCACA;;;ADoBN,IAAM,qBAAqB,EAAE,OAAO;AAAA,EAClC,OAAO,EAAE,OAAO;AAClB,CAAC;AAED,IAAM,wBAAkE;AAAA,EACtE,aAAa;AAAA,EACb,gBAAgB,UAAQ,KAAK;AAC/B;AA2DA,IAAM,iBAAiB;AAEhB,SAAS,cACd,UAAmC,CAAC,GACnB;AA/FnB;AAgGE,QAAM,UAAU,sBAAqB,aAAQ,YAAR,YAAmB,cAAc;AACtE,QAAM,aAAa,MACjB;AAAA,IACE;AAAA,MACE,eAAe,UAAU,WAAW;AAAA,QAClC,QAAQ,QAAQ;AAAA,QAChB,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC,CAAC;AAAA,MACF,GAAG,QAAQ;AAAA,IACb;AAAA,IACA,kBAAkB,OAAO;AAAA,EAC3B;AASF,QAAM,uBAAuB,CAC3B,WACA,eACuB;AAAA,IACvB,UAAU,WAAW,SAAS;AAAA,IAC9B,KAAK,CAAC,EAAE,KAAK,MAAM;AAEjB,UACE,cAAc,gBACd,uCAAW,SAAS,aACpB,EAAC,uCAAW,SAAS,cACrB;AACA,eAAO,GAAG,SAAS,MAAM,IAAI;AAAA,MAC/B;AACA,aAAO,GAAG,aAAa,OAAO,GAAG,IAAI;AAAA,IACvC;AAAA,IACA,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CAAC,YAAiC;AAExD,UAAM,YAAY,QAAQ;AAE1B,QAAI,WAAW;AAEb,YAAM,qBAAqB,UAAU,SAAS,UAAU;AAExD,UAAI,oBAAoB;AAEtB,eAAO,IAAI,kCAAkC,4BAAW,eAAe;AAAA,UACrE,GAAG,qBAAqB,QAAQ,SAAS;AAAA,UACzC,gBAAgB;AAAA,QAClB,CAAC;AAAA,MACH,WAAW,UAAU,SAAS,UAAU,GAAG;AACzC,cAAM,IAAI;AAAA,UACR;AA
AA,QACF;AAAA,MACF;AAAA,IACF;AAGA,WAAO,IAAI,kCAAkC,4BAAW,QAAQ;AAAA,MAC9D,GAAG,qBAAqB,MAAM;AAAA,MAC9B,gBAAgB;AAAA,IAClB,CAAC;AAAA,EACH;AAEA,QAAM,uBAAuB,CAAC,YAAsC;AAElE,UAAM,YAAY,QAAQ;AAC1B,QAAI,CAAC,WAAW;AACd,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAIA,UAAM,qBAAqB,UAAU,SAAS,OAAO;AAErD,QAAI,oBAAoB;AAEtB,YAAM,QAAQ,IAAI;AAAA,QAChB,4BAAW;AAAA,QACX;AAAA,UACE,GAAG,qBAAqB,aAAa,SAAS;AAAA,UAC9C,gBAAgB;AAAA,QAClB;AAAA,MACF;AAGA,YAAM,uBAAuB,UAAU,QAAQ,YAAY,OAAO;AAGlE,YAAM,oBAAoB,IAAI;AAAA,QAC5B;AAAA,QACA,WAAW;AAAA,UACT,QAAQ,QAAQ;AAAA,UAChB,yBAAyB;AAAA,UACzB,aAAa;AAAA,QACf,CAAC;AAAA,MACH;AAGA,YAAM,UAAU,OAAM,WAAU;AAC9B,YAAI,CAAC,OAAO,UAAU,CAAC,MAAM,QAAQ,OAAO,MAAM,GAAG;AACnD,gBAAM,IAAI,MAAM,2CAA2C;AAAA,QAC7D;AAGA,cAAM,WAAW,MAAM,kBAAkB;AAAA,UACvC,OAAO;AAAA,UACP,4BAAW;AAAA;AAAA,QACb;AAEA,cAAM,aAAa,SAAS,KAAK,IAAI,CAAC,SAAc,KAAK,SAAS;AAElE,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS,QACZ,EAAE,QAAQ,SAAS,MAAM,aAAa,IACtC;AAAA,UACJ,UAAU,EAAE,SAAS,CAAC,GAAG,MAAM,SAAS;AAAA,UACxC,UAAU,CAAC;AAAA,QACb;AAAA,MACF;AAEA,aAAO;AAAA,IACT,OAAO;AACL,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,QAAM,WAAW,CAAC,YAAiC,gBAAgB,OAAO;AAE1E,WAAS,uBAAuB;AAChC,WAAS,YAAY;AACrB,WAAS,gBAAgB;AACzB,WAAS,aAAa,CAAC,YAAoB;AACzC,UAAM,IAAI,iBAAiB,EAAE,SAAS,WAAW,aAAa,CAAC;AAAA,EACjE;AACA,WAAS,iBAAiB;AAC1B,WAAS,qBAAqB;AAC9B,SAAO;AACT;AAEO,IAAM,UAAU,cAAc;","names":[]}
package/package.json ADDED
@@ -0,0 +1,68 @@
1
+ {
2
+ "name": "@ai-sdk/baseten",
3
+ "version": "0.0.0-1c33ba03-20260114162300",
4
+ "license": "Apache-2.0",
5
+ "sideEffects": false,
6
+ "main": "./dist/index.js",
7
+ "module": "./dist/index.mjs",
8
+ "types": "./dist/index.d.ts",
9
+ "files": [
10
+ "dist/**/*",
11
+ "CHANGELOG.md",
12
+ "README.md"
13
+ ],
14
+ "exports": {
15
+ "./package.json": "./package.json",
16
+ ".": {
17
+ "types": "./dist/index.d.ts",
18
+ "import": "./dist/index.mjs",
19
+ "require": "./dist/index.js"
20
+ }
21
+ },
22
+ "dependencies": {
23
+ "@basetenlabs/performance-client": "^0.0.10",
24
+ "@ai-sdk/openai-compatible": "0.0.0-1c33ba03-20260114162300",
25
+ "@ai-sdk/provider": "3.0.3",
26
+ "@ai-sdk/provider-utils": "0.0.0-1c33ba03-20260114162300"
27
+ },
28
+ "devDependencies": {
29
+ "@types/node": "20.17.24",
30
+ "tsup": "^8",
31
+ "typescript": "5.8.3",
32
+ "zod": "3.25.76",
33
+ "@vercel/ai-tsconfig": "0.0.0"
34
+ },
35
+ "peerDependencies": {
36
+ "zod": "^3.25.76 || ^4.1.8"
37
+ },
38
+ "engines": {
39
+ "node": ">=18"
40
+ },
41
+ "publishConfig": {
42
+ "access": "public"
43
+ },
44
+ "homepage": "https://ai-sdk.dev/docs",
45
+ "repository": {
46
+ "type": "git",
47
+ "url": "git+https://github.com/vercel/ai.git"
48
+ },
49
+ "bugs": {
50
+ "url": "https://github.com/vercel/ai/issues"
51
+ },
52
+ "keywords": [
53
+ "ai"
54
+ ],
55
+ "scripts": {
56
+ "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
57
+ "build:watch": "pnpm clean && tsup --watch",
58
+ "clean": "rm -rf dist *.tsbuildinfo",
59
+ "lint": "eslint \"./**/*.ts*\"",
60
+ "type-check": "tsc --build",
61
+ "prettier-check": "prettier --check \"./**/*.ts*\"",
62
+ "test": "pnpm test:node && pnpm test:edge",
63
+ "test:update": "pnpm test:node -u",
64
+ "test:watch": "vitest --config vitest.node.config.js",
65
+ "test:edge": "vitest --config vitest.edge.config.js --run",
66
+ "test:node": "vitest --config vitest.node.config.js --run"
67
+ }
68
+ }