ai 3.0.14 → 3.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist/index.d.mts +375 -56
  2. package/dist/index.d.ts +375 -56
  3. package/dist/index.js +195 -135
  4. package/dist/index.js.map +1 -1
  5. package/dist/index.mjs +195 -135
  6. package/dist/index.mjs.map +1 -1
  7. package/mistral/dist/index.d.mts +4 -1
  8. package/mistral/dist/index.d.ts +4 -1
  9. package/mistral/dist/index.js +15 -15
  10. package/mistral/dist/index.js.map +1 -1
  11. package/mistral/dist/index.mjs +15 -15
  12. package/mistral/dist/index.mjs.map +1 -1
  13. package/openai/dist/index.d.mts +4 -0
  14. package/openai/dist/index.d.ts +4 -0
  15. package/openai/dist/index.js +19 -19
  16. package/openai/dist/index.js.map +1 -1
  17. package/openai/dist/index.mjs +19 -19
  18. package/openai/dist/index.mjs.map +1 -1
  19. package/package.json +8 -8
  20. package/rsc/dist/index.d.ts +21 -3
  21. package/rsc/dist/rsc-client.d.mts +1 -1
  22. package/rsc/dist/rsc-client.mjs +2 -0
  23. package/rsc/dist/rsc-client.mjs.map +1 -1
  24. package/rsc/dist/rsc-server.d.mts +2 -2
  25. package/rsc/dist/rsc-server.mjs +1 -1
  26. package/rsc/dist/rsc-server.mjs.map +1 -1
  27. package/rsc/dist/rsc-shared.d.mts +20 -2
  28. package/rsc/dist/rsc-shared.mjs +75 -2
  29. package/rsc/dist/rsc-shared.mjs.map +1 -1
  30. package/{ai-model-specification → spec}/dist/index.d.mts +4 -0
  31. package/{ai-model-specification → spec}/dist/index.d.ts +4 -0
  32. package/{ai-model-specification → spec}/dist/index.js +29 -29
  33. package/{ai-model-specification → spec}/dist/index.mjs +25 -25
  34. /package/{ai-model-specification → spec}/dist/index.js.map +0 -0
  35. /package/{ai-model-specification → spec}/dist/index.mjs.map +0 -0
package/dist/index.mjs CHANGED
@@ -1,7 +1,7 @@
 // core/generate-object/generate-object.ts
 import zodToJsonSchema from "zod-to-json-schema";
 
-// ai-model-specification/errors/api-call-error.ts
+// spec/errors/api-call-error.ts
 var APICallError = class extends Error {
   constructor({
     message,
@@ -45,7 +45,7 @@ var APICallError = class extends Error {
   }
 };
 
-// ai-model-specification/errors/invalid-argument-error.ts
+// spec/errors/invalid-argument-error.ts
 var InvalidArgumentError = class extends Error {
   constructor({
     parameter,
@@ -71,7 +71,7 @@ var InvalidArgumentError = class extends Error {
   }
 };
 
-// ai-model-specification/errors/invalid-data-content-error.ts
+// spec/errors/invalid-data-content-error.ts
 var InvalidDataContentError = class extends Error {
   constructor({
     content,
@@ -94,7 +94,27 @@ var InvalidDataContentError = class extends Error {
   }
 };
 
-// ai-model-specification/util/get-error-message.ts
+// spec/errors/invalid-prompt-error.ts
+var InvalidPromptError = class extends Error {
+  constructor({ prompt: prompt2, message }) {
+    super(`Invalid prompt: ${message}`);
+    this.name = "AI_InvalidPromptError";
+    this.prompt = prompt2;
+  }
+  static isInvalidPromptError(error) {
+    return error instanceof Error && error.name === "AI_InvalidPromptError" && prompt != null;
+  }
+  toJSON() {
+    return {
+      name: this.name,
+      message: this.message,
+      stack: this.stack,
+      prompt: this.prompt
+    };
+  }
+};
+
+// spec/util/get-error-message.ts
 function getErrorMessage(error) {
   if (error == null) {
     return "unknown error";
@@ -108,10 +128,10 @@ function getErrorMessage(error) {
   return JSON.stringify(error);
 }
 
-// ai-model-specification/util/parse-json.ts
+// spec/util/parse-json.ts
 import SecureJSON from "secure-json-parse";
 
-// ai-model-specification/errors/json-parse-error.ts
+// spec/errors/json-parse-error.ts
 var JSONParseError = class extends Error {
   constructor({ text, cause }) {
     super(
@@ -136,7 +156,7 @@ Error message: ${getErrorMessage(cause)}`
   }
 };
 
-// ai-model-specification/errors/type-validation-error.ts
+// spec/errors/type-validation-error.ts
 var TypeValidationError = class extends Error {
   constructor({ value, cause }) {
     super(
@@ -161,7 +181,7 @@ Error message: ${getErrorMessage(cause)}`
   }
 };
 
-// ai-model-specification/util/validate-types.ts
+// spec/util/validate-types.ts
 function safeValidateTypes({
   value,
   schema
@@ -189,7 +209,7 @@ function safeValidateTypes({
   }
 }
 
-// ai-model-specification/util/parse-json.ts
+// spec/util/parse-json.ts
 function safeParseJSON({
   text,
   schema
@@ -211,7 +231,7 @@ function safeParseJSON({
   }
 }
 
-// ai-model-specification/util/uint8-utils.ts
+// spec/util/uint8-utils.ts
 function convertBase64ToUint8Array(base64String) {
   const base64Url = base64String.replace(/-/g, "+").replace(/_/g, "/");
   const latin1string = globalThis.atob(base64Url);
@@ -225,7 +245,7 @@ function convertUint8ArrayToBase64(array) {
   return globalThis.btoa(latin1string);
 }
 
-// ai-model-specification/errors/invalid-tool-arguments-error.ts
+// spec/errors/invalid-tool-arguments-error.ts
 var InvalidToolArgumentsError = class extends Error {
   constructor({
     toolArgs,
@@ -256,7 +276,7 @@ var InvalidToolArgumentsError = class extends Error {
   }
 };
 
-// ai-model-specification/errors/no-object-generated-error.ts
+// spec/errors/no-object-generated-error.ts
 var NoTextGeneratedError = class extends Error {
   constructor() {
     super(`No text generated.`);
@@ -275,7 +295,7 @@ var NoTextGeneratedError = class extends Error {
   }
 };
 
-// ai-model-specification/errors/no-such-tool-error.ts
+// spec/errors/no-such-tool-error.ts
 var NoSuchToolError = class extends Error {
   constructor({ message, toolName }) {
     super(message);
@@ -295,7 +315,7 @@ var NoSuchToolError = class extends Error {
   }
 };
 
-// ai-model-specification/errors/retry-error.ts
+// spec/errors/retry-error.ts
 var RetryError = class extends Error {
   constructor({
     message,
@@ -355,89 +375,101 @@ function convertDataContentToUint8Array(content) {
 }
 
 // core/prompt/convert-to-language-model-prompt.ts
-function convertToLanguageModelPrompt({
-  system,
-  prompt,
-  messages
-}) {
-  if (prompt == null && messages == null) {
-    throw new Error("prompt or messages must be defined");
-  }
-  if (prompt != null && messages != null) {
-    throw new Error("prompt and messages cannot be defined at the same time");
-  }
+function convertToLanguageModelPrompt(prompt2) {
   const languageModelMessages = [];
-  if (system != null) {
-    languageModelMessages.push({ role: "system", content: system });
-  }
-  if (typeof prompt === "string") {
-    languageModelMessages.push({
-      role: "user",
-      content: [{ type: "text", text: prompt }]
-    });
-  } else {
-    messages = messages;
-    languageModelMessages.push(
-      ...messages.map((message) => {
-        switch (message.role) {
-          case "user": {
-            if (typeof message.content === "string") {
+  if (prompt2.system != null) {
+    languageModelMessages.push({ role: "system", content: prompt2.system });
+  }
+  switch (prompt2.type) {
+    case "prompt": {
+      languageModelMessages.push({
+        role: "user",
+        content: [{ type: "text", text: prompt2.prompt }]
+      });
+      break;
+    }
+    case "messages": {
+      languageModelMessages.push(
+        ...prompt2.messages.map((message) => {
+          switch (message.role) {
+            case "user": {
+              if (typeof message.content === "string") {
+                return {
+                  role: "user",
+                  content: [{ type: "text", text: message.content }]
+                };
+              }
               return {
                 role: "user",
-                content: [{ type: "text", text: message.content }]
-              };
-            }
-            return {
-              role: "user",
-              content: message.content.map(
-                (part) => {
-                  switch (part.type) {
-                    case "text": {
-                      return part;
-                    }
-                    case "image": {
-                      return {
-                        type: "image",
-                        image: part.image instanceof URL ? part.image : convertDataContentToUint8Array(part.image),
-                        mimeType: part.mimeType
-                      };
+                content: message.content.map(
+                  (part) => {
+                    switch (part.type) {
+                      case "text": {
+                        return part;
+                      }
+                      case "image": {
+                        return {
+                          type: "image",
+                          image: part.image instanceof URL ? part.image : convertDataContentToUint8Array(part.image),
+                          mimeType: part.mimeType
+                        };
+                      }
                     }
                   }
-                }
-              )
-            };
-          }
-          case "assistant": {
-            if (typeof message.content === "string") {
-              return {
-                role: "assistant",
-                content: [{ type: "text", text: message.content }]
+                )
               };
             }
-            return { role: "assistant", content: message.content };
-          }
-          case "tool": {
-            return message;
+            case "assistant": {
+              if (typeof message.content === "string") {
+                return {
+                  role: "assistant",
+                  content: [{ type: "text", text: message.content }]
+                };
+              }
+              return { role: "assistant", content: message.content };
+            }
+            case "tool": {
+              return message;
+            }
           }
-        }
-      })
-    );
+        })
+      );
+      break;
+    }
+    default: {
+      const _exhaustiveCheck = prompt2;
+      throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
+    }
   }
   return languageModelMessages;
 }
 
-// core/prompt/get-input-format.ts
-function getInputFormat({
-  prompt,
-  messages
-}) {
-  if (prompt == null && messages == null) {
-    throw new Error("prompt or messages must be defined");
+// core/prompt/get-validated-prompt.ts
+function getValidatedPrompt(prompt2) {
+  if (prompt2.prompt == null && prompt2.messages == null) {
+    throw new InvalidPromptError({
+      prompt: prompt2,
+      message: "prompt or messages must be defined"
+    });
   }
-  if (prompt != null && messages != null) {
-    throw new Error("prompt and messages cannot be defined at the same time");
+  if (prompt2.prompt != null && prompt2.messages != null) {
+    throw new InvalidPromptError({
+      prompt: prompt2,
+      message: "prompt and messages cannot be defined at the same time"
+    });
   }
-  return prompt != null ? "prompt" : "messages";
+  return prompt2.prompt != null ? {
+    type: "prompt",
+    prompt: prompt2.prompt,
+    messages: void 0,
+    system: prompt2.system
+  } : {
+    type: "messages",
+    prompt: void 0,
+    messages: prompt2.messages,
+    // only possible case bc of checks above
+    system: prompt2.system
+  };
 }
 
 // core/prompt/prepare-call-settings.ts
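
Note: the hunk above replaces getInputFormat with getValidatedPrompt, which validates the input once and returns a tagged object instead of the bare string "prompt" or "messages"; the call sites below then reuse validatedPrompt.type as the inputFormat and hand the whole object to convertToLanguageModelPrompt. An illustrative sketch of the two result shapes, mirroring the code above (getValidatedPrompt is internal, not exported):

// Illustrative only; mirrors the return values of getValidatedPrompt above.
const a = getValidatedPrompt({ system: "Be terse.", prompt: "Hello", messages: void 0 });
// a = { type: "prompt", prompt: "Hello", messages: undefined, system: "Be terse." }

const b = getValidatedPrompt({ system: void 0, prompt: void 0, messages: [{ role: "user", content: "Hello" }] });
// b = { type: "messages", prompt: undefined, messages: [...], system: undefined }

// Passing neither (or both) of prompt/messages throws InvalidPromptError.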
@@ -649,7 +681,7 @@ async function experimental_generateObject({
   schema,
   mode,
   system,
-  prompt,
+  prompt: prompt2,
   messages,
   maxRetries,
   abortSignal,
@@ -667,19 +699,20 @@
   let warnings;
   switch (mode) {
    case "json": {
-      const generateResult = await retry(
-        () => model.doGenerate({
+      const validatedPrompt = getValidatedPrompt({
+        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        prompt: prompt2,
+        messages
+      });
+      const generateResult = await retry(() => {
+        return model.doGenerate({
           mode: { type: "object-json" },
           ...prepareCallSettings(settings),
-          inputFormat: getInputFormat({ prompt, messages }),
-          prompt: convertToLanguageModelPrompt({
-            system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-            prompt,
-            messages
-          }),
+          inputFormat: validatedPrompt.type,
+          prompt: convertToLanguageModelPrompt(validatedPrompt),
           abortSignal
-        })
-      );
+        });
+      });
       if (generateResult.text === void 0) {
         throw new NoTextGeneratedError();
       }
@@ -690,16 +723,17 @@
       break;
     }
     case "grammar": {
+      const validatedPrompt = getValidatedPrompt({
+        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        prompt: prompt2,
+        messages
+      });
       const generateResult = await retry(
         () => model.doGenerate({
           mode: { type: "object-grammar", schema: jsonSchema },
           ...settings,
-          inputFormat: getInputFormat({ prompt, messages }),
-          prompt: convertToLanguageModelPrompt({
-            system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-            prompt,
-            messages
-          }),
+          inputFormat: validatedPrompt.type,
+          prompt: convertToLanguageModelPrompt(validatedPrompt),
           abortSignal
         })
       );
@@ -713,6 +747,11 @@
       break;
     }
     case "tool": {
+      const validatedPrompt = getValidatedPrompt({
+        system,
+        prompt: prompt2,
+        messages
+      });
       const generateResult = await retry(
         () => model.doGenerate({
           mode: {
@@ -725,8 +764,8 @@
             }
           },
           ...settings,
-          inputFormat: getInputFormat({ prompt, messages }),
-          prompt: convertToLanguageModelPrompt({ system, prompt, messages }),
+          inputFormat: validatedPrompt.type,
+          prompt: convertToLanguageModelPrompt(validatedPrompt),
           abortSignal
         })
      );
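
Note: with the hunks above, all three generateObject modes ("json", "grammar", "tool") share the same flow: validate the prompt once, then pass validatedPrompt.type as inputFormat and the converted prompt to doGenerate. For context, a typical call (a sketch; the zod import and the provider model are assumptions, not shown in this diff):

import { experimental_generateObject } from "ai";
import { z } from "zod";

// Sketch: `model` is a LanguageModel from a provider package.
async function getCity(model) {
  const { object } = await experimental_generateObject({
    model,
    mode: "json", // or "grammar" / "tool", matching the switch above
    schema: z.object({ city: z.string() }),
    prompt: "Name one city in France." // or `messages`, but not both
  });
  return object.city;
}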
@@ -1166,7 +1205,7 @@ async function experimental_streamObject({
   schema,
   mode,
   system,
-  prompt,
+  prompt: prompt2,
   messages,
   maxRetries,
   abortSignal,
@@ -1181,15 +1220,16 @@
   let transformer;
   switch (mode) {
     case "json": {
+      const validatedPrompt = getValidatedPrompt({
+        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        prompt: prompt2,
+        messages
+      });
       callOptions = {
         mode: { type: "object-json" },
         ...prepareCallSettings(settings),
-        inputFormat: getInputFormat({ prompt, messages }),
-        prompt: convertToLanguageModelPrompt({
-          system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-          prompt,
-          messages
-        }),
+        inputFormat: validatedPrompt.type,
+        prompt: convertToLanguageModelPrompt(validatedPrompt),
         abortSignal
       };
       transformer = {
@@ -1207,15 +1247,16 @@
       break;
     }
     case "grammar": {
+      const validatedPrompt = getValidatedPrompt({
+        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        prompt: prompt2,
+        messages
+      });
       callOptions = {
         mode: { type: "object-grammar", schema: jsonSchema },
         ...settings,
-        inputFormat: getInputFormat({ prompt, messages }),
-        prompt: convertToLanguageModelPrompt({
-          system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-          prompt,
-          messages
-        }),
+        inputFormat: validatedPrompt.type,
+        prompt: convertToLanguageModelPrompt(validatedPrompt),
         abortSignal
       };
       transformer = {
@@ -1233,6 +1274,11 @@
       break;
     }
     case "tool": {
+      const validatedPrompt = getValidatedPrompt({
+        system,
+        prompt: prompt2,
+        messages
+      });
       callOptions = {
         mode: {
           type: "object-tool",
@@ -1244,8 +1290,8 @@
           }
         },
         ...settings,
-        inputFormat: getInputFormat({ prompt, messages }),
-        prompt: convertToLanguageModelPrompt({ system, prompt, messages }),
+        inputFormat: validatedPrompt.type,
+        prompt: convertToLanguageModelPrompt(validatedPrompt),
         abortSignal
       };
       transformer = {
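
Note: experimental_streamObject receives the identical refactor across its three modes. A hedged usage sketch; partialObjectStream is assumed from the package's public result type and does not appear in the hunks shown here:

import { experimental_streamObject } from "ai";
import { z } from "zod";

// Sketch: `model` is a LanguageModel from a provider package;
// `partialObjectStream` is an assumption, not shown in this diff.
async function streamCity(model) {
  const result = await experimental_streamObject({
    model,
    mode: "tool", // or "json" / "grammar", as in the switch above
    schema: z.object({ city: z.string() }),
    prompt: "Name one city in France."
  });
  for await (const partial of result.partialObjectStream) {
    console.log(partial); // progressively completed object
  }
}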
@@ -1352,7 +1398,7 @@ async function experimental_generateText({
   model,
   tools,
   system,
-  prompt,
+  prompt: prompt2,
   messages,
   maxRetries,
   abortSignal,
@@ -1360,8 +1406,9 @@
 }) {
   var _a, _b;
   const retry = retryWithExponentialBackoff({ maxRetries });
-  const modelResponse = await retry(
-    () => model.doGenerate({
+  const validatedPrompt = getValidatedPrompt({ system, prompt: prompt2, messages });
+  const modelResponse = await retry(() => {
+    return model.doGenerate({
       mode: {
         type: "regular",
         tools: tools == null ? void 0 : Object.entries(tools).map(([name, tool2]) => ({
@@ -1372,15 +1419,11 @@ async function experimental_generateText({
        }))
      },
      ...prepareCallSettings(settings),
-      inputFormat: getInputFormat({ prompt, messages }),
-      prompt: convertToLanguageModelPrompt({
-        system,
-        prompt,
-        messages
-      }),
+      inputFormat: validatedPrompt.type,
+      prompt: convertToLanguageModelPrompt(validatedPrompt),
      abortSignal
-    })
-  );
+    });
+  });
   const toolCalls = [];
   for (const modelToolCall of (_a = modelResponse.toolCalls) != null ? _a : []) {
     toolCalls.push(parseToolCall({ toolCall: modelToolCall, tools }));
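
Note: experimental_generateText follows the same pattern: getValidatedPrompt runs once before the retry loop, and doGenerate receives validatedPrompt.type plus the converted prompt. A typical call for reference (sketch; the wrapper and model argument are illustrative):

import { experimental_generateText } from "ai";

// Sketch: `model` is a LanguageModel from a provider package.
async function answer(model) {
  const { text, toolCalls } = await experimental_generateText({
    model,
    system: "You are a terse assistant.",
    prompt: "What is the capital of France?" // or `messages`, but not both
  });
  return { text, toolCalls };
}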
@@ -1589,13 +1632,14 @@ async function experimental_streamText({
   model,
   tools,
   system,
-  prompt,
+  prompt: prompt2,
   messages,
   maxRetries,
   abortSignal,
   ...settings
 }) {
   const retry = retryWithExponentialBackoff({ maxRetries });
+  const validatedPrompt = getValidatedPrompt({ system, prompt: prompt2, messages });
   const { stream, warnings } = await retry(
     () => model.doStream({
       mode: {
@@ -1608,12 +1652,8 @@
         }))
       },
       ...prepareCallSettings(settings),
-      inputFormat: getInputFormat({ prompt, messages }),
-      prompt: convertToLanguageModelPrompt({
-        system,
-        prompt,
-        messages
-      }),
+      inputFormat: validatedPrompt.type,
+      prompt: convertToLanguageModelPrompt(validatedPrompt),
       abortSignal
     })
   );
@@ -1633,6 +1673,11 @@ var StreamTextResult = class {
     this.originalStream = stream;
     this.warnings = warnings;
   }
+  /**
+  A text stream that returns only the generated text deltas. You can use it
+  as either an AsyncIterable or a ReadableStream. When an error occurs, the
+  stream will throw the error.
+   */
   get textStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1646,6 +1691,12 @@
       }
     });
   }
+  /**
+  A stream with all events, including text deltas, tool calls, tool results, and
+  errors.
+  You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
+  stream will throw the error.
+   */
   get fullStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
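
Note: the doc comments added in the two hunks above describe existing behavior of textStream and fullStream; the next hunk documents toAIStream. A consumption sketch, assuming StreamingTextResponse from the same package (it is part of the public API but outside this diff):

import { experimental_streamText, StreamingTextResponse } from "ai";

// Sketch: `model` is a LanguageModel from a provider package.
async function handler(model) {
  const result = await experimental_streamText({
    model,
    prompt: "Write a haiku about diffs."
  });

  // Option A: iterate the text deltas directly (AsyncIterable, per the new JSDoc).
  // for await (const delta of result.textStream) { process.stdout.write(delta); }

  // Option B: bridge into the streaming response used by useChat / useCompletion.
  return new StreamingTextResponse(result.toAIStream());
}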
@@ -1659,6 +1710,15 @@
       }
     });
   }
+  /**
+  Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
+  It can be used with the `useChat` and `useCompletion` hooks.
+
+  @param callbacks
+  Stream callbacks that will be called when the stream emits events.
+
+  @returns an `AIStream` object.
+   */
   toAIStream(callbacks) {
     return readableFromAsyncIterable(this.textStream).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(
       createStreamDataTransformer(callbacks == null ? void 0 : callbacks.experimental_streamData)