ai 3.0.14 → 3.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist/index.d.mts +375 -56
  2. package/dist/index.d.ts +375 -56
  3. package/dist/index.js +195 -135
  4. package/dist/index.js.map +1 -1
  5. package/dist/index.mjs +195 -135
  6. package/dist/index.mjs.map +1 -1
  7. package/mistral/dist/index.d.mts +4 -1
  8. package/mistral/dist/index.d.ts +4 -1
  9. package/mistral/dist/index.js +15 -15
  10. package/mistral/dist/index.js.map +1 -1
  11. package/mistral/dist/index.mjs +15 -15
  12. package/mistral/dist/index.mjs.map +1 -1
  13. package/openai/dist/index.d.mts +4 -0
  14. package/openai/dist/index.d.ts +4 -0
  15. package/openai/dist/index.js +19 -19
  16. package/openai/dist/index.js.map +1 -1
  17. package/openai/dist/index.mjs +19 -19
  18. package/openai/dist/index.mjs.map +1 -1
  19. package/package.json +8 -8
  20. package/rsc/dist/index.d.ts +21 -3
  21. package/rsc/dist/rsc-client.d.mts +1 -1
  22. package/rsc/dist/rsc-client.mjs +2 -0
  23. package/rsc/dist/rsc-client.mjs.map +1 -1
  24. package/rsc/dist/rsc-server.d.mts +2 -2
  25. package/rsc/dist/rsc-server.mjs +1 -1
  26. package/rsc/dist/rsc-server.mjs.map +1 -1
  27. package/rsc/dist/rsc-shared.d.mts +20 -2
  28. package/rsc/dist/rsc-shared.mjs +75 -2
  29. package/rsc/dist/rsc-shared.mjs.map +1 -1
  30. package/{ai-model-specification → spec}/dist/index.d.mts +4 -0
  31. package/{ai-model-specification → spec}/dist/index.d.ts +4 -0
  32. package/{ai-model-specification → spec}/dist/index.js +29 -29
  33. package/{ai-model-specification → spec}/dist/index.mjs +25 -25
  34. /package/{ai-model-specification → spec}/dist/index.js.map +0 -0
  35. /package/{ai-model-specification → spec}/dist/index.mjs.map +0 -0
package/dist/index.js CHANGED
@@ -77,7 +77,7 @@ module.exports = __toCommonJS(streams_exports);
77
77
  // core/generate-object/generate-object.ts
78
78
  var import_zod_to_json_schema = __toESM(require("zod-to-json-schema"));
79
79
 
80
- // ai-model-specification/errors/api-call-error.ts
80
+ // spec/errors/api-call-error.ts
81
81
  var APICallError = class extends Error {
82
82
  constructor({
83
83
  message,
@@ -121,7 +121,7 @@ var APICallError = class extends Error {
121
121
  }
122
122
  };
123
123
 
124
- // ai-model-specification/errors/invalid-argument-error.ts
124
+ // spec/errors/invalid-argument-error.ts
125
125
  var InvalidArgumentError = class extends Error {
126
126
  constructor({
127
127
  parameter,
@@ -147,7 +147,7 @@ var InvalidArgumentError = class extends Error {
147
147
  }
148
148
  };
149
149
 
150
- // ai-model-specification/errors/invalid-data-content-error.ts
150
+ // spec/errors/invalid-data-content-error.ts
151
151
  var InvalidDataContentError = class extends Error {
152
152
  constructor({
153
153
  content,
@@ -170,7 +170,27 @@ var InvalidDataContentError = class extends Error {
170
170
  }
171
171
  };
172
172
 
173
- // ai-model-specification/util/get-error-message.ts
173
+ // spec/errors/invalid-prompt-error.ts
174
+ var InvalidPromptError = class extends Error {
175
+ constructor({ prompt: prompt2, message }) {
176
+ super(`Invalid prompt: ${message}`);
177
+ this.name = "AI_InvalidPromptError";
178
+ this.prompt = prompt2;
179
+ }
180
+ static isInvalidPromptError(error) {
181
+ return error instanceof Error && error.name === "AI_InvalidPromptError" && prompt != null;
182
+ }
183
+ toJSON() {
184
+ return {
185
+ name: this.name,
186
+ message: this.message,
187
+ stack: this.stack,
188
+ prompt: this.prompt
189
+ };
190
+ }
191
+ };
192
+
193
+ // spec/util/get-error-message.ts
174
194
  function getErrorMessage(error) {
175
195
  if (error == null) {
176
196
  return "unknown error";
@@ -184,10 +204,10 @@ function getErrorMessage(error) {
184
204
  return JSON.stringify(error);
185
205
  }
186
206
 
187
- // ai-model-specification/util/parse-json.ts
207
+ // spec/util/parse-json.ts
188
208
  var import_secure_json_parse = __toESM(require("secure-json-parse"));
189
209
 
190
- // ai-model-specification/errors/json-parse-error.ts
210
+ // spec/errors/json-parse-error.ts
191
211
  var JSONParseError = class extends Error {
192
212
  constructor({ text, cause }) {
193
213
  super(
@@ -212,7 +232,7 @@ Error message: ${getErrorMessage(cause)}`
212
232
  }
213
233
  };
214
234
 
215
- // ai-model-specification/errors/type-validation-error.ts
235
+ // spec/errors/type-validation-error.ts
216
236
  var TypeValidationError = class extends Error {
217
237
  constructor({ value, cause }) {
218
238
  super(
@@ -237,7 +257,7 @@ Error message: ${getErrorMessage(cause)}`
237
257
  }
238
258
  };
239
259
 
240
- // ai-model-specification/util/validate-types.ts
260
+ // spec/util/validate-types.ts
241
261
  function safeValidateTypes({
242
262
  value,
243
263
  schema
@@ -265,7 +285,7 @@ function safeValidateTypes({
265
285
  }
266
286
  }
267
287
 
268
- // ai-model-specification/util/parse-json.ts
288
+ // spec/util/parse-json.ts
269
289
  function safeParseJSON({
270
290
  text,
271
291
  schema
@@ -287,7 +307,7 @@ function safeParseJSON({
287
307
  }
288
308
  }
289
309
 
290
- // ai-model-specification/util/uint8-utils.ts
310
+ // spec/util/uint8-utils.ts
291
311
  function convertBase64ToUint8Array(base64String) {
292
312
  const base64Url = base64String.replace(/-/g, "+").replace(/_/g, "/");
293
313
  const latin1string = globalThis.atob(base64Url);
@@ -301,7 +321,7 @@ function convertUint8ArrayToBase64(array) {
301
321
  return globalThis.btoa(latin1string);
302
322
  }
303
323
 
304
- // ai-model-specification/errors/invalid-tool-arguments-error.ts
324
+ // spec/errors/invalid-tool-arguments-error.ts
305
325
  var InvalidToolArgumentsError = class extends Error {
306
326
  constructor({
307
327
  toolArgs,
@@ -332,7 +352,7 @@ var InvalidToolArgumentsError = class extends Error {
332
352
  }
333
353
  };
334
354
 
335
- // ai-model-specification/errors/no-object-generated-error.ts
355
+ // spec/errors/no-object-generated-error.ts
336
356
  var NoTextGeneratedError = class extends Error {
337
357
  constructor() {
338
358
  super(`No text generated.`);
@@ -351,7 +371,7 @@ var NoTextGeneratedError = class extends Error {
351
371
  }
352
372
  };
353
373
 
354
- // ai-model-specification/errors/no-such-tool-error.ts
374
+ // spec/errors/no-such-tool-error.ts
355
375
  var NoSuchToolError = class extends Error {
356
376
  constructor({ message, toolName }) {
357
377
  super(message);
@@ -371,7 +391,7 @@ var NoSuchToolError = class extends Error {
371
391
  }
372
392
  };
373
393
 
374
- // ai-model-specification/errors/retry-error.ts
394
+ // spec/errors/retry-error.ts
375
395
  var RetryError = class extends Error {
376
396
  constructor({
377
397
  message,
@@ -431,89 +451,101 @@ function convertDataContentToUint8Array(content) {
431
451
  }
432
452
 
433
453
  // core/prompt/convert-to-language-model-prompt.ts
434
- function convertToLanguageModelPrompt({
435
- system,
436
- prompt,
437
- messages
438
- }) {
439
- if (prompt == null && messages == null) {
440
- throw new Error("prompt or messages must be defined");
441
- }
442
- if (prompt != null && messages != null) {
443
- throw new Error("prompt and messages cannot be defined at the same time");
444
- }
454
+ function convertToLanguageModelPrompt(prompt2) {
445
455
  const languageModelMessages = [];
446
- if (system != null) {
447
- languageModelMessages.push({ role: "system", content: system });
448
- }
449
- if (typeof prompt === "string") {
450
- languageModelMessages.push({
451
- role: "user",
452
- content: [{ type: "text", text: prompt }]
453
- });
454
- } else {
455
- messages = messages;
456
- languageModelMessages.push(
457
- ...messages.map((message) => {
458
- switch (message.role) {
459
- case "user": {
460
- if (typeof message.content === "string") {
456
+ if (prompt2.system != null) {
457
+ languageModelMessages.push({ role: "system", content: prompt2.system });
458
+ }
459
+ switch (prompt2.type) {
460
+ case "prompt": {
461
+ languageModelMessages.push({
462
+ role: "user",
463
+ content: [{ type: "text", text: prompt2.prompt }]
464
+ });
465
+ break;
466
+ }
467
+ case "messages": {
468
+ languageModelMessages.push(
469
+ ...prompt2.messages.map((message) => {
470
+ switch (message.role) {
471
+ case "user": {
472
+ if (typeof message.content === "string") {
473
+ return {
474
+ role: "user",
475
+ content: [{ type: "text", text: message.content }]
476
+ };
477
+ }
461
478
  return {
462
479
  role: "user",
463
- content: [{ type: "text", text: message.content }]
464
- };
465
- }
466
- return {
467
- role: "user",
468
- content: message.content.map(
469
- (part) => {
470
- switch (part.type) {
471
- case "text": {
472
- return part;
473
- }
474
- case "image": {
475
- return {
476
- type: "image",
477
- image: part.image instanceof URL ? part.image : convertDataContentToUint8Array(part.image),
478
- mimeType: part.mimeType
479
- };
480
+ content: message.content.map(
481
+ (part) => {
482
+ switch (part.type) {
483
+ case "text": {
484
+ return part;
485
+ }
486
+ case "image": {
487
+ return {
488
+ type: "image",
489
+ image: part.image instanceof URL ? part.image : convertDataContentToUint8Array(part.image),
490
+ mimeType: part.mimeType
491
+ };
492
+ }
480
493
  }
481
494
  }
482
- }
483
- )
484
- };
485
- }
486
- case "assistant": {
487
- if (typeof message.content === "string") {
488
- return {
489
- role: "assistant",
490
- content: [{ type: "text", text: message.content }]
495
+ )
491
496
  };
492
497
  }
493
- return { role: "assistant", content: message.content };
494
- }
495
- case "tool": {
496
- return message;
498
+ case "assistant": {
499
+ if (typeof message.content === "string") {
500
+ return {
501
+ role: "assistant",
502
+ content: [{ type: "text", text: message.content }]
503
+ };
504
+ }
505
+ return { role: "assistant", content: message.content };
506
+ }
507
+ case "tool": {
508
+ return message;
509
+ }
497
510
  }
498
- }
499
- })
500
- );
511
+ })
512
+ );
513
+ break;
514
+ }
515
+ default: {
516
+ const _exhaustiveCheck = prompt2;
517
+ throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
518
+ }
501
519
  }
502
520
  return languageModelMessages;
503
521
  }
504
522
 
505
- // core/prompt/get-input-format.ts
506
- function getInputFormat({
507
- prompt,
508
- messages
509
- }) {
510
- if (prompt == null && messages == null) {
511
- throw new Error("prompt or messages must be defined");
523
+ // core/prompt/get-validated-prompt.ts
524
+ function getValidatedPrompt(prompt2) {
525
+ if (prompt2.prompt == null && prompt2.messages == null) {
526
+ throw new InvalidPromptError({
527
+ prompt: prompt2,
528
+ message: "prompt or messages must be defined"
529
+ });
512
530
  }
513
- if (prompt != null && messages != null) {
514
- throw new Error("prompt and messages cannot be defined at the same time");
531
+ if (prompt2.prompt != null && prompt2.messages != null) {
532
+ throw new InvalidPromptError({
533
+ prompt: prompt2,
534
+ message: "prompt and messages cannot be defined at the same time"
535
+ });
515
536
  }
516
- return prompt != null ? "prompt" : "messages";
537
+ return prompt2.prompt != null ? {
538
+ type: "prompt",
539
+ prompt: prompt2.prompt,
540
+ messages: void 0,
541
+ system: prompt2.system
542
+ } : {
543
+ type: "messages",
544
+ prompt: void 0,
545
+ messages: prompt2.messages,
546
+ // only possible case bc of checks above
547
+ system: prompt2.system
548
+ };
517
549
  }
518
550
 
519
551
  // core/prompt/prepare-call-settings.ts
@@ -725,7 +757,7 @@ async function experimental_generateObject({
725
757
  schema,
726
758
  mode,
727
759
  system,
728
- prompt,
760
+ prompt: prompt2,
729
761
  messages,
730
762
  maxRetries,
731
763
  abortSignal,
@@ -743,19 +775,20 @@ async function experimental_generateObject({
743
775
  let warnings;
744
776
  switch (mode) {
745
777
  case "json": {
746
- const generateResult = await retry(
747
- () => model.doGenerate({
778
+ const validatedPrompt = getValidatedPrompt({
779
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
780
+ prompt: prompt2,
781
+ messages
782
+ });
783
+ const generateResult = await retry(() => {
784
+ return model.doGenerate({
748
785
  mode: { type: "object-json" },
749
786
  ...prepareCallSettings(settings),
750
- inputFormat: getInputFormat({ prompt, messages }),
751
- prompt: convertToLanguageModelPrompt({
752
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
753
- prompt,
754
- messages
755
- }),
787
+ inputFormat: validatedPrompt.type,
788
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
756
789
  abortSignal
757
- })
758
- );
790
+ });
791
+ });
759
792
  if (generateResult.text === void 0) {
760
793
  throw new NoTextGeneratedError();
761
794
  }
@@ -766,16 +799,17 @@ async function experimental_generateObject({
766
799
  break;
767
800
  }
768
801
  case "grammar": {
802
+ const validatedPrompt = getValidatedPrompt({
803
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
804
+ prompt: prompt2,
805
+ messages
806
+ });
769
807
  const generateResult = await retry(
770
808
  () => model.doGenerate({
771
809
  mode: { type: "object-grammar", schema: jsonSchema },
772
810
  ...settings,
773
- inputFormat: getInputFormat({ prompt, messages }),
774
- prompt: convertToLanguageModelPrompt({
775
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
776
- prompt,
777
- messages
778
- }),
811
+ inputFormat: validatedPrompt.type,
812
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
779
813
  abortSignal
780
814
  })
781
815
  );
@@ -789,6 +823,11 @@ async function experimental_generateObject({
789
823
  break;
790
824
  }
791
825
  case "tool": {
826
+ const validatedPrompt = getValidatedPrompt({
827
+ system,
828
+ prompt: prompt2,
829
+ messages
830
+ });
792
831
  const generateResult = await retry(
793
832
  () => model.doGenerate({
794
833
  mode: {
@@ -801,8 +840,8 @@ async function experimental_generateObject({
801
840
  }
802
841
  },
803
842
  ...settings,
804
- inputFormat: getInputFormat({ prompt, messages }),
805
- prompt: convertToLanguageModelPrompt({ system, prompt, messages }),
843
+ inputFormat: validatedPrompt.type,
844
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
806
845
  abortSignal
807
846
  })
808
847
  );
@@ -1242,7 +1281,7 @@ async function experimental_streamObject({
1242
1281
  schema,
1243
1282
  mode,
1244
1283
  system,
1245
- prompt,
1284
+ prompt: prompt2,
1246
1285
  messages,
1247
1286
  maxRetries,
1248
1287
  abortSignal,
@@ -1257,15 +1296,16 @@ async function experimental_streamObject({
1257
1296
  let transformer;
1258
1297
  switch (mode) {
1259
1298
  case "json": {
1299
+ const validatedPrompt = getValidatedPrompt({
1300
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
1301
+ prompt: prompt2,
1302
+ messages
1303
+ });
1260
1304
  callOptions = {
1261
1305
  mode: { type: "object-json" },
1262
1306
  ...prepareCallSettings(settings),
1263
- inputFormat: getInputFormat({ prompt, messages }),
1264
- prompt: convertToLanguageModelPrompt({
1265
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
1266
- prompt,
1267
- messages
1268
- }),
1307
+ inputFormat: validatedPrompt.type,
1308
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1269
1309
  abortSignal
1270
1310
  };
1271
1311
  transformer = {
@@ -1283,15 +1323,16 @@ async function experimental_streamObject({
1283
1323
  break;
1284
1324
  }
1285
1325
  case "grammar": {
1326
+ const validatedPrompt = getValidatedPrompt({
1327
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
1328
+ prompt: prompt2,
1329
+ messages
1330
+ });
1286
1331
  callOptions = {
1287
1332
  mode: { type: "object-grammar", schema: jsonSchema },
1288
1333
  ...settings,
1289
- inputFormat: getInputFormat({ prompt, messages }),
1290
- prompt: convertToLanguageModelPrompt({
1291
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
1292
- prompt,
1293
- messages
1294
- }),
1334
+ inputFormat: validatedPrompt.type,
1335
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1295
1336
  abortSignal
1296
1337
  };
1297
1338
  transformer = {
@@ -1309,6 +1350,11 @@ async function experimental_streamObject({
1309
1350
  break;
1310
1351
  }
1311
1352
  case "tool": {
1353
+ const validatedPrompt = getValidatedPrompt({
1354
+ system,
1355
+ prompt: prompt2,
1356
+ messages
1357
+ });
1312
1358
  callOptions = {
1313
1359
  mode: {
1314
1360
  type: "object-tool",
@@ -1320,8 +1366,8 @@ async function experimental_streamObject({
1320
1366
  }
1321
1367
  },
1322
1368
  ...settings,
1323
- inputFormat: getInputFormat({ prompt, messages }),
1324
- prompt: convertToLanguageModelPrompt({ system, prompt, messages }),
1369
+ inputFormat: validatedPrompt.type,
1370
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1325
1371
  abortSignal
1326
1372
  };
1327
1373
  transformer = {
@@ -1428,7 +1474,7 @@ async function experimental_generateText({
1428
1474
  model,
1429
1475
  tools,
1430
1476
  system,
1431
- prompt,
1477
+ prompt: prompt2,
1432
1478
  messages,
1433
1479
  maxRetries,
1434
1480
  abortSignal,
@@ -1436,8 +1482,9 @@ async function experimental_generateText({
1436
1482
  }) {
1437
1483
  var _a, _b;
1438
1484
  const retry = retryWithExponentialBackoff({ maxRetries });
1439
- const modelResponse = await retry(
1440
- () => model.doGenerate({
1485
+ const validatedPrompt = getValidatedPrompt({ system, prompt: prompt2, messages });
1486
+ const modelResponse = await retry(() => {
1487
+ return model.doGenerate({
1441
1488
  mode: {
1442
1489
  type: "regular",
1443
1490
  tools: tools == null ? void 0 : Object.entries(tools).map(([name, tool2]) => ({
@@ -1448,15 +1495,11 @@ async function experimental_generateText({
1448
1495
  }))
1449
1496
  },
1450
1497
  ...prepareCallSettings(settings),
1451
- inputFormat: getInputFormat({ prompt, messages }),
1452
- prompt: convertToLanguageModelPrompt({
1453
- system,
1454
- prompt,
1455
- messages
1456
- }),
1498
+ inputFormat: validatedPrompt.type,
1499
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1457
1500
  abortSignal
1458
- })
1459
- );
1501
+ });
1502
+ });
1460
1503
  const toolCalls = [];
1461
1504
  for (const modelToolCall of (_a = modelResponse.toolCalls) != null ? _a : []) {
1462
1505
  toolCalls.push(parseToolCall({ toolCall: modelToolCall, tools }));
@@ -1665,13 +1708,14 @@ async function experimental_streamText({
1665
1708
  model,
1666
1709
  tools,
1667
1710
  system,
1668
- prompt,
1711
+ prompt: prompt2,
1669
1712
  messages,
1670
1713
  maxRetries,
1671
1714
  abortSignal,
1672
1715
  ...settings
1673
1716
  }) {
1674
1717
  const retry = retryWithExponentialBackoff({ maxRetries });
1718
+ const validatedPrompt = getValidatedPrompt({ system, prompt: prompt2, messages });
1675
1719
  const { stream, warnings } = await retry(
1676
1720
  () => model.doStream({
1677
1721
  mode: {
@@ -1684,12 +1728,8 @@ async function experimental_streamText({
1684
1728
  }))
1685
1729
  },
1686
1730
  ...prepareCallSettings(settings),
1687
- inputFormat: getInputFormat({ prompt, messages }),
1688
- prompt: convertToLanguageModelPrompt({
1689
- system,
1690
- prompt,
1691
- messages
1692
- }),
1731
+ inputFormat: validatedPrompt.type,
1732
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1693
1733
  abortSignal
1694
1734
  })
1695
1735
  );
@@ -1709,6 +1749,11 @@ var StreamTextResult = class {
1709
1749
  this.originalStream = stream;
1710
1750
  this.warnings = warnings;
1711
1751
  }
1752
+ /**
1753
+ A text stream that returns only the generated text deltas. You can use it
1754
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
1755
+ stream will throw the error.
1756
+ */
1712
1757
  get textStream() {
1713
1758
  return createAsyncIterableStream(this.originalStream, {
1714
1759
  transform(chunk, controller) {
@@ -1722,6 +1767,12 @@ var StreamTextResult = class {
1722
1767
  }
1723
1768
  });
1724
1769
  }
1770
+ /**
1771
+ A stream with all events, including text deltas, tool calls, tool results, and
1772
+ errors.
1773
+ You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
1774
+ stream will throw the error.
1775
+ */
1725
1776
  get fullStream() {
1726
1777
  return createAsyncIterableStream(this.originalStream, {
1727
1778
  transform(chunk, controller) {
@@ -1735,6 +1786,15 @@ var StreamTextResult = class {
1735
1786
  }
1736
1787
  });
1737
1788
  }
1789
+ /**
1790
+ Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
1791
+ It can be used with the `useChat` and `useCompletion` hooks.
1792
+
1793
+ @param callbacks
1794
+ Stream callbacks that will be called when the stream emits events.
1795
+
1796
+ @returns an `AIStream` object.
1797
+ */
1738
1798
  toAIStream(callbacks) {
1739
1799
  return readableFromAsyncIterable(this.textStream).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(
1740
1800
  createStreamDataTransformer(callbacks == null ? void 0 : callbacks.experimental_streamData)