ai 3.0.15 → 3.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -94,6 +94,26 @@ var InvalidDataContentError = class extends Error {
94
94
  }
95
95
  };
96
96
 
97
// spec/errors/invalid-prompt-error.ts
// Error thrown when a caller supplies an invalid prompt/messages combination.
// Carries the offending prompt object so it can be inspected and serialized.
var InvalidPromptError = class extends Error {
  constructor({ prompt, message }) {
    super(`Invalid prompt: ${message}`);
    this.name = "AI_InvalidPromptError";
    this.prompt = prompt;
  }
  // Duck-typed guard (survives multiple copies of this module in a bundle).
  // FIX: the published build checked the free identifier `prompt` — a
  // ReferenceError under Node ESM and the always-truthy `window.prompt`
  // in browsers — instead of the error instance's own `prompt` property.
  static isInvalidPromptError(error) {
    return error instanceof Error && error.name === "AI_InvalidPromptError" && "prompt" in error && error.prompt != null;
  }
  // Plain-object form for structured logging / JSON serialization.
  toJSON() {
    return {
      name: this.name,
      message: this.message,
      stack: this.stack,
      prompt: this.prompt
    };
  }
};
116
+
97
117
  // spec/util/get-error-message.ts
98
118
  function getErrorMessage(error) {
99
119
  if (error == null) {
@@ -355,89 +375,101 @@ function convertDataContentToUint8Array(content) {
355
375
  }
356
376
 
357
377
// core/prompt/convert-to-language-model-prompt.ts
// Converts a validated prompt (output of getValidatedPrompt: discriminated on
// `type`, with optional `system`) into the flat message array the language
// model API expects. Reconstructed into runnable form from the diff residue;
// behavior matches the 3.0.16 `+` lines exactly.
function convertToLanguageModelPrompt(prompt2) {
  const languageModelMessages = [];
  // A system prompt, when present, always leads the message list.
  if (prompt2.system != null) {
    languageModelMessages.push({ role: "system", content: prompt2.system });
  }
  switch (prompt2.type) {
    case "prompt": {
      // Plain string prompt becomes a single user message with one text part.
      languageModelMessages.push({
        role: "user",
        content: [{ type: "text", text: prompt2.prompt }]
      });
      break;
    }
    case "messages": {
      languageModelMessages.push(
        ...prompt2.messages.map((message) => {
          switch (message.role) {
            case "user": {
              // String content is normalized to a single text part.
              if (typeof message.content === "string") {
                return {
                  role: "user",
                  content: [{ type: "text", text: message.content }]
                };
              }
              // Multi-part content: text parts pass through; image parts get
              // their data coerced to Uint8Array unless already a URL.
              return {
                role: "user",
                content: message.content.map(
                  (part) => {
                    switch (part.type) {
                      case "text": {
                        return part;
                      }
                      case "image": {
                        return {
                          type: "image",
                          image: part.image instanceof URL ? part.image : convertDataContentToUint8Array(part.image),
                          mimeType: part.mimeType
                        };
                      }
                    }
                  }
                )
              };
            }
            case "assistant": {
              if (typeof message.content === "string") {
                return {
                  role: "assistant",
                  content: [{ type: "text", text: message.content }]
                };
              }
              return { role: "assistant", content: message.content };
            }
            case "tool": {
              // Tool messages are forwarded unchanged.
              return message;
            }
          }
        })
      );
      break;
    }
    default: {
      // Exhaustiveness guard for the discriminated union.
      const _exhaustiveCheck = prompt2;
      throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
    }
  }
  return languageModelMessages;
}
428
446
 
429
// core/prompt/get-validated-prompt.ts
// Validates that exactly one of `prompt` / `messages` is set and returns a
// normalized, discriminated object ({ type: "prompt" | "messages" }) for
// convertToLanguageModelPrompt. Replaces the old getInputFormat, which only
// returned the format string and threw bare Errors instead of
// InvalidPromptError. Reconstructed into runnable form from the diff residue;
// behavior matches the 3.0.16 `+` lines exactly.
function getValidatedPrompt(prompt2) {
  if (prompt2.prompt == null && prompt2.messages == null) {
    throw new InvalidPromptError({
      prompt: prompt2,
      message: "prompt or messages must be defined"
    });
  }
  if (prompt2.prompt != null && prompt2.messages != null) {
    throw new InvalidPromptError({
      prompt: prompt2,
      message: "prompt and messages cannot be defined at the same time"
    });
  }
  return prompt2.prompt != null ? {
    type: "prompt",
    prompt: prompt2.prompt,
    messages: void 0,
    system: prompt2.system
  } : {
    type: "messages",
    prompt: void 0,
    messages: prompt2.messages,
    // only possible case bc of checks above
    system: prompt2.system
  };
}
442
474
 
443
475
  // core/prompt/prepare-call-settings.ts
@@ -649,7 +681,7 @@ async function experimental_generateObject({
649
681
  schema,
650
682
  mode,
651
683
  system,
652
- prompt,
684
+ prompt: prompt2,
653
685
  messages,
654
686
  maxRetries,
655
687
  abortSignal,
@@ -667,19 +699,20 @@ async function experimental_generateObject({
667
699
  let warnings;
668
700
  switch (mode) {
669
701
  case "json": {
670
- const generateResult = await retry(
671
- () => model.doGenerate({
702
+ const validatedPrompt = getValidatedPrompt({
703
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
704
+ prompt: prompt2,
705
+ messages
706
+ });
707
+ const generateResult = await retry(() => {
708
+ return model.doGenerate({
672
709
  mode: { type: "object-json" },
673
710
  ...prepareCallSettings(settings),
674
- inputFormat: getInputFormat({ prompt, messages }),
675
- prompt: convertToLanguageModelPrompt({
676
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
677
- prompt,
678
- messages
679
- }),
711
+ inputFormat: validatedPrompt.type,
712
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
680
713
  abortSignal
681
- })
682
- );
714
+ });
715
+ });
683
716
  if (generateResult.text === void 0) {
684
717
  throw new NoTextGeneratedError();
685
718
  }
@@ -690,16 +723,17 @@ async function experimental_generateObject({
690
723
  break;
691
724
  }
692
725
  case "grammar": {
726
+ const validatedPrompt = getValidatedPrompt({
727
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
728
+ prompt: prompt2,
729
+ messages
730
+ });
693
731
  const generateResult = await retry(
694
732
  () => model.doGenerate({
695
733
  mode: { type: "object-grammar", schema: jsonSchema },
696
734
  ...settings,
697
- inputFormat: getInputFormat({ prompt, messages }),
698
- prompt: convertToLanguageModelPrompt({
699
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
700
- prompt,
701
- messages
702
- }),
735
+ inputFormat: validatedPrompt.type,
736
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
703
737
  abortSignal
704
738
  })
705
739
  );
@@ -713,6 +747,11 @@ async function experimental_generateObject({
713
747
  break;
714
748
  }
715
749
  case "tool": {
750
+ const validatedPrompt = getValidatedPrompt({
751
+ system,
752
+ prompt: prompt2,
753
+ messages
754
+ });
716
755
  const generateResult = await retry(
717
756
  () => model.doGenerate({
718
757
  mode: {
@@ -725,8 +764,8 @@ async function experimental_generateObject({
725
764
  }
726
765
  },
727
766
  ...settings,
728
- inputFormat: getInputFormat({ prompt, messages }),
729
- prompt: convertToLanguageModelPrompt({ system, prompt, messages }),
767
+ inputFormat: validatedPrompt.type,
768
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
730
769
  abortSignal
731
770
  })
732
771
  );
@@ -1166,7 +1205,7 @@ async function experimental_streamObject({
1166
1205
  schema,
1167
1206
  mode,
1168
1207
  system,
1169
- prompt,
1208
+ prompt: prompt2,
1170
1209
  messages,
1171
1210
  maxRetries,
1172
1211
  abortSignal,
@@ -1181,15 +1220,16 @@ async function experimental_streamObject({
1181
1220
  let transformer;
1182
1221
  switch (mode) {
1183
1222
  case "json": {
1223
+ const validatedPrompt = getValidatedPrompt({
1224
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
1225
+ prompt: prompt2,
1226
+ messages
1227
+ });
1184
1228
  callOptions = {
1185
1229
  mode: { type: "object-json" },
1186
1230
  ...prepareCallSettings(settings),
1187
- inputFormat: getInputFormat({ prompt, messages }),
1188
- prompt: convertToLanguageModelPrompt({
1189
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
1190
- prompt,
1191
- messages
1192
- }),
1231
+ inputFormat: validatedPrompt.type,
1232
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1193
1233
  abortSignal
1194
1234
  };
1195
1235
  transformer = {
@@ -1207,15 +1247,16 @@ async function experimental_streamObject({
1207
1247
  break;
1208
1248
  }
1209
1249
  case "grammar": {
1250
+ const validatedPrompt = getValidatedPrompt({
1251
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
1252
+ prompt: prompt2,
1253
+ messages
1254
+ });
1210
1255
  callOptions = {
1211
1256
  mode: { type: "object-grammar", schema: jsonSchema },
1212
1257
  ...settings,
1213
- inputFormat: getInputFormat({ prompt, messages }),
1214
- prompt: convertToLanguageModelPrompt({
1215
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
1216
- prompt,
1217
- messages
1218
- }),
1258
+ inputFormat: validatedPrompt.type,
1259
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1219
1260
  abortSignal
1220
1261
  };
1221
1262
  transformer = {
@@ -1233,6 +1274,11 @@ async function experimental_streamObject({
1233
1274
  break;
1234
1275
  }
1235
1276
  case "tool": {
1277
+ const validatedPrompt = getValidatedPrompt({
1278
+ system,
1279
+ prompt: prompt2,
1280
+ messages
1281
+ });
1236
1282
  callOptions = {
1237
1283
  mode: {
1238
1284
  type: "object-tool",
@@ -1244,8 +1290,8 @@ async function experimental_streamObject({
1244
1290
  }
1245
1291
  },
1246
1292
  ...settings,
1247
- inputFormat: getInputFormat({ prompt, messages }),
1248
- prompt: convertToLanguageModelPrompt({ system, prompt, messages }),
1293
+ inputFormat: validatedPrompt.type,
1294
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1249
1295
  abortSignal
1250
1296
  };
1251
1297
  transformer = {
@@ -1352,7 +1398,7 @@ async function experimental_generateText({
1352
1398
  model,
1353
1399
  tools,
1354
1400
  system,
1355
- prompt,
1401
+ prompt: prompt2,
1356
1402
  messages,
1357
1403
  maxRetries,
1358
1404
  abortSignal,
@@ -1360,8 +1406,9 @@ async function experimental_generateText({
1360
1406
  }) {
1361
1407
  var _a, _b;
1362
1408
  const retry = retryWithExponentialBackoff({ maxRetries });
1363
- const modelResponse = await retry(
1364
- () => model.doGenerate({
1409
+ const validatedPrompt = getValidatedPrompt({ system, prompt: prompt2, messages });
1410
+ const modelResponse = await retry(() => {
1411
+ return model.doGenerate({
1365
1412
  mode: {
1366
1413
  type: "regular",
1367
1414
  tools: tools == null ? void 0 : Object.entries(tools).map(([name, tool2]) => ({
@@ -1372,15 +1419,11 @@ async function experimental_generateText({
1372
1419
  }))
1373
1420
  },
1374
1421
  ...prepareCallSettings(settings),
1375
- inputFormat: getInputFormat({ prompt, messages }),
1376
- prompt: convertToLanguageModelPrompt({
1377
- system,
1378
- prompt,
1379
- messages
1380
- }),
1422
+ inputFormat: validatedPrompt.type,
1423
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1381
1424
  abortSignal
1382
- })
1383
- );
1425
+ });
1426
+ });
1384
1427
  const toolCalls = [];
1385
1428
  for (const modelToolCall of (_a = modelResponse.toolCalls) != null ? _a : []) {
1386
1429
  toolCalls.push(parseToolCall({ toolCall: modelToolCall, tools }));
@@ -1589,13 +1632,14 @@ async function experimental_streamText({
1589
1632
  model,
1590
1633
  tools,
1591
1634
  system,
1592
- prompt,
1635
+ prompt: prompt2,
1593
1636
  messages,
1594
1637
  maxRetries,
1595
1638
  abortSignal,
1596
1639
  ...settings
1597
1640
  }) {
1598
1641
  const retry = retryWithExponentialBackoff({ maxRetries });
1642
+ const validatedPrompt = getValidatedPrompt({ system, prompt: prompt2, messages });
1599
1643
  const { stream, warnings } = await retry(
1600
1644
  () => model.doStream({
1601
1645
  mode: {
@@ -1608,12 +1652,8 @@ async function experimental_streamText({
1608
1652
  }))
1609
1653
  },
1610
1654
  ...prepareCallSettings(settings),
1611
- inputFormat: getInputFormat({ prompt, messages }),
1612
- prompt: convertToLanguageModelPrompt({
1613
- system,
1614
- prompt,
1615
- messages
1616
- }),
1655
+ inputFormat: validatedPrompt.type,
1656
+ prompt: convertToLanguageModelPrompt(validatedPrompt),
1617
1657
  abortSignal
1618
1658
  })
1619
1659
  );