ai 3.0.15 → 3.0.17
This diff shows the published contents of these package versions as they appear in their public registry. It is provided for informational purposes only.
- package/dist/index.d.mts +4 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +161 -121
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +161 -121
- package/dist/index.mjs.map +1 -1
- package/google/dist/index.d.mts +365 -0
- package/google/dist/index.d.ts +365 -0
- package/google/dist/index.js +950 -0
- package/google/dist/index.js.map +1 -0
- package/google/dist/index.mjs +914 -0
- package/google/dist/index.mjs.map +1 -0
- package/mistral/dist/index.d.mts +0 -1
- package/mistral/dist/index.d.ts +0 -1
- package/mistral/dist/index.js +4 -4
- package/mistral/dist/index.js.map +1 -1
- package/mistral/dist/index.mjs +4 -4
- package/mistral/dist/index.mjs.map +1 -1
- package/openai/dist/index.js +1 -2
- package/openai/dist/index.js.map +1 -1
- package/openai/dist/index.mjs +1 -2
- package/openai/dist/index.mjs.map +1 -1
- package/package.json +12 -4
- package/rsc/dist/index.d.ts +21 -3
- package/rsc/dist/rsc-client.d.mts +1 -1
- package/rsc/dist/rsc-client.mjs +2 -0
- package/rsc/dist/rsc-client.mjs.map +1 -1
- package/rsc/dist/rsc-server.d.mts +2 -2
- package/rsc/dist/rsc-server.mjs +1 -1
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/rsc/dist/rsc-shared.d.mts +20 -2
- package/rsc/dist/rsc-shared.mjs +75 -2
- package/rsc/dist/rsc-shared.mjs.map +1 -1
- package/spec/dist/index.js +1 -2
- package/spec/dist/index.js.map +1 -1
- package/spec/dist/index.mjs +1 -2
- package/spec/dist/index.mjs.map +1 -1
package/dist/index.d.mts
CHANGED
@@ -536,7 +536,9 @@ Generate a structured, typed object for a given prompt and schema using a langua
 This function does not stream the output. If you want to stream the output, use `experimental_streamObject` instead.
 
 @param model - The language model to use.
+
 @param schema - The schema of the object that the model should generate.
+@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
 
 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@@ -634,7 +636,9 @@ Generate a structured, typed object for a given prompt and schema using a langua
 This function streams the output. If you do not want to stream the output, use `experimental_generateObject` instead.
 
 @param model - The language model to use.
+
 @param schema - The schema of the object that the model should generate.
+@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
 
 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
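The two added `@param mode` lines document the `mode` option on `experimental_generateObject` and `experimental_streamObject`; the identical change lands in package/dist/index.d.ts below. A minimal sketch of passing it from application code (the Zod schema and the model argument are illustrative placeholders, not taken from this diff):

import { experimental_generateObject } from "ai";
import { z } from "zod";

// `model` stands in for any language model instance the SDK accepts;
// its construction is provider-specific and omitted here.
async function extractRecipe(model: any) {
  const { object } = await experimental_generateObject({
    model,
    mode: "json", // documented values: 'auto' (default), 'json', 'grammar', 'tool'
    schema: z.object({
      name: z.string(),
      ingredients: z.array(z.string()),
    }),
    prompt: "Generate a lasagna recipe.",
  });
  return object; // typed according to the schema
}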
package/dist/index.d.ts
CHANGED
@@ -536,7 +536,9 @@ Generate a structured, typed object for a given prompt and schema using a langua
 This function does not stream the output. If you want to stream the output, use `experimental_streamObject` instead.
 
 @param model - The language model to use.
+
 @param schema - The schema of the object that the model should generate.
+@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
 
 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@@ -634,7 +636,9 @@ Generate a structured, typed object for a given prompt and schema using a langua
 This function streams the output. If you do not want to stream the output, use `experimental_generateObject` instead.
 
 @param model - The language model to use.
+
 @param schema - The schema of the object that the model should generate.
+@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
 
 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
package/dist/index.js
CHANGED
@@ -170,6 +170,26 @@ var InvalidDataContentError = class extends Error {
   }
 };
 
+// spec/errors/invalid-prompt-error.ts
+var InvalidPromptError = class extends Error {
+  constructor({ prompt: prompt2, message }) {
+    super(`Invalid prompt: ${message}`);
+    this.name = "AI_InvalidPromptError";
+    this.prompt = prompt2;
+  }
+  static isInvalidPromptError(error) {
+    return error instanceof Error && error.name === "AI_InvalidPromptError" && prompt != null;
+  }
+  toJSON() {
+    return {
+      name: this.name,
+      message: this.message,
+      stack: this.stack,
+      prompt: this.prompt
+    };
+  }
+};
+
 // spec/util/get-error-message.ts
 function getErrorMessage(error) {
   if (error == null) {
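`InvalidPromptError` gives prompt-validation failures a stable name (`AI_InvalidPromptError`) and a JSON-serializable shape. A hedged sketch of detecting it at a call site; since this diff does not show whether the class is re-exported from the package root, the check goes by `error.name` rather than an imported class:

import { experimental_generateText } from "ai";

async function demo(model: any) {
  try {
    // Supplying both `prompt` and `messages` fails validation (see
    // getValidatedPrompt below) and now raises AI_InvalidPromptError.
    await experimental_generateText({
      model,
      prompt: "Hello!",
      messages: [{ role: "user", content: "Hello!" }],
    });
  } catch (error) {
    if (error instanceof Error && error.name === "AI_InvalidPromptError") {
      console.error("Prompt validation failed:", error.message);
    } else {
      throw error;
    }
  }
}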
@@ -431,89 +451,101 @@ function convertDataContentToUint8Array(content) {
 }
 
 // core/prompt/convert-to-language-model-prompt.ts
-function convertToLanguageModelPrompt({
-  system,
-  prompt,
-  messages
-}) {
-  if (prompt == null && messages == null) {
-    throw new Error("prompt or messages must be defined");
-  }
-  if (prompt != null && messages != null) {
-    throw new Error("prompt and messages cannot be defined at the same time");
-  }
+function convertToLanguageModelPrompt(prompt2) {
   const languageModelMessages = [];
-  if (system != null) {
-    languageModelMessages.push({ role: "system", content: system });
-  }
-… (12 removed lines not captured in the diff view)
+  if (prompt2.system != null) {
+    languageModelMessages.push({ role: "system", content: prompt2.system });
+  }
+  switch (prompt2.type) {
+    case "prompt": {
+      languageModelMessages.push({
+        role: "user",
+        content: [{ type: "text", text: prompt2.prompt }]
+      });
+      break;
+    }
+    case "messages": {
+      languageModelMessages.push(
+        ...prompt2.messages.map((message) => {
+          switch (message.role) {
+            case "user": {
+              if (typeof message.content === "string") {
+                return {
+                  role: "user",
+                  content: [{ type: "text", text: message.content }]
+                };
+              }
               return {
                 role: "user",
-                content: …
-… (12 removed lines not captured in the diff view)
-                type: "image",
-                image: part.image instanceof URL ? part.image : convertDataContentToUint8Array(part.image),
-                mimeType: part.mimeType
-              };
+                content: message.content.map(
+                  (part) => {
+                    switch (part.type) {
+                      case "text": {
+                        return part;
+                      }
+                      case "image": {
+                        return {
+                          type: "image",
+                          image: part.image instanceof URL ? part.image : convertDataContentToUint8Array(part.image),
+                          mimeType: part.mimeType
+                        };
+                      }
                     }
                   }
-… (1 removed line not captured in the diff view)
-                )
-              };
-            }
-            case "assistant": {
-              if (typeof message.content === "string") {
-                return {
-                  role: "assistant",
-                  content: [{ type: "text", text: message.content }]
+                )
               };
             }
-… (4 removed lines not captured in the diff view)
+            case "assistant": {
+              if (typeof message.content === "string") {
+                return {
+                  role: "assistant",
+                  content: [{ type: "text", text: message.content }]
+                };
+              }
+              return { role: "assistant", content: message.content };
+            }
+            case "tool": {
+              return message;
+            }
           }
-          }
-… (2 removed lines not captured in the diff view)
+        })
+      );
+      break;
+    }
+    default: {
+      const _exhaustiveCheck = prompt2;
+      throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
+    }
   }
   return languageModelMessages;
 }
 
-// core/prompt/get- …
-function …
-prompt …
-… (4 removed lines not captured in the diff view)
+// core/prompt/get-validated-prompt.ts
+function getValidatedPrompt(prompt2) {
+  if (prompt2.prompt == null && prompt2.messages == null) {
+    throw new InvalidPromptError({
+      prompt: prompt2,
+      message: "prompt or messages must be defined"
+    });
   }
-  if (prompt != null && messages != null) {
-    throw new …
+  if (prompt2.prompt != null && prompt2.messages != null) {
+    throw new InvalidPromptError({
+      prompt: prompt2,
+      message: "prompt and messages cannot be defined at the same time"
+    });
   }
-  return prompt != null ? …
+  return prompt2.prompt != null ? {
+    type: "prompt",
+    prompt: prompt2.prompt,
+    messages: void 0,
+    system: prompt2.system
+  } : {
+    type: "messages",
+    prompt: void 0,
+    messages: prompt2.messages,
+    // only possible case bc of checks above
+    system: prompt2.system
+  };
 }
 
 // core/prompt/prepare-call-settings.ts
@@ -725,7 +757,7 @@ async function experimental_generateObject({
   schema,
   mode,
   system,
-  prompt,
+  prompt: prompt2,
   messages,
   maxRetries,
   abortSignal,
@@ -743,19 +775,20 @@ async function experimental_generateObject({
   let warnings;
   switch (mode) {
     case "json": {
-      const …
-      ( …
+      const validatedPrompt = getValidatedPrompt({
+        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        prompt: prompt2,
+        messages
+      });
+      const generateResult = await retry(() => {
+        return model.doGenerate({
          mode: { type: "object-json" },
          ...prepareCallSettings(settings),
-          inputFormat: …
-          prompt: convertToLanguageModelPrompt( …
-            system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-            prompt,
-            messages
-          }),
+          inputFormat: validatedPrompt.type,
+          prompt: convertToLanguageModelPrompt(validatedPrompt),
          abortSignal
-        })
-      );
+        });
+      });
      if (generateResult.text === void 0) {
        throw new NoTextGeneratedError();
      }
@@ -766,16 +799,17 @@ async function experimental_generateObject({
       break;
     }
     case "grammar": {
+      const validatedPrompt = getValidatedPrompt({
+        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        prompt: prompt2,
+        messages
+      });
       const generateResult = await retry(
         () => model.doGenerate({
           mode: { type: "object-grammar", schema: jsonSchema },
           ...settings,
-          inputFormat: …
-          prompt: convertToLanguageModelPrompt( …
-            system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-            prompt,
-            messages
-          }),
+          inputFormat: validatedPrompt.type,
+          prompt: convertToLanguageModelPrompt(validatedPrompt),
           abortSignal
         })
       );
@@ -789,6 +823,11 @@ async function experimental_generateObject({
       break;
     }
     case "tool": {
+      const validatedPrompt = getValidatedPrompt({
+        system,
+        prompt: prompt2,
+        messages
+      });
       const generateResult = await retry(
         () => model.doGenerate({
           mode: {
@@ -801,8 +840,8 @@ async function experimental_generateObject({
             }
           },
           ...settings,
-          inputFormat: …
-          prompt: convertToLanguageModelPrompt( …
+          inputFormat: validatedPrompt.type,
+          prompt: convertToLanguageModelPrompt(validatedPrompt),
           abortSignal
         })
       );
@@ -1242,7 +1281,7 @@ async function experimental_streamObject({
   schema,
   mode,
   system,
-  prompt,
+  prompt: prompt2,
   messages,
   maxRetries,
   abortSignal,
@@ -1257,15 +1296,16 @@ async function experimental_streamObject({
   let transformer;
   switch (mode) {
     case "json": {
+      const validatedPrompt = getValidatedPrompt({
+        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        prompt: prompt2,
+        messages
+      });
       callOptions = {
         mode: { type: "object-json" },
         ...prepareCallSettings(settings),
-        inputFormat: …
-        prompt: convertToLanguageModelPrompt( …
-          system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-          prompt,
-          messages
-        }),
+        inputFormat: validatedPrompt.type,
+        prompt: convertToLanguageModelPrompt(validatedPrompt),
         abortSignal
       };
       transformer = {
@@ -1283,15 +1323,16 @@ async function experimental_streamObject({
       break;
     }
     case "grammar": {
+      const validatedPrompt = getValidatedPrompt({
+        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        prompt: prompt2,
+        messages
+      });
       callOptions = {
         mode: { type: "object-grammar", schema: jsonSchema },
         ...settings,
-        inputFormat: …
-        prompt: convertToLanguageModelPrompt( …
-          system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-          prompt,
-          messages
-        }),
+        inputFormat: validatedPrompt.type,
+        prompt: convertToLanguageModelPrompt(validatedPrompt),
         abortSignal
       };
       transformer = {
@@ -1309,6 +1350,11 @@ async function experimental_streamObject({
       break;
     }
     case "tool": {
+      const validatedPrompt = getValidatedPrompt({
+        system,
+        prompt: prompt2,
+        messages
+      });
       callOptions = {
         mode: {
           type: "object-tool",
@@ -1320,8 +1366,8 @@ async function experimental_streamObject({
           }
         },
         ...settings,
-        inputFormat: …
-        prompt: convertToLanguageModelPrompt( …
+        inputFormat: validatedPrompt.type,
+        prompt: convertToLanguageModelPrompt(validatedPrompt),
         abortSignal
       };
       transformer = {
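`experimental_streamObject` accepts the same `mode` option and applies the identical validate-once pattern before building `callOptions`. A hedged caller-side sketch (the `partialObjectStream` iteration follows the SDK's documented result shape; the model and schema are placeholders):

import { experimental_streamObject } from "ai";
import { z } from "zod";

async function streamRecipe(model: any) {
  const result = await experimental_streamObject({
    model,
    mode: "json",
    schema: z.object({ name: z.string(), ingredients: z.array(z.string()) }),
    prompt: "Generate a lasagna recipe.",
  });
  for await (const partial of result.partialObjectStream) {
    console.log(partial); // progressively more complete partial objects
  }
}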
@@ -1428,7 +1474,7 @@ async function experimental_generateText({
   model,
   tools,
   system,
-  prompt,
+  prompt: prompt2,
   messages,
   maxRetries,
   abortSignal,
@@ -1436,8 +1482,9 @@ async function experimental_generateText({
 }) {
   var _a, _b;
   const retry = retryWithExponentialBackoff({ maxRetries });
-  const …
-… (1 removed line not captured in the diff view)
+  const validatedPrompt = getValidatedPrompt({ system, prompt: prompt2, messages });
+  const modelResponse = await retry(() => {
+    return model.doGenerate({
      mode: {
        type: "regular",
        tools: tools == null ? void 0 : Object.entries(tools).map(([name, tool2]) => ({
@@ -1448,15 +1495,11 @@ async function experimental_generateText({
        }))
      },
      ...prepareCallSettings(settings),
-      inputFormat: …
-      prompt: convertToLanguageModelPrompt( …
-        system,
-        prompt,
-        messages
-      }),
+      inputFormat: validatedPrompt.type,
+      prompt: convertToLanguageModelPrompt(validatedPrompt),
      abortSignal
-    })
-  );
+    });
+  });
   const toolCalls = [];
   for (const modelToolCall of (_a = modelResponse.toolCalls) != null ? _a : []) {
     toolCalls.push(parseToolCall({ toolCall: modelToolCall, tools }));
@@ -1665,13 +1708,14 @@ async function experimental_streamText({
   model,
   tools,
   system,
-  prompt,
+  prompt: prompt2,
   messages,
   maxRetries,
   abortSignal,
   ...settings
 }) {
   const retry = retryWithExponentialBackoff({ maxRetries });
+  const validatedPrompt = getValidatedPrompt({ system, prompt: prompt2, messages });
   const { stream, warnings } = await retry(
     () => model.doStream({
       mode: {
@@ -1684,12 +1728,8 @@ async function experimental_streamText({
        }))
      },
      ...prepareCallSettings(settings),
-      inputFormat: …
-      prompt: convertToLanguageModelPrompt( …
-        system,
-        prompt,
-        messages
-      }),
+      inputFormat: validatedPrompt.type,
+      prompt: convertToLanguageModelPrompt(validatedPrompt),
      abortSignal
    })
  );
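`experimental_streamText` gets the same treatment: the prompt is validated once, before the retried `doStream` call, so an invalid prompt/messages combination fails fast instead of inside the retry loop. For completeness, a hedged sketch of the public call path this hunk affects (`textStream` follows the SDK's documented result shape; the model is a placeholder):

import { experimental_streamText } from "ai";

async function streamAnswer(model: any) {
  const result = await experimental_streamText({
    model,
    system: "You are a helpful assistant.",
    prompt: "Why is the sky blue?",
  });
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
}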