ai 6.0.0-beta.98 → 6.0.0

This diff compares the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
package/dist/index.mjs CHANGED
@@ -1,7 +1,7 @@
  var __defProp = Object.defineProperty;
  var __export = (target, all) => {
- for (var name15 in all)
- __defProp(target, name15, { get: all[name15], enumerable: true });
+ for (var name14 in all)
+ __defProp(target, name14, { get: all[name14], enumerable: true });
  };
 
  // src/index.ts
@@ -24,79 +24,9 @@ import {
  withUserAgentSuffix as withUserAgentSuffix2
  } from "@ai-sdk/provider-utils";
 
- // src/logger/log-warnings.ts
- function formatWarning({
- warning,
- provider,
- model
- }) {
- const prefix = `AI SDK Warning (${provider} / ${model}):`;
- switch (warning.type) {
- case "unsupported-setting": {
- let message = `${prefix} The "${warning.setting}" setting is not supported.`;
- if (warning.details) {
- message += ` ${warning.details}`;
- }
- return message;
- }
- case "compatibility": {
- let message = `${prefix} The "${warning.feature}" feature is not fully supported.`;
- if (warning.details) {
- message += ` ${warning.details}`;
- }
- return message;
- }
- case "unsupported-tool": {
- const toolName = "name" in warning.tool ? warning.tool.name : "unknown tool";
- let message = `${prefix} The tool "${toolName}" is not supported.`;
- if (warning.details) {
- message += ` ${warning.details}`;
- }
- return message;
- }
- case "other": {
- return `${prefix} ${warning.message}`;
- }
- default: {
- return `${prefix} ${JSON.stringify(warning, null, 2)}`;
- }
- }
- }
- var FIRST_WARNING_INFO_MESSAGE = "AI SDK Warning System: To turn off warning logging, set the AI_SDK_LOG_WARNINGS global to false.";
- var hasLoggedBefore = false;
- var logWarnings = (options) => {
- if (options.warnings.length === 0) {
- return;
- }
- const logger = globalThis.AI_SDK_LOG_WARNINGS;
- if (logger === false) {
- return;
- }
- if (typeof logger === "function") {
- logger(options);
- return;
- }
- if (!hasLoggedBefore) {
- hasLoggedBefore = true;
- console.info(FIRST_WARNING_INFO_MESSAGE);
- }
- for (const warning of options.warnings) {
- console.warn(
- formatWarning({
- warning,
- provider: options.provider,
- model: options.model
- })
- );
- }
- };
-
- // src/model/resolve-model.ts
- import { gateway } from "@ai-sdk/gateway";
-
  // src/error/index.ts
  import {
- AISDKError as AISDKError16,
+ AISDKError as AISDKError15,
  APICallError,
  EmptyResponseBodyError,
  InvalidPromptError,
@@ -388,25 +318,26 @@ var MessageConversionError = class extends AISDKError13 {
  };
  _a11 = symbol11;
 
- // src/util/download/download-error.ts
+ // src/error/index.ts
+ import { DownloadError } from "@ai-sdk/provider-utils";
+
+ // src/util/retry-error.ts
  import { AISDKError as AISDKError14 } from "@ai-sdk/provider";
- var name12 = "AI_DownloadError";
+ var name12 = "AI_RetryError";
  var marker12 = `vercel.ai.error.${name12}`;
  var symbol12 = Symbol.for(marker12);
  var _a12;
- var DownloadError = class extends AISDKError14 {
+ var RetryError = class extends AISDKError14 {
  constructor({
- url,
- statusCode,
- statusText,
- cause,
- message = cause == null ? `Failed to download ${url}: ${statusCode} ${statusText}` : `Failed to download ${url}: ${cause}`
+ message,
+ reason,
+ errors
  }) {
- super({ name: name12, message, cause });
+ super({ name: name12, message });
  this[_a12] = true;
- this.url = url;
- this.statusCode = statusCode;
- this.statusText = statusText;
+ this.reason = reason;
+ this.errors = errors;
+ this.lastError = errors[errors.length - 1];
  }
  static isInstance(error) {
  return AISDKError14.hasMarker(error, marker12);
@@ -414,35 +345,95 @@ var DownloadError = class extends AISDKError14 {
  };
  _a12 = symbol12;
 
- // src/util/retry-error.ts
- import { AISDKError as AISDKError15 } from "@ai-sdk/provider";
- var name13 = "AI_RetryError";
- var marker13 = `vercel.ai.error.${name13}`;
- var symbol13 = Symbol.for(marker13);
- var _a13;
- var RetryError = class extends AISDKError15 {
- constructor({
- message,
- reason,
- errors
- }) {
- super({ name: name13, message });
- this[_a13] = true;
- this.reason = reason;
- this.errors = errors;
- this.lastError = errors[errors.length - 1];
+ // src/logger/log-warnings.ts
+ function formatWarning({
+ warning,
+ provider,
+ model
+ }) {
+ const prefix = `AI SDK Warning (${provider} / ${model}):`;
+ switch (warning.type) {
+ case "unsupported": {
+ let message = `${prefix} The feature "${warning.feature}" is not supported.`;
+ if (warning.details) {
+ message += ` ${warning.details}`;
+ }
+ return message;
+ }
+ case "compatibility": {
+ let message = `${prefix} The feature "${warning.feature}" is used in a compatibility mode.`;
+ if (warning.details) {
+ message += ` ${warning.details}`;
+ }
+ return message;
+ }
+ case "other": {
+ return `${prefix} ${warning.message}`;
+ }
+ default: {
+ return `${prefix} ${JSON.stringify(warning, null, 2)}`;
+ }
  }
- static isInstance(error) {
- return AISDKError15.hasMarker(error, marker13);
+ }
+ var FIRST_WARNING_INFO_MESSAGE = "AI SDK Warning System: To turn off warning logging, set the AI_SDK_LOG_WARNINGS global to false.";
+ var hasLoggedBefore = false;
+ var logWarnings = (options) => {
+ if (options.warnings.length === 0) {
+ return;
+ }
+ const logger = globalThis.AI_SDK_LOG_WARNINGS;
+ if (logger === false) {
+ return;
+ }
+ if (typeof logger === "function") {
+ logger(options);
+ return;
+ }
+ if (!hasLoggedBefore) {
+ hasLoggedBefore = true;
+ console.info(FIRST_WARNING_INFO_MESSAGE);
+ }
+ for (const warning of options.warnings) {
+ console.warn(
+ formatWarning({
+ warning,
+ provider: options.provider,
+ model: options.model
+ })
+ );
  }
  };
- _a13 = symbol13;
+
+ // src/model/resolve-model.ts
+ import { gateway } from "@ai-sdk/gateway";
+
+ // src/util/log-v2-compatibility-warning.ts
+ function logV2CompatibilityWarning({
+ provider,
+ modelId
+ }) {
+ logWarnings({
+ warnings: [
+ {
+ type: "compatibility",
+ feature: "specificationVersion",
+ details: `Using v2 specification compatibility mode. Some features may not be available.`
+ }
+ ],
+ provider,
+ model: modelId
+ });
+ }
 
  // src/model/as-embedding-model-v3.ts
  function asEmbeddingModelV3(model) {
  if (model.specificationVersion === "v3") {
  return model;
  }
+ logV2CompatibilityWarning({
+ provider: model.provider,
+ modelId: model.modelId
+ });
  return new Proxy(model, {
  get(target, prop) {
  if (prop === "specificationVersion")
@@ -452,11 +443,15 @@ function asEmbeddingModelV3(model) {
  });
  }
 
- // src/model/as-language-model-v3.ts
- function asLanguageModelV3(model) {
+ // src/model/as-image-model-v3.ts
+ function asImageModelV3(model) {
  if (model.specificationVersion === "v3") {
  return model;
  }
+ logV2CompatibilityWarning({
+ provider: model.provider,
+ modelId: model.modelId
+ });
  return new Proxy(model, {
  get(target, prop) {
  if (prop === "specificationVersion")
@@ -466,11 +461,94 @@ function asLanguageModelV3(model) {
  });
  }
 
+ // src/model/as-language-model-v3.ts
+ function asLanguageModelV3(model) {
+ if (model.specificationVersion === "v3") {
+ return model;
+ }
+ logV2CompatibilityWarning({
+ provider: model.provider,
+ modelId: model.modelId
+ });
+ return new Proxy(model, {
+ get(target, prop) {
+ switch (prop) {
+ case "specificationVersion":
+ return "v3";
+ case "doGenerate":
+ return async (...args) => {
+ const result = await target.doGenerate(...args);
+ return {
+ ...result,
+ finishReason: convertV2FinishReasonToV3(result.finishReason),
+ usage: convertV2UsageToV3(result.usage)
+ };
+ };
+ case "doStream":
+ return async (...args) => {
+ const result = await target.doStream(...args);
+ return {
+ ...result,
+ stream: convertV2StreamToV3(result.stream)
+ };
+ };
+ default:
+ return target[prop];
+ }
+ }
+ });
+ }
+ function convertV2StreamToV3(stream) {
+ return stream.pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ switch (chunk.type) {
+ case "finish":
+ controller.enqueue({
+ ...chunk,
+ finishReason: convertV2FinishReasonToV3(chunk.finishReason),
+ usage: convertV2UsageToV3(chunk.usage)
+ });
+ break;
+ default:
+ controller.enqueue(chunk);
+ break;
+ }
+ }
+ })
+ );
+ }
+ function convertV2FinishReasonToV3(finishReason) {
+ return {
+ unified: finishReason === "unknown" ? "other" : finishReason,
+ raw: void 0
+ };
+ }
+ function convertV2UsageToV3(usage) {
+ return {
+ inputTokens: {
+ total: usage.inputTokens,
+ noCache: void 0,
+ cacheRead: usage.cachedInputTokens,
+ cacheWrite: void 0
+ },
+ outputTokens: {
+ total: usage.outputTokens,
+ text: void 0,
+ reasoning: usage.reasoningTokens
+ }
+ };
+ }
+
  // src/model/as-speech-model-v3.ts
  function asSpeechModelV3(model) {
  if (model.specificationVersion === "v3") {
  return model;
  }
+ logV2CompatibilityWarning({
+ provider: model.provider,
+ modelId: model.modelId
+ });
  return new Proxy(model, {
  get(target, prop) {
  if (prop === "specificationVersion")
@@ -485,6 +563,10 @@ function asTranscriptionModelV3(model) {
  if (model.specificationVersion === "v3") {
  return model;
  }
+ logV2CompatibilityWarning({
+ provider: model.provider,
+ modelId: model.modelId
+ });
  return new Proxy(model, {
  get(target, prop) {
  if (prop === "specificationVersion")
@@ -521,12 +603,10 @@ function resolveEmbeddingModel(model) {
  }
  return asEmbeddingModelV3(model);
  }
- return getGlobalProvider().textEmbeddingModel(
- model
- );
+ return getGlobalProvider().embeddingModel(model);
  }
  function resolveTranscriptionModel(model) {
- var _a15, _b;
+ var _a14, _b;
  if (typeof model !== "string") {
  if (model.specificationVersion !== "v3" && model.specificationVersion !== "v2") {
  const unsupportedModel = model;
@@ -538,10 +618,10 @@ function resolveTranscriptionModel(model) {
  }
  return asTranscriptionModelV3(model);
  }
- return (_b = (_a15 = getGlobalProvider()).transcriptionModel) == null ? void 0 : _b.call(_a15, model);
+ return (_b = (_a14 = getGlobalProvider()).transcriptionModel) == null ? void 0 : _b.call(_a14, model);
  }
  function resolveSpeechModel(model) {
- var _a15, _b;
+ var _a14, _b;
  if (typeof model !== "string") {
  if (model.specificationVersion !== "v3" && model.specificationVersion !== "v2") {
  const unsupportedModel = model;
@@ -553,11 +633,25 @@ function resolveSpeechModel(model) {
  }
  return asSpeechModelV3(model);
  }
- return (_b = (_a15 = getGlobalProvider()).speechModel) == null ? void 0 : _b.call(_a15, model);
+ return (_b = (_a14 = getGlobalProvider()).speechModel) == null ? void 0 : _b.call(_a14, model);
+ }
+ function resolveImageModel(model) {
+ if (typeof model !== "string") {
+ if (model.specificationVersion !== "v3" && model.specificationVersion !== "v2") {
+ const unsupportedModel = model;
+ throw new UnsupportedModelVersionError({
+ version: unsupportedModel.specificationVersion,
+ provider: unsupportedModel.provider,
+ modelId: unsupportedModel.modelId
+ });
+ }
+ return asImageModelV3(model);
+ }
+ return getGlobalProvider().imageModel(model);
  }
  function getGlobalProvider() {
- var _a15;
- return (_a15 = globalThis.AI_SDK_DEFAULT_PROVIDER) != null ? _a15 : gateway;
+ var _a14;
+ return (_a14 = globalThis.AI_SDK_DEFAULT_PROVIDER) != null ? _a14 : gateway;
  }
 
  // src/prompt/convert-to-language-model-prompt.ts
@@ -751,17 +845,18 @@ function detectMediaType({
  }
 
  // src/util/download/download.ts
+ import { DownloadError as DownloadError2 } from "@ai-sdk/provider-utils";
  import {
  withUserAgentSuffix,
  getRuntimeEnvironmentUserAgent
  } from "@ai-sdk/provider-utils";
 
  // src/version.ts
- var VERSION = true ? "6.0.0-beta.98" : "0.0.0-test";
+ var VERSION = true ? "6.0.0" : "0.0.0-test";
 
  // src/util/download/download.ts
  var download = async ({ url }) => {
- var _a15;
+ var _a14;
  const urlText = url.toString();
  try {
  const response = await fetch(urlText, {
@@ -772,7 +867,7 @@ var download = async ({ url }) => {
  )
  });
  if (!response.ok) {
- throw new DownloadError({
+ throw new DownloadError2({
  url: urlText,
  statusCode: response.status,
  statusText: response.statusText
@@ -780,13 +875,13 @@ var download = async ({ url }) => {
  }
  return {
  data: new Uint8Array(await response.arrayBuffer()),
- mediaType: (_a15 = response.headers.get("content-type")) != null ? _a15 : void 0
+ mediaType: (_a14 = response.headers.get("content-type")) != null ? _a14 : void 0
  };
  } catch (error) {
- if (DownloadError.isInstance(error)) {
+ if (DownloadError2.isInstance(error)) {
  throw error;
  }
- throw new DownloadError({ url: urlText, cause: error });
+ throw new DownloadError2({ url: urlText, cause: error });
  }
  };
 
@@ -798,7 +893,7 @@ var createDefaultDownloadFunction = (download2 = download) => (requestedDownload
  );
 
  // src/prompt/data-content.ts
- import { AISDKError as AISDKError17 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError16 } from "@ai-sdk/provider";
  import {
  convertBase64ToUint8Array as convertBase64ToUint8Array2,
  convertUint8ArrayToBase64
@@ -829,8 +924,8 @@ var dataContentSchema = z.union([
  z.custom(
  // Buffer might not be available in some environments such as CloudFlare:
  (value) => {
- var _a15, _b;
- return (_b = (_a15 = globalThis.Buffer) == null ? void 0 : _a15.isBuffer(value)) != null ? _b : false;
+ var _a14, _b;
+ return (_b = (_a14 = globalThis.Buffer) == null ? void 0 : _a14.isBuffer(value)) != null ? _b : false;
  },
  { message: "Must be a Buffer" }
  )
@@ -853,7 +948,7 @@ function convertToLanguageModelV3DataContent(content) {
  content.toString()
  );
  if (dataUrlMediaType == null || base64Content == null) {
- throw new AISDKError17({
+ throw new AISDKError16({
  name: "InvalidDataContentError",
  message: `Invalid data URL format in content ${content.toString()}`
  });
@@ -892,6 +987,11 @@ function convertDataContentToUint8Array(content) {
  throw new InvalidDataContentError({ content });
  }
 
+ // src/util/as-array.ts
+ function asArray(value) {
+ return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+ }
+
  // src/prompt/convert-to-language-model-prompt.ts
  async function convertToLanguageModelPrompt({
  prompt,
@@ -904,7 +1004,11 @@ async function convertToLanguageModelPrompt({
  supportedUrls
  );
  const messages = [
- ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
+ ...prompt.system != null ? typeof prompt.system === "string" ? [{ role: "system", content: prompt.system }] : asArray(prompt.system).map((message) => ({
+ role: "system",
+ content: message.content,
+ providerOptions: message.providerOptions
+ })) : [],
  ...prompt.messages.map(
  (message) => convertToLanguageModelMessage({ message, downloadedAssets })
  )
@@ -1044,8 +1148,8 @@ async function downloadAssets(messages, download2, supportedUrls) {
  ).flat().filter(
  (part) => part.type === "image" || part.type === "file"
  ).map((part) => {
- var _a15;
- const mediaType = (_a15 = part.mediaType) != null ? _a15 : part.type === "image" ? "image/*" : void 0;
+ var _a14;
+ const mediaType = (_a14 = part.mediaType) != null ? _a14 : part.type === "image" ? "image/*" : void 0;
  let data = part.type === "image" ? part.image : part.data;
  if (typeof data === "string") {
  try {
@@ -1075,7 +1179,7 @@ async function downloadAssets(messages, download2, supportedUrls) {
  );
  }
  function convertPartToLanguageModelPart(part, downloadedAssets) {
- var _a15;
+ var _a14;
  if (part.type === "text") {
  return {
  type: "text",
@@ -1108,7 +1212,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
  switch (type) {
  case "image": {
  if (data instanceof Uint8Array || typeof data === "string") {
- mediaType = (_a15 = detectMediaType({ data, signatures: imageMediaTypeSignatures })) != null ? _a15 : mediaType;
+ mediaType = (_a14 = detectMediaType({ data, signatures: imageMediaTypeSignatures })) != null ? _a14 : mediaType;
  }
  return {
  type: "file",
@@ -1161,7 +1265,9 @@ function mapToolResultOutput(output) {
 
  // src/prompt/create-tool-model-output.ts
  import { getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
- function createToolModelOutput({
+ async function createToolModelOutput({
+ toolCallId,
+ input,
  output,
  tool: tool2,
  errorMode
@@ -1172,7 +1278,7 @@ function createToolModelOutput({
  return { type: "error-json", value: toJSONValue(output) };
  }
  if (tool2 == null ? void 0 : tool2.toModelOutput) {
- return tool2.toModelOutput(output);
+ return await tool2.toModelOutput({ toolCallId, input, output });
  }
  return typeof output === "string" ? { type: "text", value: output } : { type: "json", value: toJSONValue(output) };
  }
@@ -1294,10 +1400,10 @@ async function prepareToolsAndToolChoice({
  };
  }
  const filteredTools = activeTools != null ? Object.entries(tools).filter(
- ([name15]) => activeTools.includes(name15)
+ ([name14]) => activeTools.includes(name14)
  ) : Object.entries(tools);
  const languageModelTools = [];
- for (const [name15, tool2] of filteredTools) {
+ for (const [name14, tool2] of filteredTools) {
  const toolType = tool2.type;
  switch (toolType) {
  case void 0:
@@ -1305,16 +1411,18 @@ async function prepareToolsAndToolChoice({
  case "function":
  languageModelTools.push({
  type: "function",
- name: name15,
+ name: name14,
  description: tool2.description,
  inputSchema: await asSchema(tool2.inputSchema).jsonSchema,
- providerOptions: tool2.providerOptions
+ ...tool2.inputExamples != null ? { inputExamples: tool2.inputExamples } : {},
+ providerOptions: tool2.providerOptions,
+ ...tool2.strict != null ? { strict: tool2.strict } : {}
  });
  break;
- case "provider-defined":
+ case "provider":
  languageModelTools.push({
- type: "provider-defined",
- name: name15,
+ type: "provider",
+ name: name14,
  id: tool2.id,
  args: tool2.args
  });
@@ -1333,7 +1441,9 @@ async function prepareToolsAndToolChoice({
 
  // src/prompt/standardize-prompt.ts
  import { InvalidPromptError as InvalidPromptError2 } from "@ai-sdk/provider";
- import { safeValidateTypes } from "@ai-sdk/provider-utils";
+ import {
+ safeValidateTypes
+ } from "@ai-sdk/provider-utils";
  import { z as z6 } from "zod/v4";
 
  // src/prompt/message.ts
1449
  // src/prompt/message.ts
@@ -1505,7 +1615,6 @@ var systemModelMessageSchema = z5.object(
1505
1615
  providerOptions: providerMetadataSchema.optional()
1506
1616
  }
1507
1617
  );
1508
- var coreSystemMessageSchema = systemModelMessageSchema;
1509
1618
  var userModelMessageSchema = z5.object({
1510
1619
  role: z5.literal("user"),
1511
1620
  content: z5.union([
@@ -1514,7 +1623,6 @@ var userModelMessageSchema = z5.object({
1514
1623
  ]),
1515
1624
  providerOptions: providerMetadataSchema.optional()
1516
1625
  });
1517
- var coreUserMessageSchema = userModelMessageSchema;
1518
1626
  var assistantModelMessageSchema = z5.object({
1519
1627
  role: z5.literal("assistant"),
1520
1628
  content: z5.union([
@@ -1532,20 +1640,17 @@ var assistantModelMessageSchema = z5.object({
1532
1640
  ]),
1533
1641
  providerOptions: providerMetadataSchema.optional()
1534
1642
  });
1535
- var coreAssistantMessageSchema = assistantModelMessageSchema;
1536
1643
  var toolModelMessageSchema = z5.object({
1537
1644
  role: z5.literal("tool"),
1538
1645
  content: z5.array(z5.union([toolResultPartSchema, toolApprovalResponseSchema])),
1539
1646
  providerOptions: providerMetadataSchema.optional()
1540
1647
  });
1541
- var coreToolMessageSchema = toolModelMessageSchema;
1542
1648
  var modelMessageSchema = z5.union([
1543
1649
  systemModelMessageSchema,
1544
1650
  userModelMessageSchema,
1545
1651
  assistantModelMessageSchema,
1546
1652
  toolModelMessageSchema
1547
1653
  ]);
1548
- var coreMessageSchema = modelMessageSchema;
1549
1654
 
1550
1655
  // src/prompt/standardize-prompt.ts
1551
1656
  async function standardizePrompt(prompt) {
@@ -1561,10 +1666,12 @@ async function standardizePrompt(prompt) {
1561
1666
  message: "prompt and messages cannot be defined at the same time"
1562
1667
  });
1563
1668
  }
1564
- if (prompt.system != null && typeof prompt.system !== "string") {
1669
+ if (prompt.system != null && typeof prompt.system !== "string" && !asArray(prompt.system).every(
1670
+ (message) => typeof message === "object" && message !== null && "role" in message && message.role === "system"
1671
+ )) {
1565
1672
  throw new InvalidPromptError2({
1566
1673
  prompt,
1567
- message: "system must be a string"
1674
+ message: "system must be a string, SystemModelMessage, or array of SystemModelMessage"
1568
1675
  });
1569
1676
  }
1570
1677
  let messages;
@@ -1593,7 +1700,7 @@ async function standardizePrompt(prompt) {
1593
1700
  if (!validationResult.success) {
1594
1701
  throw new InvalidPromptError2({
1595
1702
  prompt,
1596
- message: "The messages must be a ModelMessage[]. If you have passed a UIMessage[], you can use convertToModelMessages to convert them.",
1703
+ message: "The messages do not match the ModelMessage[] schema.",
1597
1704
  cause: validationResult.error
1598
1705
  });
1599
1706
  }
@@ -1604,20 +1711,31 @@ async function standardizePrompt(prompt) {
1604
1711
  }
1605
1712
 
1606
1713
  // src/prompt/wrap-gateway-error.ts
1607
- import {
1608
- GatewayAuthenticationError,
1609
- GatewayModelNotFoundError
1610
- } from "@ai-sdk/gateway";
1611
- import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
1714
+ import { GatewayAuthenticationError } from "@ai-sdk/gateway";
1715
+ import { AISDKError as AISDKError17 } from "@ai-sdk/provider";
1612
1716
  function wrapGatewayError(error) {
1613
- if (GatewayAuthenticationError.isInstance(error) || GatewayModelNotFoundError.isInstance(error)) {
1614
- return new AISDKError18({
1717
+ if (!GatewayAuthenticationError.isInstance(error))
1718
+ return error;
1719
+ const isProductionEnv = (process == null ? void 0 : process.env.NODE_ENV) === "production";
1720
+ const moreInfoURL = "https://ai-sdk.dev/unauthenticated-ai-gateway";
1721
+ if (isProductionEnv) {
1722
+ return new AISDKError17({
1615
1723
  name: "GatewayError",
1616
- message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
1617
- cause: error
1724
+ message: `Unauthenticated. Configure AI_GATEWAY_API_KEY or use a provider module. Learn more: ${moreInfoURL}`
1618
1725
  });
1619
1726
  }
1620
- return error;
1727
+ return Object.assign(
1728
+ new Error(`\x1B[1m\x1B[31mUnauthenticated request to AI Gateway.\x1B[0m
1729
+
1730
+ To authenticate, set the \x1B[33mAI_GATEWAY_API_KEY\x1B[0m environment variable with your API key.
1731
+
1732
+ Alternatively, you can use a provider module instead of the AI Gateway.
1733
+
1734
+ Learn more: \x1B[34m${moreInfoURL}\x1B[0m
1735
+
1736
+ `),
1737
+ { name: "GatewayAuthenticationError" }
1738
+ );
1621
1739
  }
1622
1740
 
1623
1741
  // src/telemetry/assemble-operation-name.ts
@@ -1642,7 +1760,7 @@ function getBaseTelemetryAttributes({
  telemetry,
  headers
  }) {
- var _a15;
+ var _a14;
  return {
  "ai.model.provider": model.provider,
  "ai.model.id": model.modelId,
@@ -1652,7 +1770,7 @@ function getBaseTelemetryAttributes({
  return attributes;
  }, {}),
  // add metadata as attributes:
- ...Object.entries((_a15 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a15 : {}).reduce(
+ ...Object.entries((_a14 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a14 : {}).reduce(
  (attributes, [key, value]) => {
  attributes[`ai.telemetry.metadata.${key}`] = value;
  return attributes;
@@ -1677,7 +1795,7 @@ var noopTracer = {
  startSpan() {
  return noopSpan;
  },
- startActiveSpan(name15, arg1, arg2, arg3) {
+ startActiveSpan(name14, arg1, arg2, arg3) {
  if (typeof arg1 === "function") {
  return arg1(noopSpan);
  }
@@ -1747,14 +1865,14 @@ function getTracer({
  // src/telemetry/record-span.ts
  import { SpanStatusCode } from "@opentelemetry/api";
  async function recordSpan({
- name: name15,
+ name: name14,
  tracer,
  attributes,
  fn,
  endWhenDone = true
  }) {
  return tracer.startActiveSpan(
- name15,
+ name14,
  { attributes: await attributes },
  async (span) => {
  try {
@@ -1844,10 +1962,74 @@ function stringifyForTelemetry(prompt) {
  }
 
  // src/types/usage.ts
+ function asLanguageModelUsage(usage) {
+ return {
+ inputTokens: usage.inputTokens.total,
+ inputTokenDetails: {
+ noCacheTokens: usage.inputTokens.noCache,
+ cacheReadTokens: usage.inputTokens.cacheRead,
+ cacheWriteTokens: usage.inputTokens.cacheWrite
+ },
+ outputTokens: usage.outputTokens.total,
+ outputTokenDetails: {
+ textTokens: usage.outputTokens.text,
+ reasoningTokens: usage.outputTokens.reasoning
+ },
+ totalTokens: addTokenCounts(
+ usage.inputTokens.total,
+ usage.outputTokens.total
+ ),
+ raw: usage.raw,
+ reasoningTokens: usage.outputTokens.reasoning,
+ cachedInputTokens: usage.inputTokens.cacheRead
+ };
+ }
+ function createNullLanguageModelUsage() {
+ return {
+ inputTokens: void 0,
+ inputTokenDetails: {
+ noCacheTokens: void 0,
+ cacheReadTokens: void 0,
+ cacheWriteTokens: void 0
+ },
+ outputTokens: void 0,
+ outputTokenDetails: {
+ textTokens: void 0,
+ reasoningTokens: void 0
+ },
+ totalTokens: void 0,
+ raw: void 0
+ };
+ }
  function addLanguageModelUsage(usage1, usage2) {
+ var _a14, _b, _c, _d, _e, _f, _g, _h, _i, _j;
  return {
  inputTokens: addTokenCounts(usage1.inputTokens, usage2.inputTokens),
+ inputTokenDetails: {
+ noCacheTokens: addTokenCounts(
+ (_a14 = usage1.inputTokenDetails) == null ? void 0 : _a14.noCacheTokens,
+ (_b = usage2.inputTokenDetails) == null ? void 0 : _b.noCacheTokens
+ ),
+ cacheReadTokens: addTokenCounts(
+ (_c = usage1.inputTokenDetails) == null ? void 0 : _c.cacheReadTokens,
+ (_d = usage2.inputTokenDetails) == null ? void 0 : _d.cacheReadTokens
+ ),
+ cacheWriteTokens: addTokenCounts(
+ (_e = usage1.inputTokenDetails) == null ? void 0 : _e.cacheWriteTokens,
+ (_f = usage2.inputTokenDetails) == null ? void 0 : _f.cacheWriteTokens
+ )
+ },
  outputTokens: addTokenCounts(usage1.outputTokens, usage2.outputTokens),
+ outputTokenDetails: {
+ textTokens: addTokenCounts(
+ (_g = usage1.outputTokenDetails) == null ? void 0 : _g.textTokens,
+ (_h = usage2.outputTokenDetails) == null ? void 0 : _h.textTokens
+ ),
+ reasoningTokens: addTokenCounts(
+ (_i = usage1.outputTokenDetails) == null ? void 0 : _i.reasoningTokens,
+ (_j = usage2.outputTokenDetails) == null ? void 0 : _j.reasoningTokens
+ )
+ },
  totalTokens: addTokenCounts(usage1.totalTokens, usage2.totalTokens),
  reasoningTokens: addTokenCounts(
  usage1.reasoningTokens,
@@ -1870,9 +2052,37 @@ function addImageModelUsage(usage1, usage2) {
  };
  }
 
- // src/util/as-array.ts
- function asArray(value) {
- return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+ // src/util/merge-objects.ts
+ function mergeObjects(base, overrides) {
+ if (base === void 0 && overrides === void 0) {
+ return void 0;
+ }
+ if (base === void 0) {
+ return overrides;
+ }
+ if (overrides === void 0) {
+ return base;
+ }
+ const result = { ...base };
+ for (const key in overrides) {
+ if (Object.prototype.hasOwnProperty.call(overrides, key)) {
+ const overridesValue = overrides[key];
+ if (overridesValue === void 0)
+ continue;
+ const baseValue = key in base ? base[key] : void 0;
+ const isSourceObject = overridesValue !== null && typeof overridesValue === "object" && !Array.isArray(overridesValue) && !(overridesValue instanceof Date) && !(overridesValue instanceof RegExp);
+ const isTargetObject = baseValue !== null && baseValue !== void 0 && typeof baseValue === "object" && !Array.isArray(baseValue) && !(baseValue instanceof Date) && !(baseValue instanceof RegExp);
+ if (isSourceObject && isTargetObject) {
+ result[key] = mergeObjects(
+ baseValue,
+ overridesValue
+ );
+ } else {
+ result[key] = overridesValue;
+ }
+ }
+ }
+ return result;
  }
 
  // src/util/retry-with-exponential-backoff.ts
@@ -2134,7 +2344,8 @@ async function executeToolCall({
  toolName,
  input,
  error,
- dynamic: tool2.type === "dynamic"
+ dynamic: tool2.type === "dynamic",
+ ...toolCall.providerMetadata != null ? { providerMetadata: toolCall.providerMetadata } : {}
  };
  }
  try {
@@ -2156,7 +2367,8 @@ async function executeToolCall({
  toolName,
  input,
  output,
- dynamic: tool2.type === "dynamic"
+ dynamic: tool2.type === "dynamic",
+ ...toolCall.providerMetadata != null ? { providerMetadata: toolCall.providerMetadata } : {}
  };
  }
  });
@@ -2597,13 +2809,17 @@ var text = () => ({
  }
  });
  var object = ({
- schema: inputSchema
+ schema: inputSchema,
+ name: name14,
+ description
  }) => {
  const schema = asSchema2(inputSchema);
  return {
  responseFormat: resolve(schema.jsonSchema).then((jsonSchema2) => ({
  type: "json",
- schema: jsonSchema2
+ schema: jsonSchema2,
+ ...name14 != null && { name: name14 },
+ ...description != null && { description }
  })),
  async parseCompleteOutput({ text: text2 }, context) {
  const parseResult = await safeParseJSON2({ text: text2 });
@@ -2652,7 +2868,9 @@ var object = ({
  };
  };
  var array = ({
- element: inputElementSchema
+ element: inputElementSchema,
+ name: name14,
+ description
  }) => {
  const elementSchema = asSchema2(inputElementSchema);
  return {
@@ -2669,7 +2887,9 @@ var array = ({
  },
  required: ["elements"],
  additionalProperties: false
- }
+ },
+ ...name14 != null && { name: name14 },
+ ...description != null && { description }
  };
  }),
  async parseCompleteOutput({ text: text2 }, context) {
@@ -2747,7 +2967,9 @@ var array = ({
  };
  };
  var choice = ({
- options: choiceOptions
+ options: choiceOptions,
+ name: name14,
+ description
  }) => {
  return {
  // JSON schema that describes an enumeration:
@@ -2761,7 +2983,9 @@ var choice = ({
  },
  required: ["result"],
  additionalProperties: false
- }
+ },
+ ...name14 != null && { name: name14 },
+ ...description != null && { description }
  }),
  async parseCompleteOutput({ text: text2 }, context) {
  const parseResult = await safeParseJSON2({ text: text2 });
@@ -2817,10 +3041,15 @@ var choice = ({
  }
  };
  };
- var json = () => {
+ var json = ({
+ name: name14,
+ description
+ } = {}) => {
  return {
  responseFormat: Promise.resolve({
- type: "json"
+ type: "json",
+ ...name14 != null && { name: name14 },
+ ...description != null && { description }
  }),
  async parseCompleteOutput({ text: text2 }, context) {
  const parseResult = await safeParseJSON2({ text: text2 });
@@ -2865,7 +3094,7 @@ async function parseToolCall({
  system,
  messages
  }) {
- var _a15;
+ var _a14;
  try {
  if (tools == null) {
  if (toolCall.providerExecuted && toolCall.dynamic) {
@@ -2914,7 +3143,9 @@ async function parseToolCall({
  dynamic: true,
  invalid: true,
  error,
- title: (_a15 = tools == null ? void 0 : tools[toolCall.toolName]) == null ? void 0 : _a15.title
+ title: (_a14 = tools == null ? void 0 : tools[toolCall.toolName]) == null ? void 0 : _a14.title,
+ providerExecuted: toolCall.providerExecuted,
+ providerMetadata: toolCall.providerMetadata
  };
  }
  }
@@ -2986,6 +3217,7 @@ var DefaultStepResult = class {
  constructor({
  content,
  finishReason,
+ rawFinishReason,
  usage,
  warnings,
  request,
@@ -2994,6 +3226,7 @@ var DefaultStepResult = class {
  }) {
  this.content = content;
  this.finishReason = finishReason;
+ this.rawFinishReason = rawFinishReason;
  this.usage = usage;
  this.warnings = warnings;
  this.request = request;
@@ -3049,8 +3282,8 @@ function stepCountIs(stepCount) {
  }
  function hasToolCall(toolName) {
  return ({ steps }) => {
- var _a15, _b, _c;
- return (_c = (_b = (_a15 = steps[steps.length - 1]) == null ? void 0 : _a15.toolCalls) == null ? void 0 : _b.some(
+ var _a14, _b, _c;
+ return (_c = (_b = (_a14 = steps[steps.length - 1]) == null ? void 0 : _a14.toolCalls) == null ? void 0 : _b.some(
  (toolCall) => toolCall.toolName === toolName
  )) != null ? _c : false;
  };
@@ -3063,92 +3296,118 @@ async function isStopConditionMet({
  }
 
  // src/generate-text/to-response-messages.ts
- function toResponseMessages({
+ async function toResponseMessages({
  content: inputContent,
  tools
  }) {
  const responseMessages = [];
- const content = inputContent.filter((part) => part.type !== "source").filter(
- (part) => (part.type !== "tool-result" || part.providerExecuted) && (part.type !== "tool-error" || part.providerExecuted)
- ).filter((part) => part.type !== "text" || part.text.length > 0).map((part) => {
+ const content = [];
+ for (const part of inputContent) {
+ if (part.type === "source" || (part.type === "tool-result" || part.type === "tool-error") && !part.providerExecuted || part.type === "text" && part.text.length === 0) {
+ continue;
+ }
  switch (part.type) {
  case "text":
- return {
+ content.push({
  type: "text",
  text: part.text,
  providerOptions: part.providerMetadata
- };
+ });
+ break;
  case "reasoning":
- return {
+ content.push({
  type: "reasoning",
  text: part.text,
  providerOptions: part.providerMetadata
- };
+ });
+ break;
  case "file":
- return {
+ content.push({
  type: "file",
  data: part.file.base64,
  mediaType: part.file.mediaType,
  providerOptions: part.providerMetadata
- };
+ });
+ break;
  case "tool-call":
- return {
+ content.push({
  type: "tool-call",
  toolCallId: part.toolCallId,
  toolName: part.toolName,
  input: part.input,
  providerExecuted: part.providerExecuted,
  providerOptions: part.providerMetadata
- };
- case "tool-result":
- return {
+ });
+ break;
+ case "tool-result": {
+ const output = await createToolModelOutput({
+ toolCallId: part.toolCallId,
+ input: part.input,
+ tool: tools == null ? void 0 : tools[part.toolName],
+ output: part.output,
+ errorMode: "none"
+ });
+ content.push({
  type: "tool-result",
  toolCallId: part.toolCallId,
  toolName: part.toolName,
- output: createToolModelOutput({
- tool: tools == null ? void 0 : tools[part.toolName],
- output: part.output,
- errorMode: "none"
- }),
- providerExecuted: true,
+ output,
  providerOptions: part.providerMetadata
- };
- case "tool-error":
- return {
+ });
+ break;
+ }
+ case "tool-error": {
+ const output = await createToolModelOutput({
+ toolCallId: part.toolCallId,
+ input: part.input,
+ tool: tools == null ? void 0 : tools[part.toolName],
+ output: part.error,
+ errorMode: "json"
+ });
+ content.push({
  type: "tool-result",
  toolCallId: part.toolCallId,
  toolName: part.toolName,
- output: createToolModelOutput({
- tool: tools == null ? void 0 : tools[part.toolName],
- output: part.error,
- errorMode: "json"
- }),
+ output,
  providerOptions: part.providerMetadata
- };
+ });
+ break;
+ }
  case "tool-approval-request":
- return {
+ content.push({
  type: "tool-approval-request",
  approvalId: part.approvalId,
  toolCallId: part.toolCall.toolCallId
- };
+ });
+ break;
  }
- });
+ }
  if (content.length > 0) {
  responseMessages.push({
  role: "assistant",
  content
  });
  }
- const toolResultContent = inputContent.filter((part) => part.type === "tool-result" || part.type === "tool-error").filter((part) => !part.providerExecuted).map((toolResult) => ({
- type: "tool-result",
- toolCallId: toolResult.toolCallId,
- toolName: toolResult.toolName,
- output: createToolModelOutput({
- tool: tools == null ? void 0 : tools[toolResult.toolName],
- output: toolResult.type === "tool-result" ? toolResult.output : toolResult.error,
- errorMode: toolResult.type === "tool-error" ? "text" : "none"
- })
- }));
+ const toolResultContent = [];
+ for (const part of inputContent) {
+ if (!(part.type === "tool-result" || part.type === "tool-error") || part.providerExecuted) {
+ continue;
+ }
+ const output = await createToolModelOutput({
+ toolCallId: part.toolCallId,
+ input: part.input,
+ tool: tools == null ? void 0 : tools[part.toolName],
+ output: part.type === "tool-result" ? part.output : part.error,
+ errorMode: part.type === "tool-error" ? "text" : "none"
+ });
+ toolResultContent.push({
+ type: "tool-result",
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
+ output,
+ ...part.providerMetadata != null ? { providerOptions: part.providerMetadata } : {}
+ });
+ }
  if (toolResultContent.length > 0) {
  responseMessages.push({
  role: "tool",
@@ -3238,7 +3497,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a15, _b, _c, _d, _e, _f, _g;
+ var _a14, _b, _c, _d, _e, _f, _g, _h;
  const initialMessages = initialPrompt.messages;
  const responseMessages = [];
  const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
@@ -3254,31 +3513,36 @@ async function generateText({
  abortSignal,
  experimental_context
  });
+ const toolContent = [];
+ for (const output2 of toolOutputs) {
+ const modelOutput = await createToolModelOutput({
+ toolCallId: output2.toolCallId,
+ input: output2.input,
+ tool: tools == null ? void 0 : tools[output2.toolName],
+ output: output2.type === "tool-result" ? output2.output : output2.error,
+ errorMode: output2.type === "tool-error" ? "json" : "none"
+ });
+ toolContent.push({
+ type: "tool-result",
+ toolCallId: output2.toolCallId,
+ toolName: output2.toolName,
+ output: modelOutput
+ });
+ }
+ for (const toolApproval of deniedToolApprovals) {
+ toolContent.push({
+ type: "tool-result",
+ toolCallId: toolApproval.toolCall.toolCallId,
+ toolName: toolApproval.toolCall.toolName,
+ output: {
+ type: "execution-denied",
+ reason: toolApproval.approvalResponse.reason
+ }
+ });
+ }
  responseMessages.push({
  role: "tool",
- content: [
- // add regular tool results for approved tool calls:
- ...toolOutputs.map((output2) => ({
- type: "tool-result",
- toolCallId: output2.toolCallId,
- toolName: output2.toolName,
- output: createToolModelOutput({
- tool: tools == null ? void 0 : tools[output2.toolName],
- output: output2.type === "tool-result" ? output2.output : output2.error,
- errorMode: output2.type === "tool-error" ? "json" : "none"
- })
- })),
- // add execution denied tool results for denied tool approvals:
- ...deniedToolApprovals.map((toolApproval) => ({
- type: "tool-result",
- toolCallId: toolApproval.toolCall.toolCallId,
- toolName: toolApproval.toolCall.toolName,
- output: {
- type: "execution-denied",
- reason: toolApproval.approvalResponse.reason
- }
- }))
- ]
+ content: toolContent
  });
  }
  const callSettings2 = prepareCallSettings(settings);
@@ -3286,16 +3550,18 @@ async function generateText({
  let clientToolCalls = [];
  let clientToolOutputs = [];
  const steps = [];
+ const pendingDeferredToolCalls = /* @__PURE__ */ new Map();
  do {
  const stepInputMessages = [...initialMessages, ...responseMessages];
  const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
  model,
  steps,
  stepNumber: steps.length,
- messages: stepInputMessages
+ messages: stepInputMessages,
+ experimental_context
  }));
  const stepModel = resolveLanguageModel(
- (_a15 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a15 : model
+ (_a14 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a14 : model
  );
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
@@ -3305,14 +3571,15 @@ async function generateText({
  supportedUrls: await stepModel.supportedUrls,
  download: download2
  });
+ experimental_context = (_d = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _d : experimental_context;
  const { toolChoice: stepToolChoice, tools: stepTools } = await prepareToolsAndToolChoice({
  tools,
- toolChoice: (_d = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _d : toolChoice,
- activeTools: (_e = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _e : activeTools
+ toolChoice: (_e = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _e : toolChoice,
+ activeTools: (_f = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _f : activeTools
  });
  currentModelResponse = await retry(
  () => {
- var _a16;
+ var _a15;
  return recordSpan({
  name: "ai.generateText.doGenerate",
  attributes: selectTelemetryAttributes({
@@ -3344,36 +3611,40 @@ async function generateText({
  "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
  "gen_ai.request.stop_sequences": settings.stopSequences,
- "gen_ai.request.temperature": (_a16 = settings.temperature) != null ? _a16 : void 0,
+ "gen_ai.request.temperature": (_a15 = settings.temperature) != null ? _a15 : void 0,
  "gen_ai.request.top_k": settings.topK,
  "gen_ai.request.top_p": settings.topP
  }
  }),
  tracer,
  fn: async (span2) => {
- var _a17, _b2, _c2, _d2, _e2, _f2, _g2, _h;
+ var _a16, _b2, _c2, _d2, _e2, _f2, _g2, _h2;
+ const stepProviderOptions = mergeObjects(
+ providerOptions,
+ prepareStepResult == null ? void 0 : prepareStepResult.providerOptions
+ );
  const result = await stepModel.doGenerate({
  ...callSettings2,
  tools: stepTools,
  toolChoice: stepToolChoice,
  responseFormat: await (output == null ? void 0 : output.responseFormat),
  prompt: promptMessages,
- providerOptions,
+ providerOptions: stepProviderOptions,
  abortSignal,
  headers: headersWithUserAgent
  });
  const responseData = {
- id: (_b2 = (_a17 = result.response) == null ? void 0 : _a17.id) != null ? _b2 : generateId2(),
+ id: (_b2 = (_a16 = result.response) == null ? void 0 : _a16.id) != null ? _b2 : generateId2(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
  modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId,
  headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
- body: (_h = result.response) == null ? void 0 : _h.body
+ body: (_h2 = result.response) == null ? void 0 : _h2.body
  };
  span2.setAttributes(
  await selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.response.finishReason": result.finishReason,
+ "ai.response.finishReason": result.finishReason.unified,
  "ai.response.text": {
  output: () => extractTextContent(result.content)
  },
@@ -3390,14 +3661,16 @@ async function generateText({
  result.providerMetadata
  ),
  // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": result.usage.inputTokens,
- "ai.usage.completionTokens": result.usage.outputTokens,
+ "ai.usage.promptTokens": result.usage.inputTokens.total,
+ "ai.usage.completionTokens": result.usage.outputTokens.total,
  // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [result.finishReason],
+ "gen_ai.response.finish_reasons": [
+ result.finishReason.unified
+ ],
  "gen_ai.response.id": responseData.id,
  "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result.usage.inputTokens,
- "gen_ai.usage.output_tokens": result.usage.outputTokens
+ "gen_ai.usage.input_tokens": result.usage.inputTokens.total,
+ "gen_ai.usage.output_tokens": result.usage.outputTokens.total
  }
  })
  );
@@ -3482,25 +3755,47 @@ async function generateText({
  })
  );
  }
+ for (const toolCall of stepToolCalls) {
+ if (!toolCall.providerExecuted)
+ continue;
+ const tool2 = tools == null ? void 0 : tools[toolCall.toolName];
+ if ((tool2 == null ? void 0 : tool2.type) === "provider" && tool2.supportsDeferredResults) {
+ const hasResultInResponse = currentModelResponse.content.some(
+ (part) => part.type === "tool-result" && part.toolCallId === toolCall.toolCallId
+ );
+ if (!hasResultInResponse) {
+ pendingDeferredToolCalls.set(toolCall.toolCallId, {
+ toolName: toolCall.toolName
+ });
+ }
+ }
+ }
+ for (const part of currentModelResponse.content) {
+ if (part.type === "tool-result") {
+ pendingDeferredToolCalls.delete(part.toolCallId);
+ }
+ }
  const stepContent = asContent({
  content: currentModelResponse.content,
  toolCalls: stepToolCalls,
  toolOutputs: clientToolOutputs,
- toolApprovalRequests: Object.values(toolApprovalRequests)
+ toolApprovalRequests: Object.values(toolApprovalRequests),
+ tools
  });
  responseMessages.push(
- ...toResponseMessages({
+ ...await toResponseMessages({
  content: stepContent,
  tools
  })
  );
  const currentStepResult = new DefaultStepResult({
  content: stepContent,
- finishReason: currentModelResponse.finishReason,
- usage: currentModelResponse.usage,
+ finishReason: currentModelResponse.finishReason.unified,
+ rawFinishReason: currentModelResponse.finishReason.raw,
+ usage: asLanguageModelUsage(currentModelResponse.usage),
  warnings: currentModelResponse.warnings,
  providerMetadata: currentModelResponse.providerMetadata,
- request: (_f = currentModelResponse.request) != null ? _f : {},
+ request: (_g = currentModelResponse.request) != null ? _g : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
@@ -3508,23 +3803,24 @@ async function generateText({
  }
  });
  logWarnings({
- warnings: (_g = currentModelResponse.warnings) != null ? _g : [],
+ warnings: (_h = currentModelResponse.warnings) != null ? _h : [],
  provider: stepModel.provider,
  model: stepModel.modelId
  });
  steps.push(currentStepResult);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
  } while (
- // there are tool calls:
- clientToolCalls.length > 0 && // all current tool calls have outputs (incl. execution errors):
- clientToolOutputs.length === clientToolCalls.length && // continue until a stop condition is met:
+ // Continue if:
+ // 1. There are client tool calls that have all been executed, OR
+ // 2. There are pending deferred results from provider-executed tools
+ (clientToolCalls.length > 0 && clientToolOutputs.length === clientToolCalls.length || pendingDeferredToolCalls.size > 0) && // continue until a stop condition is met:
  !await isStopConditionMet({ stopConditions, steps })
  );
  span.setAttributes(
  await selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.response.finishReason": currentModelResponse.finishReason,
+ "ai.response.finishReason": currentModelResponse.finishReason.unified,
  "ai.response.text": {
  output: () => extractTextContent(currentModelResponse.content)
  },
@@ -3538,8 +3834,8 @@ async function generateText({
  currentModelResponse.providerMetadata
  ),
  // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
- "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
+ "ai.usage.promptTokens": currentModelResponse.usage.inputTokens.total,
+ "ai.usage.completionTokens": currentModelResponse.usage.outputTokens.total
  }
  })
  );
@@ -3558,6 +3854,7 @@ async function generateText({
  );
  await (onFinish == null ? void 0 : onFinish({
  finishReason: lastStep.finishReason,
+ rawFinishReason: lastStep.rawFinishReason,
  usage: lastStep.usage,
  content: lastStep.content,
  text: lastStep.text,
@@ -3576,7 +3873,8 @@ async function generateText({
  warnings: lastStep.warnings,
  providerMetadata: lastStep.providerMetadata,
  steps,
- totalUsage
+ totalUsage,
+ experimental_context
  }));
  let resolvedOutput;
  if (lastStep.finishReason === "stop") {
@@ -3675,6 +3973,9 @@ var DefaultGenerateTextResult = class {
  get finishReason() {
  return this.finalStep.finishReason;
  }
+ get rawFinishReason() {
+ return this.finalStep.rawFinishReason;
+ }
  get warnings() {
  return this.finalStep.warnings;
  }
@@ -3717,45 +4018,76 @@ function asContent({
  content,
  toolCalls,
  toolOutputs,
- toolApprovalRequests
+ toolApprovalRequests,
+ tools
  }) {
- return [
- ...content.map((part) => {
- switch (part.type) {
- case "text":
- case "reasoning":
- case "source":
- return part;
- case "file": {
- return {
- type: "file",
- file: new DefaultGeneratedFile(part)
- };
- }
- case "tool-call": {
- return toolCalls.find(
- (toolCall) => toolCall.toolCallId === part.toolCallId
- );
- }
- case "tool-result": {
- const toolCall = toolCalls.find(
- (toolCall2) => toolCall2.toolCallId === part.toolCallId
- );
- if (toolCall == null) {
+ const contentParts = [];
+ for (const part of content) {
+ switch (part.type) {
+ case "text":
+ case "reasoning":
+ case "source":
+ contentParts.push(part);
+ break;
+ case "file": {
+ contentParts.push({
+ type: "file",
+ file: new DefaultGeneratedFile(part),
+ ...part.providerMetadata != null ? { providerMetadata: part.providerMetadata } : {}
+ });
+ break;
+ }
+ case "tool-call": {
+ contentParts.push(
+ toolCalls.find((toolCall) => toolCall.toolCallId === part.toolCallId)
+ );
+ break;
+ }
+ case "tool-result": {
+ const toolCall = toolCalls.find(
+ (toolCall2) => toolCall2.toolCallId === part.toolCallId
+ );
+ if (toolCall == null) {
+ const tool2 = tools == null ? void 0 : tools[part.toolName];
+ const supportsDeferredResults = (tool2 == null ? void 0 : tool2.type) === "provider" && tool2.supportsDeferredResults;
+ if (!supportsDeferredResults) {
  throw new Error(`Tool call ${part.toolCallId} not found.`);
  }
  if (part.isError) {
- return {
+ contentParts.push({
  type: "tool-error",
  toolCallId: part.toolCallId,
  toolName: part.toolName,
- input: toolCall.input,
+ input: void 0,
  error: part.result,
  providerExecuted: true,
- dynamic: toolCall.dynamic
- };
+ dynamic: part.dynamic
+ });
+ } else {
+ contentParts.push({
+ type: "tool-result",
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
+ input: void 0,
+ output: part.result,
+ providerExecuted: true,
+ dynamic: part.dynamic
+ });
  }
- return {
+ break;
+ }
+ if (part.isError) {
+ contentParts.push({
+ type: "tool-error",
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
+ input: toolCall.input,
+ error: part.result,
+ providerExecuted: true,
+ dynamic: toolCall.dynamic
+ });
+ } else {
+ contentParts.push({
  type: "tool-result",
  toolCallId: part.toolCallId,
  toolName: part.toolName,
@@ -3763,13 +4095,16 @@ function asContent({
  output: part.result,
  providerExecuted: true,
  dynamic: toolCall.dynamic
- };
+ });
  }
+ break;
  }
- }),
- ...toolOutputs,
- ...toolApprovalRequests
- ];
+ case "tool-approval-request": {
+ break;
+ }
+ }
+ }
+ return [...contentParts, ...toolOutputs, ...toolApprovalRequests];
  }

  // src/generate-text/stream-text.ts
@@ -3778,6 +4113,7 @@ import {
  } from "@ai-sdk/provider";
  import {
  createIdGenerator as createIdGenerator2,
+ DelayedPromise,
  isAbortError as isAbortError2
  } from "@ai-sdk/provider-utils";

@@ -3816,7 +4152,12 @@ function writeToServerResponse({
  headers,
  stream
  }) {
- response.writeHead(status != null ? status : 200, statusText, headers);
+ const statusCode = status != null ? status : 200;
+ if (statusText !== void 0) {
+ response.writeHead(statusCode, statusText, headers);
+ } else {
+ response.writeHead(statusCode, headers);
+ }
  const reader = stream.getReader();
  const read = async () => {
  try {
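
Note: the writeToServerResponse change above selects the exact writeHead overload. Node's ServerResponse treats the second argument as the status message only when it is a string, so passing undefined explicitly is ambiguous across implementations and typings. A sketch of the two call shapes (header values are illustrative):

    response.writeHead(200, { "content-type": "text/event-stream" });       // no statusText
    response.writeHead(200, "OK", { "content-type": "text/event-stream" }); // with statusText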
@@ -4072,8 +4413,7 @@ var uiMessageChunkSchema = lazySchema(
  "content-filter",
  "tool-calls",
  "error",
- "other",
- "unknown"
+ "other"
  ]).optional(),
  messageMetadata: z7.unknown().optional()
  }),
@@ -4086,42 +4426,9 @@ var uiMessageChunkSchema = lazySchema(
  })
  ])
  )
- );
- function isDataUIMessageChunk(chunk) {
- return chunk.type.startsWith("data-");
- }
-
- // src/util/merge-objects.ts
- function mergeObjects(base, overrides) {
- if (base === void 0 && overrides === void 0) {
- return void 0;
- }
- if (base === void 0) {
- return overrides;
- }
- if (overrides === void 0) {
- return base;
- }
- const result = { ...base };
- for (const key in overrides) {
- if (Object.prototype.hasOwnProperty.call(overrides, key)) {
- const overridesValue = overrides[key];
- if (overridesValue === void 0)
- continue;
- const baseValue = key in base ? base[key] : void 0;
- const isSourceObject = overridesValue !== null && typeof overridesValue === "object" && !Array.isArray(overridesValue) && !(overridesValue instanceof Date) && !(overridesValue instanceof RegExp);
- const isTargetObject = baseValue !== null && baseValue !== void 0 && typeof baseValue === "object" && !Array.isArray(baseValue) && !(baseValue instanceof Date) && !(baseValue instanceof RegExp);
- if (isSourceObject && isTargetObject) {
- result[key] = mergeObjects(
- baseValue,
- overridesValue
- );
- } else {
- result[key] = overridesValue;
- }
- }
- }
- return result;
+ );
+ function isDataUIMessageChunk(chunk) {
+ return chunk.type.startsWith("data-");
  }

  // src/ui/ui-messages.ts
@@ -4137,21 +4444,23 @@ function isFileUIPart(part) {
  function isReasoningUIPart(part) {
  return part.type === "reasoning";
  }
- function isToolUIPart(part) {
+ function isStaticToolUIPart(part) {
  return part.type.startsWith("tool-");
  }
  function isDynamicToolUIPart(part) {
  return part.type === "dynamic-tool";
  }
- function isToolOrDynamicToolUIPart(part) {
- return isToolUIPart(part) || isDynamicToolUIPart(part);
+ function isToolUIPart(part) {
+ return isStaticToolUIPart(part) || isDynamicToolUIPart(part);
  }
- function getToolName(part) {
+ var isToolOrDynamicToolUIPart = isToolUIPart;
+ function getStaticToolName(part) {
  return part.type.split("-").slice(1).join("-");
  }
- function getToolOrDynamicToolName(part) {
- return isDynamicToolUIPart(part) ? part.toolName : getToolName(part);
+ function getToolName(part) {
+ return isDynamicToolUIPart(part) ? part.toolName : getStaticToolName(part);
  }
+ var getToolOrDynamicToolName = getToolName;

  // src/ui/process-ui-message-stream.ts
  function createStreamingUIMessageState({
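
Note: per the renames above, isToolUIPart now matches both static ("tool-*") and dynamic tool parts (the old narrow check survives as isStaticToolUIPart), and getToolName resolves the name for either kind; the previous names remain as aliases. A sketch, assuming a UI message with parts:

    for (const part of message.parts) {
      if (isToolUIPart(part)) {
        // true for both "tool-<name>" and "dynamic-tool" parts
        console.log(getToolName(part), part.toolCallId);
      }
    }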
@@ -4183,11 +4492,9 @@ function processUIMessageStream({
  new TransformStream({
  async transform(chunk, controller) {
  await runUpdateMessageJob(async ({ state, write }) => {
- var _a15, _b, _c, _d;
+ var _a14, _b, _c, _d;
  function getToolInvocation(toolCallId) {
- const toolInvocations = state.message.parts.filter(
- isToolOrDynamicToolUIPart
- );
+ const toolInvocations = state.message.parts.filter(isToolUIPart);
  const toolInvocation = toolInvocations.find(
  (invocation) => invocation.toolCallId === toolCallId
  );
@@ -4199,9 +4506,9 @@ function processUIMessageStream({
  return toolInvocation;
  }
  function updateToolPart(options) {
- var _a16;
+ var _a15;
  const part = state.message.parts.find(
- (part2) => isToolUIPart(part2) && part2.toolCallId === options.toolCallId
+ (part2) => isStaticToolUIPart(part2) && part2.toolCallId === options.toolCallId
  );
  const anyOptions = options;
  const anyPart = part;
@@ -4215,7 +4522,7 @@ function processUIMessageStream({
  if (options.title !== void 0) {
  anyPart.title = options.title;
  }
- anyPart.providerExecuted = (_a16 = anyOptions.providerExecuted) != null ? _a16 : part.providerExecuted;
+ anyPart.providerExecuted = (_a15 = anyOptions.providerExecuted) != null ? _a15 : part.providerExecuted;
  if (anyOptions.providerMetadata != null && part.state === "input-available") {
  part.callProviderMetadata = anyOptions.providerMetadata;
  }
@@ -4236,7 +4543,7 @@ function processUIMessageStream({
  }
  }
  function updateDynamicToolPart(options) {
- var _a16, _b2;
+ var _a15, _b2;
  const part = state.message.parts.find(
  (part2) => part2.type === "dynamic-tool" && part2.toolCallId === options.toolCallId
  );
@@ -4248,7 +4555,7 @@ function processUIMessageStream({
  anyPart.input = anyOptions.input;
  anyPart.output = anyOptions.output;
  anyPart.errorText = anyOptions.errorText;
- anyPart.rawInput = (_a16 = anyOptions.rawInput) != null ? _a16 : anyPart.rawInput;
+ anyPart.rawInput = (_a15 = anyOptions.rawInput) != null ? _a15 : anyPart.rawInput;
  anyPart.preliminary = anyOptions.preliminary;
  if (options.title !== void 0) {
  anyPart.title = options.title;
@@ -4301,7 +4608,7 @@ function processUIMessageStream({
  case "text-delta": {
  const textPart = state.activeTextParts[chunk.id];
  textPart.text += chunk.delta;
- textPart.providerMetadata = (_a15 = chunk.providerMetadata) != null ? _a15 : textPart.providerMetadata;
+ textPart.providerMetadata = (_a14 = chunk.providerMetadata) != null ? _a14 : textPart.providerMetadata;
  write();
  break;
  }
@@ -4373,7 +4680,7 @@ function processUIMessageStream({
  break;
  }
  case "tool-input-start": {
- const toolInvocations = state.message.parts.filter(isToolUIPart);
+ const toolInvocations = state.message.parts.filter(isStaticToolUIPart);
  state.partialToolCalls[chunk.toolCallId] = {
  text: "",
  toolName: chunk.toolName,
@@ -4514,7 +4821,7 @@ function processUIMessageStream({
  } else {
  updateToolPart({
  toolCallId: chunk.toolCallId,
- toolName: getToolName(toolInvocation),
+ toolName: getStaticToolName(toolInvocation),
  state: "output-available",
  input: toolInvocation.input,
  output: chunk.output,
@@ -4541,7 +4848,7 @@ function processUIMessageStream({
  } else {
  updateToolPart({
  toolCallId: chunk.toolCallId,
- toolName: getToolName(toolInvocation),
+ toolName: getStaticToolName(toolInvocation),
  state: "output-error",
  input: toolInvocation.input,
  rawInput: toolInvocation.rawInput,
@@ -4740,11 +5047,11 @@ function createAsyncIterableStream(source) {
  const reader = this.getReader();
  let finished = false;
  async function cleanup(cancelStream) {
- var _a15;
+ var _a14;
  finished = true;
  try {
  if (cancelStream) {
- await ((_a15 = reader.cancel) == null ? void 0 : _a15.call(reader));
+ await ((_a14 = reader.cancel) == null ? void 0 : _a14.call(reader));
  }
  } finally {
  try {
@@ -4908,48 +5215,10 @@ function createStitchableStream() {
  };
  }

- // src/util/delayed-promise.ts
- var DelayedPromise = class {
- constructor() {
- this.status = { type: "pending" };
- this._resolve = void 0;
- this._reject = void 0;
- }
- get promise() {
- if (this._promise) {
- return this._promise;
- }
- this._promise = new Promise((resolve3, reject) => {
- if (this.status.type === "resolved") {
- resolve3(this.status.value);
- } else if (this.status.type === "rejected") {
- reject(this.status.error);
- }
- this._resolve = resolve3;
- this._reject = reject;
- });
- return this._promise;
- }
- resolve(value) {
- var _a15;
- this.status = { type: "resolved", value };
- if (this._promise) {
- (_a15 = this._resolve) == null ? void 0 : _a15.call(this, value);
- }
- }
- reject(error) {
- var _a15;
- this.status = { type: "rejected", error };
- if (this._promise) {
- (_a15 = this._reject) == null ? void 0 : _a15.call(this, error);
- }
- }
- };
-
  // src/util/now.ts
  function now() {
- var _a15, _b;
- return (_b = (_a15 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a15.now()) != null ? _b : Date.now();
+ var _a14, _b;
+ return (_b = (_a14 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a14.now()) != null ? _b : Date.now();
  }

  // src/generate-text/run-tools-transformation.ts
@@ -5020,8 +5289,9 @@ function runToolsTransformation({
  case "finish": {
  finishChunk = {
  type: "finish",
- finishReason: chunk.finishReason,
- usage: chunk.usage,
+ finishReason: chunk.finishReason.unified,
+ rawFinishReason: chunk.finishReason.raw,
+ usage: asLanguageModelUsage(chunk.usage),
  providerMetadata: chunk.providerMetadata
  };
  break;
@@ -5125,6 +5395,9 @@ function runToolsTransformation({
  }
  break;
  }
+ case "tool-approval-request": {
+ break;
+ }
  default: {
  const _exhaustiveCheck = chunkType;
  throw new Error(`Unhandled chunk type: ${_exhaustiveCheck}`);
@@ -5260,7 +5533,7 @@ function createOutputTransformStream(output) {
  }
  return new TransformStream({
  async transform(chunk, controller) {
- var _a15;
+ var _a14;
  if (chunk.type === "finish-step" && textChunk.length > 0) {
  publishTextChunk({ controller });
  }
@@ -5287,7 +5560,7 @@ function createOutputTransformStream(output) {
  }
  text2 += chunk.text;
  textChunk += chunk.text;
- textProviderMetadata = (_a15 = chunk.providerMetadata) != null ? _a15 : textProviderMetadata;
+ textProviderMetadata = (_a14 = chunk.providerMetadata) != null ? _a14 : textProviderMetadata;
  const result = await output.parsePartialOutput({ text: text2 });
  if (result !== void 0) {
  const currentJson = JSON.stringify(result.partial);
@@ -5333,6 +5606,7 @@ var DefaultStreamTextResult = class {
  }) {
  this._totalUsage = new DelayedPromise();
  this._finishReason = new DelayedPromise();
+ this._rawFinishReason = new DelayedPromise();
  this._steps = new DelayedPromise();
  this.outputSpecification = output;
  this.includeRawChunks = includeRawChunks;
@@ -5341,16 +5615,18 @@ var DefaultStreamTextResult = class {
  let recordedContent = [];
  const recordedResponseMessages = [];
  let recordedFinishReason = void 0;
+ let recordedRawFinishReason = void 0;
  let recordedTotalUsage = void 0;
  let recordedRequest = {};
  let recordedWarnings = [];
  const recordedSteps = [];
+ const pendingDeferredToolCalls = /* @__PURE__ */ new Map();
  let rootSpan;
  let activeTextContent = {};
  let activeReasoningContent = {};
  const eventProcessor = new TransformStream({
  async transform(chunk, controller) {
- var _a15, _b, _c, _d;
+ var _a14, _b, _c, _d;
  controller.enqueue(chunk);
  const { part } = chunk;
  if (part.type === "text-delta" || part.type === "reasoning-delta" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-input-start" || part.type === "tool-input-delta" || part.type === "raw") {
@@ -5380,7 +5656,7 @@ var DefaultStreamTextResult = class {
  return;
  }
  activeText.text += part.text;
- activeText.providerMetadata = (_a15 = part.providerMetadata) != null ? _a15 : activeText.providerMetadata;
+ activeText.providerMetadata = (_a14 = part.providerMetadata) != null ? _a14 : activeText.providerMetadata;
  }
  if (part.type === "text-end") {
  const activeText = activeTextContent[part.id];
@@ -5461,13 +5737,14 @@ var DefaultStreamTextResult = class {
  recordedWarnings = part.warnings;
  }
  if (part.type === "finish-step") {
- const stepMessages = toResponseMessages({
+ const stepMessages = await toResponseMessages({
  content: recordedContent,
  tools
  });
  const currentStepResult = new DefaultStepResult({
  content: recordedContent,
  finishReason: part.finishReason,
+ rawFinishReason: part.rawFinishReason,
  usage: part.usage,
  warnings: recordedWarnings,
  request: recordedRequest,
@@ -5490,6 +5767,7 @@ var DefaultStreamTextResult = class {
  if (part.type === "finish") {
  recordedTotalUsage = part.totalUsage;
  recordedFinishReason = part.finishReason;
+ recordedRawFinishReason = part.rawFinishReason;
  }
  },
  async flush(controller) {
@@ -5499,22 +5777,21 @@ var DefaultStreamTextResult = class {
  message: "No output generated. Check the stream for errors."
  });
  self._finishReason.reject(error);
+ self._rawFinishReason.reject(error);
  self._totalUsage.reject(error);
  self._steps.reject(error);
  return;
  }
- const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
- const totalUsage = recordedTotalUsage != null ? recordedTotalUsage : {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- };
+ const finishReason = recordedFinishReason != null ? recordedFinishReason : "other";
+ const totalUsage = recordedTotalUsage != null ? recordedTotalUsage : createNullLanguageModelUsage();
  self._finishReason.resolve(finishReason);
+ self._rawFinishReason.resolve(recordedRawFinishReason);
  self._totalUsage.resolve(totalUsage);
  self._steps.resolve(recordedSteps);
  const finalStep = recordedSteps[recordedSteps.length - 1];
  await (onFinish == null ? void 0 : onFinish({
- finishReason,
+ finishReason: finalStep.finishReason,
+ rawFinishReason: finalStep.rawFinishReason,
  totalUsage,
  usage: finalStep.usage,
  content: finalStep.content,
@@ -5533,7 +5810,8 @@ var DefaultStreamTextResult = class {
  response: finalStep.response,
  warnings: finalStep.warnings,
  providerMetadata: finalStep.providerMetadata,
- steps: recordedSteps
+ steps: recordedSteps,
+ experimental_context
  }));
  rootSpan.setAttributes(
  await selectTelemetryAttributes({
@@ -5543,8 +5821,8 @@ var DefaultStreamTextResult = class {
  "ai.response.text": { output: () => finalStep.text },
  "ai.response.toolCalls": {
  output: () => {
- var _a15;
- return ((_a15 = finalStep.toolCalls) == null ? void 0 : _a15.length) ? JSON.stringify(finalStep.toolCalls) : void 0;
+ var _a14;
+ return ((_a14 = finalStep.toolCalls) == null ? void 0 : _a14.length) ? JSON.stringify(finalStep.toolCalls) : void 0;
  }
  },
  "ai.response.providerMetadata": JSON.stringify(
@@ -5688,31 +5966,35 @@ var DefaultStreamTextResult = class {
  }
  })
  );
+ const content = [];
+ for (const output2 of toolOutputs) {
+ content.push({
+ type: "tool-result",
+ toolCallId: output2.toolCallId,
+ toolName: output2.toolName,
+ output: await createToolModelOutput({
+ toolCallId: output2.toolCallId,
+ input: output2.input,
+ tool: tools == null ? void 0 : tools[output2.toolName],
+ output: output2.type === "tool-result" ? output2.output : output2.error,
+ errorMode: output2.type === "tool-error" ? "json" : "none"
+ })
+ });
+ }
+ for (const toolApproval of deniedToolApprovals) {
+ content.push({
+ type: "tool-result",
+ toolCallId: toolApproval.toolCall.toolCallId,
+ toolName: toolApproval.toolCall.toolName,
+ output: {
+ type: "execution-denied",
+ reason: toolApproval.approvalResponse.reason
+ }
+ });
+ }
  initialResponseMessages.push({
  role: "tool",
- content: [
- // add regular tool results for approved tool calls:
- ...toolOutputs.map((output2) => ({
- type: "tool-result",
- toolCallId: output2.toolCallId,
- toolName: output2.toolName,
- output: createToolModelOutput({
- tool: tools == null ? void 0 : tools[output2.toolName],
- output: output2.type === "tool-result" ? output2.output : output2.error,
- errorMode: output2.type === "tool-error" ? "json" : "none"
- })
- })),
- // add execution denied tool results for denied tool approvals:
- ...deniedToolApprovals.map((toolApproval) => ({
- type: "tool-result",
- toolCallId: toolApproval.toolCall.toolCallId,
- toolName: toolApproval.toolCall.toolName,
- output: {
- type: "execution-denied",
- reason: toolApproval.approvalResponse.reason
- }
- }))
- ]
+ content
  });
  } finally {
  toolExecutionStepStreamController == null ? void 0 : toolExecutionStepStreamController.close();
@@ -5724,7 +6006,7 @@ var DefaultStreamTextResult = class {
  responseMessages,
  usage
  }) {
- var _a15, _b, _c, _d, _e;
+ var _a14, _b, _c, _d, _e, _f;
  const includeRawChunks2 = self.includeRawChunks;
  stepFinish = new DelayedPromise();
  const stepInputMessages = [...initialMessages, ...responseMessages];
@@ -5732,10 +6014,11 @@ var DefaultStreamTextResult = class {
  model,
  steps: recordedSteps,
  stepNumber: recordedSteps.length,
- messages: stepInputMessages
+ messages: stepInputMessages,
+ experimental_context
  }));
  const stepModel = resolveLanguageModel(
- (_a15 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a15 : model
+ (_a14 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a14 : model
  );
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
@@ -5750,6 +6033,11 @@ var DefaultStreamTextResult = class {
  toolChoice: (_d = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _d : toolChoice,
  activeTools: (_e = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _e : activeTools
  });
+ experimental_context = (_f = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _f : experimental_context;
+ const stepProviderOptions = mergeObjects(
+ providerOptions,
+ prepareStepResult == null ? void 0 : prepareStepResult.providerOptions
+ );
  const {
  result: { stream: stream2, response, request },
  doStreamSpan,
@@ -5803,7 +6091,7 @@ var DefaultStreamTextResult = class {
  toolChoice: stepToolChoice,
  responseFormat: await (output == null ? void 0 : output.responseFormat),
  prompt: promptMessages,
- providerOptions,
+ providerOptions: stepProviderOptions,
  abortSignal,
  headers,
  includeRawChunks: includeRawChunks2
@@ -5828,12 +6116,9 @@ var DefaultStreamTextResult = class {
  const stepToolOutputs = [];
  let warnings;
  const activeToolCallToolNames = {};
- let stepFinishReason = "unknown";
- let stepUsage = {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- };
+ let stepFinishReason = "other";
+ let stepRawFinishReason = void 0;
+ let stepUsage = createNullLanguageModelUsage();
  let stepProviderMetadata;
  let stepFirstChunk = true;
  let stepResponse = {
@@ -5846,7 +6131,7 @@ var DefaultStreamTextResult = class {
  streamWithToolResults.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a16, _b2, _c2, _d2, _e2;
+ var _a15, _b2, _c2, _d2, _e2;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -5919,7 +6204,7 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a16 = chunk.id) != null ? _a16 : stepResponse.id,
+ id: (_a15 = chunk.id) != null ? _a15 : stepResponse.id,
  timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
  modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
  };
@@ -5928,6 +6213,7 @@ var DefaultStreamTextResult = class {
  case "finish": {
  stepUsage = chunk.usage;
  stepFinishReason = chunk.finishReason;
+ stepRawFinishReason = chunk.rawFinishReason;
  stepProviderMetadata = chunk.providerMetadata;
  const msToFinish = now2() - startTimestampMs;
  doStreamSpan.addEvent("ai.stream.finish");
@@ -6040,6 +6326,7 @@ var DefaultStreamTextResult = class {
  controller.enqueue({
  type: "finish-step",
  finishReason: stepFinishReason,
+ rawFinishReason: stepRawFinishReason,
  usage: stepUsage,
  providerMetadata: stepProviderMetadata,
  response: {
@@ -6055,14 +6342,38 @@ var DefaultStreamTextResult = class {
  const clientToolOutputs = stepToolOutputs.filter(
  (toolOutput) => toolOutput.providerExecuted !== true
  );
- if (clientToolCalls.length > 0 && // all current tool calls have outputs (incl. execution errors):
- clientToolOutputs.length === clientToolCalls.length && // continue until a stop condition is met:
- !await isStopConditionMet({
- stopConditions,
- steps: recordedSteps
- })) {
+ for (const toolCall of stepToolCalls) {
+ if (toolCall.providerExecuted !== true)
+ continue;
+ const tool2 = tools == null ? void 0 : tools[toolCall.toolName];
+ if ((tool2 == null ? void 0 : tool2.type) === "provider" && tool2.supportsDeferredResults) {
+ const hasResultInStep = stepToolOutputs.some(
+ (output2) => output2.type === "tool-result" && output2.toolCallId === toolCall.toolCallId
+ );
+ if (!hasResultInStep) {
+ pendingDeferredToolCalls.set(toolCall.toolCallId, {
+ toolName: toolCall.toolName
+ });
+ }
+ }
+ }
+ for (const output2 of stepToolOutputs) {
+ if (output2.type === "tool-result") {
+ pendingDeferredToolCalls.delete(output2.toolCallId);
+ }
+ }
+ if (
+ // Continue if:
+ // 1. There are client tool calls that have all been executed, OR
+ // 2. There are pending deferred results from provider-executed tools
+ (clientToolCalls.length > 0 && clientToolOutputs.length === clientToolCalls.length || pendingDeferredToolCalls.size > 0) && // continue until a stop condition is met:
+ !await isStopConditionMet({
+ stopConditions,
+ steps: recordedSteps
+ })
+ ) {
  responseMessages.push(
- ...toResponseMessages({
+ ...await toResponseMessages({
  content: (
  // use transformed content to create the messages for the next step:
  recordedSteps[recordedSteps.length - 1].content
@@ -6087,6 +6398,7 @@ var DefaultStreamTextResult = class {
  controller.enqueue({
  type: "finish",
  finishReason: stepFinishReason,
+ rawFinishReason: stepRawFinishReason,
  totalUsage: combinedUsage
  });
  self.closeStream();
@@ -6099,11 +6411,7 @@ var DefaultStreamTextResult = class {
  await streamStep({
  currentStep: 0,
  responseMessages: initialResponseMessages,
- usage: {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- }
+ usage: createNullLanguageModelUsage()
  });
  }
  }).catch((error) => {
@@ -6184,6 +6492,10 @@ var DefaultStreamTextResult = class {
  this.consumeStream();
  return this._finishReason.promise;
  }
+ get rawFinishReason() {
+ this.consumeStream();
+ return this._rawFinishReason.promise;
+ }
  /**
  Split out a new stream from the original stream.
  The original stream is replaced to allow for further splitting,
@@ -6222,14 +6534,14 @@ var DefaultStreamTextResult = class {
  );
  }
  async consumeStream(options) {
- var _a15;
+ var _a14;
  try {
  await consumeStream({
  stream: this.fullStream,
  onError: options == null ? void 0 : options.onError
  });
  } catch (error) {
- (_a15 = options == null ? void 0 : options.onError) == null ? void 0 : _a15.call(options, error);
+ (_a14 = options == null ? void 0 : options.onError) == null ? void 0 : _a14.call(options, error);
  }
  }
  get experimental_partialOutputStream() {
@@ -6250,8 +6562,8 @@ var DefaultStreamTextResult = class {
  }
  get output() {
  return this.finalStep.then((step) => {
- var _a15;
- const output = (_a15 = this.outputSpecification) != null ? _a15 : text();
+ var _a14;
+ const output = (_a14 = this.outputSpecification) != null ? _a14 : text();
  return output.parseCompleteOutput(
  { text: step.text },
  {
@@ -6278,8 +6590,8 @@ var DefaultStreamTextResult = class {
  responseMessageId: generateMessageId
  }) : void 0;
  const isDynamic = (part) => {
- var _a15;
- const tool2 = (_a15 = this.tools) == null ? void 0 : _a15[part.toolName];
+ var _a14;
+ const tool2 = (_a14 = this.tools) == null ? void 0 : _a14[part.toolName];
  if (tool2 == null) {
  return part.dynamic;
  }
@@ -6617,10 +6929,10 @@ var ToolLoopAgent = class {
  return this.settings.tools;
  }
  async prepareCall(options) {
- var _a15, _b, _c, _d;
+ var _a14, _b, _c, _d;
  const baseCallArgs = {
  ...this.settings,
- stopWhen: (_a15 = this.settings.stopWhen) != null ? _a15 : stepCountIs(20),
+ stopWhen: (_a14 = this.settings.stopWhen) != null ? _a14 : stepCountIs(20),
  ...options
  };
  const preparedCallArgs = (_d = await ((_c = (_b = this.settings).prepareCall) == null ? void 0 : _c.call(_b, baseCallArgs))) != null ? _d : baseCallArgs;
@@ -6634,14 +6946,28 @@ var ToolLoopAgent = class {
  /**
  * Generates an output from the agent (non-streaming).
  */
- async generate(options) {
- return generateText(await this.prepareCall(options));
+ async generate({
+ abortSignal,
+ ...options
+ }) {
+ return generateText({
+ ...await this.prepareCall(options),
+ abortSignal
+ });
  }
  /**
  * Streams an output from the agent (streaming).
  */
- async stream(options) {
- return streamText(await this.prepareCall(options));
+ async stream({
+ abortSignal,
+ experimental_transform,
+ ...options
+ }) {
+ return streamText({
+ ...await this.prepareCall(options),
+ abortSignal,
+ experimental_transform
+ });
  }
  };

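Note: generate and stream now pull abortSignal (and, for stream, experimental_transform) out of the options and forward them past prepareCall. A sketch of cancelling an agent call (agent construction omitted, prompt value illustrative):

    const controller = new AbortController();
    const result = await agent.stream({
      prompt: "Summarize the report.",
      abortSignal: controller.signal
    });
    // later: controller.abort();
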
@@ -6741,7 +7067,7 @@ function readUIMessageStream({
  onError,
  terminateOnError = false
  }) {
- var _a15;
+ var _a14;
  let controller;
  let hasErrored = false;
  const outputStream = new ReadableStream({
@@ -6750,7 +7076,7 @@ function readUIMessageStream({
  }
  });
  const state = createStreamingUIMessageState({
- messageId: (_a15 = message == null ? void 0 : message.id) != null ? _a15 : "",
+ messageId: (_a14 = message == null ? void 0 : message.id) != null ? _a14 : "",
  lastMessage: message
  });
  const handleError = (error) => {
@@ -6786,13 +7112,13 @@ function readUIMessageStream({
  import {
  isNonNullable
  } from "@ai-sdk/provider-utils";
- function convertToModelMessages(messages, options) {
+ async function convertToModelMessages(messages, options) {
  const modelMessages = [];
  if (options == null ? void 0 : options.ignoreIncompleteToolCalls) {
  messages = messages.map((message) => ({
  ...message,
  parts: message.parts.filter(
- (part) => !isToolOrDynamicToolUIPart(part) || part.state !== "input-streaming" && part.state !== "input-available"
+ (part) => !isToolUIPart(part) || part.state !== "input-streaming" && part.state !== "input-available"
  )
  }));
  }
@@ -6819,7 +7145,7 @@ function convertToModelMessages(messages, options) {
  modelMessages.push({
  role: "user",
  content: message.parts.map((part) => {
- var _a15;
+ var _a14;
  if (isTextUIPart(part)) {
  return {
  type: "text",
@@ -6837,7 +7163,7 @@ function convertToModelMessages(messages, options) {
  };
  }
  if (isDataUIPart(part)) {
- return (_a15 = options == null ? void 0 : options.convertDataPart) == null ? void 0 : _a15.call(
+ return (_a14 = options == null ? void 0 : options.convertDataPart) == null ? void 0 : _a14.call(
  options,
  part
  );
@@ -6848,8 +7174,9 @@ function convertToModelMessages(messages, options) {
  }
  case "assistant": {
  if (message.parts != null) {
- let processBlock2 = function() {
- var _a15, _b, _c;
+ let block = [];
+ async function processBlock() {
+ var _a14, _b, _c, _d, _e, _f;
  if (block.length === 0) {
  return;
  }
@@ -6874,14 +7201,14 @@ function convertToModelMessages(messages, options) {
  text: part.text,
  providerOptions: part.providerMetadata
  });
- } else if (isToolOrDynamicToolUIPart(part)) {
- const toolName = getToolOrDynamicToolName(part);
+ } else if (isToolUIPart(part)) {
+ const toolName = getToolName(part);
  if (part.state !== "input-streaming") {
  content.push({
  type: "tool-call",
  toolCallId: part.toolCallId,
  toolName,
- input: part.state === "output-error" ? (_a15 = part.input) != null ? _a15 : "rawInput" in part ? part.rawInput : void 0 : part.input,
+ input: part.state === "output-error" ? (_a14 = part.input) != null ? _a14 : "rawInput" in part ? part.rawInput : void 0 : part.input,
  providerExecuted: part.providerExecuted,
  ...part.callProviderMetadata != null ? { providerOptions: part.callProviderMetadata } : {}
  });
@@ -6897,11 +7224,14 @@ function convertToModelMessages(messages, options) {
  type: "tool-result",
  toolCallId: part.toolCallId,
  toolName,
- output: createToolModelOutput({
+ output: await createToolModelOutput({
+ toolCallId: part.toolCallId,
+ input: part.input,
  output: part.state === "output-error" ? part.errorText : part.output,
  tool: (_b = options == null ? void 0 : options.tools) == null ? void 0 : _b[toolName],
  errorMode: part.state === "output-error" ? "json" : "none"
- })
+ }),
+ ...part.callProviderMetadata != null ? { providerOptions: part.callProviderMetadata } : {}
  });
  }
  }
@@ -6923,69 +7253,70 @@ function convertToModelMessages(messages, options) {
  content
  });
  const toolParts = block.filter(
- (part) => isToolOrDynamicToolUIPart(part) && part.providerExecuted !== true
+ (part) => isToolUIPart(part) && part.providerExecuted !== true
  );
  if (toolParts.length > 0) {
- modelMessages.push({
- role: "tool",
- content: toolParts.flatMap(
- (toolPart) => {
- var _a16, _b2, _c2;
- const outputs = [];
- if (((_a16 = toolPart.approval) == null ? void 0 : _a16.approved) != null) {
- outputs.push({
- type: "tool-approval-response",
- approvalId: toolPart.approval.id,
- approved: toolPart.approval.approved,
- reason: toolPart.approval.reason
+ {
+ const content2 = [];
+ for (const toolPart of toolParts) {
+ if (((_d = toolPart.approval) == null ? void 0 : _d.approved) != null) {
+ content2.push({
+ type: "tool-approval-response",
+ approvalId: toolPart.approval.id,
+ approved: toolPart.approval.approved,
+ reason: toolPart.approval.reason
+ });
+ }
+ switch (toolPart.state) {
+ case "output-denied": {
+ content2.push({
+ type: "tool-result",
+ toolCallId: toolPart.toolCallId,
+ toolName: getToolName(toolPart),
+ output: {
+ type: "error-text",
+ value: (_e = toolPart.approval.reason) != null ? _e : "Tool execution denied."
+ },
+ ...toolPart.callProviderMetadata != null ? { providerOptions: toolPart.callProviderMetadata } : {}
  });
+ break;
  }
- switch (toolPart.state) {
- case "output-denied": {
- outputs.push({
- type: "tool-result",
- toolCallId: toolPart.toolCallId,
- toolName: getToolOrDynamicToolName(toolPart),
- output: {
- type: "error-text",
- value: (_b2 = toolPart.approval.reason) != null ? _b2 : "Tool execution denied."
- }
- });
- break;
- }
- case "output-error":
- case "output-available": {
- const toolName = getToolOrDynamicToolName(toolPart);
- outputs.push({
- type: "tool-result",
+ case "output-error":
+ case "output-available": {
+ const toolName = getToolName(toolPart);
+ content2.push({
+ type: "tool-result",
+ toolCallId: toolPart.toolCallId,
+ toolName,
+ output: await createToolModelOutput({
  toolCallId: toolPart.toolCallId,
- toolName,
- output: createToolModelOutput({
- output: toolPart.state === "output-error" ? toolPart.errorText : toolPart.output,
- tool: (_c2 = options == null ? void 0 : options.tools) == null ? void 0 : _c2[toolName],
- errorMode: toolPart.state === "output-error" ? "text" : "none"
- })
- });
- break;
- }
+ input: toolPart.input,
+ output: toolPart.state === "output-error" ? toolPart.errorText : toolPart.output,
+ tool: (_f = options == null ? void 0 : options.tools) == null ? void 0 : _f[toolName],
+ errorMode: toolPart.state === "output-error" ? "text" : "none"
+ }),
+ ...toolPart.callProviderMetadata != null ? { providerOptions: toolPart.callProviderMetadata } : {}
+ });
+ break;
  }
- return outputs;
  }
- )
- });
+ }
+ modelMessages.push({
+ role: "tool",
+ content: content2
+ });
+ }
  }
  block = [];
- };
- var processBlock = processBlock2;
- let block = [];
+ }
  for (const part of message.parts) {
- if (isTextUIPart(part) || isReasoningUIPart(part) || isFileUIPart(part) || isToolOrDynamicToolUIPart(part) || isDataUIPart(part)) {
+ if (isTextUIPart(part) || isReasoningUIPart(part) || isFileUIPart(part) || isToolUIPart(part) || isDataUIPart(part)) {
  block.push(part);
  } else if (part.type === "step-start") {
- processBlock2();
+ await processBlock();
  }
  }
- processBlock2();
+ await processBlock();
  break;
  }
  break;
@@ -7001,7 +7332,6 @@ function convertToModelMessages(messages, options) {
  }
  return modelMessages;
  }
- var convertToCoreMessages = convertToModelMessages;

  // src/ui/validate-ui-messages.ts
  import { TypeValidationError as TypeValidationError3 } from "@ai-sdk/provider";
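
Note: convertToModelMessages is now async (tool outputs are serialized through the awaited createToolModelOutput), and the convertToCoreMessages alias was removed. Call sites need an await, sketched with assumed inputs:

    const modelMessages = await convertToModelMessages(uiMessages, {
      tools // optional: used to serialize tool outputs
    });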
@@ -7386,20 +7716,24 @@ async function validateUIMessages({
  // src/agent/create-agent-ui-stream.ts
  async function createAgentUIStream({
  agent,
- messages,
+ uiMessages,
  options,
+ abortSignal,
+ experimental_transform,
  ...uiMessageStreamOptions
  }) {
  const validatedMessages = await validateUIMessages({
- messages,
+ messages: uiMessages,
  tools: agent.tools
  });
- const modelMessages = convertToModelMessages(validatedMessages, {
+ const modelMessages = await convertToModelMessages(validatedMessages, {
  tools: agent.tools
  });
  const result = await agent.stream({
  prompt: modelMessages,
- options
+ options,
+ abortSignal,
+ experimental_transform
  });
  return result.toUIMessageStream(uiMessageStreamOptions);
  }
@@ -7479,7 +7813,7 @@ async function embed({
  }),
  tracer,
  fn: async (span) => {
- const { embedding, usage, response, providerMetadata } = await retry(
+ const { embedding, usage, warnings, response, providerMetadata } = await retry(
  () => (
  // nested spans to align with the embedMany telemetry data:
  recordSpan({
@@ -7498,7 +7832,7 @@ async function embed({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a15;
+ var _a14;
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
@@ -7506,7 +7840,7 @@ async function embed({
  providerOptions
  });
  const embedding2 = modelResponse.embeddings[0];
- const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+ const usage2 = (_a14 = modelResponse.usage) != null ? _a14 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  await selectTelemetryAttributes({
  telemetry,
@@ -7523,6 +7857,7 @@ async function embed({
  return {
  embedding: embedding2,
  usage: usage2,
+ warnings: modelResponse.warnings,
  providerMetadata: modelResponse.providerMetadata,
  response: modelResponse.response
  };
@@ -7539,10 +7874,12 @@ async function embed({
  }
  })
  );
+ logWarnings({ warnings, provider: model.provider, model: model.modelId });
  return new DefaultEmbedResult({
  value,
  embedding,
  usage,
+ warnings,
  providerMetadata,
  response
  });
@@ -7554,6 +7891,7 @@ var DefaultEmbedResult = class {
  this.value = options.value;
  this.embedding = options.embedding;
  this.usage = options.usage;
+ this.warnings = options.warnings;
  this.providerMetadata = options.providerMetadata;
  this.response = options.response;
  }
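
Note: embed results now carry provider warnings, which are also routed through logWarnings. A sketch of reading them (the embedding model is a placeholder):

    const { embedding, warnings } = await embed({
      model: embeddingModel,
      value: "sunny day at the beach"
    });
    if (warnings?.length) {
      console.warn(warnings);
    }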
@@ -7616,64 +7954,63 @@ async function embedMany({
7616
7954
  }),
7617
7955
  tracer,
7618
7956
  fn: async (span) => {
7619
- var _a15;
7957
+ var _a14;
7620
7958
  const [maxEmbeddingsPerCall, supportsParallelCalls] = await Promise.all([
7621
7959
  model.maxEmbeddingsPerCall,
7622
7960
  model.supportsParallelCalls
7623
7961
  ]);
7624
7962
  if (maxEmbeddingsPerCall == null || maxEmbeddingsPerCall === Infinity) {
7625
- const { embeddings: embeddings2, usage, response, providerMetadata: providerMetadata2 } = await retry(
7626
- () => {
7627
- return recordSpan({
7628
- name: "ai.embedMany.doEmbed",
7629
- attributes: selectTelemetryAttributes({
7630
- telemetry,
7631
- attributes: {
7632
- ...assembleOperationName({
7633
- operationId: "ai.embedMany.doEmbed",
7634
- telemetry
7635
- }),
7636
- ...baseTelemetryAttributes,
7637
- // specific settings that only make sense on the outer level:
7638
- "ai.values": {
7639
- input: () => values.map((value) => JSON.stringify(value))
7640
- }
7963
+ const { embeddings: embeddings2, usage, warnings: warnings2, response, providerMetadata: providerMetadata2 } = await retry(() => {
7964
+ return recordSpan({
7965
+ name: "ai.embedMany.doEmbed",
7966
+ attributes: selectTelemetryAttributes({
7967
+ telemetry,
7968
+ attributes: {
7969
+ ...assembleOperationName({
7970
+ operationId: "ai.embedMany.doEmbed",
7971
+ telemetry
7972
+ }),
7973
+ ...baseTelemetryAttributes,
7974
+ // specific settings that only make sense on the outer level:
7975
+ "ai.values": {
7976
+ input: () => values.map((value) => JSON.stringify(value))
7641
7977
  }
7642
- }),
7643
- tracer,
7644
- fn: async (doEmbedSpan) => {
7645
- var _a16;
7646
- const modelResponse = await model.doEmbed({
7647
- values,
7648
- abortSignal,
7649
- headers: headersWithUserAgent,
7650
- providerOptions
7651
- });
7652
- const embeddings3 = modelResponse.embeddings;
7653
- const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
7654
- doEmbedSpan.setAttributes(
7655
- await selectTelemetryAttributes({
7656
- telemetry,
7657
- attributes: {
7658
- "ai.embeddings": {
7659
- output: () => embeddings3.map(
7660
- (embedding) => JSON.stringify(embedding)
7661
- )
7662
- },
7663
- "ai.usage.tokens": usage2.tokens
7664
- }
7665
- })
7666
- );
7667
- return {
7668
- embeddings: embeddings3,
7669
- usage: usage2,
7670
- providerMetadata: modelResponse.providerMetadata,
7671
- response: modelResponse.response
7672
- };
7673
7978
  }
7674
- });
7675
- }
7676
- );
7979
+ }),
7980
+ tracer,
7981
+ fn: async (doEmbedSpan) => {
7982
+ var _a15;
7983
+ const modelResponse = await model.doEmbed({
7984
+ values,
7985
+ abortSignal,
7986
+ headers: headersWithUserAgent,
7987
+ providerOptions
7988
+ });
7989
+ const embeddings3 = modelResponse.embeddings;
7990
+ const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
7991
+ doEmbedSpan.setAttributes(
7992
+ await selectTelemetryAttributes({
7993
+ telemetry,
7994
+ attributes: {
7995
+ "ai.embeddings": {
7996
+ output: () => embeddings3.map(
7997
+ (embedding) => JSON.stringify(embedding)
7998
+ )
7999
+ },
8000
+ "ai.usage.tokens": usage2.tokens
8001
+ }
8002
+ })
8003
+ );
8004
+ return {
8005
+ embeddings: embeddings3,
8006
+ usage: usage2,
8007
+ warnings: modelResponse.warnings,
8008
+ providerMetadata: modelResponse.providerMetadata,
8009
+ response: modelResponse.response
8010
+ };
8011
+ }
8012
+ });
8013
+ });
7677
8014
  span.setAttributes(
7678
8015
  await selectTelemetryAttributes({
7679
8016
  telemetry,
@@ -7685,16 +8022,23 @@ async function embedMany({
7685
8022
  }
7686
8023
  })
7687
8024
  );
8025
+ logWarnings({
8026
+ warnings: warnings2,
8027
+ provider: model.provider,
8028
+ model: model.modelId
8029
+ });
7688
8030
  return new DefaultEmbedManyResult({
7689
8031
  values,
7690
8032
  embeddings: embeddings2,
7691
8033
  usage,
8034
+ warnings: warnings2,
7692
8035
  providerMetadata: providerMetadata2,
7693
8036
  responses: [response]
7694
8037
  });
7695
8038
  }
7696
8039
  const valueChunks = splitArray(values, maxEmbeddingsPerCall);
7697
8040
  const embeddings = [];
8041
+ const warnings = [];
7698
8042
  const responses = [];
7699
8043
  let tokens = 0;
7700
8044
  let providerMetadata;
@@ -7724,7 +8068,7 @@ async function embedMany({
7724
8068
  }),
7725
8069
  tracer,
7726
8070
  fn: async (doEmbedSpan) => {
7727
- var _a16;
8071
+ var _a15;
7728
8072
  const modelResponse = await model.doEmbed({
7729
8073
  values: chunk,
7730
8074
  abortSignal,
@@ -7732,7 +8076,7 @@ async function embedMany({
7732
8076
  providerOptions
7733
8077
  });
7734
8078
  const embeddings2 = modelResponse.embeddings;
7735
- const usage = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
8079
+ const usage = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
7736
8080
  doEmbedSpan.setAttributes(
7737
8081
  await selectTelemetryAttributes({
7738
8082
  telemetry,
@@ -7749,6 +8093,7 @@ async function embedMany({
7749
8093
  return {
7750
8094
  embeddings: embeddings2,
7751
8095
  usage,
8096
+ warnings: modelResponse.warnings,
7752
8097
  providerMetadata: modelResponse.providerMetadata,
7753
8098
  response: modelResponse.response
7754
8099
  };
@@ -7759,6 +8104,7 @@ async function embedMany({
7759
8104
  );
7760
8105
  for (const result of results) {
7761
8106
  embeddings.push(...result.embeddings);
8107
+ warnings.push(...result.warnings);
7762
8108
  responses.push(result.response);
7763
8109
  tokens += result.usage.tokens;
7764
8110
  if (result.providerMetadata) {
@@ -7769,7 +8115,7 @@ async function embedMany({
7769
8115
  result.providerMetadata
7770
8116
  )) {
7771
8117
  providerMetadata[providerName] = {
7772
- ...(_a15 = providerMetadata[providerName]) != null ? _a15 : {},
8118
+ ...(_a14 = providerMetadata[providerName]) != null ? _a14 : {},
7773
8119
  ...metadata
7774
8120
  };
7775
8121
  }
@@ -7788,10 +8134,16 @@ async function embedMany({
7788
8134
  }
7789
8135
  })
7790
8136
  );
8137
+ logWarnings({
8138
+ warnings,
8139
+ provider: model.provider,
8140
+ model: model.modelId
8141
+ });
7791
8142
  return new DefaultEmbedManyResult({
7792
8143
  values,
7793
8144
  embeddings,
7794
8145
  usage: { tokens },
8146
+ warnings,
7795
8147
  providerMetadata,
7796
8148
  responses
7797
8149
  });
@@ -7803,16 +8155,20 @@ var DefaultEmbedManyResult = class {
7803
8155
  this.values = options.values;
7804
8156
  this.embeddings = options.embeddings;
7805
8157
  this.usage = options.usage;
8158
+ this.warnings = options.warnings;
7806
8159
  this.providerMetadata = options.providerMetadata;
7807
8160
  this.responses = options.responses;
7808
8161
  }
7809
8162
  };
7810
8163
 
7811
8164
  // src/generate-image/generate-image.ts
7812
- import { withUserAgentSuffix as withUserAgentSuffix5 } from "@ai-sdk/provider-utils";
8165
+ import {
8166
+ convertBase64ToUint8Array as convertBase64ToUint8Array4,
8167
+ withUserAgentSuffix as withUserAgentSuffix5
8168
+ } from "@ai-sdk/provider-utils";
7813
8169
  async function generateImage({
7814
- model,
7815
- prompt,
8170
+ model: modelArg,
8171
+ prompt: promptArg,
7816
8172
  n = 1,
7817
8173
  maxImagesPerCall,
7818
8174
  size,
@@ -7823,14 +8179,8 @@ async function generateImage({
7823
8179
  abortSignal,
7824
8180
  headers
7825
8181
  }) {
7826
- var _a15, _b;
7827
- if (model.specificationVersion !== "v3") {
7828
- throw new UnsupportedModelVersionError({
7829
- version: model.specificationVersion,
7830
- provider: model.provider,
7831
- modelId: model.modelId
7832
- });
7833
- }
8182
+ var _a14, _b;
8183
+ const model = resolveImageModel(modelArg);
7834
8184
  const headersWithUserAgent = withUserAgentSuffix5(
7835
8185
  headers != null ? headers : {},
7836
8186
  `ai/${VERSION}`
@@ -7839,7 +8189,7 @@ async function generateImage({
7839
8189
  maxRetries: maxRetriesArg,
7840
8190
  abortSignal
7841
8191
  });
7842
- const maxImagesPerCallWithDefault = (_a15 = maxImagesPerCall != null ? maxImagesPerCall : await invokeModelMaxImagesPerCall(model)) != null ? _a15 : 1;
8192
+ const maxImagesPerCallWithDefault = (_a14 = maxImagesPerCall != null ? maxImagesPerCall : await invokeModelMaxImagesPerCall(model)) != null ? _a14 : 1;
7843
8193
  const callCount = Math.ceil(n / maxImagesPerCallWithDefault);
7844
8194
  const callImageCounts = Array.from({ length: callCount }, (_, i) => {
7845
8195
  if (i < callCount - 1) {
@@ -7850,9 +8200,12 @@ async function generateImage({
7850
8200
  });
7851
8201
  const results = await Promise.all(
7852
8202
  callImageCounts.map(
7853
- async (callImageCount) => retry(
7854
- () => model.doGenerate({
8203
+ async (callImageCount) => retry(() => {
8204
+ const { prompt, files, mask } = normalizePrompt(promptArg);
8205
+ return model.doGenerate({
7855
8206
  prompt,
8207
+ files,
8208
+ mask,
7856
8209
  n: callImageCount,
7857
8210
  abortSignal,
7858
8211
  headers: headersWithUserAgent,
@@ -7860,8 +8213,8 @@ async function generateImage({
7860
8213
  aspectRatio,
7861
8214
  seed,
7862
8215
  providerOptions: providerOptions != null ? providerOptions : {}
7863
- })
7864
- )
8216
+ });
8217
+ })
7865
8218
  )
7866
8219
  );
7867
8220
  const images = [];
@@ -7877,13 +8230,13 @@ async function generateImage({
7877
8230
  images.push(
7878
8231
  ...result.images.map(
7879
8232
  (image) => {
7880
- var _a16;
8233
+ var _a15;
7881
8234
  return new DefaultGeneratedFile({
7882
8235
  data: image,
7883
- mediaType: (_a16 = detectMediaType({
8236
+ mediaType: (_a15 = detectMediaType({
7884
8237
  data: image,
7885
8238
  signatures: imageMediaTypeSignatures
7886
- })) != null ? _a16 : "image/png"
8239
+ })) != null ? _a15 : "image/png"
7887
8240
  });
7888
8241
  }
7889
8242
  )
@@ -7894,10 +8247,26 @@ async function generateImage({
7894
8247
  }
7895
8248
  if (result.providerMetadata) {
7896
8249
  for (const [providerName, metadata] of Object.entries(result.providerMetadata)) {
7897
- (_b = providerMetadata[providerName]) != null ? _b : providerMetadata[providerName] = { images: [] };
7898
- providerMetadata[providerName].images.push(
7899
- ...result.providerMetadata[providerName].images
7900
- );
8250
+ if (providerName === "gateway") {
8251
+ const currentEntry = providerMetadata[providerName];
8252
+ if (currentEntry != null && typeof currentEntry === "object") {
8253
+ providerMetadata[providerName] = {
8254
+ ...currentEntry,
8255
+ ...metadata
8256
+ };
8257
+ } else {
8258
+ providerMetadata[providerName] = metadata;
8259
+ }
8260
+ const imagesValue = providerMetadata[providerName].images;
8261
+ if (Array.isArray(imagesValue) && imagesValue.length === 0) {
8262
+ delete providerMetadata[providerName].images;
8263
+ }
8264
+ } else {
8265
+ (_b = providerMetadata[providerName]) != null ? _b : providerMetadata[providerName] = { images: [] };
8266
+ providerMetadata[providerName].images.push(
8267
+ ...result.providerMetadata[providerName].images
8268
+ );
8269
+ }
7901
8270
  }
7902
8271
  }
7903
8272
  responses.push(result.response);
@@ -7935,6 +8304,50 @@ async function invokeModelMaxImagesPerCall(model) {
  modelId: model.modelId
  });
  }
+ function normalizePrompt(prompt) {
+ if (typeof prompt === "string") {
+ return { prompt, files: void 0, mask: void 0 };
+ }
+ return {
+ prompt: prompt.text,
+ files: prompt.images.map(toImageModelV3File),
+ mask: prompt.mask ? toImageModelV3File(prompt.mask) : void 0
+ };
+ }
+ function toImageModelV3File(dataContent) {
+ if (typeof dataContent === "string" && dataContent.startsWith("http")) {
+ return {
+ type: "url",
+ url: dataContent
+ };
+ }
+ if (typeof dataContent === "string" && dataContent.startsWith("data:")) {
+ const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(dataContent);
+ if (base64Content != null) {
+ const uint8Data2 = convertBase64ToUint8Array4(base64Content);
+ return {
+ type: "file",
+ data: uint8Data2,
+ mediaType: dataUrlMediaType || detectMediaType({
+ data: uint8Data2,
+ signatures: imageMediaTypeSignatures
+ }) || "image/png"
+ };
+ }
+ }
+ const uint8Data = convertDataContentToUint8Array(dataContent);
+ return {
+ type: "file",
+ data: uint8Data,
+ mediaType: detectMediaType({
+ data: uint8Data,
+ signatures: imageMediaTypeSignatures
+ }) || "image/png"
+ };
+ }
+
+ // src/generate-image/index.ts
+ var experimental_generateImage = generateImage;
 
  // src/generate-object/generate-object.ts
  import {
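Per normalizePrompt above, generateImage now accepts either a plain string or a structured prompt whose reference images and optional mask are normalized into v3 file parts; each entry may be an http(s) URL string, a data: URL, or raw bytes. A minimal usage sketch (the model instance and file names are placeholders, not part of this diff):

import { generateImage } from "ai";
import { readFileSync } from "node:fs";

const { images } = await generateImage({
  model: myImageModel, // placeholder: any v3 image model
  prompt: {
    text: "replace the masked region with a hot air balloon",
    images: ["https://example.com/room.png"], // becomes { type: "url", ... }
    mask: readFileSync("mask.png"), // raw bytes become { type: "file", ... }
  },
});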
@@ -8031,7 +8444,7 @@ var arrayOutputStrategy = (schema) => {
  isFirstDelta,
  isFinalDelta
  }) {
- var _a15;
+ var _a14;
  if (!isJSONObject(value) || !isJSONArray(value.elements)) {
  return {
  success: false,
@@ -8054,7 +8467,7 @@ var arrayOutputStrategy = (schema) => {
  }
  resultArray.push(result.value);
  }
- const publishedElementCount = (_a15 = latestObject == null ? void 0 : latestObject.length) != null ? _a15 : 0;
+ const publishedElementCount = (_a14 = latestObject == null ? void 0 : latestObject.length) != null ? _a14 : 0;
  let textDelta = "";
  if (isFirstDelta) {
  textDelta += "[";
@@ -8474,7 +8887,7 @@ async function generateObject(options) {
  }),
  tracer,
  fn: async (span) => {
- var _a15;
+ var _a14;
  let result;
  let finishReason;
  let usage;
@@ -8520,7 +8933,7 @@ async function generateObject(options) {
  }),
  tracer,
  fn: async (span2) => {
- var _a16, _b, _c, _d, _e, _f, _g, _h;
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  responseFormat: {
  type: "json",
@@ -8535,7 +8948,7 @@ async function generateObject(options) {
  headers: headersWithUserAgent
  });
  const responseData = {
- id: (_b = (_a16 = result2.response) == null ? void 0 : _a16.id) != null ? _b : generateId2(),
+ id: (_b = (_a15 = result2.response) == null ? void 0 : _a15.id) != null ? _b : generateId2(),
  timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
  headers: (_g = result2.response) == null ? void 0 : _g.headers,
@@ -8547,15 +8960,15 @@ async function generateObject(options) {
  throw new NoObjectGeneratedError({
  message: "No object generated: the model did not return a response.",
  response: responseData,
- usage: result2.usage,
- finishReason: result2.finishReason
+ usage: asLanguageModelUsage(result2.usage),
+ finishReason: result2.finishReason.unified
  });
  }
  span2.setAttributes(
  await selectTelemetryAttributes({
  telemetry,
  attributes: {
- "ai.response.finishReason": result2.finishReason,
+ "ai.response.finishReason": result2.finishReason.unified,
  "ai.response.object": { output: () => text2 },
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
@@ -8564,14 +8977,16 @@ async function generateObject(options) {
  result2.providerMetadata
  ),
  // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": result2.usage.inputTokens,
- "ai.usage.completionTokens": result2.usage.outputTokens,
+ "ai.usage.promptTokens": result2.usage.inputTokens.total,
+ "ai.usage.completionTokens": result2.usage.outputTokens.total,
  // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.response.finish_reasons": [
+ result2.finishReason.unified
+ ],
  "gen_ai.response.id": responseData.id,
  "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result2.usage.inputTokens,
- "gen_ai.usage.output_tokens": result2.usage.outputTokens
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens.total,
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens.total
  }
  })
  );
@@ -8585,11 +9000,11 @@ async function generateObject(options) {
  })
  );
  result = generateResult.objectText;
- finishReason = generateResult.finishReason;
- usage = generateResult.usage;
+ finishReason = generateResult.finishReason.unified;
+ usage = asLanguageModelUsage(generateResult.usage);
  warnings = generateResult.warnings;
  resultProviderMetadata = generateResult.providerMetadata;
- request = (_a15 = generateResult.request) != null ? _a15 : {};
+ request = (_a14 = generateResult.request) != null ? _a14 : {};
  response = generateResult.responseData;
  reasoning = generateResult.reasoning;
  logWarnings({
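The recurring finishReason.unified and usage.inputTokens.total reads above show the v3 model interface reporting a structured finish reason and nested token counts; asLanguageModelUsage maps the nested counts back to the flat public usage shape. A rough TypeScript sketch of the shapes implied by this diff (anything beyond the fields actually read here is an assumption):

// Read as `result2.finishReason.unified` above; "other" is the fallback
// used in the stream-object hunks further down.
type V3FinishReason = { unified: string };

// Read as `result2.usage.inputTokens.total` above.
type V3Usage = {
  inputTokens: { total: number | undefined };
  outputTokens: { total: number | undefined };
};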
@@ -8652,9 +9067,9 @@ var DefaultGenerateObjectResult = class {
  this.reasoning = options.reasoning;
  }
  toJsonResponse(init) {
- var _a15;
+ var _a14;
  return new Response(JSON.stringify(this.object), {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a14 = init == null ? void 0 : init.status) != null ? _a14 : 200,
  headers: prepareHeaders(init == null ? void 0 : init.headers, {
  "content-type": "application/json; charset=utf-8"
  })
@@ -8664,7 +9079,8 @@ var DefaultGenerateObjectResult = class {
 
  // src/generate-object/stream-object.ts
  import {
- createIdGenerator as createIdGenerator4
+ createIdGenerator as createIdGenerator4,
+ DelayedPromise as DelayedPromise2
  } from "@ai-sdk/provider-utils";
 
  // src/util/cosine-similarity.ts
@@ -8782,8 +9198,8 @@ function simulateReadableStream({
  chunkDelayInMs = 0,
  _internal
  }) {
- var _a15;
- const delay2 = (_a15 = _internal == null ? void 0 : _internal.delay) != null ? _a15 : delayFunction;
+ var _a14;
+ const delay2 = (_a14 = _internal == null ? void 0 : _internal.delay) != null ? _a14 : delayFunction;
  let index = 0;
  return new ReadableStream({
  async pull(controller) {
@@ -8888,13 +9304,13 @@ var DefaultStreamObjectResult = class {
  currentDate,
  now: now2
  }) {
- this._object = new DelayedPromise();
- this._usage = new DelayedPromise();
- this._providerMetadata = new DelayedPromise();
- this._warnings = new DelayedPromise();
- this._request = new DelayedPromise();
- this._response = new DelayedPromise();
- this._finishReason = new DelayedPromise();
+ this._object = new DelayedPromise2();
+ this._usage = new DelayedPromise2();
+ this._providerMetadata = new DelayedPromise2();
+ this._warnings = new DelayedPromise2();
+ this._request = new DelayedPromise2();
+ this._response = new DelayedPromise2();
+ this._finishReason = new DelayedPromise2();
  const model = resolveLanguageModel(modelArg);
  const { maxRetries, retry } = prepareRetries({
  maxRetries: maxRetriesArg,
@@ -9022,11 +9438,7 @@ var DefaultStreamObjectResult = class {
  );
  self._request.resolve(request != null ? request : {});
  let warnings;
- let usage = {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- };
+ let usage = createNullLanguageModelUsage();
  let finishReason;
  let providerMetadata;
  let object2;
@@ -9045,7 +9457,7 @@ var DefaultStreamObjectResult = class {
  const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a15, _b, _c;
+ var _a14, _b, _c;
  if (typeof chunk === "object" && chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -9095,7 +9507,7 @@ var DefaultStreamObjectResult = class {
  switch (chunk.type) {
  case "response-metadata": {
  fullResponse = {
- id: (_a15 = chunk.id) != null ? _a15 : fullResponse.id,
+ id: (_a14 = chunk.id) != null ? _a14 : fullResponse.id,
  timestamp: (_b = chunk.timestamp) != null ? _b : fullResponse.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : fullResponse.modelId
  };
@@ -9105,11 +9517,12 @@ var DefaultStreamObjectResult = class {
  if (textDelta !== "") {
  controller.enqueue({ type: "text-delta", textDelta });
  }
- finishReason = chunk.finishReason;
- usage = chunk.usage;
+ finishReason = chunk.finishReason.unified;
+ usage = asLanguageModelUsage(chunk.usage);
  providerMetadata = chunk.providerMetadata;
  controller.enqueue({
  ...chunk,
+ finishReason: chunk.finishReason.unified,
  usage,
  response: fullResponse
  });
@@ -9125,7 +9538,7 @@ var DefaultStreamObjectResult = class {
  ...fullResponse,
  headers: response == null ? void 0 : response.headers
  });
- self._finishReason.resolve(finishReason != null ? finishReason : "unknown");
+ self._finishReason.resolve(finishReason != null ? finishReason : "other");
  try {
  object2 = await parseAndValidateObjectResultWithRepair(
  accumulatedText,
@@ -9366,7 +9779,7 @@ async function generateSpeech({
  abortSignal,
  headers
  }) {
- var _a15;
+ var _a14;
  const resolvedModel = resolveSpeechModel(model);
  if (!resolvedModel) {
  throw new Error("Model could not be resolved");
@@ -9403,10 +9816,10 @@
  return new DefaultSpeechResult({
  audio: new DefaultGeneratedAudioFile({
  data: result.audio,
- mediaType: (_a15 = detectMediaType({
+ mediaType: (_a14 = detectMediaType({
  data: result.audio,
  signatures: audioMediaTypeSignatures
- })) != null ? _a15 : "audio/mp3"
+ })) != null ? _a14 : "audio/mp3"
  }),
  warnings: result.warnings,
  responses: [result.response],
@@ -9415,11 +9828,11 @@
  }
  var DefaultSpeechResult = class {
  constructor(options) {
- var _a15;
+ var _a14;
  this.audio = options.audio;
  this.warnings = options.warnings;
  this.responses = options.responses;
- this.providerMetadata = (_a15 = options.providerMetadata) != null ? _a15 : {};
+ this.providerMetadata = (_a14 = options.providerMetadata) != null ? _a14 : {};
  }
  };
 
@@ -9457,7 +9870,7 @@ function pruneMessages({
  const keptToolCallIds = /* @__PURE__ */ new Set();
  const keptApprovalIds = /* @__PURE__ */ new Set();
  if (keepLastMessagesCount != null) {
- for (const message of messages.slice(0, -keepLastMessagesCount)) {
+ for (const message of messages.slice(-keepLastMessagesCount)) {
  if ((message.role === "assistant" || message.role === "tool") && typeof message.content !== "string") {
  for (const part of message.content) {
  if (part.type === "tool-call" || part.type === "tool-result") {
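This is a behavioral fix, not a rename: slice(0, -n) returns every message except the last n (the messages being pruned), whereas slice(-n) returns the last n (the messages being kept), which is the set whose tool-call and approval ids must survive pruning. In short:

const msgs = ["m1", "m2", "m3", "m4", "m5"];
msgs.slice(0, -2); // ["m1", "m2", "m3"] -- old code: scanned the pruned messages
msgs.slice(-2);    // ["m4", "m5"]       -- new code: scans the kept messages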
@@ -9584,10 +9997,7 @@ function defaultEmbeddingSettingsMiddleware({
  return {
  specificationVersion: "v3",
  transformParams: async ({ params }) => {
- return mergeObjects(
- settings,
- params
- );
+ return mergeObjects(settings, params);
  }
  };
  }
@@ -9842,6 +10252,47 @@ function simulateStreamingMiddleware() {
  };
  }
 
+ // src/middleware/add-tool-input-examples-middleware.ts
+ function defaultFormatExample(example) {
+ return JSON.stringify(example.input);
+ }
+ function addToolInputExamplesMiddleware({
+ prefix = "Input Examples:",
+ format = defaultFormatExample,
+ remove = true
+ } = {}) {
+ return {
+ specificationVersion: "v3",
+ transformParams: async ({ params }) => {
+ var _a14;
+ if (!((_a14 = params.tools) == null ? void 0 : _a14.length)) {
+ return params;
+ }
+ const transformedTools = params.tools.map((tool2) => {
+ var _a15;
+ if (tool2.type !== "function" || !((_a15 = tool2.inputExamples) == null ? void 0 : _a15.length)) {
+ return tool2;
+ }
+ const formattedExamples = tool2.inputExamples.map((example, index) => format(example, index)).join("\n");
+ const examplesSection = `${prefix}
+ ${formattedExamples}`;
+ const toolDescription = tool2.description ? `${tool2.description}
+
+ ${examplesSection}` : examplesSection;
+ return {
+ ...tool2,
+ description: toolDescription,
+ inputExamples: remove ? void 0 : tool2.inputExamples
+ };
+ });
+ return {
+ ...params,
+ tools: transformedTools
+ };
+ }
+ };
+ }
+
  // src/middleware/wrap-language-model.ts
  var wrapLanguageModel = ({
  model,
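addToolInputExamplesMiddleware (newly exported at the bottom of this diff) folds each function tool's inputExamples into its description before the request reaches the model, and strips the field unless remove: false is passed. A hedged usage sketch via wrapLanguageModel; the base model is a placeholder and the tool-level inputExamples field is assumed from the parameter shape read above:

import { addToolInputExamplesMiddleware, wrapLanguageModel, tool } from "ai";
import { z } from "zod";

const model = wrapLanguageModel({
  model: baseModel, // placeholder: any language model instance
  middleware: addToolInputExamplesMiddleware({ prefix: "Input Examples:" }),
});

// Assumption: tools may carry `inputExamples` alongside their schema.
const getWeather = tool({
  description: "Get the weather for a city",
  inputSchema: z.object({ city: z.string() }),
  inputExamples: [{ input: { city: "Berlin" } }],
  execute: async ({ city }) => `Sunny in ${city}`,
});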
@@ -9866,7 +10317,7 @@ var doWrap = ({
  modelId,
  providerId
  }) => {
- var _a15, _b, _c;
+ var _a14, _b, _c;
  async function doTransform({
  params,
  type
@@ -9875,7 +10326,7 @@ var doWrap = ({
  }
  return {
  specificationVersion: "v3",
- provider: (_a15 = providerId != null ? providerId : overrideProvider == null ? void 0 : overrideProvider({ model })) != null ? _a15 : model.provider,
+ provider: (_a14 = providerId != null ? providerId : overrideProvider == null ? void 0 : overrideProvider({ model })) != null ? _a14 : model.provider,
  modelId: (_b = modelId != null ? modelId : overrideModelId == null ? void 0 : overrideModelId({ model })) != null ? _b : model.modelId,
  supportedUrls: (_c = overrideSupportedUrls == null ? void 0 : overrideSupportedUrls({ model })) != null ? _c : model.supportedUrls,
  async doGenerate(params) {
@@ -9922,7 +10373,7 @@ var doWrap2 = ({
  modelId,
  providerId
  }) => {
- var _a15, _b, _c, _d;
+ var _a14, _b, _c, _d;
  async function doTransform({
  params
  }) {
@@ -9930,7 +10381,7 @@ var doWrap2 = ({
  }
  return {
  specificationVersion: "v3",
- provider: (_a15 = providerId != null ? providerId : overrideProvider == null ? void 0 : overrideProvider({ model })) != null ? _a15 : model.provider,
+ provider: (_a14 = providerId != null ? providerId : overrideProvider == null ? void 0 : overrideProvider({ model })) != null ? _a14 : model.provider,
  modelId: (_b = modelId != null ? modelId : overrideModelId == null ? void 0 : overrideModelId({ model })) != null ? _b : model.modelId,
  maxEmbeddingsPerCall: (_c = overrideMaxEmbeddingsPerCall == null ? void 0 : overrideMaxEmbeddingsPerCall({ model })) != null ? _c : model.maxEmbeddingsPerCall,
  supportsParallelCalls: (_d = overrideSupportsParallelCalls == null ? void 0 : overrideSupportsParallelCalls({ model })) != null ? _d : model.supportsParallelCalls,
@@ -9946,32 +10397,19 @@ var doWrap2 = ({
  };
  };
 
- // src/model/as-image-model-v3.ts
- function asImageModelV3(model) {
- if (model.specificationVersion === "v3") {
- return model;
- }
- return new Proxy(model, {
- get(target, prop) {
- if (prop === "specificationVersion")
- return "v3";
- return target[prop];
- }
- });
- }
-
  // src/model/as-provider-v3.ts
  function asProviderV3(provider) {
  if ("specificationVersion" in provider && provider.specificationVersion === "v3") {
  return provider;
  }
+ const v2Provider = provider;
  return {
  specificationVersion: "v3",
- languageModel: (modelId) => asLanguageModelV3(provider.languageModel(modelId)),
- textEmbeddingModel: (modelId) => asEmbeddingModelV3(provider.textEmbeddingModel(modelId)),
- imageModel: (modelId) => asImageModelV3(provider.imageModel(modelId)),
- transcriptionModel: provider.transcriptionModel ? (modelId) => asTranscriptionModelV3(provider.transcriptionModel(modelId)) : void 0,
- speechModel: provider.speechModel ? (modelId) => asSpeechModelV3(provider.speechModel(modelId)) : void 0,
+ languageModel: (modelId) => asLanguageModelV3(v2Provider.languageModel(modelId)),
+ embeddingModel: (modelId) => asEmbeddingModelV3(v2Provider.textEmbeddingModel(modelId)),
+ imageModel: (modelId) => asImageModelV3(v2Provider.imageModel(modelId)),
+ transcriptionModel: v2Provider.transcriptionModel ? (modelId) => asTranscriptionModelV3(v2Provider.transcriptionModel(modelId)) : void 0,
+ speechModel: v2Provider.speechModel ? (modelId) => asSpeechModelV3(v2Provider.speechModel(modelId)) : void 0,
  rerankingModel: void 0
  // v2 providers don't have reranking models
  };
@@ -9989,7 +10427,7 @@ function wrapProvider({
  model: providerV3.languageModel(modelId),
  middleware: languageModelMiddleware
  }),
- textEmbeddingModel: providerV3.textEmbeddingModel,
+ embeddingModel: providerV3.embeddingModel,
  imageModel: providerV3.imageModel,
  transcriptionModel: providerV3.transcriptionModel,
  speechModel: providerV3.speechModel,
@@ -10003,7 +10441,7 @@
  } from "@ai-sdk/provider";
  function customProvider({
  languageModels,
- textEmbeddingModels,
+ embeddingModels,
  imageModels,
  transcriptionModels,
  speechModels,
@@ -10022,14 +10460,14 @@ function customProvider({
  }
  throw new NoSuchModelError2({ modelId, modelType: "languageModel" });
  },
- textEmbeddingModel(modelId) {
- if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
- return textEmbeddingModels[modelId];
+ embeddingModel(modelId) {
+ if (embeddingModels != null && modelId in embeddingModels) {
+ return embeddingModels[modelId];
  }
  if (fallbackProvider) {
- return fallbackProvider.textEmbeddingModel(modelId);
+ return fallbackProvider.embeddingModel(modelId);
  }
- throw new NoSuchModelError2({ modelId, modelType: "textEmbeddingModel" });
+ throw new NoSuchModelError2({ modelId, modelType: "embeddingModel" });
  },
  imageModel(modelId) {
  if (imageModels != null && modelId in imageModels) {
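The textEmbeddingModel(s) → embeddingModel(s) rename recurs through the rest of this diff (the v2-to-v3 adapter above, wrapProvider, and the provider registry below). A hedged sketch of the renamed customProvider option; the model instances are placeholders:

import { customProvider } from "ai";

const myProvider = customProvider({
  languageModels: { fast: myChatModel }, // placeholder model instances
  embeddingModels: { default: myEmbeddingModel }, // was `textEmbeddingModels` in the beta
});

const embedder = myProvider.embeddingModel("default"); // was `textEmbeddingModel(...)`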
@@ -10072,11 +10510,11 @@ function customProvider({
  var experimental_customProvider = customProvider;
 
  // src/registry/no-such-provider-error.ts
- import { AISDKError as AISDKError19, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
- var name14 = "AI_NoSuchProviderError";
- var marker14 = `vercel.ai.error.${name14}`;
- var symbol14 = Symbol.for(marker14);
- var _a14;
+ import { AISDKError as AISDKError18, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+ var name13 = "AI_NoSuchProviderError";
+ var marker13 = `vercel.ai.error.${name13}`;
+ var symbol13 = Symbol.for(marker13);
+ var _a13;
  var NoSuchProviderError = class extends NoSuchModelError3 {
  constructor({
  modelId,
@@ -10085,16 +10523,16 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
  availableProviders,
  message = `No such provider: ${providerId} (available providers: ${availableProviders.join()})`
  }) {
- super({ errorName: name14, modelId, modelType, message });
- this[_a14] = true;
+ super({ errorName: name13, modelId, modelType, message });
+ this[_a13] = true;
  this.providerId = providerId;
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return AISDKError19.hasMarker(error, marker14);
+ return AISDKError18.hasMarker(error, marker13);
  }
  };
- _a14 = symbol14;
+ _a13 = symbol13;
 
  // src/registry/provider-registry.ts
  import {
@@ -10153,10 +10591,10 @@ var DefaultProviderRegistry = class {
  return [id.slice(0, index), id.slice(index + this.separator.length)];
  }
  languageModel(id) {
- var _a15, _b;
+ var _a14, _b;
  const [providerId, modelId] = this.splitId(id, "languageModel");
- let model = (_b = (_a15 = this.getProvider(providerId, "languageModel")).languageModel) == null ? void 0 : _b.call(
- _a15,
+ let model = (_b = (_a14 = this.getProvider(providerId, "languageModel")).languageModel) == null ? void 0 : _b.call(
+ _a14,
  modelId
  );
  if (model == null) {
@@ -10170,34 +10608,34 @@ var DefaultProviderRegistry = class {
  }
  return model;
  }
- textEmbeddingModel(id) {
- var _a15;
- const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
- const provider = this.getProvider(providerId, "textEmbeddingModel");
- const model = (_a15 = provider.textEmbeddingModel) == null ? void 0 : _a15.call(provider, modelId);
+ embeddingModel(id) {
+ var _a14;
+ const [providerId, modelId] = this.splitId(id, "embeddingModel");
+ const provider = this.getProvider(providerId, "embeddingModel");
+ const model = (_a14 = provider.embeddingModel) == null ? void 0 : _a14.call(provider, modelId);
  if (model == null) {
  throw new NoSuchModelError4({
  modelId: id,
- modelType: "textEmbeddingModel"
+ modelType: "embeddingModel"
  });
  }
  return model;
  }
  imageModel(id) {
- var _a15;
+ var _a14;
  const [providerId, modelId] = this.splitId(id, "imageModel");
  const provider = this.getProvider(providerId, "imageModel");
- const model = (_a15 = provider.imageModel) == null ? void 0 : _a15.call(provider, modelId);
+ const model = (_a14 = provider.imageModel) == null ? void 0 : _a14.call(provider, modelId);
  if (model == null) {
  throw new NoSuchModelError4({ modelId: id, modelType: "imageModel" });
  }
  return model;
  }
  transcriptionModel(id) {
- var _a15;
+ var _a14;
  const [providerId, modelId] = this.splitId(id, "transcriptionModel");
  const provider = this.getProvider(providerId, "transcriptionModel");
- const model = (_a15 = provider.transcriptionModel) == null ? void 0 : _a15.call(provider, modelId);
+ const model = (_a14 = provider.transcriptionModel) == null ? void 0 : _a14.call(provider, modelId);
  if (model == null) {
  throw new NoSuchModelError4({
  modelId: id,
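Registry lookups follow the same rename, still using the providerId:modelId split shown in splitId above. A hedged sketch; the provider is a placeholder and createProviderRegistry is exported as experimental_createProviderRegistry in this build:

import { experimental_createProviderRegistry as createProviderRegistry } from "ai";

const registry = createProviderRegistry({ acme: myAcmeProvider }); // placeholder provider

const embedder = registry.embeddingModel("acme:embed-small"); // was registry.textEmbeddingModel(...)
// Unknown ids still throw NoSuchModelError, now with modelType: "embeddingModel".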
@@ -10207,20 +10645,20 @@ var DefaultProviderRegistry = class {
  return model;
  }
  speechModel(id) {
- var _a15;
+ var _a14;
  const [providerId, modelId] = this.splitId(id, "speechModel");
  const provider = this.getProvider(providerId, "speechModel");
- const model = (_a15 = provider.speechModel) == null ? void 0 : _a15.call(provider, modelId);
+ const model = (_a14 = provider.speechModel) == null ? void 0 : _a14.call(provider, modelId);
  if (model == null) {
  throw new NoSuchModelError4({ modelId: id, modelType: "speechModel" });
  }
  return model;
  }
  rerankingModel(id) {
- var _a15;
+ var _a14;
  const [providerId, modelId] = this.splitId(id, "rerankingModel");
  const provider = this.getProvider(providerId, "rerankingModel");
- const model = (_a15 = provider.rerankingModel) == null ? void 0 : _a15.call(provider, modelId);
+ const model = (_a14 = provider.rerankingModel) == null ? void 0 : _a14.call(provider, modelId);
  if (model == null) {
  throw new NoSuchModelError4({ modelId: id, modelType: "rerankingModel" });
  }
@@ -10277,7 +10715,7 @@ async function rerank({
  }),
  tracer,
  fn: async () => {
- var _a15, _b;
+ var _a14, _b;
  const { ranking, response, providerMetadata, warnings } = await retry(
  () => recordSpan({
  name: "ai.rerank.doRerank",
@@ -10341,7 +10779,7 @@ async function rerank({
  providerMetadata,
  response: {
  id: response == null ? void 0 : response.id,
- timestamp: (_a15 = response == null ? void 0 : response.timestamp) != null ? _a15 : /* @__PURE__ */ new Date(),
+ timestamp: (_a14 = response == null ? void 0 : response.timestamp) != null ? _a14 : /* @__PURE__ */ new Date(),
  modelId: (_b = response == null ? void 0 : response.modelId) != null ? _b : model.modelId,
  headers: response == null ? void 0 : response.headers,
  body: response == null ? void 0 : response.body
@@ -10366,8 +10804,8 @@ var DefaultRerankResult = class {
  import { withUserAgentSuffix as withUserAgentSuffix8 } from "@ai-sdk/provider-utils";
 
  // src/error/no-transcript-generated-error.ts
- import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
- var NoTranscriptGeneratedError = class extends AISDKError20 {
+ import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+ var NoTranscriptGeneratedError = class extends AISDKError19 {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -10401,16 +10839,16 @@ async function transcribe({
  const audioData = audio instanceof URL ? (await download({ url: audio })).data : convertDataContentToUint8Array(audio);
  const result = await retry(
  () => {
- var _a15;
+ var _a14;
  return resolvedModel.doGenerate({
  audio: audioData,
  abortSignal,
  headers: headersWithUserAgent,
  providerOptions,
- mediaType: (_a15 = detectMediaType({
+ mediaType: (_a14 = detectMediaType({
  data: audioData,
  signatures: audioMediaTypeSignatures
- })) != null ? _a15 : "audio/wav"
+ })) != null ? _a14 : "audio/wav"
  });
  }
  );
@@ -10434,14 +10872,14 @@ async function transcribe({
  }
  var DefaultTranscriptionResult = class {
  constructor(options) {
- var _a15;
+ var _a14;
  this.text = options.text;
  this.segments = options.segments;
  this.language = options.language;
  this.durationInSeconds = options.durationInSeconds;
  this.warnings = options.warnings;
  this.responses = options.responses;
- this.providerMetadata = (_a15 = options.providerMetadata) != null ? _a15 : {};
+ this.providerMetadata = (_a14 = options.providerMetadata) != null ? _a14 : {};
  }
  };
 
@@ -10484,7 +10922,7 @@ async function callCompletionApi({
  onError,
  fetch: fetch2 = getOriginalFetch()
  }) {
- var _a15;
+ var _a14;
  try {
  setLoading(true);
  setError(void 0);
@@ -10512,7 +10950,7 @@ async function callCompletionApi({
  });
  if (!response.ok) {
  throw new Error(
- (_a15 = await response.text()) != null ? _a15 : "Failed to fetch the chat response."
+ (_a14 = await response.text()) != null ? _a14 : "Failed to fetch the chat response."
  );
  }
  if (!response.body) {
@@ -10598,12 +11036,12 @@ async function convertFileListToFileUIParts(files) {
  }
  return Promise.all(
  Array.from(files).map(async (file) => {
- const { name: name15, type } = file;
+ const { name: name14, type } = file;
  const dataUrl = await new Promise((resolve3, reject) => {
  const reader = new FileReader();
  reader.onload = (readerEvent) => {
- var _a15;
- resolve3((_a15 = readerEvent.target) == null ? void 0 : _a15.result);
+ var _a14;
+ resolve3((_a14 = readerEvent.target) == null ? void 0 : _a14.result);
  };
  reader.onerror = (error) => reject(error);
  reader.readAsDataURL(file);
@@ -10611,7 +11049,7 @@ async function convertFileListToFileUIParts(files) {
  return {
  type: "file",
  mediaType: type,
- filename: name15,
+ filename: name14,
  url: dataUrl
  };
  })
@@ -10650,7 +11088,7 @@ var HttpChatTransport = class {
  abortSignal,
  ...options
  }) {
- var _a15, _b, _c, _d, _e;
+ var _a14, _b, _c, _d, _e;
  const resolvedBody = await resolve2(this.body);
  const resolvedHeaders = await resolve2(this.headers);
  const resolvedCredentials = await resolve2(this.credentials);
@@ -10658,7 +11096,7 @@ var HttpChatTransport = class {
  ...normalizeHeaders(resolvedHeaders),
  ...normalizeHeaders(options.headers)
  };
- const preparedRequest = await ((_a15 = this.prepareSendMessagesRequest) == null ? void 0 : _a15.call(this, {
+ const preparedRequest = await ((_a14 = this.prepareSendMessagesRequest) == null ? void 0 : _a14.call(this, {
  api: this.api,
  id: options.chatId,
  messages: options.messages,
@@ -10706,7 +11144,7 @@ var HttpChatTransport = class {
  return this.processResponseStream(response.body);
  }
  async reconnectToStream(options) {
- var _a15, _b, _c, _d, _e;
+ var _a14, _b, _c, _d, _e;
  const resolvedBody = await resolve2(this.body);
  const resolvedHeaders = await resolve2(this.headers);
  const resolvedCredentials = await resolve2(this.credentials);
@@ -10714,7 +11152,7 @@ var HttpChatTransport = class {
  ...normalizeHeaders(resolvedHeaders),
  ...normalizeHeaders(options.headers)
  };
- const preparedRequest = await ((_a15 = this.prepareReconnectToStreamRequest) == null ? void 0 : _a15.call(this, {
+ const preparedRequest = await ((_a14 = this.prepareReconnectToStreamRequest) == null ? void 0 : _a14.call(this, {
  api: this.api,
  id: options.chatId,
  body: { ...resolvedBody, ...options.body },
@@ -10796,11 +11234,11 @@ var AbstractChat = class {
  * If a messageId is provided, the message will be replaced.
  */
  this.sendMessage = async (message, options) => {
- var _a15, _b, _c, _d;
+ var _a14, _b, _c, _d;
  if (message == null) {
  await this.makeRequest({
  trigger: "submit-message",
- messageId: (_a15 = this.lastMessage) == null ? void 0 : _a15.id,
+ messageId: (_a14 = this.lastMessage) == null ? void 0 : _a14.id,
  ...options
  });
  return;
@@ -10893,10 +11331,10 @@ var AbstractChat = class {
  approved,
  reason
  }) => this.jobExecutor.run(async () => {
- var _a15, _b;
+ var _a14, _b;
  const messages = this.state.messages;
  const lastMessage = messages[messages.length - 1];
- const updatePart = (part) => isToolOrDynamicToolUIPart(part) && part.state === "approval-requested" && part.approval.id === id ? {
+ const updatePart = (part) => isToolUIPart(part) && part.state === "approval-requested" && part.approval.id === id ? {
  ...part,
  state: "approval-responded",
  approval: { id, approved, reason }
@@ -10908,7 +11346,7 @@ var AbstractChat = class {
  if (this.activeResponse) {
  this.activeResponse.state.message.parts = this.activeResponse.state.message.parts.map(updatePart);
  }
- if (this.status !== "streaming" && this.status !== "submitted" && ((_a15 = this.sendAutomaticallyWhen) == null ? void 0 : _a15.call(this, { messages: this.state.messages }))) {
+ if (this.status !== "streaming" && this.status !== "submitted" && ((_a14 = this.sendAutomaticallyWhen) == null ? void 0 : _a14.call(this, { messages: this.state.messages }))) {
  this.makeRequest({
  trigger: "submit-message",
  messageId: (_b = this.lastMessage) == null ? void 0 : _b.id
@@ -10922,10 +11360,10 @@ var AbstractChat = class {
  output,
  errorText
  }) => this.jobExecutor.run(async () => {
- var _a15, _b;
+ var _a14, _b;
  const messages = this.state.messages;
  const lastMessage = messages[messages.length - 1];
- const updatePart = (part) => isToolOrDynamicToolUIPart(part) && part.toolCallId === toolCallId ? { ...part, state, output, errorText } : part;
+ const updatePart = (part) => isToolUIPart(part) && part.toolCallId === toolCallId ? { ...part, state, output, errorText } : part;
  this.state.replaceMessage(messages.length - 1, {
  ...lastMessage,
  parts: lastMessage.parts.map(updatePart)
@@ -10933,7 +11371,7 @@ var AbstractChat = class {
  if (this.activeResponse) {
  this.activeResponse.state.message.parts = this.activeResponse.state.message.parts.map(updatePart);
  }
- if (this.status !== "streaming" && this.status !== "submitted" && ((_a15 = this.sendAutomaticallyWhen) == null ? void 0 : _a15.call(this, { messages: this.state.messages }))) {
+ if (this.status !== "streaming" && this.status !== "submitted" && ((_a14 = this.sendAutomaticallyWhen) == null ? void 0 : _a14.call(this, { messages: this.state.messages }))) {
  this.makeRequest({
  trigger: "submit-message",
  messageId: (_b = this.lastMessage) == null ? void 0 : _b.id
@@ -10946,10 +11384,10 @@ var AbstractChat = class {
  * Abort the current request immediately, keep the generated tokens if any.
  */
  this.stop = async () => {
- var _a15;
+ var _a14;
  if (this.status !== "streaming" && this.status !== "submitted")
  return;
- if ((_a15 = this.activeResponse) == null ? void 0 : _a15.abortController) {
+ if ((_a14 = this.activeResponse) == null ? void 0 : _a14.abortController) {
  this.activeResponse.abortController.abort();
  }
  };
@@ -11004,7 +11442,7 @@ var AbstractChat = class {
  body,
  messageId
  }) {
- var _a15, _b, _c, _d;
+ var _a14, _b, _c, _d;
  this.setStatus({ status: "submitted", error: void 0 });
  const lastMessage = this.lastMessage;
  let isAbort = false;
@@ -11053,9 +11491,9 @@ var AbstractChat = class {
  () => job({
  state: activeResponse.state,
  write: () => {
- var _a16;
+ var _a15;
  this.setStatus({ status: "streaming" });
- const replaceLastMessage = activeResponse.state.message.id === ((_a16 = this.lastMessage) == null ? void 0 : _a16.id);
+ const replaceLastMessage = activeResponse.state.message.id === ((_a15 = this.lastMessage) == null ? void 0 : _a15.id);
  if (replaceLastMessage) {
  this.state.replaceMessage(
  this.state.messages.length - 1,
@@ -11107,7 +11545,7 @@ var AbstractChat = class {
  isAbort,
  isDisconnect,
  isError,
- finishReason: (_a15 = this.activeResponse) == null ? void 0 : _a15.state.finishReason
+ finishReason: (_a14 = this.activeResponse) == null ? void 0 : _a14.state.finishReason
  });
  } catch (err) {
  console.error(err);
@@ -11140,7 +11578,7 @@ function lastAssistantMessageIsCompleteWithApprovalResponses({
  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
  return part.type === "step-start" ? index : lastIndex;
  }, -1);
- const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter(isToolOrDynamicToolUIPart).filter((part) => !part.providerExecuted);
+ const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter(isToolUIPart).filter((part) => !part.providerExecuted);
  return (
  // has at least one tool approval response
  lastStepToolInvocations.filter((part) => part.state === "approval-responded").length > 0 && // all tool approvals must have a response
@@ -11164,7 +11602,7 @@ function lastAssistantMessageIsCompleteWithToolCalls({
  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
  return part.type === "step-start" ? index : lastIndex;
  }, -1);
- const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter(isToolOrDynamicToolUIPart).filter((part) => !part.providerExecuted);
+ const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter(isToolUIPart).filter((part) => !part.providerExecuted);
  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every(
  (part) => part.state === "output-available" || part.state === "output-error"
  );
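Both helpers above now filter with isToolUIPart instead of isToolOrDynamicToolUIPart (which remains exported below), suggesting isToolUIPart now also covers dynamic tool parts. Their typical role is as a sendAutomaticallyWhen predicate on the chat, matching the sendAutomaticallyWhen calls inside AbstractChat above. A hedged sketch assuming the React binding outside this package:

import { useChat } from "@ai-sdk/react"; // assumption: UI binding, not part of this diff
import { lastAssistantMessageIsCompleteWithToolCalls } from "ai";

const { messages, sendMessage } = useChat({
  // resubmit once every client-side tool call in the last step has output:
  sendAutomaticallyWhen: lastAssistantMessageIsCompleteWithToolCalls,
});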
@@ -11205,7 +11643,7 @@ var TextStreamChatTransport = class extends HttpChatTransport {
  }
  };
  export {
- AISDKError16 as AISDKError,
+ AISDKError15 as AISDKError,
  APICallError,
  AbstractChat,
  DefaultChatTransport,
@@ -11244,18 +11682,13 @@ export {
  UI_MESSAGE_STREAM_HEADERS,
  UnsupportedFunctionalityError,
  UnsupportedModelVersionError,
+ addToolInputExamplesMiddleware,
  asSchema5 as asSchema,
  assistantModelMessageSchema,
  callCompletionApi,
  consumeStream,
  convertFileListToFileUIParts,
- convertToCoreMessages,
  convertToModelMessages,
- coreAssistantMessageSchema,
- coreMessageSchema,
- coreSystemMessageSchema,
- coreToolMessageSchema,
- coreUserMessageSchema,
  cosineSimilarity,
  createAgentUIStream,
  createAgentUIStreamResponse,
@@ -11273,14 +11706,16 @@ export {
  embedMany,
  experimental_createProviderRegistry,
  experimental_customProvider,
- generateImage as experimental_generateImage,
+ experimental_generateImage,
  generateSpeech as experimental_generateSpeech,
  transcribe as experimental_transcribe,
  extractReasoningMiddleware,
  gateway2 as gateway,
  generateId,
+ generateImage,
  generateObject,
  generateText,
+ getStaticToolName,
  getTextFromDataUrl,
  getToolName,
  getToolOrDynamicToolName,
@@ -11289,6 +11724,7 @@ export {
  isDeepEqualData,
  isFileUIPart,
  isReasoningUIPart,
+ isStaticToolUIPart,
  isTextUIPart,
  isToolOrDynamicToolUIPart,
  isToolUIPart,
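Taken together, the export hunks summarize this file's public-surface changes: addToolInputExamplesMiddleware, generateImage, getStaticToolName, and isStaticToolUIPart are new; experimental_generateImage survives as an alias for the now-stable generateImage; and convertToCoreMessages plus the core*MessageSchema exports are removed. A hedged migration sketch (whether convertToModelMessages covers every convertToCoreMessages call site is an assumption):

// 6.0.0-beta.98:
// import { experimental_generateImage as generateImage, convertToCoreMessages } from "ai";

// 6.0.0:
import { generateImage, convertToModelMessages } from "ai";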