ai 4.1.45 → 4.1.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -4,8 +4,8 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
- for (var name15 in all)
- __defProp(target, name15, { get: all[name15], enumerable: true });
+ for (var name16 in all)
+ __defProp(target, name16, { get: all[name16], enumerable: true });
  };
  var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
@@ -20,27 +20,28 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  // streams/index.ts
  var streams_exports = {};
  __export(streams_exports, {
- AISDKError: () => import_provider18.AISDKError,
- APICallError: () => import_provider18.APICallError,
+ AISDKError: () => import_provider19.AISDKError,
+ APICallError: () => import_provider19.APICallError,
  AssistantResponse: () => AssistantResponse,
  DownloadError: () => DownloadError,
- EmptyResponseBodyError: () => import_provider18.EmptyResponseBodyError,
+ EmptyResponseBodyError: () => import_provider19.EmptyResponseBodyError,
  InvalidArgumentError: () => InvalidArgumentError,
  InvalidDataContentError: () => InvalidDataContentError,
  InvalidMessageRoleError: () => InvalidMessageRoleError,
- InvalidPromptError: () => import_provider18.InvalidPromptError,
- InvalidResponseDataError: () => import_provider18.InvalidResponseDataError,
+ InvalidPromptError: () => import_provider19.InvalidPromptError,
+ InvalidResponseDataError: () => import_provider19.InvalidResponseDataError,
+ InvalidStreamPartError: () => InvalidStreamPartError,
  InvalidToolArgumentsError: () => InvalidToolArgumentsError,
- JSONParseError: () => import_provider18.JSONParseError,
+ JSONParseError: () => import_provider19.JSONParseError,
  LangChainAdapter: () => langchain_adapter_exports,
  LlamaIndexAdapter: () => llamaindex_adapter_exports,
- LoadAPIKeyError: () => import_provider18.LoadAPIKeyError,
+ LoadAPIKeyError: () => import_provider19.LoadAPIKeyError,
  MessageConversionError: () => MessageConversionError,
- NoContentGeneratedError: () => import_provider18.NoContentGeneratedError,
+ NoContentGeneratedError: () => import_provider19.NoContentGeneratedError,
  NoImageGeneratedError: () => NoImageGeneratedError,
  NoObjectGeneratedError: () => NoObjectGeneratedError,
  NoOutputSpecifiedError: () => NoOutputSpecifiedError,
- NoSuchModelError: () => import_provider18.NoSuchModelError,
+ NoSuchModelError: () => import_provider19.NoSuchModelError,
  NoSuchProviderError: () => NoSuchProviderError,
  NoSuchToolError: () => NoSuchToolError,
  Output: () => output_exports,
@@ -48,8 +49,8 @@ __export(streams_exports, {
  StreamData: () => StreamData,
  ToolCallRepairError: () => ToolCallRepairError,
  ToolExecutionError: () => ToolExecutionError,
- TypeValidationError: () => import_provider18.TypeValidationError,
- UnsupportedFunctionalityError: () => import_provider18.UnsupportedFunctionalityError,
+ TypeValidationError: () => import_provider19.TypeValidationError,
+ UnsupportedFunctionalityError: () => import_provider19.UnsupportedFunctionalityError,
  appendClientMessage: () => appendClientMessage,
  appendResponseMessages: () => appendResponseMessages,
  convertToCoreMessages: () => convertToCoreMessages,
@@ -432,7 +433,7 @@ function getBaseTelemetryAttributes({
  telemetry,
  headers
  }) {
- var _a15;
+ var _a16;
  return {
  "ai.model.provider": model.provider,
  "ai.model.id": model.modelId,
@@ -442,7 +443,7 @@ function getBaseTelemetryAttributes({
  return attributes;
  }, {}),
  // add metadata as attributes:
- ...Object.entries((_a15 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a15 : {}).reduce(
+ ...Object.entries((_a16 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a16 : {}).reduce(
  (attributes, [key, value]) => {
  attributes[`ai.telemetry.metadata.${key}`] = value;
  return attributes;
@@ -467,7 +468,7 @@ var noopTracer = {
  startSpan() {
  return noopSpan;
  },
- startActiveSpan(name15, arg1, arg2, arg3) {
+ startActiveSpan(name16, arg1, arg2, arg3) {
  if (typeof arg1 === "function") {
  return arg1(noopSpan);
  }
@@ -537,13 +538,13 @@ function getTracer({
  // core/telemetry/record-span.ts
  var import_api2 = require("@opentelemetry/api");
  function recordSpan({
- name: name15,
+ name: name16,
  tracer,
  attributes,
  fn,
  endWhenDone = true
  }) {
- return tracer.startActiveSpan(name15, { attributes }, async (span) => {
+ return tracer.startActiveSpan(name16, { attributes }, async (span) => {
  try {
  const result = await fn(span);
  if (endWhenDone) {
@@ -651,14 +652,14 @@ async function embed({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a15;
+ var _a16;
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
  headers
  });
  const embedding2 = modelResponse.embeddings[0];
- const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+ const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -768,14 +769,14 @@ async function embedMany({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a15;
+ var _a16;
  const modelResponse = await model.doEmbed({
  values,
  abortSignal,
  headers
  });
  const embeddings3 = modelResponse.embeddings;
- const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+ const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -827,14 +828,14 @@ async function embedMany({
  }),
  tracer,
  fn: async (doEmbedSpan) => {
- var _a15;
+ var _a16;
  const modelResponse = await model.doEmbed({
  values: chunk,
  abortSignal,
  headers
  });
  const embeddings2 = modelResponse.embeddings;
- const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+ const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
  doEmbedSpan.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -921,9 +922,9 @@ async function generateImage({
  currentDate: () => /* @__PURE__ */ new Date()
  }
  }) {
- var _a15;
+ var _a16;
  const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
- const maxImagesPerCall = (_a15 = model.maxImagesPerCall) != null ? _a15 : 1;
+ const maxImagesPerCall = (_a16 = model.maxImagesPerCall) != null ? _a16 : 1;
  const callCount = Math.ceil(n / maxImagesPerCall);
  const callImageCounts = Array.from({ length: callCount }, (_, i) => {
  if (i < callCount - 1) {
@@ -1056,7 +1057,7 @@ async function download({
  url,
  fetchImplementation = fetch
  }) {
- var _a15;
+ var _a16;
  const urlText = url.toString();
  try {
  const response = await fetchImplementation(urlText);
@@ -1069,7 +1070,7 @@ async function download({
  }
  return {
  data: new Uint8Array(await response.arrayBuffer()),
- mimeType: (_a15 = response.headers.get("content-type")) != null ? _a15 : void 0
+ mimeType: (_a16 = response.headers.get("content-type")) != null ? _a16 : void 0
  };
  } catch (error) {
  if (DownloadError.isInstance(error)) {
@@ -1129,8 +1130,8 @@ var dataContentSchema = import_zod.z.union([
  import_zod.z.custom(
  // Buffer might not be available in some environments such as CloudFlare:
  (value) => {
- var _a15, _b;
- return (_b = (_a15 = globalThis.Buffer) == null ? void 0 : _a15.isBuffer(value)) != null ? _b : false;
+ var _a16, _b;
+ return (_b = (_a16 = globalThis.Buffer) == null ? void 0 : _a16.isBuffer(value)) != null ? _b : false;
  },
  { message: "Must be a Buffer" }
  )
@@ -1230,14 +1231,14 @@ async function convertToLanguageModelPrompt({
  ];
  }
  function convertToLanguageModelMessage(message, downloadedAssets) {
- var _a15, _b, _c, _d, _e, _f;
+ var _a16, _b, _c, _d, _e, _f;
  const role = message.role;
  switch (role) {
  case "system": {
  return {
  role: "system",
  content: message.content,
- providerMetadata: (_a15 = message.providerOptions) != null ? _a15 : message.experimental_providerMetadata
+ providerMetadata: (_a16 = message.providerOptions) != null ? _a16 : message.experimental_providerMetadata
  };
  }
  case "user": {
@@ -1281,7 +1282,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  return {
  role: "tool",
  content: message.content.map((part) => {
- var _a16;
+ var _a17;
  return {
  type: "tool-result",
  toolCallId: part.toolCallId,
@@ -1289,7 +1290,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  result: part.result,
  content: part.experimental_content,
  isError: part.isError,
- providerMetadata: (_a16 = part.providerOptions) != null ? _a16 : part.experimental_providerMetadata
+ providerMetadata: (_a17 = part.providerOptions) != null ? _a17 : part.experimental_providerMetadata
  };
  }),
  providerMetadata: (_f = message.providerOptions) != null ? _f : message.experimental_providerMetadata
@@ -1325,7 +1326,7 @@ async function downloadAssets(messages, downloadImplementation, modelSupportsIma
  );
  }
  function convertPartToLanguageModelPart(part, downloadedAssets) {
- var _a15;
+ var _a16;
  if (part.type === "text") {
  return {
  type: "text",
@@ -1378,7 +1379,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
  switch (type) {
  case "image": {
  if (normalizedData instanceof Uint8Array) {
- mimeType = (_a15 = detectImageMimeType(normalizedData)) != null ? _a15 : mimeType;
+ mimeType = (_a16 = detectImageMimeType(normalizedData)) != null ? _a16 : mimeType;
  }
  return {
  type: "image",
@@ -1501,7 +1502,7 @@ var import_zod7 = require("zod");

  // core/prompt/attachments-to-parts.ts
  function attachmentsToParts(attachments) {
- var _a15, _b, _c;
+ var _a16, _b, _c;
  const parts = [];
  for (const attachment of attachments) {
  let url;
@@ -1513,7 +1514,7 @@ function attachmentsToParts(attachments) {
  switch (url.protocol) {
  case "http:":
  case "https:": {
- if ((_a15 = attachment.contentType) == null ? void 0 : _a15.startsWith("image/")) {
+ if ((_a16 = attachment.contentType) == null ? void 0 : _a16.startsWith("image/")) {
  parts.push({ type: "image", image: url });
  } else {
  if (!attachment.contentType) {
@@ -1599,8 +1600,8 @@ _a8 = symbol8;

  // core/prompt/convert-to-core-messages.ts
  function convertToCoreMessages(messages, options) {
- var _a15, _b;
- const tools = (_a15 = options == null ? void 0 : options.tools) != null ? _a15 : {};
+ var _a16, _b;
+ const tools = (_a16 = options == null ? void 0 : options.tools) != null ? _a16 : {};
  const coreMessages = [];
  for (let i = 0; i < messages.length; i++) {
  const message = messages[i];
@@ -1627,24 +1628,52 @@ function convertToCoreMessages(messages, options) {
  case "assistant": {
  if (message.parts != null) {
  let processBlock2 = function() {
+ const content2 = [];
+ for (const part of block) {
+ switch (part.type) {
+ case "text":
+ content2.push({
+ type: "text",
+ text: part.text
+ });
+ break;
+ case "reasoning": {
+ for (const detail of part.details) {
+ switch (detail.type) {
+ case "text":
+ content2.push({
+ type: "reasoning",
+ text: detail.text,
+ signature: detail.signature
+ });
+ break;
+ case "redacted":
+ content2.push({
+ type: "redacted-reasoning",
+ data: detail.data
+ });
+ break;
+ }
+ }
+ break;
+ }
+ case "tool-invocation":
+ content2.push({
+ type: "tool-call",
+ toolCallId: part.toolInvocation.toolCallId,
+ toolName: part.toolInvocation.toolName,
+ args: part.toolInvocation.args
+ });
+ break;
+ default: {
+ const _exhaustiveCheck = part;
+ throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+ }
+ }
+ }
  coreMessages.push({
  role: "assistant",
- content: block.map((part) => {
- switch (part.type) {
- case "text":
- return {
- type: "text",
- text: part.text
- };
- default:
- return {
- type: "tool-call",
- toolCallId: part.toolInvocation.toolCallId,
- toolName: part.toolInvocation.toolName,
- args: part.toolInvocation.args
- };
- }
- })
+ content: content2
  });
  const stepInvocations = block.filter(
  (part) => part.type === "tool-invocation"
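The rewritten `processBlock2` above carries reasoning through `convertToCoreMessages`: each UI `reasoning` part fans out into one core content part per detail. A minimal sketch of the mapping, with shapes inferred from the loop itself (the type names are assumptions, not the published `.d.ts`):

```ts
// Hypothetical names for the shapes visible in the loop above:
type ReasoningUIDetail =
  | { type: 'text'; text: string; signature?: string }
  | { type: 'redacted'; data: string };

type CoreAssistantPart =
  | { type: 'text'; text: string }
  | { type: 'reasoning'; text: string; signature?: string }
  | { type: 'redacted-reasoning'; data: string }
  | { type: 'tool-call'; toolCallId: string; toolName: string; args: unknown };

// A single UI 'reasoning' part becomes one core part per detail:
function mapReasoningPart(details: ReasoningUIDetail[]): CoreAssistantPart[] {
  return details.map((detail) =>
    detail.type === 'text'
      ? { type: 'reasoning', text: detail.text, signature: detail.signature }
      : { type: 'redacted-reasoning', data: detail.data }
  );
}
```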
@@ -1689,6 +1718,7 @@ function convertToCoreMessages(messages, options) {
  for (const part of message.parts) {
  switch (part.type) {
  case "reasoning":
+ block.push(part);
  break;
  case "text": {
  if (blockHasToolInvocations) {
@@ -1716,14 +1746,14 @@ function convertToCoreMessages(messages, options) {
  break;
  }
  const maxStep = toolInvocations.reduce((max, toolInvocation) => {
- var _a16;
- return Math.max(max, (_a16 = toolInvocation.step) != null ? _a16 : 0);
+ var _a17;
+ return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
  }, 0);
  for (let i2 = 0; i2 <= maxStep; i2++) {
  const stepInvocations = toolInvocations.filter(
  (toolInvocation) => {
- var _a16;
- return ((_a16 = toolInvocation.step) != null ? _a16 : 0) === i2;
+ var _a17;
+ return ((_a17 = toolInvocation.step) != null ? _a17 : 0) === i2;
  }
  );
  if (stepInvocations.length === 0) {
@@ -1886,6 +1916,18 @@ var filePartSchema = import_zod5.z.object({
  providerOptions: providerMetadataSchema.optional(),
  experimental_providerMetadata: providerMetadataSchema.optional()
  });
+ var reasoningPartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("reasoning"),
+ text: import_zod5.z.string(),
+ providerOptions: providerMetadataSchema.optional(),
+ experimental_providerMetadata: providerMetadataSchema.optional()
+ });
+ var redactedReasoningPartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("redacted-reasoning"),
+ data: import_zod5.z.string(),
+ providerOptions: providerMetadataSchema.optional(),
+ experimental_providerMetadata: providerMetadataSchema.optional()
+ });
  var toolCallPartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("tool-call"),
  toolCallId: import_zod5.z.string(),
@@ -1925,7 +1967,14 @@ var coreAssistantMessageSchema = import_zod6.z.object({
  role: import_zod6.z.literal("assistant"),
  content: import_zod6.z.union([
  import_zod6.z.string(),
+ import_zod6.z.array(
+ import_zod6.z.union([
+ textPartSchema,
+ reasoningPartSchema,
+ redactedReasoningPartSchema,
+ toolCallPartSchema
+ ])
+ )
- import_zod6.z.array(import_zod6.z.union([textPartSchema, toolCallPartSchema]))
  ]),
  providerOptions: providerMetadataSchema.optional(),
  experimental_providerMetadata: providerMetadataSchema.optional()
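Per the widened union above, an assistant core message may now interleave reasoning parts with text and tool calls. A hedged example of a message the extended `coreAssistantMessageSchema` should accept (values are illustrative):

```ts
// Illustrative message matching the extended schema:
const assistantMessage = {
  role: 'assistant' as const,
  content: [
    { type: 'reasoning' as const, text: 'Compare both versions before answering.' },
    { type: 'redacted-reasoning' as const, data: '<opaque provider payload>' },
    { type: 'text' as const, text: 'The earlier release is 4.1.45.' },
  ],
};
```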
@@ -2137,7 +2186,7 @@ var arrayOutputStrategy = (schema) => {
  additionalProperties: false
  },
  validatePartialResult({ value, latestObject, isFirstDelta, isFinalDelta }) {
- var _a15;
+ var _a16;
  if (!(0, import_provider11.isJSONObject)(value) || !(0, import_provider11.isJSONArray)(value.elements)) {
  return {
  success: false,
@@ -2160,7 +2209,7 @@ var arrayOutputStrategy = (schema) => {
  }
  resultArray.push(result.value);
  }
- const publishedElementCount = (_a15 = latestObject == null ? void 0 : latestObject.length) != null ? _a15 : 0;
+ const publishedElementCount = (_a16 = latestObject == null ? void 0 : latestObject.length) != null ? _a16 : 0;
  let textDelta = "";
  if (isFirstDelta) {
  textDelta += "[";
@@ -2498,7 +2547,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span) => {
- var _a15, _b, _c, _d;
+ var _a16, _b, _c, _d;
  if (mode === "auto" || mode == null) {
  mode = model.defaultObjectGenerationMode;
  }
@@ -2527,7 +2576,7 @@ async function generateObject({
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
+ modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
  // support 'this' context
  });
  const generateResult = await retry(
@@ -2561,7 +2610,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a16, _b2, _c2, _d2, _e, _f;
+ var _a17, _b2, _c2, _d2, _e, _f;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-json",
@@ -2577,7 +2626,7 @@ async function generateObject({
  headers
  });
  const responseData = {
- id: (_b2 = (_a16 = result2.response) == null ? void 0 : _a16.id) != null ? _b2 : generateId3(),
+ id: (_b2 = (_a17 = result2.response) == null ? void 0 : _a17.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
  modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
  };
@@ -2666,7 +2715,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a16, _b2, _c2, _d2, _e, _f, _g, _h;
+ var _a17, _b2, _c2, _d2, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  mode: {
  type: "object-tool",
@@ -2684,7 +2733,7 @@ async function generateObject({
  abortSignal,
  headers
  });
- const objectText = (_b2 = (_a16 = result2.toolCalls) == null ? void 0 : _a16[0]) == null ? void 0 : _b2.args;
+ const objectText = (_b2 = (_a17 = result2.toolCalls) == null ? void 0 : _a17[0]) == null ? void 0 : _b2.args;
  const responseData = {
  id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
  timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
@@ -2831,9 +2880,9 @@ var DefaultGenerateObjectResult = class {
  this.logprobs = options.logprobs;
  }
  toJsonResponse(init) {
- var _a15;
+ var _a16;
  return new Response(JSON.stringify(this.object), {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "application/json; charset=utf-8"
  })
@@ -2868,17 +2917,17 @@ var DelayedPromise = class {
  return this.promise;
  }
  resolve(value) {
- var _a15;
+ var _a16;
  this.status = { type: "resolved", value };
  if (this.promise) {
- (_a15 = this._resolve) == null ? void 0 : _a15.call(this, value);
+ (_a16 = this._resolve) == null ? void 0 : _a16.call(this, value);
  }
  }
  reject(error) {
- var _a15;
+ var _a16;
  this.status = { type: "rejected", error };
  if (this.promise) {
- (_a15 = this._reject) == null ? void 0 : _a15.call(this, error);
+ (_a16 = this._reject) == null ? void 0 : _a16.call(this, error);
  }
  }
  };
@@ -2982,8 +3031,8 @@ function createStitchableStream() {

  // core/util/now.ts
  function now() {
- var _a15, _b;
- return (_b = (_a15 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a15.now()) != null ? _b : Date.now();
+ var _a16, _b;
+ return (_b = (_a16 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a16.now()) != null ? _b : Date.now();
  }

  // core/generate-object/stream-object.ts
@@ -3119,7 +3168,7 @@ var DefaultStreamObjectResult = class {
  tracer,
  endWhenDone: false,
  fn: async (rootSpan) => {
- var _a15, _b;
+ var _a16, _b;
  if (mode === "auto" || mode == null) {
  mode = model.defaultObjectGenerationMode;
  }
@@ -3150,7 +3199,7 @@ var DefaultStreamObjectResult = class {
  prompt: await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
+ modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
  // support 'this' context
  }),
  providerMetadata: providerOptions,
@@ -3288,7 +3337,7 @@ var DefaultStreamObjectResult = class {
  const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a16, _b2, _c;
+ var _a17, _b2, _c;
  if (isFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  isFirstChunk = false;
@@ -3334,7 +3383,7 @@ var DefaultStreamObjectResult = class {
  switch (chunk.type) {
  case "response-metadata": {
  response = {
- id: (_a16 = chunk.id) != null ? _a16 : response.id,
+ id: (_a17 = chunk.id) != null ? _a17 : response.id,
  timestamp: (_b2 = chunk.timestamp) != null ? _b2 : response.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : response.modelId
  };
@@ -3547,9 +3596,9 @@ var DefaultStreamObjectResult = class {
  });
  }
  toTextStreamResponse(init) {
- var _a15;
+ var _a16;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8"
  })
@@ -3625,24 +3674,24 @@ function prepareToolsAndToolChoice({
  };
  }
  const filteredTools = activeTools != null ? Object.entries(tools).filter(
- ([name15]) => activeTools.includes(name15)
+ ([name16]) => activeTools.includes(name16)
  ) : Object.entries(tools);
  return {
- tools: filteredTools.map(([name15, tool2]) => {
+ tools: filteredTools.map(([name16, tool2]) => {
  const toolType = tool2.type;
  switch (toolType) {
  case void 0:
  case "function":
  return {
  type: "function",
- name: name15,
+ name: name16,
  description: tool2.description,
  parameters: (0, import_ui_utils4.asSchema)(tool2.parameters).jsonSchema
  };
  case "provider-defined":
  return {
  type: "provider-defined",
- name: name15,
+ name: name16,
  id: tool2.id,
  args: tool2.args
  };
@@ -3815,6 +3864,7 @@ async function doParseToolCall({
  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
  text: text2 = "",
+ reasoning,
  tools,
  toolCalls,
  toolResults,
@@ -3824,7 +3874,13 @@ function toResponseMessages({
  const responseMessages = [];
  responseMessages.push({
  role: "assistant",
- content: [{ type: "text", text: text2 }, ...toolCalls],
+ content: [
+ ...reasoning.map(
+ (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
+ ),
+ { type: "text", text: text2 },
+ ...toolCalls
+ ],
  id: messageId
  });
  if (toolResults.length > 0) {
@@ -3853,6 +3909,12 @@ function toResponseMessages({
  return responseMessages;
  }

+ // core/generate-text/reasoning-detail.ts
+ function asReasoningText(reasoning) {
+ const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
+ return reasoningText.length > 0 ? reasoningText : void 0;
+ }
+
  // core/generate-text/generate-text.ts
  var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({
  prefix: "aitxt",
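The new `asReasoningText` helper collapses the composite reasoning array back to a plain string. A typed restatement of the bundled helper above (the type name is an assumption, not the published `.d.ts`):

```ts
// Assumed detail shape, per the parts built elsewhere in this diff:
type ReasoningDetail =
  | { type: 'text'; text: string; signature?: string }
  | { type: 'redacted'; data: string };

// Concatenates only the text details; redacted blocks contribute nothing.
// Returns undefined when no reasoning text was produced, matching the
// `void 0` in the bundled code.
function asReasoningText(reasoning: ReasoningDetail[]): string | undefined {
  const text = reasoning
    .filter((part): part is Extract<ReasoningDetail, { type: 'text' }> => part.type === 'text')
    .map((part) => part.text)
    .join('');
  return text.length > 0 ? text : undefined;
}
```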
@@ -3888,7 +3950,7 @@ async function generateText({
  onStepFinish,
  ...settings
  }) {
- var _a15;
+ var _a16;
  if (maxSteps < 1) {
  throw new InvalidArgumentError({
  parameter: "maxSteps",
@@ -3905,7 +3967,7 @@ async function generateText({
  });
  const initialPrompt = standardizePrompt({
  prompt: {
- system: (_a15 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a15 : system,
+ system: (_a16 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a16 : system,
  prompt,
  messages
  },
@@ -3931,7 +3993,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a17, _b, _c, _d, _e, _f, _g, _h, _i;
  const mode = {
  type: "regular",
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3940,6 +4002,7 @@ async function generateText({
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
+ let currentReasoningDetails = [];
  let stepCount = 0;
  const responseMessages = [];
  let text2 = "";
@@ -3964,7 +4027,7 @@ async function generateText({
  messages: stepInputMessages
  },
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
+ modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
  // support 'this' context
  });
  currentModelResponse = await retry(
@@ -3985,8 +4048,8 @@ async function generateText({
  "ai.prompt.tools": {
  // convert the language model level tools:
  input: () => {
- var _a17;
- return (_a17 = mode.tools) == null ? void 0 : _a17.map((tool2) => JSON.stringify(tool2));
+ var _a18;
+ return (_a18 = mode.tools) == null ? void 0 : _a18.map((tool2) => JSON.stringify(tool2));
  }
  },
  "ai.prompt.toolChoice": {
@@ -4006,7 +4069,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a17, _b2, _c2, _d2, _e2, _f2;
+ var _a18, _b2, _c2, _d2, _e2, _f2;
  const result = await model.doGenerate({
  mode,
  ...callSettings,
@@ -4018,7 +4081,7 @@ async function generateText({
  headers
  });
  const responseData = {
- id: (_b2 = (_a17 = result.response) == null ? void 0 : _a17.id) != null ? _b2 : generateId3(),
+ id: (_b2 = (_a18 = result.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
  modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
  };
@@ -4092,6 +4155,9 @@ async function generateText({
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
  text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
+ currentReasoningDetails = asReasoningDetails(
+ currentModelResponse.reasoning
+ );
  sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
  if (stepType === "continue") {
  const lastMessage = responseMessages[responseMessages.length - 1];
@@ -4107,6 +4173,7 @@ async function generateText({
  responseMessages.push(
  ...toResponseMessages({
  text: text2,
+ reasoning: asReasoningDetails(currentModelResponse.reasoning),
  tools: tools != null ? tools : {},
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -4118,7 +4185,9 @@ async function generateText({
  const currentStepResult = {
  stepType,
  text: stepText,
- reasoning: currentModelResponse.reasoning,
+ // TODO v5: rename reasoning to reasoningText (and use reasoning for composite array)
+ reasoning: asReasoningText(currentReasoningDetails),
+ reasoningDetails: currentReasoningDetails,
  sources: (_e = currentModelResponse.sources) != null ? _e : [],
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -4159,7 +4228,8 @@ async function generateText({
  );
  return new DefaultGenerateTextResult({
  text: text2,
- reasoning: currentModelResponse.reasoning,
+ reasoning: asReasoningText(currentReasoningDetails),
+ reasoningDetails: currentReasoningDetails,
  sources,
  outputResolver: () => {
  if (output == null) {
@@ -4267,6 +4337,7 @@ var DefaultGenerateTextResult = class {
  constructor(options) {
  this.text = options.text;
  this.reasoning = options.reasoning;
+ this.reasoningDetails = options.reasoningDetails;
  this.toolCalls = options.toolCalls;
  this.toolResults = options.toolResults;
  this.finishReason = options.finishReason;
@@ -4285,6 +4356,15 @@ var DefaultGenerateTextResult = class {
  return this.outputResolver();
  }
  };
+ function asReasoningDetails(reasoning) {
+ if (reasoning == null) {
+ return [];
+ }
+ if (typeof reasoning === "string") {
+ return [{ type: "text", text: reasoning }];
+ }
+ return reasoning;
+ }

  // core/generate-text/output.ts
  var output_exports = {};
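Taken together, the `generateText` changes above keep `result.reasoning` as the concatenated text while adding `result.reasoningDetails` as the composite array (normalized through `asReasoningDetails`, which accepts a string, an array, or nothing). A hedged usage sketch; the model is a placeholder for any reasoning-capable provider:

```ts
import { generateText } from 'ai';

declare const model: any; // placeholder: any reasoning-capable language model

const result = await generateText({
  model,
  prompt: 'Which release came first, 4.1.45 or 4.1.47?',
});

// Unchanged shape: concatenated reasoning text, or undefined.
console.log(result.reasoning);

// New in this release: text, signatures, and redacted blocks are preserved.
for (const detail of result.reasoningDetails) {
  if (detail.type === 'text') {
    console.log('reasoning text:', detail.text, detail.signature);
  } else {
    console.log('redacted reasoning, opaque payload length:', detail.data.length);
  }
}
```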
@@ -4296,7 +4376,28 @@ var import_provider_utils10 = require("@ai-sdk/provider-utils");
  var import_ui_utils6 = require("@ai-sdk/ui-utils");

  // errors/index.ts
+ var import_provider19 = require("@ai-sdk/provider");
+
+ // errors/invalid-stream-part-error.ts
  var import_provider18 = require("@ai-sdk/provider");
+ var name14 = "AI_InvalidStreamPartError";
+ var marker14 = `vercel.ai.error.${name14}`;
+ var symbol14 = Symbol.for(marker14);
+ var _a14;
+ var InvalidStreamPartError = class extends import_provider18.AISDKError {
+ constructor({
+ chunk,
+ message
+ }) {
+ super({ name: name14, message });
+ this[_a14] = true;
+ this.chunk = chunk;
+ }
+ static isInstance(error) {
+ return import_provider18.AISDKError.hasMarker(error, marker14);
+ }
+ };
+ _a14 = symbol14;

  // core/generate-text/output.ts
  var text = () => ({
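The new `InvalidStreamPartError` follows the SDK's marker-based error pattern, so it can be detected with `isInstance` rather than `instanceof` across duplicated bundle copies. A hedged sketch:

```ts
import { streamText, InvalidStreamPartError } from 'ai';

declare const model: any; // placeholder: any reasoning-capable model

try {
  const result = streamText({ model, prompt: '...' });
  for await (const part of result.fullStream) {
    // consume parts as usual
  }
} catch (error) {
  if (InvalidStreamPartError.isInstance(error)) {
    // e.g. a reasoning-signature chunk arrived with no open reasoning block
    console.error('invalid stream part:', error.chunk, error.message);
  } else {
    throw error;
  }
}
```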
@@ -4376,7 +4477,7 @@ var object = ({
  };

  // core/generate-text/smooth-stream.ts
- var import_provider19 = require("@ai-sdk/provider");
+ var import_provider20 = require("@ai-sdk/provider");
  var import_provider_utils11 = require("@ai-sdk/provider-utils");
  var CHUNKING_REGEXPS = {
  word: /\s*\S+\s+/m,
@@ -4389,7 +4490,7 @@ function smoothStream({
  } = {}) {
  const chunkingRegexp = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
  if (chunkingRegexp == null) {
- throw new import_provider19.InvalidArgumentError({
+ throw new import_provider20.InvalidArgumentError({
  argument: "chunking",
  message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
  });
@@ -4424,6 +4525,7 @@ function smoothStream({
  }

  // core/generate-text/stream-text.ts
+ var import_provider21 = require("@ai-sdk/provider");
  var import_provider_utils12 = require("@ai-sdk/provider-utils");
  var import_ui_utils8 = require("@ai-sdk/ui-utils");

@@ -4557,6 +4659,8 @@ function runToolsTransformation({
  switch (chunkType) {
  case "text-delta":
  case "reasoning":
+ case "reasoning-signature":
+ case "redacted-reasoning":
  case "source":
  case "response-metadata":
  case "error": {
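`runToolsTransformation` now forwards the two new chunk types untouched, so they surface on `fullStream` alongside plain `reasoning` deltas. A hedged consumption sketch; the part shapes are inferred from this diff:

```ts
import { streamText } from 'ai';

declare const model: any; // placeholder: any reasoning-capable model

const result = streamText({ model, prompt: 'Explain the change.' });
for await (const part of result.fullStream) {
  switch (part.type) {
    case 'reasoning':
      process.stdout.write(part.textDelta); // incremental reasoning text
      break;
    case 'reasoning-signature':
      // closes the currently open reasoning block
      console.log('\nsignature:', part.signature);
      break;
    case 'redacted-reasoning':
      console.log('[redacted reasoning omitted]');
      break;
  }
}
```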
@@ -4868,13 +4972,14 @@ var DefaultStreamTextResult = class {
  this.providerMetadataPromise = new DelayedPromise();
  this.textPromise = new DelayedPromise();
  this.reasoningPromise = new DelayedPromise();
+ this.reasoningDetailsPromise = new DelayedPromise();
  this.sourcesPromise = new DelayedPromise();
  this.toolCallsPromise = new DelayedPromise();
  this.toolResultsPromise = new DelayedPromise();
  this.requestPromise = new DelayedPromise();
  this.responsePromise = new DelayedPromise();
  this.stepsPromise = new DelayedPromise();
- var _a15;
+ var _a16;
  if (maxSteps < 1) {
  throw new InvalidArgumentError({
  parameter: "maxSteps",
@@ -4886,7 +4991,8 @@ var DefaultStreamTextResult = class {
  let recordedStepText = "";
  let recordedContinuationText = "";
  let recordedFullText = "";
- let recordedReasoningText = void 0;
+ const stepReasoning = [];
+ let activeReasoningText = void 0;
  let recordedStepSources = [];
  const recordedSources = [];
  const recordedResponse = {
@@ -4918,7 +5024,25 @@ var DefaultStreamTextResult = class {
  recordedFullText += part.textDelta;
  }
  if (part.type === "reasoning") {
- recordedReasoningText = (recordedReasoningText != null ? recordedReasoningText : "") + part.textDelta;
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.textDelta };
+ stepReasoning.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.textDelta;
+ }
+ }
+ if (part.type === "reasoning-signature") {
+ if (activeReasoningText == null) {
+ throw new import_provider21.AISDKError({
+ name: "InvalidStreamPart",
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ }
+ if (part.type === "redacted-reasoning") {
+ stepReasoning.push({ type: "redacted", data: part.data });
  }
  if (part.type === "source") {
  recordedSources.push(part.source);
@@ -4933,6 +5057,7 @@ var DefaultStreamTextResult = class {
  if (part.type === "step-finish") {
  const stepMessages = toResponseMessages({
  text: recordedContinuationText,
+ reasoning: stepReasoning,
  tools: tools != null ? tools : {},
  toolCalls: recordedToolCalls,
  toolResults: recordedToolResults,
@@ -4956,7 +5081,8 @@ var DefaultStreamTextResult = class {
  const currentStepResult = {
  stepType,
  text: recordedStepText,
- reasoning: recordedReasoningText,
+ reasoning: asReasoningText(stepReasoning),
+ reasoningDetails: stepReasoning,
  sources: recordedStepSources,
  toolCalls: recordedToolCalls,
  toolResults: recordedToolResults,
@@ -4997,7 +5123,7 @@ var DefaultStreamTextResult = class {
  }
  },
  async flush(controller) {
- var _a16;
+ var _a17;
  try {
  if (recordedSteps.length === 0) {
  return;
@@ -5011,6 +5137,8 @@ var DefaultStreamTextResult = class {
  self.providerMetadataPromise.resolve(
  lastStep.experimental_providerMetadata
  );
+ self.reasoningPromise.resolve(lastStep.reasoning);
+ self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
  const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
  const usage = recordedUsage != null ? recordedUsage : {
  completionTokens: NaN,
@@ -5020,7 +5148,6 @@ var DefaultStreamTextResult = class {
  self.finishReasonPromise.resolve(finishReason);
  self.usagePromise.resolve(usage);
  self.textPromise.resolve(recordedFullText);
- self.reasoningPromise.resolve(recordedReasoningText);
  self.sourcesPromise.resolve(recordedSources);
  self.stepsPromise.resolve(recordedSteps);
  await (onFinish == null ? void 0 : onFinish({
@@ -5029,10 +5156,11 @@ var DefaultStreamTextResult = class {
  usage,
  text: recordedFullText,
  reasoning: lastStep.reasoning,
+ reasoningDetails: lastStep.reasoningDetails,
  sources: lastStep.sources,
  toolCalls: lastStep.toolCalls,
  toolResults: lastStep.toolResults,
- request: (_a16 = lastStep.request) != null ? _a16 : {},
+ request: (_a17 = lastStep.request) != null ? _a17 : {},
  response: lastStep.response,
  warnings: lastStep.warnings,
  providerMetadata: lastStep.providerMetadata,
@@ -5047,8 +5175,8 @@ var DefaultStreamTextResult = class {
  "ai.response.text": { output: () => recordedFullText },
  "ai.response.toolCalls": {
  output: () => {
- var _a17;
- return ((_a17 = lastStep.toolCalls) == null ? void 0 : _a17.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
+ var _a18;
+ return ((_a18 = lastStep.toolCalls) == null ? void 0 : _a18.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
  }
  },
  "ai.usage.promptTokens": usage.promptTokens,
@@ -5090,7 +5218,7 @@ var DefaultStreamTextResult = class {
  });
  const initialPrompt = standardizePrompt({
  prompt: {
- system: (_a15 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a15 : system,
+ system: (_a16 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a16 : system,
  prompt,
  messages
  },
@@ -5124,7 +5252,7 @@ var DefaultStreamTextResult = class {
  hasLeadingWhitespace,
  messageId
  }) {
- var _a16;
+ var _a17;
  const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
  const stepInputMessages = [
  ...initialPrompt.messages,
@@ -5137,7 +5265,7 @@ var DefaultStreamTextResult = class {
  messages: stepInputMessages
  },
  modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
+ modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
  // support 'this' context
  });
  const mode = {
@@ -5168,8 +5296,8 @@ var DefaultStreamTextResult = class {
  "ai.prompt.tools": {
  // convert the language model level tools:
  input: () => {
- var _a17;
- return (_a17 = mode.tools) == null ? void 0 : _a17.map((tool2) => JSON.stringify(tool2));
+ var _a18;
+ return (_a18 = mode.tools) == null ? void 0 : _a18.map((tool2) => JSON.stringify(tool2));
  }
  },
  "ai.prompt.toolChoice": {
@@ -5220,6 +5348,8 @@ var DefaultStreamTextResult = class {
  const stepRequest = request != null ? request : {};
  const stepToolCalls = [];
  const stepToolResults = [];
+ const stepReasoning2 = [];
+ let activeReasoningText2 = void 0;
  let stepFinishReason = "unknown";
  let stepUsage = {
  promptTokens: 0,
@@ -5229,7 +5359,6 @@ var DefaultStreamTextResult = class {
  let stepProviderMetadata;
  let stepFirstChunk = true;
  let stepText = "";
- let stepReasoning = "";
  let fullStepText = stepType2 === "continue" ? previousStepText : "";
  let stepLogProbs;
  let stepResponse = {
@@ -5255,7 +5384,7 @@ var DefaultStreamTextResult = class {
  transformedStream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a17, _b, _c;
+ var _a18, _b, _c;
  if (stepFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  stepFirstChunk = false;
@@ -5303,11 +5432,35 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  controller.enqueue(chunk);
- stepReasoning += chunk.textDelta;
+ if (activeReasoningText2 == null) {
+ activeReasoningText2 = {
+ type: "text",
+ text: chunk.textDelta
+ };
+ stepReasoning2.push(activeReasoningText2);
+ } else {
+ activeReasoningText2.text += chunk.textDelta;
+ }
+ break;
+ }
+ case "reasoning-signature": {
+ controller.enqueue(chunk);
+ if (activeReasoningText2 == null) {
+ throw new InvalidStreamPartError({
+ chunk,
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText2.signature = chunk.signature;
+ activeReasoningText2 = void 0;
  break;
  }
- case "source": {
+ case "redacted-reasoning": {
  controller.enqueue(chunk);
+ stepReasoning2.push({
+ type: "redacted",
+ data: chunk.data
+ });
  break;
  }
  case "tool-call": {
@@ -5322,7 +5475,7 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
+ id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
  timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
  modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
  };
@@ -5341,6 +5494,7 @@ var DefaultStreamTextResult = class {
  });
  break;
  }
+ case "source":
  case "tool-call-streaming-start":
  case "tool-call-delta": {
  controller.enqueue(chunk);
@@ -5458,6 +5612,7 @@ var DefaultStreamTextResult = class {
  responseMessages.push(
  ...toResponseMessages({
  text: stepText,
+ reasoning: stepReasoning2,
  tools: tools != null ? tools : {},
  toolCalls: stepToolCalls,
  toolResults: stepToolResults,
@@ -5531,6 +5686,9 @@ var DefaultStreamTextResult = class {
  get reasoning() {
  return this.reasoningPromise.value;
  }
+ get reasoningDetails() {
+ return this.reasoningDetailsPromise.value;
+ }
  get sources() {
  return this.sourcesPromise.value;
  }
@@ -5631,6 +5789,26 @@ var DefaultStreamTextResult = class {
  }
  break;
  }
+ case "redacted-reasoning": {
+ if (sendReasoning) {
+ controller.enqueue(
+ (0, import_ui_utils8.formatDataStreamPart)("redacted_reasoning", {
+ data: chunk.data
+ })
+ );
+ }
+ break;
+ }
+ case "reasoning-signature": {
+ if (sendReasoning) {
+ controller.enqueue(
+ (0, import_ui_utils8.formatDataStreamPart)("reasoning_signature", {
+ signature: chunk.signature
+ })
+ );
+ }
+ break;
+ }
  case "source": {
  if (sendSources) {
  controller.enqueue(
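On the data-stream protocol, the new `redacted_reasoning` and `reasoning_signature` parts are only emitted when the caller opts in with `sendReasoning`, mirroring the existing `reasoning` part. A hedged route-handler sketch (the option name is taken from the guard above; the model is a placeholder):

```ts
import { streamText } from 'ai';

declare const model: any; // placeholder: any reasoning-capable model

export async function POST(req: Request) {
  const { messages } = await req.json();
  const result = streamText({ model, messages });
  // Without sendReasoning: true, reasoning, redacted_reasoning, and
  // reasoning_signature parts are dropped from the data stream.
  return result.toDataStreamResponse({ sendReasoning: true });
}
```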
@@ -5811,9 +5989,9 @@ var DefaultStreamTextResult = class {
  );
  }
  toTextStreamResponse(init) {
- var _a15;
+ var _a16;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8"
  })
@@ -5948,7 +6126,7 @@ var doWrap = ({
  modelId,
  providerId
  }) => {
- var _a15;
+ var _a16;
  async function doTransform({
  params,
  type
@@ -5961,7 +6139,7 @@ var doWrap = ({
  modelId: modelId != null ? modelId : model.modelId,
  defaultObjectGenerationMode: model.defaultObjectGenerationMode,
  supportsImageUrls: model.supportsImageUrls,
- supportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model),
+ supportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model),
  supportsStructuredOutputs: model.supportsStructuredOutputs,
  async doGenerate(params) {
  const transformedParams = await doTransform({ params, type: "generate" });
@@ -5995,7 +6173,7 @@ function appendResponseMessages({
  responseMessages,
  _internal: { currentDate = () => /* @__PURE__ */ new Date() } = {}
  }) {
- var _a15, _b, _c, _d;
+ var _a16, _b, _c, _d;
  const clonedMessages = structuredClone(messages);
  for (const message of responseMessages) {
  const role = message.role;
@@ -6018,7 +6196,7 @@ function appendResponseMessages({
  const maxStep = (0, import_ui_utils9.extractMaxToolInvocationStep)(
  lastMessage.toolInvocations
  );
- (_a15 = lastMessage.parts) != null ? _a15 : lastMessage.parts = [];
+ (_a16 = lastMessage.parts) != null ? _a16 : lastMessage.parts = [];
  lastMessage.content = textContent;
  if (textContent.length > 0) {
  lastMessage.parts.push({
@@ -6096,7 +6274,7 @@ function appendResponseMessages({
  }

  // core/registry/custom-provider.ts
- var import_provider20 = require("@ai-sdk/provider");
+ var import_provider22 = require("@ai-sdk/provider");
  function customProvider({
  languageModels,
  textEmbeddingModels,
@@ -6111,7 +6289,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.languageModel(modelId);
  }
- throw new import_provider20.NoSuchModelError({ modelId, modelType: "languageModel" });
+ throw new import_provider22.NoSuchModelError({ modelId, modelType: "languageModel" });
  },
  textEmbeddingModel(modelId) {
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -6120,7 +6298,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.textEmbeddingModel(modelId);
  }
- throw new import_provider20.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider22.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  },
  imageModel(modelId) {
  if (imageModels != null && modelId in imageModels) {
@@ -6129,19 +6307,19 @@ function customProvider({
  if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
  return fallbackProvider.imageModel(modelId);
  }
- throw new import_provider20.NoSuchModelError({ modelId, modelType: "imageModel" });
+ throw new import_provider22.NoSuchModelError({ modelId, modelType: "imageModel" });
  }
  };
  }
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- var import_provider21 = require("@ai-sdk/provider");
- var name14 = "AI_NoSuchProviderError";
- var marker14 = `vercel.ai.error.${name14}`;
- var symbol14 = Symbol.for(marker14);
- var _a14;
- var NoSuchProviderError = class extends import_provider21.NoSuchModelError {
+ var import_provider23 = require("@ai-sdk/provider");
+ var name15 = "AI_NoSuchProviderError";
+ var marker15 = `vercel.ai.error.${name15}`;
+ var symbol15 = Symbol.for(marker15);
+ var _a15;
+ var NoSuchProviderError = class extends import_provider23.NoSuchModelError {
  constructor({
  modelId,
  modelType,
@@ -6149,19 +6327,19 @@ var NoSuchProviderError = class extends import_provider21.NoSuchModelError {
  availableProviders,
  message = `No such provider: ${providerId} (available providers: ${availableProviders.join()})`
  }) {
- super({ errorName: name14, modelId, modelType, message });
- this[_a14] = true;
+ super({ errorName: name15, modelId, modelType, message });
+ this[_a15] = true;
  this.providerId = providerId;
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return import_provider21.AISDKError.hasMarker(error, marker14);
+ return import_provider23.AISDKError.hasMarker(error, marker15);
  }
  };
- _a14 = symbol14;
+ _a15 = symbol15;

  // core/registry/provider-registry.ts
- var import_provider22 = require("@ai-sdk/provider");
+ var import_provider24 = require("@ai-sdk/provider");
  function experimental_createProviderRegistry(providers) {
  const registry = new DefaultProviderRegistry();
  for (const [id, provider] of Object.entries(providers)) {
@@ -6194,7 +6372,7 @@ var DefaultProviderRegistry = class {
  splitId(id, modelType) {
  const index = id.indexOf(":");
  if (index === -1) {
- throw new import_provider22.NoSuchModelError({
+ throw new import_provider24.NoSuchModelError({
  modelId: id,
  modelType,
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -6203,21 +6381,21 @@ var DefaultProviderRegistry = class {
  return [id.slice(0, index), id.slice(index + 1)];
  }
  languageModel(id) {
- var _a15, _b;
+ var _a16, _b;
  const [providerId, modelId] = this.splitId(id, "languageModel");
- const model = (_b = (_a15 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a15, modelId);
+ const model = (_b = (_a16 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a16, modelId);
  if (model == null) {
- throw new import_provider22.NoSuchModelError({ modelId: id, modelType: "languageModel" });
+ throw new import_provider24.NoSuchModelError({ modelId: id, modelType: "languageModel" });
  }
  return model;
  }
  textEmbeddingModel(id) {
- var _a15;
+ var _a16;
  const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
  const provider = this.getProvider(providerId);
- const model = (_a15 = provider.textEmbeddingModel) == null ? void 0 : _a15.call(provider, modelId);
+ const model = (_a16 = provider.textEmbeddingModel) == null ? void 0 : _a16.call(provider, modelId);
  if (model == null) {
- throw new import_provider22.NoSuchModelError({
+ throw new import_provider24.NoSuchModelError({
  modelId: id,
  modelType: "textEmbeddingModel"
  });
@@ -6225,12 +6403,12 @@ var DefaultProviderRegistry = class {
  return model;
  }
  imageModel(id) {
- var _a15;
+ var _a16;
  const [providerId, modelId] = this.splitId(id, "imageModel");
  const provider = this.getProvider(providerId);
- const model = (_a15 = provider.imageModel) == null ? void 0 : _a15.call(provider, modelId);
+ const model = (_a16 = provider.imageModel) == null ? void 0 : _a16.call(provider, modelId);
  if (model == null) {
- throw new import_provider22.NoSuchModelError({ modelId: id, modelType: "imageModel" });
+ throw new import_provider24.NoSuchModelError({ modelId: id, modelType: "imageModel" });
  }
  return model;
  }
@@ -6289,8 +6467,8 @@ function simulateReadableStream({
  chunkDelayInMs = 0,
  _internal
  }) {
- var _a15;
- const delay2 = (_a15 = _internal == null ? void 0 : _internal.delay) != null ? _a15 : import_provider_utils13.delay;
+ var _a16;
+ const delay2 = (_a16 = _internal == null ? void 0 : _internal.delay) != null ? _a16 : import_provider_utils13.delay;
  let index = 0;
  return new ReadableStream({
  async pull(controller) {
@@ -6309,7 +6487,7 @@ var import_ui_utils11 = require("@ai-sdk/ui-utils");
  function AssistantResponse({ threadId, messageId }, process2) {
  const stream = new ReadableStream({
  async start(controller) {
- var _a15;
+ var _a16;
  const textEncoder = new TextEncoder();
  const sendMessage = (message) => {
  controller.enqueue(
@@ -6331,7 +6509,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  );
  };
  const forwardStream = async (stream2) => {
- var _a16, _b;
+ var _a17, _b;
  let result = void 0;
  for await (const value of stream2) {
  switch (value.event) {
@@ -6348,7 +6526,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  break;
  }
  case "thread.message.delta": {
- const content = (_a16 = value.data.delta.content) == null ? void 0 : _a16[0];
+ const content = (_a17 = value.data.delta.content) == null ? void 0 : _a17[0];
  if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
  controller.enqueue(
  textEncoder.encode(
@@ -6382,7 +6560,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  forwardStream
  });
  } catch (error) {
- sendError((_a15 = error.message) != null ? _a15 : `${error}`);
+ sendError((_a16 = error.message) != null ? _a16 : `${error}`);
  } finally {
  controller.close();
  }
@@ -6443,7 +6621,7 @@ function toDataStreamInternal(stream, callbacks) {
  return stream.pipeThrough(
  new TransformStream({
  transform: async (value, controller) => {
- var _a15;
+ var _a16;
  if (typeof value === "string") {
  controller.enqueue(value);
  return;
@@ -6451,7 +6629,7 @@ function toDataStreamInternal(stream, callbacks) {
  if ("event" in value) {
  if (value.event === "on_chat_model_stream") {
  forwardAIMessageChunk(
- (_a15 = value.data) == null ? void 0 : _a15.chunk,
+ (_a16 = value.data) == null ? void 0 : _a16.chunk,
  controller
  );
  }
@@ -6474,7 +6652,7 @@ function toDataStream(stream, callbacks) {
  );
  }
  function toDataStreamResponse(stream, options) {
- var _a15;
+ var _a16;
  const dataStream = toDataStreamInternal(
  stream,
  options == null ? void 0 : options.callbacks
@@ -6483,7 +6661,7 @@ function toDataStreamResponse(stream, options) {
  const init = options == null ? void 0 : options.init;
  const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
  return new Response(responseStream, {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
  statusText: init == null ? void 0 : init.statusText,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8",
@@ -6538,14 +6716,14 @@ function toDataStream2(stream, callbacks) {
  );
  }
  function toDataStreamResponse2(stream, options = {}) {
- var _a15;
+ var _a16;
  const { init, data, callbacks } = options;
  const dataStream = toDataStreamInternal2(stream, callbacks).pipeThrough(
  new TextEncoderStream()
  );
  const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
  return new Response(responseStream, {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
  statusText: init == null ? void 0 : init.statusText,
  headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
  contentType: "text/plain; charset=utf-8",
@@ -6648,6 +6826,7 @@ var StreamData = class {
  InvalidMessageRoleError,
  InvalidPromptError,
  InvalidResponseDataError,
+ InvalidStreamPartError,
  InvalidToolArgumentsError,
  JSONParseError,
  LangChainAdapter,