ai 4.1.44 → 4.1.46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
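Functionally, 4.1.46 threads structured reasoning output through the text pipeline: assistant message schemas accept reasoning and redacted-reasoning parts, generateText and streamText results expose reasoningDetails alongside the flattened reasoning string, the stream transforms handle reasoning-signature and redacted-reasoning chunks (with a new InvalidStreamPartError export for malformed sequences), and createDataStream writers gain a writeSource method. A minimal sketch of the new writer method follows, assuming the public createDataStream export; the url-source shape is illustrative only, since the diff forwards the object verbatim as a "source" stream part:

import { createDataStream } from "ai";

const stream = createDataStream({
  execute(writer) {
    writer.writeData("processing");
    // New in this release: forward a source reference to the client.
    // The object is emitted via formatDataStreamPart("source", source)
    // unchanged; this url-source shape is an assumption for illustration.
    writer.writeSource({
      sourceType: "url",
      id: "src-1",
      url: "https://example.com/article",
      title: "Example article"
    });
  }
});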
package/dist/index.mjs CHANGED
@@ -1,7 +1,7 @@
 var __defProp = Object.defineProperty;
 var __export = (target, all) => {
-  for (var name15 in all)
-    __defProp(target, name15, { get: all[name15], enumerable: true });
+  for (var name16 in all)
+    __defProp(target, name16, { get: all[name16], enumerable: true });
 };

 // core/index.ts
@@ -48,6 +48,9 @@ function createDataStream({
     writeMessageAnnotation(annotation) {
       safeEnqueue(formatDataStreamPart("message_annotations", [annotation]));
     },
+    writeSource(source) {
+      safeEnqueue(formatDataStreamPart("source", source));
+    },
     merge(streamArg) {
       ongoingStreamPromises.push(
         (async () => {
@@ -351,7 +354,7 @@ function getBaseTelemetryAttributes({
   telemetry,
   headers
 }) {
-  var _a15;
+  var _a16;
   return {
     "ai.model.provider": model.provider,
     "ai.model.id": model.modelId,
@@ -361,7 +364,7 @@ function getBaseTelemetryAttributes({
       return attributes;
     }, {}),
     // add metadata as attributes:
-    ...Object.entries((_a15 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a15 : {}).reduce(
+    ...Object.entries((_a16 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a16 : {}).reduce(
      (attributes, [key, value]) => {
        attributes[`ai.telemetry.metadata.${key}`] = value;
        return attributes;
@@ -386,7 +389,7 @@ var noopTracer = {
   startSpan() {
     return noopSpan;
   },
-  startActiveSpan(name15, arg1, arg2, arg3) {
+  startActiveSpan(name16, arg1, arg2, arg3) {
     if (typeof arg1 === "function") {
       return arg1(noopSpan);
     }
@@ -456,13 +459,13 @@ function getTracer({
 // core/telemetry/record-span.ts
 import { SpanStatusCode } from "@opentelemetry/api";
 function recordSpan({
-  name: name15,
+  name: name16,
   tracer,
   attributes,
   fn,
   endWhenDone = true
 }) {
-  return tracer.startActiveSpan(name15, { attributes }, async (span) => {
+  return tracer.startActiveSpan(name16, { attributes }, async (span) => {
     try {
       const result = await fn(span);
       if (endWhenDone) {
@@ -570,14 +573,14 @@ async function embed({
        }),
        tracer,
        fn: async (doEmbedSpan) => {
-          var _a15;
+          var _a16;
          const modelResponse = await model.doEmbed({
            values: [value],
            abortSignal,
            headers
          });
          const embedding2 = modelResponse.embeddings[0];
-          const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+          const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
          doEmbedSpan.setAttributes(
            selectTelemetryAttributes({
              telemetry,
@@ -687,14 +690,14 @@ async function embedMany({
        }),
        tracer,
        fn: async (doEmbedSpan) => {
-          var _a15;
+          var _a16;
          const modelResponse = await model.doEmbed({
            values,
            abortSignal,
            headers
          });
          const embeddings3 = modelResponse.embeddings;
-          const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+          const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
          doEmbedSpan.setAttributes(
            selectTelemetryAttributes({
              telemetry,
@@ -746,14 +749,14 @@ async function embedMany({
        }),
        tracer,
        fn: async (doEmbedSpan) => {
-          var _a15;
+          var _a16;
          const modelResponse = await model.doEmbed({
            values: chunk,
            abortSignal,
            headers
          });
          const embeddings2 = modelResponse.embeddings;
-          const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+          const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
          doEmbedSpan.setAttributes(
            selectTelemetryAttributes({
              telemetry,
@@ -843,9 +846,9 @@ async function generateImage({
     currentDate: () => /* @__PURE__ */ new Date()
   }
 }) {
-  var _a15;
+  var _a16;
   const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
-  const maxImagesPerCall = (_a15 = model.maxImagesPerCall) != null ? _a15 : 1;
+  const maxImagesPerCall = (_a16 = model.maxImagesPerCall) != null ? _a16 : 1;
   const callCount = Math.ceil(n / maxImagesPerCall);
   const callImageCounts = Array.from({ length: callCount }, (_, i) => {
     if (i < callCount - 1) {
@@ -981,7 +984,7 @@ async function download({
   url,
   fetchImplementation = fetch
 }) {
-  var _a15;
+  var _a16;
   const urlText = url.toString();
   try {
     const response = await fetchImplementation(urlText);
@@ -994,7 +997,7 @@
     }
     return {
       data: new Uint8Array(await response.arrayBuffer()),
-      mimeType: (_a15 = response.headers.get("content-type")) != null ? _a15 : void 0
+      mimeType: (_a16 = response.headers.get("content-type")) != null ? _a16 : void 0
     };
   } catch (error) {
     if (DownloadError.isInstance(error)) {
@@ -1057,8 +1060,8 @@ var dataContentSchema = z.union([
   z.custom(
     // Buffer might not be available in some environments such as CloudFlare:
     (value) => {
-      var _a15, _b;
-      return (_b = (_a15 = globalThis.Buffer) == null ? void 0 : _a15.isBuffer(value)) != null ? _b : false;
+      var _a16, _b;
+      return (_b = (_a16 = globalThis.Buffer) == null ? void 0 : _a16.isBuffer(value)) != null ? _b : false;
     },
     { message: "Must be a Buffer" }
   )
@@ -1158,14 +1161,14 @@ async function convertToLanguageModelPrompt({
   ];
 }
 function convertToLanguageModelMessage(message, downloadedAssets) {
-  var _a15, _b, _c, _d, _e, _f;
+  var _a16, _b, _c, _d, _e, _f;
   const role = message.role;
   switch (role) {
     case "system": {
       return {
         role: "system",
         content: message.content,
-        providerMetadata: (_a15 = message.providerOptions) != null ? _a15 : message.experimental_providerMetadata
+        providerMetadata: (_a16 = message.providerOptions) != null ? _a16 : message.experimental_providerMetadata
       };
     }
     case "user": {
@@ -1209,7 +1212,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
       return {
         role: "tool",
         content: message.content.map((part) => {
-          var _a16;
+          var _a17;
          return {
            type: "tool-result",
            toolCallId: part.toolCallId,
@@ -1217,7 +1220,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
            result: part.result,
            content: part.experimental_content,
            isError: part.isError,
-            providerMetadata: (_a16 = part.providerOptions) != null ? _a16 : part.experimental_providerMetadata
+            providerMetadata: (_a17 = part.providerOptions) != null ? _a17 : part.experimental_providerMetadata
          };
        }),
        providerMetadata: (_f = message.providerOptions) != null ? _f : message.experimental_providerMetadata
@@ -1253,7 +1256,7 @@ async function downloadAssets(messages, downloadImplementation, modelSupportsIma
   );
 }
 function convertPartToLanguageModelPart(part, downloadedAssets) {
-  var _a15;
+  var _a16;
   if (part.type === "text") {
     return {
       type: "text",
@@ -1306,7 +1309,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
   switch (type) {
     case "image": {
       if (normalizedData instanceof Uint8Array) {
-        mimeType = (_a15 = detectImageMimeType(normalizedData)) != null ? _a15 : mimeType;
+        mimeType = (_a16 = detectImageMimeType(normalizedData)) != null ? _a16 : mimeType;
       }
       return {
         type: "image",
@@ -1429,7 +1432,7 @@ import { z as z7 } from "zod";

 // core/prompt/attachments-to-parts.ts
 function attachmentsToParts(attachments) {
-  var _a15, _b, _c;
+  var _a16, _b, _c;
   const parts = [];
   for (const attachment of attachments) {
     let url;
@@ -1441,7 +1444,7 @@ function attachmentsToParts(attachments) {
     switch (url.protocol) {
       case "http:":
       case "https:": {
-        if ((_a15 = attachment.contentType) == null ? void 0 : _a15.startsWith("image/")) {
+        if ((_a16 = attachment.contentType) == null ? void 0 : _a16.startsWith("image/")) {
          parts.push({ type: "image", image: url });
        } else {
          if (!attachment.contentType) {
@@ -1527,8 +1530,8 @@ _a8 = symbol8;

 // core/prompt/convert-to-core-messages.ts
 function convertToCoreMessages(messages, options) {
-  var _a15, _b;
-  const tools = (_a15 = options == null ? void 0 : options.tools) != null ? _a15 : {};
+  var _a16, _b;
+  const tools = (_a16 = options == null ? void 0 : options.tools) != null ? _a16 : {};
   const coreMessages = [];
   for (let i = 0; i < messages.length; i++) {
     const message = messages[i];
@@ -1555,24 +1558,52 @@ function convertToCoreMessages(messages, options) {
      case "assistant": {
        if (message.parts != null) {
          let processBlock2 = function() {
+            const content2 = [];
+            for (const part of block) {
+              switch (part.type) {
+                case "text":
+                  content2.push({
+                    type: "text",
+                    text: part.text
+                  });
+                  break;
+                case "reasoning": {
+                  for (const detail of part.details) {
+                    switch (detail.type) {
+                      case "text":
+                        content2.push({
+                          type: "reasoning",
+                          text: detail.text,
+                          signature: detail.signature
+                        });
+                        break;
+                      case "redacted":
+                        content2.push({
+                          type: "redacted-reasoning",
+                          data: detail.data
+                        });
+                        break;
+                    }
+                  }
+                  break;
+                }
+                case "tool-invocation":
+                  content2.push({
+                    type: "tool-call",
+                    toolCallId: part.toolInvocation.toolCallId,
+                    toolName: part.toolInvocation.toolName,
+                    args: part.toolInvocation.args
+                  });
+                  break;
+                default: {
+                  const _exhaustiveCheck = part;
+                  throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+                }
+              }
+            }
            coreMessages.push({
              role: "assistant",
-              content: block.map((part) => {
-                switch (part.type) {
-                  case "text":
-                    return {
-                      type: "text",
-                      text: part.text
-                    };
-                  default:
-                    return {
-                      type: "tool-call",
-                      toolCallId: part.toolInvocation.toolCallId,
-                      toolName: part.toolInvocation.toolName,
-                      args: part.toolInvocation.args
-                    };
-                }
-              })
+              content: content2
            });
            const stepInvocations = block.filter(
              (part) => part.type === "tool-invocation"
@@ -1617,6 +1648,7 @@ function convertToCoreMessages(messages, options) {
          for (const part of message.parts) {
            switch (part.type) {
              case "reasoning":
+                block.push(part);
                break;
              case "text": {
                if (blockHasToolInvocations) {
@@ -1644,14 +1676,14 @@ function convertToCoreMessages(messages, options) {
            break;
          }
          const maxStep = toolInvocations.reduce((max, toolInvocation) => {
-            var _a16;
-            return Math.max(max, (_a16 = toolInvocation.step) != null ? _a16 : 0);
+            var _a17;
+            return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
          }, 0);
          for (let i2 = 0; i2 <= maxStep; i2++) {
            const stepInvocations = toolInvocations.filter(
              (toolInvocation) => {
-                var _a16;
-                return ((_a16 = toolInvocation.step) != null ? _a16 : 0) === i2;
+                var _a17;
+                return ((_a17 = toolInvocation.step) != null ? _a17 : 0) === i2;
              }
            );
            if (stepInvocations.length === 0) {
@@ -1814,6 +1846,18 @@ var filePartSchema = z5.object({
   providerOptions: providerMetadataSchema.optional(),
   experimental_providerMetadata: providerMetadataSchema.optional()
 });
+var reasoningPartSchema = z5.object({
+  type: z5.literal("reasoning"),
+  text: z5.string(),
+  providerOptions: providerMetadataSchema.optional(),
+  experimental_providerMetadata: providerMetadataSchema.optional()
+});
+var redactedReasoningPartSchema = z5.object({
+  type: z5.literal("redacted-reasoning"),
+  data: z5.string(),
+  providerOptions: providerMetadataSchema.optional(),
+  experimental_providerMetadata: providerMetadataSchema.optional()
+});
 var toolCallPartSchema = z5.object({
   type: z5.literal("tool-call"),
   toolCallId: z5.string(),
@@ -1853,7 +1897,14 @@ var coreAssistantMessageSchema = z6.object({
   role: z6.literal("assistant"),
   content: z6.union([
     z6.string(),
-    z6.array(z6.union([textPartSchema, toolCallPartSchema]))
+    z6.array(
+      z6.union([
+        textPartSchema,
+        reasoningPartSchema,
+        redactedReasoningPartSchema,
+        toolCallPartSchema
+      ])
+    )
   ]),
   providerOptions: providerMetadataSchema.optional(),
   experimental_providerMetadata: providerMetadataSchema.optional()
@@ -2070,7 +2121,7 @@ var arrayOutputStrategy = (schema) => {
     additionalProperties: false
   },
   validatePartialResult({ value, latestObject, isFirstDelta, isFinalDelta }) {
-    var _a15;
+    var _a16;
     if (!isJSONObject(value) || !isJSONArray(value.elements)) {
       return {
         success: false,
@@ -2093,7 +2144,7 @@ var arrayOutputStrategy = (schema) => {
      }
      resultArray.push(result.value);
    }
-    const publishedElementCount = (_a15 = latestObject == null ? void 0 : latestObject.length) != null ? _a15 : 0;
+    const publishedElementCount = (_a16 = latestObject == null ? void 0 : latestObject.length) != null ? _a16 : 0;
    let textDelta = "";
    if (isFirstDelta) {
      textDelta += "[";
@@ -2431,7 +2482,7 @@ async function generateObject({
    }),
    tracer,
    fn: async (span) => {
-      var _a15, _b, _c, _d;
+      var _a16, _b, _c, _d;
      if (mode === "auto" || mode == null) {
        mode = model.defaultObjectGenerationMode;
      }
@@ -2460,7 +2511,7 @@ async function generateObject({
      const promptMessages = await convertToLanguageModelPrompt({
        prompt: standardizedPrompt,
        modelSupportsImageUrls: model.supportsImageUrls,
-        modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
+        modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
        // support 'this' context
      });
      const generateResult = await retry(
@@ -2494,7 +2545,7 @@ async function generateObject({
          }),
          tracer,
          fn: async (span2) => {
-            var _a16, _b2, _c2, _d2, _e, _f;
+            var _a17, _b2, _c2, _d2, _e, _f;
            const result2 = await model.doGenerate({
              mode: {
                type: "object-json",
@@ -2510,7 +2561,7 @@ async function generateObject({
              headers
            });
            const responseData = {
-              id: (_b2 = (_a16 = result2.response) == null ? void 0 : _a16.id) != null ? _b2 : generateId3(),
+              id: (_b2 = (_a17 = result2.response) == null ? void 0 : _a17.id) != null ? _b2 : generateId3(),
              timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
              modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
            };
@@ -2599,7 +2650,7 @@ async function generateObject({
          }),
          tracer,
          fn: async (span2) => {
-            var _a16, _b2, _c2, _d2, _e, _f, _g, _h;
+            var _a17, _b2, _c2, _d2, _e, _f, _g, _h;
            const result2 = await model.doGenerate({
              mode: {
                type: "object-tool",
@@ -2617,7 +2668,7 @@ async function generateObject({
              abortSignal,
              headers
            });
-            const objectText = (_b2 = (_a16 = result2.toolCalls) == null ? void 0 : _a16[0]) == null ? void 0 : _b2.args;
+            const objectText = (_b2 = (_a17 = result2.toolCalls) == null ? void 0 : _a17[0]) == null ? void 0 : _b2.args;
            const responseData = {
              id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
              timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
@@ -2764,9 +2815,9 @@ var DefaultGenerateObjectResult = class {
     this.logprobs = options.logprobs;
   }
   toJsonResponse(init) {
-    var _a15;
+    var _a16;
     return new Response(JSON.stringify(this.object), {
-      status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+      status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
      headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
        contentType: "application/json; charset=utf-8"
      })
@@ -2804,17 +2855,17 @@ var DelayedPromise = class {
     return this.promise;
   }
   resolve(value) {
-    var _a15;
+    var _a16;
     this.status = { type: "resolved", value };
     if (this.promise) {
-      (_a15 = this._resolve) == null ? void 0 : _a15.call(this, value);
+      (_a16 = this._resolve) == null ? void 0 : _a16.call(this, value);
     }
   }
   reject(error) {
-    var _a15;
+    var _a16;
     this.status = { type: "rejected", error };
     if (this.promise) {
-      (_a15 = this._reject) == null ? void 0 : _a15.call(this, error);
+      (_a16 = this._reject) == null ? void 0 : _a16.call(this, error);
     }
   }
 };
@@ -2918,8 +2969,8 @@ function createStitchableStream() {

 // core/util/now.ts
 function now() {
-  var _a15, _b;
-  return (_b = (_a15 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a15.now()) != null ? _b : Date.now();
+  var _a16, _b;
+  return (_b = (_a16 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a16.now()) != null ? _b : Date.now();
 }

 // core/generate-object/stream-object.ts
@@ -3055,7 +3106,7 @@ var DefaultStreamObjectResult = class {
      tracer,
      endWhenDone: false,
      fn: async (rootSpan) => {
-        var _a15, _b;
+        var _a16, _b;
        if (mode === "auto" || mode == null) {
          mode = model.defaultObjectGenerationMode;
        }
@@ -3086,7 +3137,7 @@ var DefaultStreamObjectResult = class {
          prompt: await convertToLanguageModelPrompt({
            prompt: standardizedPrompt,
            modelSupportsImageUrls: model.supportsImageUrls,
-            modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
+            modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
            // support 'this' context
          }),
          providerMetadata: providerOptions,
@@ -3224,7 +3275,7 @@ var DefaultStreamObjectResult = class {
    const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
      new TransformStream({
        async transform(chunk, controller) {
-          var _a16, _b2, _c;
+          var _a17, _b2, _c;
          if (isFirstChunk) {
            const msToFirstChunk = now2() - startTimestampMs;
            isFirstChunk = false;
@@ -3270,7 +3321,7 @@ var DefaultStreamObjectResult = class {
          switch (chunk.type) {
            case "response-metadata": {
              response = {
-                id: (_a16 = chunk.id) != null ? _a16 : response.id,
+                id: (_a17 = chunk.id) != null ? _a17 : response.id,
                timestamp: (_b2 = chunk.timestamp) != null ? _b2 : response.timestamp,
                modelId: (_c = chunk.modelId) != null ? _c : response.modelId
              };
@@ -3483,9 +3534,9 @@ var DefaultStreamObjectResult = class {
     });
   }
   toTextStreamResponse(init) {
-    var _a15;
+    var _a16;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
-      status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+      status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
      headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
        contentType: "text/plain; charset=utf-8"
      })
@@ -3561,24 +3612,24 @@ function prepareToolsAndToolChoice({
     };
   }
   const filteredTools = activeTools != null ? Object.entries(tools).filter(
-    ([name15]) => activeTools.includes(name15)
+    ([name16]) => activeTools.includes(name16)
   ) : Object.entries(tools);
   return {
-    tools: filteredTools.map(([name15, tool2]) => {
+    tools: filteredTools.map(([name16, tool2]) => {
      const toolType = tool2.type;
      switch (toolType) {
        case void 0:
        case "function":
          return {
            type: "function",
-            name: name15,
+            name: name16,
            description: tool2.description,
            parameters: asSchema2(tool2.parameters).jsonSchema
          };
        case "provider-defined":
          return {
            type: "provider-defined",
-            name: name15,
+            name: name16,
            id: tool2.id,
            args: tool2.args
          };
@@ -3751,6 +3802,7 @@ async function doParseToolCall({
 // core/generate-text/to-response-messages.ts
 function toResponseMessages({
   text: text2 = "",
+  reasoning,
   tools,
   toolCalls,
   toolResults,
@@ -3760,7 +3812,13 @@ function toResponseMessages({
   const responseMessages = [];
   responseMessages.push({
     role: "assistant",
-    content: [{ type: "text", text: text2 }, ...toolCalls],
+    content: [
+      ...reasoning.map(
+        (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
+      ),
+      { type: "text", text: text2 },
+      ...toolCalls
+    ],
     id: messageId
   });
   if (toolResults.length > 0) {
@@ -3789,6 +3847,12 @@ function toResponseMessages({
   return responseMessages;
 }

+// core/generate-text/reasoning-detail.ts
+function asReasoningText(reasoning) {
+  const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
+  return reasoningText.length > 0 ? reasoningText : void 0;
+}
+
 // core/generate-text/generate-text.ts
 var originalGenerateId3 = createIdGenerator3({
   prefix: "aitxt",
@@ -3824,7 +3888,7 @@ async function generateText({
   onStepFinish,
   ...settings
 }) {
-  var _a15;
+  var _a16;
   if (maxSteps < 1) {
     throw new InvalidArgumentError({
       parameter: "maxSteps",
@@ -3841,7 +3905,7 @@ async function generateText({
   });
   const initialPrompt = standardizePrompt({
     prompt: {
-      system: (_a15 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a15 : system,
+      system: (_a16 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a16 : system,
      prompt,
      messages
    },
@@ -3867,7 +3931,7 @@ async function generateText({
    }),
    tracer,
    fn: async (span) => {
-      var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
+      var _a17, _b, _c, _d, _e, _f, _g, _h, _i;
      const mode = {
        type: "regular",
        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3876,6 +3940,7 @@ async function generateText({
      let currentModelResponse;
      let currentToolCalls = [];
      let currentToolResults = [];
+      let currentReasoningDetails = [];
      let stepCount = 0;
      const responseMessages = [];
      let text2 = "";
@@ -3900,7 +3965,7 @@ async function generateText({
            messages: stepInputMessages
          },
          modelSupportsImageUrls: model.supportsImageUrls,
-          modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
+          modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
          // support 'this' context
        });
        currentModelResponse = await retry(
@@ -3921,8 +3986,8 @@ async function generateText({
              "ai.prompt.tools": {
                // convert the language model level tools:
                input: () => {
-                  var _a17;
-                  return (_a17 = mode.tools) == null ? void 0 : _a17.map((tool2) => JSON.stringify(tool2));
+                  var _a18;
+                  return (_a18 = mode.tools) == null ? void 0 : _a18.map((tool2) => JSON.stringify(tool2));
                }
              },
              "ai.prompt.toolChoice": {
@@ -3942,7 +4007,7 @@ async function generateText({
            }),
            tracer,
            fn: async (span2) => {
-              var _a17, _b2, _c2, _d2, _e2, _f2;
+              var _a18, _b2, _c2, _d2, _e2, _f2;
              const result = await model.doGenerate({
                mode,
                ...callSettings,
@@ -3954,7 +4019,7 @@ async function generateText({
                headers
              });
              const responseData = {
-                id: (_b2 = (_a17 = result.response) == null ? void 0 : _a17.id) != null ? _b2 : generateId3(),
+                id: (_b2 = (_a18 = result.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
                timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
                modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
              };
@@ -4028,6 +4093,9 @@ async function generateText({
        text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
        const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
        text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
+        currentReasoningDetails = asReasoningDetails(
+          currentModelResponse.reasoning
+        );
        sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
        if (stepType === "continue") {
          const lastMessage = responseMessages[responseMessages.length - 1];
@@ -4043,6 +4111,7 @@ async function generateText({
          responseMessages.push(
            ...toResponseMessages({
              text: text2,
+              reasoning: asReasoningDetails(currentModelResponse.reasoning),
              tools: tools != null ? tools : {},
              toolCalls: currentToolCalls,
              toolResults: currentToolResults,
@@ -4054,7 +4123,9 @@ async function generateText({
        const currentStepResult = {
          stepType,
          text: stepText,
-          reasoning: currentModelResponse.reasoning,
+          // TODO v5: rename reasoning to reasoningText (and use reasoning for composite array)
+          reasoning: asReasoningText(currentReasoningDetails),
+          reasoningDetails: currentReasoningDetails,
          sources: (_e = currentModelResponse.sources) != null ? _e : [],
          toolCalls: currentToolCalls,
          toolResults: currentToolResults,
@@ -4095,7 +4166,8 @@ async function generateText({
      );
      return new DefaultGenerateTextResult({
        text: text2,
-        reasoning: currentModelResponse.reasoning,
+        reasoning: asReasoningText(currentReasoningDetails),
+        reasoningDetails: currentReasoningDetails,
        sources,
        outputResolver: () => {
          if (output == null) {
@@ -4203,6 +4275,7 @@ var DefaultGenerateTextResult = class {
   constructor(options) {
     this.text = options.text;
     this.reasoning = options.reasoning;
+    this.reasoningDetails = options.reasoningDetails;
     this.toolCalls = options.toolCalls;
     this.toolResults = options.toolResults;
     this.finishReason = options.finishReason;
@@ -4221,6 +4294,15 @@ var DefaultGenerateTextResult = class {
     return this.outputResolver();
   }
 };
+function asReasoningDetails(reasoning) {
+  if (reasoning == null) {
+    return [];
+  }
+  if (typeof reasoning === "string") {
+    return [{ type: "text", text: reasoning }];
+  }
+  return reasoning;
+}

 // core/generate-text/output.ts
 var output_exports = {};
@@ -4236,7 +4318,7 @@ import {

 // errors/index.ts
 import {
-  AISDKError as AISDKError14,
+  AISDKError as AISDKError15,
   APICallError as APICallError2,
   EmptyResponseBodyError,
   InvalidPromptError as InvalidPromptError2,
@@ -4249,6 +4331,27 @@ import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";

+// errors/invalid-stream-part-error.ts
+import { AISDKError as AISDKError14 } from "@ai-sdk/provider";
+var name14 = "AI_InvalidStreamPartError";
+var marker14 = `vercel.ai.error.${name14}`;
+var symbol14 = Symbol.for(marker14);
+var _a14;
+var InvalidStreamPartError = class extends AISDKError14 {
+  constructor({
+    chunk,
+    message
+  }) {
+    super({ name: name14, message });
+    this[_a14] = true;
+    this.chunk = chunk;
+  }
+  static isInstance(error) {
+    return AISDKError14.hasMarker(error, marker14);
+  }
+};
+_a14 = symbol14;
+
 // core/generate-text/output.ts
 var text = () => ({
   type: "text",
@@ -4375,6 +4478,7 @@ function smoothStream({
 }

 // core/generate-text/stream-text.ts
+import { AISDKError as AISDKError16 } from "@ai-sdk/provider";
 import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
 import { formatDataStreamPart as formatDataStreamPart2 } from "@ai-sdk/ui-utils";

@@ -4508,6 +4612,8 @@ function runToolsTransformation({
     switch (chunkType) {
       case "text-delta":
       case "reasoning":
+      case "reasoning-signature":
+      case "redacted-reasoning":
       case "source":
       case "response-metadata":
       case "error": {
@@ -4819,13 +4925,14 @@ var DefaultStreamTextResult = class {
     this.providerMetadataPromise = new DelayedPromise();
     this.textPromise = new DelayedPromise();
     this.reasoningPromise = new DelayedPromise();
+    this.reasoningDetailsPromise = new DelayedPromise();
     this.sourcesPromise = new DelayedPromise();
     this.toolCallsPromise = new DelayedPromise();
     this.toolResultsPromise = new DelayedPromise();
     this.requestPromise = new DelayedPromise();
     this.responsePromise = new DelayedPromise();
     this.stepsPromise = new DelayedPromise();
-    var _a15;
+    var _a16;
     if (maxSteps < 1) {
       throw new InvalidArgumentError({
         parameter: "maxSteps",
@@ -4837,7 +4944,8 @@ var DefaultStreamTextResult = class {
     let recordedStepText = "";
     let recordedContinuationText = "";
     let recordedFullText = "";
-    let recordedReasoningText = void 0;
+    const stepReasoning = [];
+    let activeReasoningText = void 0;
     let recordedStepSources = [];
     const recordedSources = [];
     const recordedResponse = {
@@ -4869,7 +4977,25 @@ var DefaultStreamTextResult = class {
          recordedFullText += part.textDelta;
        }
        if (part.type === "reasoning") {
-          recordedReasoningText = (recordedReasoningText != null ? recordedReasoningText : "") + part.textDelta;
+          if (activeReasoningText == null) {
+            activeReasoningText = { type: "text", text: part.textDelta };
+            stepReasoning.push(activeReasoningText);
+          } else {
+            activeReasoningText.text += part.textDelta;
+          }
+        }
+        if (part.type === "reasoning-signature") {
+          if (activeReasoningText == null) {
+            throw new AISDKError16({
+              name: "InvalidStreamPart",
+              message: "reasoning-signature without reasoning"
+            });
+          }
+          activeReasoningText.signature = part.signature;
+          activeReasoningText = void 0;
+        }
+        if (part.type === "redacted-reasoning") {
+          stepReasoning.push({ type: "redacted", data: part.data });
        }
        if (part.type === "source") {
          recordedSources.push(part.source);
@@ -4884,6 +5010,7 @@ var DefaultStreamTextResult = class {
        if (part.type === "step-finish") {
          const stepMessages = toResponseMessages({
            text: recordedContinuationText,
+            reasoning: stepReasoning,
            tools: tools != null ? tools : {},
            toolCalls: recordedToolCalls,
            toolResults: recordedToolResults,
@@ -4907,7 +5034,8 @@ var DefaultStreamTextResult = class {
          const currentStepResult = {
            stepType,
            text: recordedStepText,
-            reasoning: recordedReasoningText,
+            reasoning: asReasoningText(stepReasoning),
+            reasoningDetails: stepReasoning,
            sources: recordedStepSources,
            toolCalls: recordedToolCalls,
            toolResults: recordedToolResults,
@@ -4948,7 +5076,7 @@ var DefaultStreamTextResult = class {
        }
      },
      async flush(controller) {
-        var _a16;
+        var _a17;
        try {
          if (recordedSteps.length === 0) {
            return;
@@ -4962,6 +5090,8 @@ var DefaultStreamTextResult = class {
          self.providerMetadataPromise.resolve(
            lastStep.experimental_providerMetadata
          );
+          self.reasoningPromise.resolve(lastStep.reasoning);
+          self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
          const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
          const usage = recordedUsage != null ? recordedUsage : {
            completionTokens: NaN,
@@ -4971,7 +5101,6 @@ var DefaultStreamTextResult = class {
          self.finishReasonPromise.resolve(finishReason);
          self.usagePromise.resolve(usage);
          self.textPromise.resolve(recordedFullText);
-          self.reasoningPromise.resolve(recordedReasoningText);
          self.sourcesPromise.resolve(recordedSources);
          self.stepsPromise.resolve(recordedSteps);
          await (onFinish == null ? void 0 : onFinish({
@@ -4980,10 +5109,11 @@ var DefaultStreamTextResult = class {
            usage,
            text: recordedFullText,
            reasoning: lastStep.reasoning,
+            reasoningDetails: lastStep.reasoningDetails,
            sources: lastStep.sources,
            toolCalls: lastStep.toolCalls,
            toolResults: lastStep.toolResults,
-            request: (_a16 = lastStep.request) != null ? _a16 : {},
+            request: (_a17 = lastStep.request) != null ? _a17 : {},
            response: lastStep.response,
            warnings: lastStep.warnings,
            providerMetadata: lastStep.providerMetadata,
@@ -4998,8 +5128,8 @@ var DefaultStreamTextResult = class {
              "ai.response.text": { output: () => recordedFullText },
              "ai.response.toolCalls": {
                output: () => {
-                  var _a17;
-                  return ((_a17 = lastStep.toolCalls) == null ? void 0 : _a17.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
+                  var _a18;
+                  return ((_a18 = lastStep.toolCalls) == null ? void 0 : _a18.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
                }
              },
              "ai.usage.promptTokens": usage.promptTokens,
@@ -5041,7 +5171,7 @@ var DefaultStreamTextResult = class {
    });
    const initialPrompt = standardizePrompt({
      prompt: {
-        system: (_a15 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a15 : system,
+        system: (_a16 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a16 : system,
        prompt,
        messages
      },
@@ -5075,7 +5205,7 @@ var DefaultStreamTextResult = class {
      hasLeadingWhitespace,
      messageId
    }) {
-      var _a16;
+      var _a17;
      const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
      const stepInputMessages = [
        ...initialPrompt.messages,
@@ -5088,7 +5218,7 @@ var DefaultStreamTextResult = class {
          messages: stepInputMessages
        },
        modelSupportsImageUrls: model.supportsImageUrls,
-        modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
+        modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
        // support 'this' context
      });
      const mode = {
@@ -5119,8 +5249,8 @@ var DefaultStreamTextResult = class {
            "ai.prompt.tools": {
              // convert the language model level tools:
              input: () => {
-                var _a17;
-                return (_a17 = mode.tools) == null ? void 0 : _a17.map((tool2) => JSON.stringify(tool2));
+                var _a18;
+                return (_a18 = mode.tools) == null ? void 0 : _a18.map((tool2) => JSON.stringify(tool2));
              }
            },
            "ai.prompt.toolChoice": {
@@ -5171,6 +5301,8 @@ var DefaultStreamTextResult = class {
      const stepRequest = request != null ? request : {};
      const stepToolCalls = [];
      const stepToolResults = [];
+      const stepReasoning2 = [];
+      let activeReasoningText2 = void 0;
      let stepFinishReason = "unknown";
      let stepUsage = {
        promptTokens: 0,
@@ -5180,7 +5312,6 @@ var DefaultStreamTextResult = class {
      let stepProviderMetadata;
      let stepFirstChunk = true;
      let stepText = "";
-      let stepReasoning = "";
      let fullStepText = stepType2 === "continue" ? previousStepText : "";
      let stepLogProbs;
      let stepResponse = {
@@ -5206,7 +5337,7 @@ var DefaultStreamTextResult = class {
      transformedStream.pipeThrough(
        new TransformStream({
          async transform(chunk, controller) {
-            var _a17, _b, _c;
+            var _a18, _b, _c;
            if (stepFirstChunk) {
              const msToFirstChunk = now2() - startTimestampMs;
              stepFirstChunk = false;
@@ -5254,11 +5385,35 @@ var DefaultStreamTextResult = class {
              }
              case "reasoning": {
                controller.enqueue(chunk);
-                stepReasoning += chunk.textDelta;
+                if (activeReasoningText2 == null) {
+                  activeReasoningText2 = {
+                    type: "text",
+                    text: chunk.textDelta
+                  };
+                  stepReasoning2.push(activeReasoningText2);
+                } else {
+                  activeReasoningText2.text += chunk.textDelta;
+                }
                break;
              }
-              case "source": {
+              case "reasoning-signature": {
                controller.enqueue(chunk);
+                if (activeReasoningText2 == null) {
+                  throw new InvalidStreamPartError({
+                    chunk,
+                    message: "reasoning-signature without reasoning"
+                  });
+                }
+                activeReasoningText2.signature = chunk.signature;
+                activeReasoningText2 = void 0;
+                break;
+              }
+              case "redacted-reasoning": {
+                controller.enqueue(chunk);
+                stepReasoning2.push({
+                  type: "redacted",
+                  data: chunk.data
+                });
                break;
              }
              case "tool-call": {
@@ -5273,7 +5428,7 @@ var DefaultStreamTextResult = class {
              }
              case "response-metadata": {
                stepResponse = {
-                  id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
+                  id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
                  timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
                  modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
                };
@@ -5292,6 +5447,7 @@ var DefaultStreamTextResult = class {
                });
                break;
              }
+              case "source":
              case "tool-call-streaming-start":
              case "tool-call-delta": {
                controller.enqueue(chunk);
@@ -5409,6 +5565,7 @@ var DefaultStreamTextResult = class {
            responseMessages.push(
              ...toResponseMessages({
                text: stepText,
+                reasoning: stepReasoning2,
                tools: tools != null ? tools : {},
                toolCalls: stepToolCalls,
                toolResults: stepToolResults,
@@ -5482,6 +5639,9 @@ var DefaultStreamTextResult = class {
   get reasoning() {
     return this.reasoningPromise.value;
   }
+  get reasoningDetails() {
+    return this.reasoningDetailsPromise.value;
+  }
   get sources() {
     return this.sourcesPromise.value;
   }
@@ -5582,6 +5742,26 @@ var DefaultStreamTextResult = class {
            }
            break;
          }
+          case "redacted-reasoning": {
+            if (sendReasoning) {
+              controller.enqueue(
+                formatDataStreamPart2("redacted_reasoning", {
+                  data: chunk.data
+                })
+              );
+            }
+            break;
+          }
+          case "reasoning-signature": {
+            if (sendReasoning) {
+              controller.enqueue(
+                formatDataStreamPart2("reasoning_signature", {
+                  signature: chunk.signature
+                })
+              );
+            }
+            break;
+          }
          case "source": {
            if (sendSources) {
              controller.enqueue(
@@ -5762,9 +5942,9 @@ var DefaultStreamTextResult = class {
     );
   }
   toTextStreamResponse(init) {
-    var _a15;
+    var _a16;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
-      status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+      status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
      headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
        contentType: "text/plain; charset=utf-8"
      })
@@ -5899,7 +6079,7 @@ var doWrap = ({
   modelId,
   providerId
 }) => {
-  var _a15;
+  var _a16;
   async function doTransform({
     params,
     type
@@ -5912,7 +6092,7 @@ var doWrap = ({
    modelId: modelId != null ? modelId : model.modelId,
    defaultObjectGenerationMode: model.defaultObjectGenerationMode,
    supportsImageUrls: model.supportsImageUrls,
-    supportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model),
+    supportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model),
    supportsStructuredOutputs: model.supportsStructuredOutputs,
    async doGenerate(params) {
      const transformedParams = await doTransform({ params, type: "generate" });
@@ -5948,7 +6128,7 @@ function appendResponseMessages({
   responseMessages,
   _internal: { currentDate = () => /* @__PURE__ */ new Date() } = {}
 }) {
-  var _a15, _b, _c, _d;
+  var _a16, _b, _c, _d;
   const clonedMessages = structuredClone(messages);
   for (const message of responseMessages) {
     const role = message.role;
@@ -5971,7 +6151,7 @@ function appendResponseMessages({
        const maxStep = extractMaxToolInvocationStep(
          lastMessage.toolInvocations
        );
-        (_a15 = lastMessage.parts) != null ? _a15 : lastMessage.parts = [];
+        (_a16 = lastMessage.parts) != null ? _a16 : lastMessage.parts = [];
        lastMessage.content = textContent;
        if (textContent.length > 0) {
          lastMessage.parts.push({
@@ -6089,11 +6269,11 @@ function customProvider({
 var experimental_customProvider = customProvider;

 // core/registry/no-such-provider-error.ts
-import { AISDKError as AISDKError15, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
-var name14 = "AI_NoSuchProviderError";
-var marker14 = `vercel.ai.error.${name14}`;
-var symbol14 = Symbol.for(marker14);
-var _a14;
+import { AISDKError as AISDKError17, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+var name15 = "AI_NoSuchProviderError";
+var marker15 = `vercel.ai.error.${name15}`;
+var symbol15 = Symbol.for(marker15);
+var _a15;
 var NoSuchProviderError = class extends NoSuchModelError3 {
   constructor({
     modelId,
@@ -6102,16 +6282,16 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
     availableProviders,
     message = `No such provider: ${providerId} (available providers: ${availableProviders.join()})`
   }) {
-    super({ errorName: name14, modelId, modelType, message });
-    this[_a14] = true;
+    super({ errorName: name15, modelId, modelType, message });
+    this[_a15] = true;
     this.providerId = providerId;
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return AISDKError15.hasMarker(error, marker14);
+    return AISDKError17.hasMarker(error, marker15);
   }
 };
-_a14 = symbol14;
+_a15 = symbol15;

 // core/registry/provider-registry.ts
 import { NoSuchModelError as NoSuchModelError4 } from "@ai-sdk/provider";
@@ -6156,19 +6336,19 @@ var DefaultProviderRegistry = class {
     return [id.slice(0, index), id.slice(index + 1)];
   }
   languageModel(id) {
-    var _a15, _b;
+    var _a16, _b;
     const [providerId, modelId] = this.splitId(id, "languageModel");
-    const model = (_b = (_a15 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a15, modelId);
+    const model = (_b = (_a16 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a16, modelId);
     if (model == null) {
       throw new NoSuchModelError4({ modelId: id, modelType: "languageModel" });
     }
     return model;
   }
   textEmbeddingModel(id) {
-    var _a15;
+    var _a16;
     const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
     const provider = this.getProvider(providerId);
-    const model = (_a15 = provider.textEmbeddingModel) == null ? void 0 : _a15.call(provider, modelId);
+    const model = (_a16 = provider.textEmbeddingModel) == null ? void 0 : _a16.call(provider, modelId);
     if (model == null) {
       throw new NoSuchModelError4({
         modelId: id,
@@ -6178,10 +6358,10 @@ var DefaultProviderRegistry = class {
     return model;
   }
   imageModel(id) {
-    var _a15;
+    var _a16;
     const [providerId, modelId] = this.splitId(id, "imageModel");
     const provider = this.getProvider(providerId);
-    const model = (_a15 = provider.imageModel) == null ? void 0 : _a15.call(provider, modelId);
+    const model = (_a16 = provider.imageModel) == null ? void 0 : _a16.call(provider, modelId);
     if (model == null) {
       throw new NoSuchModelError4({ modelId: id, modelType: "imageModel" });
     }
@@ -6242,8 +6422,8 @@ function simulateReadableStream({
   chunkDelayInMs = 0,
   _internal
 }) {
-  var _a15;
-  const delay2 = (_a15 = _internal == null ? void 0 : _internal.delay) != null ? _a15 : delayFunction;
+  var _a16;
+  const delay2 = (_a16 = _internal == null ? void 0 : _internal.delay) != null ? _a16 : delayFunction;
   let index = 0;
   return new ReadableStream({
     async pull(controller) {
@@ -6264,7 +6444,7 @@ import {
 function AssistantResponse({ threadId, messageId }, process2) {
   const stream = new ReadableStream({
     async start(controller) {
-      var _a15;
+      var _a16;
       const textEncoder = new TextEncoder();
       const sendMessage = (message) => {
         controller.enqueue(
@@ -6286,7 +6466,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
        );
      };
      const forwardStream = async (stream2) => {
-        var _a16, _b;
+        var _a17, _b;
        let result = void 0;
        for await (const value of stream2) {
          switch (value.event) {
@@ -6303,7 +6483,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
              break;
            }
            case "thread.message.delta": {
-              const content = (_a16 = value.data.delta.content) == null ? void 0 : _a16[0];
+              const content = (_a17 = value.data.delta.content) == null ? void 0 : _a17[0];
              if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
                controller.enqueue(
                  textEncoder.encode(
@@ -6337,7 +6517,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
          forwardStream
        });
      } catch (error) {
-        sendError((_a15 = error.message) != null ? _a15 : `${error}`);
+        sendError((_a16 = error.message) != null ? _a16 : `${error}`);
      } finally {
        controller.close();
      }
@@ -6398,7 +6578,7 @@ function toDataStreamInternal(stream, callbacks) {
   return stream.pipeThrough(
     new TransformStream({
       transform: async (value, controller) => {
-        var _a15;
+        var _a16;
         if (typeof value === "string") {
           controller.enqueue(value);
           return;
@@ -6406,7 +6586,7 @@ function toDataStreamInternal(stream, callbacks) {
        if ("event" in value) {
          if (value.event === "on_chat_model_stream") {
            forwardAIMessageChunk(
-              (_a15 = value.data) == null ? void 0 : _a15.chunk,
+              (_a16 = value.data) == null ? void 0 : _a16.chunk,
              controller
            );
          }
@@ -6429,7 +6609,7 @@ function toDataStream(stream, callbacks) {
   );
 }
 function toDataStreamResponse(stream, options) {
-  var _a15;
+  var _a16;
   const dataStream = toDataStreamInternal(
     stream,
     options == null ? void 0 : options.callbacks
@@ -6438,7 +6618,7 @@ function toDataStreamResponse(stream, options) {
   const init = options == null ? void 0 : options.init;
   const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
   return new Response(responseStream, {
-    status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+    status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
    statusText: init == null ? void 0 : init.statusText,
    headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
      contentType: "text/plain; charset=utf-8",
@@ -6493,14 +6673,14 @@ function toDataStream2(stream, callbacks) {
   );
 }
 function toDataStreamResponse2(stream, options = {}) {
-  var _a15;
+  var _a16;
   const { init, data, callbacks } = options;
   const dataStream = toDataStreamInternal2(stream, callbacks).pipeThrough(
     new TextEncoderStream()
   );
   const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
   return new Response(responseStream, {
-    status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+    status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
    statusText: init == null ? void 0 : init.statusText,
    headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
      contentType: "text/plain; charset=utf-8",
@@ -6592,7 +6772,7 @@ var StreamData = class {
   }
 };
 export {
-  AISDKError14 as AISDKError,
+  AISDKError15 as AISDKError,
   APICallError2 as APICallError,
   AssistantResponse,
   DownloadError,
@@ -6602,6 +6782,7 @@ export {
   InvalidMessageRoleError,
   InvalidPromptError2 as InvalidPromptError,
   InvalidResponseDataError,
+  InvalidStreamPartError,
   InvalidToolArgumentsError,
   JSONParseError2 as JSONParseError,
   langchain_adapter_exports as LangChainAdapter,
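Taken together, the consumer-visible surface of these changes can be sketched as follows. This is an assumption-based sketch rather than official documentation; model stands in for any provider model that returns reasoning content (a string or an array of reasoning details):

import { generateText } from "ai";

const result = await generateText({ model, prompt: "Why is the sky blue?" });

// Flattened text of all { type: "text" } reasoning details, or undefined
// when the model produced none (see asReasoningText in the diff).
console.log(result.reasoning);

// Structured details preserve redacted segments and signatures:
// { type: "text", text, signature? } or { type: "redacted", data }.
for (const detail of result.reasoningDetails) {
  if (detail.type === "text") {
    console.log("reasoning:", detail.text, detail.signature ?? "(unsigned)");
  } else {
    console.log("redacted reasoning:", detail.data.length, "characters");
  }
}

streamText exposes the same data asynchronously via result.reasoningDetails and passes reasoningDetails to its onFinish callback; malformed streams (a reasoning-signature chunk with no preceding reasoning) now reject with the newly exported InvalidStreamPartError.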