ai 4.1.44 → 4.1.46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -4,8 +4,8 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
- for (var name15 in all)
- __defProp(target, name15, { get: all[name15], enumerable: true });
+ for (var name16 in all)
+ __defProp(target, name16, { get: all[name16], enumerable: true });
 };
 var __copyProps = (to, from, except, desc) => {
 if (from && typeof from === "object" || typeof from === "function") {
@@ -20,27 +20,28 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // streams/index.ts
 var streams_exports = {};
 __export(streams_exports, {
- AISDKError: () => import_provider18.AISDKError,
- APICallError: () => import_provider18.APICallError,
+ AISDKError: () => import_provider19.AISDKError,
+ APICallError: () => import_provider19.APICallError,
 AssistantResponse: () => AssistantResponse,
 DownloadError: () => DownloadError,
- EmptyResponseBodyError: () => import_provider18.EmptyResponseBodyError,
+ EmptyResponseBodyError: () => import_provider19.EmptyResponseBodyError,
 InvalidArgumentError: () => InvalidArgumentError,
 InvalidDataContentError: () => InvalidDataContentError,
 InvalidMessageRoleError: () => InvalidMessageRoleError,
- InvalidPromptError: () => import_provider18.InvalidPromptError,
- InvalidResponseDataError: () => import_provider18.InvalidResponseDataError,
+ InvalidPromptError: () => import_provider19.InvalidPromptError,
+ InvalidResponseDataError: () => import_provider19.InvalidResponseDataError,
+ InvalidStreamPartError: () => InvalidStreamPartError,
 InvalidToolArgumentsError: () => InvalidToolArgumentsError,
- JSONParseError: () => import_provider18.JSONParseError,
+ JSONParseError: () => import_provider19.JSONParseError,
 LangChainAdapter: () => langchain_adapter_exports,
 LlamaIndexAdapter: () => llamaindex_adapter_exports,
- LoadAPIKeyError: () => import_provider18.LoadAPIKeyError,
+ LoadAPIKeyError: () => import_provider19.LoadAPIKeyError,
 MessageConversionError: () => MessageConversionError,
- NoContentGeneratedError: () => import_provider18.NoContentGeneratedError,
+ NoContentGeneratedError: () => import_provider19.NoContentGeneratedError,
 NoImageGeneratedError: () => NoImageGeneratedError,
 NoObjectGeneratedError: () => NoObjectGeneratedError,
 NoOutputSpecifiedError: () => NoOutputSpecifiedError,
- NoSuchModelError: () => import_provider18.NoSuchModelError,
+ NoSuchModelError: () => import_provider19.NoSuchModelError,
 NoSuchProviderError: () => NoSuchProviderError,
 NoSuchToolError: () => NoSuchToolError,
 Output: () => output_exports,
@@ -48,8 +49,8 @@ __export(streams_exports, {
 StreamData: () => StreamData,
 ToolCallRepairError: () => ToolCallRepairError,
 ToolExecutionError: () => ToolExecutionError,
- TypeValidationError: () => import_provider18.TypeValidationError,
- UnsupportedFunctionalityError: () => import_provider18.UnsupportedFunctionalityError,
+ TypeValidationError: () => import_provider19.TypeValidationError,
+ UnsupportedFunctionalityError: () => import_provider19.UnsupportedFunctionalityError,
 appendClientMessage: () => appendClientMessage,
 appendResponseMessages: () => appendResponseMessages,
 convertToCoreMessages: () => convertToCoreMessages,
@@ -126,6 +127,9 @@ function createDataStream({
 writeMessageAnnotation(annotation) {
 safeEnqueue((0, import_ui_utils.formatDataStreamPart)("message_annotations", [annotation]));
 },
+ writeSource(source) {
+ safeEnqueue((0, import_ui_utils.formatDataStreamPart)("source", source));
+ },
 merge(streamArg) {
 ongoingStreamPromises.push(
 (async () => {
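
Note on the `writeSource` addition above: `createDataStream` writers gain a `writeSource(source)` method that forwards a source object to the client as a `"source"` data stream part. A minimal server-side sketch follows; the field names inside `source` (`sourceType`, `id`, `url`) are assumptions based on the SDK's source parts and do not appear in this diff:

```ts
// Hypothetical usage sketch, assuming the AI SDK's URL-source shape.
import { createDataStream } from "ai";

const stream = createDataStream({
  execute(writer) {
    // Forward a citation/source to the client UI.
    writer.writeSource({
      sourceType: "url",          // assumed field
      id: "source-1",             // assumed field
      url: "https://example.com", // assumed field
    });
  },
});
```
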
@@ -429,7 +433,7 @@ function getBaseTelemetryAttributes({
 telemetry,
 headers
 }) {
- var _a15;
+ var _a16;
 return {
 "ai.model.provider": model.provider,
 "ai.model.id": model.modelId,
@@ -439,7 +443,7 @@ function getBaseTelemetryAttributes({
 return attributes;
 }, {}),
 // add metadata as attributes:
- ...Object.entries((_a15 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a15 : {}).reduce(
+ ...Object.entries((_a16 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a16 : {}).reduce(
 (attributes, [key, value]) => {
 attributes[`ai.telemetry.metadata.${key}`] = value;
 return attributes;
@@ -464,7 +468,7 @@ var noopTracer = {
 startSpan() {
 return noopSpan;
 },
- startActiveSpan(name15, arg1, arg2, arg3) {
+ startActiveSpan(name16, arg1, arg2, arg3) {
 if (typeof arg1 === "function") {
 return arg1(noopSpan);
 }
@@ -534,13 +538,13 @@ function getTracer({
 // core/telemetry/record-span.ts
 var import_api2 = require("@opentelemetry/api");
 function recordSpan({
- name: name15,
+ name: name16,
 tracer,
 attributes,
 fn,
 endWhenDone = true
 }) {
- return tracer.startActiveSpan(name15, { attributes }, async (span) => {
+ return tracer.startActiveSpan(name16, { attributes }, async (span) => {
 try {
 const result = await fn(span);
 if (endWhenDone) {
@@ -648,14 +652,14 @@ async function embed({
 }),
 tracer,
 fn: async (doEmbedSpan) => {
- var _a15;
+ var _a16;
 const modelResponse = await model.doEmbed({
 values: [value],
 abortSignal,
 headers
 });
 const embedding2 = modelResponse.embeddings[0];
- const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+ const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
 doEmbedSpan.setAttributes(
 selectTelemetryAttributes({
 telemetry,
@@ -765,14 +769,14 @@ async function embedMany({
 }),
 tracer,
 fn: async (doEmbedSpan) => {
- var _a15;
+ var _a16;
 const modelResponse = await model.doEmbed({
 values,
 abortSignal,
 headers
 });
 const embeddings3 = modelResponse.embeddings;
- const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+ const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
 doEmbedSpan.setAttributes(
 selectTelemetryAttributes({
 telemetry,
@@ -824,14 +828,14 @@ async function embedMany({
 }),
 tracer,
 fn: async (doEmbedSpan) => {
- var _a15;
+ var _a16;
 const modelResponse = await model.doEmbed({
 values: chunk,
 abortSignal,
 headers
 });
 const embeddings2 = modelResponse.embeddings;
- const usage2 = (_a15 = modelResponse.usage) != null ? _a15 : { tokens: NaN };
+ const usage2 = (_a16 = modelResponse.usage) != null ? _a16 : { tokens: NaN };
 doEmbedSpan.setAttributes(
 selectTelemetryAttributes({
 telemetry,
@@ -918,9 +922,9 @@ async function generateImage({
 currentDate: () => /* @__PURE__ */ new Date()
 }
 }) {
- var _a15;
+ var _a16;
 const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
- const maxImagesPerCall = (_a15 = model.maxImagesPerCall) != null ? _a15 : 1;
+ const maxImagesPerCall = (_a16 = model.maxImagesPerCall) != null ? _a16 : 1;
 const callCount = Math.ceil(n / maxImagesPerCall);
 const callImageCounts = Array.from({ length: callCount }, (_, i) => {
 if (i < callCount - 1) {
@@ -1053,7 +1057,7 @@ async function download({
 url,
 fetchImplementation = fetch
 }) {
- var _a15;
+ var _a16;
 const urlText = url.toString();
 try {
 const response = await fetchImplementation(urlText);
@@ -1066,7 +1070,7 @@
 }
 return {
 data: new Uint8Array(await response.arrayBuffer()),
- mimeType: (_a15 = response.headers.get("content-type")) != null ? _a15 : void 0
+ mimeType: (_a16 = response.headers.get("content-type")) != null ? _a16 : void 0
 };
 } catch (error) {
 if (DownloadError.isInstance(error)) {
@@ -1126,8 +1130,8 @@ var dataContentSchema = import_zod.z.union([
 import_zod.z.custom(
 // Buffer might not be available in some environments such as CloudFlare:
 (value) => {
- var _a15, _b;
- return (_b = (_a15 = globalThis.Buffer) == null ? void 0 : _a15.isBuffer(value)) != null ? _b : false;
+ var _a16, _b;
+ return (_b = (_a16 = globalThis.Buffer) == null ? void 0 : _a16.isBuffer(value)) != null ? _b : false;
 },
 { message: "Must be a Buffer" }
 )
@@ -1227,14 +1231,14 @@ async function convertToLanguageModelPrompt({
 ];
 }
 function convertToLanguageModelMessage(message, downloadedAssets) {
- var _a15, _b, _c, _d, _e, _f;
+ var _a16, _b, _c, _d, _e, _f;
 const role = message.role;
 switch (role) {
 case "system": {
 return {
 role: "system",
 content: message.content,
- providerMetadata: (_a15 = message.providerOptions) != null ? _a15 : message.experimental_providerMetadata
+ providerMetadata: (_a16 = message.providerOptions) != null ? _a16 : message.experimental_providerMetadata
 };
 }
 case "user": {
@@ -1278,7 +1282,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
 return {
 role: "tool",
 content: message.content.map((part) => {
- var _a16;
+ var _a17;
 return {
 type: "tool-result",
 toolCallId: part.toolCallId,
@@ -1286,7 +1290,7 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
 result: part.result,
 content: part.experimental_content,
 isError: part.isError,
- providerMetadata: (_a16 = part.providerOptions) != null ? _a16 : part.experimental_providerMetadata
+ providerMetadata: (_a17 = part.providerOptions) != null ? _a17 : part.experimental_providerMetadata
 };
 }),
 providerMetadata: (_f = message.providerOptions) != null ? _f : message.experimental_providerMetadata
@@ -1322,7 +1326,7 @@ async function downloadAssets(messages, downloadImplementation, modelSupportsIma
 );
 }
 function convertPartToLanguageModelPart(part, downloadedAssets) {
- var _a15;
+ var _a16;
 if (part.type === "text") {
 return {
 type: "text",
@@ -1375,7 +1379,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
 switch (type) {
 case "image": {
 if (normalizedData instanceof Uint8Array) {
- mimeType = (_a15 = detectImageMimeType(normalizedData)) != null ? _a15 : mimeType;
+ mimeType = (_a16 = detectImageMimeType(normalizedData)) != null ? _a16 : mimeType;
 }
 return {
 type: "image",
@@ -1498,7 +1502,7 @@ var import_zod7 = require("zod");

 // core/prompt/attachments-to-parts.ts
 function attachmentsToParts(attachments) {
- var _a15, _b, _c;
+ var _a16, _b, _c;
 const parts = [];
 for (const attachment of attachments) {
 let url;
@@ -1510,7 +1514,7 @@ function attachmentsToParts(attachments) {
 switch (url.protocol) {
 case "http:":
 case "https:": {
- if ((_a15 = attachment.contentType) == null ? void 0 : _a15.startsWith("image/")) {
+ if ((_a16 = attachment.contentType) == null ? void 0 : _a16.startsWith("image/")) {
 parts.push({ type: "image", image: url });
 } else {
 if (!attachment.contentType) {
@@ -1596,8 +1600,8 @@ _a8 = symbol8;

 // core/prompt/convert-to-core-messages.ts
 function convertToCoreMessages(messages, options) {
- var _a15, _b;
- const tools = (_a15 = options == null ? void 0 : options.tools) != null ? _a15 : {};
+ var _a16, _b;
+ const tools = (_a16 = options == null ? void 0 : options.tools) != null ? _a16 : {};
 const coreMessages = [];
 for (let i = 0; i < messages.length; i++) {
 const message = messages[i];
@@ -1624,24 +1628,52 @@ function convertToCoreMessages(messages, options) {
 case "assistant": {
 if (message.parts != null) {
 let processBlock2 = function() {
+ const content2 = [];
+ for (const part of block) {
+ switch (part.type) {
+ case "text":
+ content2.push({
+ type: "text",
+ text: part.text
+ });
+ break;
+ case "reasoning": {
+ for (const detail of part.details) {
+ switch (detail.type) {
+ case "text":
+ content2.push({
+ type: "reasoning",
+ text: detail.text,
+ signature: detail.signature
+ });
+ break;
+ case "redacted":
+ content2.push({
+ type: "redacted-reasoning",
+ data: detail.data
+ });
+ break;
+ }
+ }
+ break;
+ }
+ case "tool-invocation":
+ content2.push({
+ type: "tool-call",
+ toolCallId: part.toolInvocation.toolCallId,
+ toolName: part.toolInvocation.toolName,
+ args: part.toolInvocation.args
+ });
+ break;
+ default: {
+ const _exhaustiveCheck = part;
+ throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+ }
+ }
+ }
 coreMessages.push({
 role: "assistant",
- content: block.map((part) => {
- switch (part.type) {
- case "text":
- return {
- type: "text",
- text: part.text
- };
- default:
- return {
- type: "tool-call",
- toolCallId: part.toolInvocation.toolCallId,
- toolName: part.toolInvocation.toolName,
- args: part.toolInvocation.args
- };
- }
- })
+ content: content2
 });
 const stepInvocations = block.filter(
 (part) => part.type === "tool-invocation"
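
Note on the rewritten assistant-part handling above: the old `block.map` only distinguished `text` parts from tool invocations; `processBlock2` now also expands `reasoning` parts (whose `details` mix `text` and `redacted` entries) into `reasoning` and `redacted-reasoning` core content. A standalone sketch of that mapping, with input shapes inferred from this hunk rather than the SDK's exported types:

```ts
// Illustration only; types are inferred from the diff.
type ReasoningDetail =
  | { type: "text"; text: string; signature?: string }
  | { type: "redacted"; data: string };

type AssistantPart =
  | { type: "text"; text: string }
  | { type: "reasoning"; details: ReasoningDetail[] }
  | {
      type: "tool-invocation";
      toolInvocation: { toolCallId: string; toolName: string; args: unknown };
    };

// Maps one UI message part to its core-message content entries.
function mapPart(part: AssistantPart) {
  switch (part.type) {
    case "text":
      return [{ type: "text", text: part.text }];
    case "reasoning":
      return part.details.map((d) =>
        d.type === "text"
          ? { type: "reasoning", text: d.text, signature: d.signature }
          : { type: "redacted-reasoning", data: d.data }
      );
    case "tool-invocation":
      return [
        {
          type: "tool-call",
          toolCallId: part.toolInvocation.toolCallId,
          toolName: part.toolInvocation.toolName,
          args: part.toolInvocation.args,
        },
      ];
  }
}
```
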
@@ -1686,6 +1718,7 @@ function convertToCoreMessages(messages, options) {
 for (const part of message.parts) {
 switch (part.type) {
 case "reasoning":
+ block.push(part);
 break;
 case "text": {
 if (blockHasToolInvocations) {
@@ -1713,14 +1746,14 @@ function convertToCoreMessages(messages, options) {
 break;
 }
 const maxStep = toolInvocations.reduce((max, toolInvocation) => {
- var _a16;
- return Math.max(max, (_a16 = toolInvocation.step) != null ? _a16 : 0);
+ var _a17;
+ return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
 }, 0);
 for (let i2 = 0; i2 <= maxStep; i2++) {
 const stepInvocations = toolInvocations.filter(
 (toolInvocation) => {
- var _a16;
- return ((_a16 = toolInvocation.step) != null ? _a16 : 0) === i2;
+ var _a17;
+ return ((_a17 = toolInvocation.step) != null ? _a17 : 0) === i2;
 }
 );
 if (stepInvocations.length === 0) {
@@ -1883,6 +1916,18 @@ var filePartSchema = import_zod5.z.object({
 providerOptions: providerMetadataSchema.optional(),
 experimental_providerMetadata: providerMetadataSchema.optional()
 });
+ var reasoningPartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("reasoning"),
+ text: import_zod5.z.string(),
+ providerOptions: providerMetadataSchema.optional(),
+ experimental_providerMetadata: providerMetadataSchema.optional()
+ });
+ var redactedReasoningPartSchema = import_zod5.z.object({
+ type: import_zod5.z.literal("redacted-reasoning"),
+ data: import_zod5.z.string(),
+ providerOptions: providerMetadataSchema.optional(),
+ experimental_providerMetadata: providerMetadataSchema.optional()
+ });
 var toolCallPartSchema = import_zod5.z.object({
 type: import_zod5.z.literal("tool-call"),
 toolCallId: import_zod5.z.string(),
@@ -1922,7 +1967,14 @@ var coreAssistantMessageSchema = import_zod6.z.object({
 role: import_zod6.z.literal("assistant"),
 content: import_zod6.z.union([
 import_zod6.z.string(),
- import_zod6.z.array(import_zod6.z.union([textPartSchema, toolCallPartSchema]))
+ import_zod6.z.array(
+ import_zod6.z.union([
+ textPartSchema,
+ reasoningPartSchema,
+ redactedReasoningPartSchema,
+ toolCallPartSchema
+ ])
+ )
 ]),
 providerOptions: providerMetadataSchema.optional(),
 experimental_providerMetadata: providerMetadataSchema.optional()
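
Note on the schema changes above: `coreAssistantMessageSchema` now accepts reasoning parts in assistant content. An example message that should validate against the widened union (a sketch; the field names are taken directly from the schemas in these hunks):

```ts
// An assistant core message that the extended schema should now accept.
const assistantMessage = {
  role: "assistant",
  content: [
    { type: "reasoning", text: "Compare both options on price first." },
    { type: "redacted-reasoning", data: "opaque-provider-payload" },
    { type: "text", text: "Option A is cheaper." },
  ],
};
```
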
@@ -2134,7 +2186,7 @@ var arrayOutputStrategy = (schema) => {
 additionalProperties: false
 },
 validatePartialResult({ value, latestObject, isFirstDelta, isFinalDelta }) {
- var _a15;
+ var _a16;
 if (!(0, import_provider11.isJSONObject)(value) || !(0, import_provider11.isJSONArray)(value.elements)) {
 return {
 success: false,
@@ -2157,7 +2209,7 @@ var arrayOutputStrategy = (schema) => {
 }
 resultArray.push(result.value);
 }
- const publishedElementCount = (_a15 = latestObject == null ? void 0 : latestObject.length) != null ? _a15 : 0;
+ const publishedElementCount = (_a16 = latestObject == null ? void 0 : latestObject.length) != null ? _a16 : 0;
 let textDelta = "";
 if (isFirstDelta) {
 textDelta += "[";
@@ -2495,7 +2547,7 @@ async function generateObject({
 }),
 tracer,
 fn: async (span) => {
- var _a15, _b, _c, _d;
+ var _a16, _b, _c, _d;
 if (mode === "auto" || mode == null) {
 mode = model.defaultObjectGenerationMode;
 }
@@ -2524,7 +2576,7 @@ async function generateObject({
 const promptMessages = await convertToLanguageModelPrompt({
 prompt: standardizedPrompt,
 modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
+ modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
 // support 'this' context
 });
 const generateResult = await retry(
@@ -2558,7 +2610,7 @@ async function generateObject({
 }),
 tracer,
 fn: async (span2) => {
- var _a16, _b2, _c2, _d2, _e, _f;
+ var _a17, _b2, _c2, _d2, _e, _f;
 const result2 = await model.doGenerate({
 mode: {
 type: "object-json",
@@ -2574,7 +2626,7 @@ async function generateObject({
 headers
 });
 const responseData = {
- id: (_b2 = (_a16 = result2.response) == null ? void 0 : _a16.id) != null ? _b2 : generateId3(),
+ id: (_b2 = (_a17 = result2.response) == null ? void 0 : _a17.id) != null ? _b2 : generateId3(),
 timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
 modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
 };
@@ -2663,7 +2715,7 @@ async function generateObject({
 }),
 tracer,
 fn: async (span2) => {
- var _a16, _b2, _c2, _d2, _e, _f, _g, _h;
+ var _a17, _b2, _c2, _d2, _e, _f, _g, _h;
 const result2 = await model.doGenerate({
 mode: {
 type: "object-tool",
@@ -2681,7 +2733,7 @@ async function generateObject({
 abortSignal,
 headers
 });
- const objectText = (_b2 = (_a16 = result2.toolCalls) == null ? void 0 : _a16[0]) == null ? void 0 : _b2.args;
+ const objectText = (_b2 = (_a17 = result2.toolCalls) == null ? void 0 : _a17[0]) == null ? void 0 : _b2.args;
 const responseData = {
 id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
 timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
@@ -2828,9 +2880,9 @@ var DefaultGenerateObjectResult = class {
 this.logprobs = options.logprobs;
 }
 toJsonResponse(init) {
- var _a15;
+ var _a16;
 return new Response(JSON.stringify(this.object), {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
 headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
 contentType: "application/json; charset=utf-8"
 })
@@ -2865,17 +2917,17 @@ var DelayedPromise = class {
 return this.promise;
 }
 resolve(value) {
- var _a15;
+ var _a16;
 this.status = { type: "resolved", value };
 if (this.promise) {
- (_a15 = this._resolve) == null ? void 0 : _a15.call(this, value);
+ (_a16 = this._resolve) == null ? void 0 : _a16.call(this, value);
 }
 }
 reject(error) {
- var _a15;
+ var _a16;
 this.status = { type: "rejected", error };
 if (this.promise) {
- (_a15 = this._reject) == null ? void 0 : _a15.call(this, error);
+ (_a16 = this._reject) == null ? void 0 : _a16.call(this, error);
 }
 }
 };
@@ -2979,8 +3031,8 @@ function createStitchableStream() {

 // core/util/now.ts
 function now() {
- var _a15, _b;
- return (_b = (_a15 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a15.now()) != null ? _b : Date.now();
+ var _a16, _b;
+ return (_b = (_a16 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a16.now()) != null ? _b : Date.now();
 }

 // core/generate-object/stream-object.ts
@@ -3116,7 +3168,7 @@ var DefaultStreamObjectResult = class {
 tracer,
 endWhenDone: false,
 fn: async (rootSpan) => {
- var _a15, _b;
+ var _a16, _b;
 if (mode === "auto" || mode == null) {
 mode = model.defaultObjectGenerationMode;
 }
@@ -3147,7 +3199,7 @@ var DefaultStreamObjectResult = class {
 prompt: await convertToLanguageModelPrompt({
 prompt: standardizedPrompt,
 modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model)
+ modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
 // support 'this' context
 }),
 providerMetadata: providerOptions,
@@ -3285,7 +3337,7 @@ var DefaultStreamObjectResult = class {
 const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
 new TransformStream({
 async transform(chunk, controller) {
- var _a16, _b2, _c;
+ var _a17, _b2, _c;
 if (isFirstChunk) {
 const msToFirstChunk = now2() - startTimestampMs;
 isFirstChunk = false;
@@ -3331,7 +3383,7 @@ var DefaultStreamObjectResult = class {
 switch (chunk.type) {
 case "response-metadata": {
 response = {
- id: (_a16 = chunk.id) != null ? _a16 : response.id,
+ id: (_a17 = chunk.id) != null ? _a17 : response.id,
 timestamp: (_b2 = chunk.timestamp) != null ? _b2 : response.timestamp,
 modelId: (_c = chunk.modelId) != null ? _c : response.modelId
 };
@@ -3544,9 +3596,9 @@ var DefaultStreamObjectResult = class {
 });
 }
 toTextStreamResponse(init) {
- var _a15;
+ var _a16;
 return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
 headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
 contentType: "text/plain; charset=utf-8"
 })
@@ -3622,24 +3674,24 @@ function prepareToolsAndToolChoice({
 };
 }
 const filteredTools = activeTools != null ? Object.entries(tools).filter(
- ([name15]) => activeTools.includes(name15)
+ ([name16]) => activeTools.includes(name16)
 ) : Object.entries(tools);
 return {
- tools: filteredTools.map(([name15, tool2]) => {
+ tools: filteredTools.map(([name16, tool2]) => {
 const toolType = tool2.type;
 switch (toolType) {
 case void 0:
 case "function":
 return {
 type: "function",
- name: name15,
+ name: name16,
 description: tool2.description,
 parameters: (0, import_ui_utils4.asSchema)(tool2.parameters).jsonSchema
 };
 case "provider-defined":
 return {
 type: "provider-defined",
- name: name15,
+ name: name16,
 id: tool2.id,
 args: tool2.args
 };
@@ -3812,6 +3864,7 @@ async function doParseToolCall({
 // core/generate-text/to-response-messages.ts
 function toResponseMessages({
 text: text2 = "",
+ reasoning,
 tools,
 toolCalls,
 toolResults,
@@ -3821,7 +3874,13 @@ function toResponseMessages({
 const responseMessages = [];
 responseMessages.push({
 role: "assistant",
- content: [{ type: "text", text: text2 }, ...toolCalls],
+ content: [
+ ...reasoning.map(
+ (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
+ ),
+ { type: "text", text: text2 },
+ ...toolCalls
+ ],
 id: messageId
 });
 if (toolResults.length > 0) {
@@ -3850,6 +3909,12 @@ function toResponseMessages({
 return responseMessages;
 }

+ // core/generate-text/reasoning-detail.ts
+ function asReasoningText(reasoning) {
+ const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
+ return reasoningText.length > 0 ? reasoningText : void 0;
+ }
+
 // core/generate-text/generate-text.ts
 var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({
 prefix: "aitxt",
@@ -3885,7 +3950,7 @@ async function generateText({
 onStepFinish,
 ...settings
 }) {
- var _a15;
+ var _a16;
 if (maxSteps < 1) {
 throw new InvalidArgumentError({
 parameter: "maxSteps",
@@ -3902,7 +3967,7 @@ async function generateText({
 });
 const initialPrompt = standardizePrompt({
 prompt: {
- system: (_a15 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a15 : system,
+ system: (_a16 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a16 : system,
 prompt,
 messages
 },
@@ -3928,7 +3993,7 @@ async function generateText({
 }),
 tracer,
 fn: async (span) => {
- var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a17, _b, _c, _d, _e, _f, _g, _h, _i;
 const mode = {
 type: "regular",
 ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3937,6 +4002,7 @@ async function generateText({
 let currentModelResponse;
 let currentToolCalls = [];
 let currentToolResults = [];
+ let currentReasoningDetails = [];
 let stepCount = 0;
 const responseMessages = [];
 let text2 = "";
@@ -3961,7 +4027,7 @@ async function generateText({
 messages: stepInputMessages
 },
 modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
+ modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
 // support 'this' context
 });
 currentModelResponse = await retry(
@@ -3982,8 +4048,8 @@ async function generateText({
 "ai.prompt.tools": {
 // convert the language model level tools:
 input: () => {
- var _a17;
- return (_a17 = mode.tools) == null ? void 0 : _a17.map((tool2) => JSON.stringify(tool2));
+ var _a18;
+ return (_a18 = mode.tools) == null ? void 0 : _a18.map((tool2) => JSON.stringify(tool2));
 }
 },
 "ai.prompt.toolChoice": {
@@ -4003,7 +4069,7 @@ async function generateText({
 }),
 tracer,
 fn: async (span2) => {
- var _a17, _b2, _c2, _d2, _e2, _f2;
+ var _a18, _b2, _c2, _d2, _e2, _f2;
 const result = await model.doGenerate({
 mode,
 ...callSettings,
@@ -4015,7 +4081,7 @@ async function generateText({
 headers
 });
 const responseData = {
- id: (_b2 = (_a17 = result.response) == null ? void 0 : _a17.id) != null ? _b2 : generateId3(),
+ id: (_b2 = (_a18 = result.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
 timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
 modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
 };
@@ -4089,6 +4155,9 @@ async function generateText({
 text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
 const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
 text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
+ currentReasoningDetails = asReasoningDetails(
+ currentModelResponse.reasoning
+ );
 sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
 if (stepType === "continue") {
 const lastMessage = responseMessages[responseMessages.length - 1];
@@ -4104,6 +4173,7 @@ async function generateText({
 responseMessages.push(
 ...toResponseMessages({
 text: text2,
+ reasoning: asReasoningDetails(currentModelResponse.reasoning),
 tools: tools != null ? tools : {},
 toolCalls: currentToolCalls,
 toolResults: currentToolResults,
@@ -4115,7 +4185,9 @@ async function generateText({
 const currentStepResult = {
 stepType,
 text: stepText,
- reasoning: currentModelResponse.reasoning,
+ // TODO v5: rename reasoning to reasoningText (and use reasoning for composite array)
+ reasoning: asReasoningText(currentReasoningDetails),
+ reasoningDetails: currentReasoningDetails,
 sources: (_e = currentModelResponse.sources) != null ? _e : [],
 toolCalls: currentToolCalls,
 toolResults: currentToolResults,
@@ -4156,7 +4228,8 @@ async function generateText({
 );
 return new DefaultGenerateTextResult({
 text: text2,
- reasoning: currentModelResponse.reasoning,
+ reasoning: asReasoningText(currentReasoningDetails),
+ reasoningDetails: currentReasoningDetails,
 sources,
 outputResolver: () => {
 if (output == null) {
@@ -4264,6 +4337,7 @@ var DefaultGenerateTextResult = class {
 constructor(options) {
 this.text = options.text;
 this.reasoning = options.reasoning;
+ this.reasoningDetails = options.reasoningDetails;
 this.toolCalls = options.toolCalls;
 this.toolResults = options.toolResults;
 this.finishReason = options.finishReason;
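
Note on the result shape above: `generateText` results now expose both the concatenated `reasoning` string and the structured `reasoningDetails` array. A hedged usage sketch, where `model` is a placeholder for any reasoning-capable provider model:

```ts
import { generateText } from "ai";

const result = await generateText({ model, prompt: "Why is the sky blue?" });

console.log(result.reasoning); // string | undefined (text details only)
for (const detail of result.reasoningDetails ?? []) {
  if (detail.type === "text") console.log("reasoning:", detail.text);
  else console.log("redacted reasoning block");
}
```
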
@@ -4282,6 +4356,15 @@ var DefaultGenerateTextResult = class {
 return this.outputResolver();
 }
 };
+ function asReasoningDetails(reasoning) {
+ if (reasoning == null) {
+ return [];
+ }
+ if (typeof reasoning === "string") {
+ return [{ type: "text", text: reasoning }];
+ }
+ return reasoning;
+ }

 // core/generate-text/output.ts
 var output_exports = {};
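
Note on `asReasoningDetails` above: it normalizes the provider-level `reasoning` field, which may be absent, a plain string, or already an array of detail objects:

```ts
asReasoningDetails(undefined); // => []
asReasoningDetails("chain of thought"); // => [{ type: "text", text: "chain of thought" }]
asReasoningDetails([{ type: "redacted", data: "x" }]); // => input returned unchanged
```
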
@@ -4293,7 +4376,28 @@ var import_provider_utils10 = require("@ai-sdk/provider-utils");
 var import_ui_utils6 = require("@ai-sdk/ui-utils");

 // errors/index.ts
+ var import_provider19 = require("@ai-sdk/provider");
+
+ // errors/invalid-stream-part-error.ts
 var import_provider18 = require("@ai-sdk/provider");
+ var name14 = "AI_InvalidStreamPartError";
+ var marker14 = `vercel.ai.error.${name14}`;
+ var symbol14 = Symbol.for(marker14);
+ var _a14;
+ var InvalidStreamPartError = class extends import_provider18.AISDKError {
+ constructor({
+ chunk,
+ message
+ }) {
+ super({ name: name14, message });
+ this[_a14] = true;
+ this.chunk = chunk;
+ }
+ static isInstance(error) {
+ return import_provider18.AISDKError.hasMarker(error, marker14);
+ }
+ };
+ _a14 = symbol14;

 // core/generate-text/output.ts
 var text = () => ({
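
Note on the new error class above: `InvalidStreamPartError` follows the SDK's marker-symbol pattern, so it is detected with the static `isInstance` check rather than `instanceof` (which breaks across duplicated bundle copies). A sketch:

```ts
import { InvalidStreamPartError } from "ai";

try {
  // ... consume a stream that may emit a reasoning-signature
  // without a preceding reasoning part ...
} catch (error) {
  if (InvalidStreamPartError.isInstance(error)) {
    console.error("invalid stream part:", error.chunk, error.message);
  } else {
    throw error;
  }
}
```
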
@@ -4373,7 +4477,7 @@ var object = ({
 };

 // core/generate-text/smooth-stream.ts
- var import_provider19 = require("@ai-sdk/provider");
+ var import_provider20 = require("@ai-sdk/provider");
 var import_provider_utils11 = require("@ai-sdk/provider-utils");
 var CHUNKING_REGEXPS = {
 word: /\s*\S+\s+/m,
@@ -4386,7 +4490,7 @@ function smoothStream({
 } = {}) {
 const chunkingRegexp = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
 if (chunkingRegexp == null) {
- throw new import_provider19.InvalidArgumentError({
+ throw new import_provider20.InvalidArgumentError({
 argument: "chunking",
 message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
 });
@@ -4421,6 +4525,7 @@ function smoothStream({
 }

 // core/generate-text/stream-text.ts
+ var import_provider21 = require("@ai-sdk/provider");
 var import_provider_utils12 = require("@ai-sdk/provider-utils");
 var import_ui_utils8 = require("@ai-sdk/ui-utils");

@@ -4554,6 +4659,8 @@ function runToolsTransformation({
 switch (chunkType) {
 case "text-delta":
 case "reasoning":
+ case "reasoning-signature":
+ case "redacted-reasoning":
 case "source":
 case "response-metadata":
 case "error": {
@@ -4865,13 +4972,14 @@ var DefaultStreamTextResult = class {
 this.providerMetadataPromise = new DelayedPromise();
 this.textPromise = new DelayedPromise();
 this.reasoningPromise = new DelayedPromise();
+ this.reasoningDetailsPromise = new DelayedPromise();
 this.sourcesPromise = new DelayedPromise();
 this.toolCallsPromise = new DelayedPromise();
 this.toolResultsPromise = new DelayedPromise();
 this.requestPromise = new DelayedPromise();
 this.responsePromise = new DelayedPromise();
 this.stepsPromise = new DelayedPromise();
- var _a15;
+ var _a16;
 if (maxSteps < 1) {
 throw new InvalidArgumentError({
 parameter: "maxSteps",
@@ -4883,7 +4991,8 @@ var DefaultStreamTextResult = class {
 let recordedStepText = "";
 let recordedContinuationText = "";
 let recordedFullText = "";
- let recordedReasoningText = void 0;
+ const stepReasoning = [];
+ let activeReasoningText = void 0;
 let recordedStepSources = [];
 const recordedSources = [];
 const recordedResponse = {
@@ -4915,7 +5024,25 @@ var DefaultStreamTextResult = class {
 recordedFullText += part.textDelta;
 }
 if (part.type === "reasoning") {
- recordedReasoningText = (recordedReasoningText != null ? recordedReasoningText : "") + part.textDelta;
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.textDelta };
+ stepReasoning.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.textDelta;
+ }
+ }
+ if (part.type === "reasoning-signature") {
+ if (activeReasoningText == null) {
+ throw new import_provider21.AISDKError({
+ name: "InvalidStreamPart",
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ }
+ if (part.type === "redacted-reasoning") {
+ stepReasoning.push({ type: "redacted", data: part.data });
 }
 if (part.type === "source") {
 recordedSources.push(part.source);
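
Note on the accumulation logic above: `reasoning` deltas append to the currently open text detail, a `reasoning-signature` seals and closes it, and `redacted-reasoning` pushes a standalone block. A minimal standalone re-implementation of that state machine, for illustration only:

```ts
type ReasoningStreamPart =
  | { type: "reasoning"; textDelta: string }
  | { type: "reasoning-signature"; signature: string }
  | { type: "redacted-reasoning"; data: string };

type ReasoningDetail =
  | { type: "text"; text: string; signature?: string }
  | { type: "redacted"; data: string };

function accumulate(parts: ReasoningStreamPart[]): ReasoningDetail[] {
  const details: ReasoningDetail[] = [];
  let active: Extract<ReasoningDetail, { type: "text" }> | undefined;
  for (const part of parts) {
    if (part.type === "reasoning") {
      if (active == null) {
        active = { type: "text", text: part.textDelta };
        details.push(active);
      } else {
        active.text += part.textDelta;
      }
    } else if (part.type === "reasoning-signature") {
      // Mirrors the AISDKError thrown above when no reasoning is open.
      if (active == null) throw new Error("reasoning-signature without reasoning");
      active.signature = part.signature;
      active = undefined; // the signature closes the current text block
    } else {
      details.push({ type: "redacted", data: part.data });
    }
  }
  return details;
}

// accumulate([
//   { type: "reasoning", textDelta: "Let me th" },
//   { type: "reasoning", textDelta: "ink..." },
//   { type: "reasoning-signature", signature: "sig-abc" },
//   { type: "redacted-reasoning", data: "opaque" },
// ]) yields a signed text detail followed by a redacted detail.
```
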
@@ -4930,6 +5057,7 @@ var DefaultStreamTextResult = class {
 if (part.type === "step-finish") {
 const stepMessages = toResponseMessages({
 text: recordedContinuationText,
+ reasoning: stepReasoning,
 tools: tools != null ? tools : {},
 toolCalls: recordedToolCalls,
 toolResults: recordedToolResults,
@@ -4953,7 +5081,8 @@ var DefaultStreamTextResult = class {
 const currentStepResult = {
 stepType,
 text: recordedStepText,
- reasoning: recordedReasoningText,
+ reasoning: asReasoningText(stepReasoning),
+ reasoningDetails: stepReasoning,
 sources: recordedStepSources,
 toolCalls: recordedToolCalls,
 toolResults: recordedToolResults,
@@ -4994,7 +5123,7 @@ var DefaultStreamTextResult = class {
 }
 },
 async flush(controller) {
- var _a16;
+ var _a17;
 try {
 if (recordedSteps.length === 0) {
 return;
@@ -5008,6 +5137,8 @@ var DefaultStreamTextResult = class {
 self.providerMetadataPromise.resolve(
 lastStep.experimental_providerMetadata
 );
+ self.reasoningPromise.resolve(lastStep.reasoning);
+ self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
 const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
 const usage = recordedUsage != null ? recordedUsage : {
 completionTokens: NaN,
@@ -5017,7 +5148,6 @@ var DefaultStreamTextResult = class {
 self.finishReasonPromise.resolve(finishReason);
 self.usagePromise.resolve(usage);
 self.textPromise.resolve(recordedFullText);
- self.reasoningPromise.resolve(recordedReasoningText);
 self.sourcesPromise.resolve(recordedSources);
 self.stepsPromise.resolve(recordedSteps);
 await (onFinish == null ? void 0 : onFinish({
@@ -5026,10 +5156,11 @@ var DefaultStreamTextResult = class {
 usage,
 text: recordedFullText,
 reasoning: lastStep.reasoning,
+ reasoningDetails: lastStep.reasoningDetails,
 sources: lastStep.sources,
 toolCalls: lastStep.toolCalls,
 toolResults: lastStep.toolResults,
- request: (_a16 = lastStep.request) != null ? _a16 : {},
+ request: (_a17 = lastStep.request) != null ? _a17 : {},
 response: lastStep.response,
 warnings: lastStep.warnings,
 providerMetadata: lastStep.providerMetadata,
@@ -5044,8 +5175,8 @@ var DefaultStreamTextResult = class {
 "ai.response.text": { output: () => recordedFullText },
 "ai.response.toolCalls": {
 output: () => {
- var _a17;
- return ((_a17 = lastStep.toolCalls) == null ? void 0 : _a17.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
+ var _a18;
+ return ((_a18 = lastStep.toolCalls) == null ? void 0 : _a18.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
 }
 },
 "ai.usage.promptTokens": usage.promptTokens,
@@ -5087,7 +5218,7 @@ var DefaultStreamTextResult = class {
 });
 const initialPrompt = standardizePrompt({
 prompt: {
- system: (_a15 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a15 : system,
+ system: (_a16 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a16 : system,
 prompt,
 messages
 },
@@ -5121,7 +5252,7 @@ var DefaultStreamTextResult = class {
 hasLeadingWhitespace,
 messageId
 }) {
- var _a16;
+ var _a17;
 const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
 const stepInputMessages = [
 ...initialPrompt.messages,
@@ -5134,7 +5265,7 @@ var DefaultStreamTextResult = class {
 messages: stepInputMessages
 },
 modelSupportsImageUrls: model.supportsImageUrls,
- modelSupportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model)
+ modelSupportsUrl: (_a17 = model.supportsUrl) == null ? void 0 : _a17.bind(model)
 // support 'this' context
 });
 const mode = {
@@ -5165,8 +5296,8 @@ var DefaultStreamTextResult = class {
 "ai.prompt.tools": {
 // convert the language model level tools:
 input: () => {
- var _a17;
- return (_a17 = mode.tools) == null ? void 0 : _a17.map((tool2) => JSON.stringify(tool2));
+ var _a18;
+ return (_a18 = mode.tools) == null ? void 0 : _a18.map((tool2) => JSON.stringify(tool2));
 }
 },
 "ai.prompt.toolChoice": {
@@ -5217,6 +5348,8 @@ var DefaultStreamTextResult = class {
 const stepRequest = request != null ? request : {};
 const stepToolCalls = [];
 const stepToolResults = [];
+ const stepReasoning2 = [];
+ let activeReasoningText2 = void 0;
 let stepFinishReason = "unknown";
 let stepUsage = {
 promptTokens: 0,
@@ -5226,7 +5359,6 @@ var DefaultStreamTextResult = class {
 let stepProviderMetadata;
 let stepFirstChunk = true;
 let stepText = "";
- let stepReasoning = "";
 let fullStepText = stepType2 === "continue" ? previousStepText : "";
 let stepLogProbs;
 let stepResponse = {
@@ -5252,7 +5384,7 @@ var DefaultStreamTextResult = class {
 transformedStream.pipeThrough(
 new TransformStream({
 async transform(chunk, controller) {
- var _a17, _b, _c;
+ var _a18, _b, _c;
 if (stepFirstChunk) {
 const msToFirstChunk = now2() - startTimestampMs;
 stepFirstChunk = false;
@@ -5300,11 +5432,35 @@ var DefaultStreamTextResult = class {
 }
 case "reasoning": {
 controller.enqueue(chunk);
- stepReasoning += chunk.textDelta;
+ if (activeReasoningText2 == null) {
+ activeReasoningText2 = {
+ type: "text",
+ text: chunk.textDelta
+ };
+ stepReasoning2.push(activeReasoningText2);
+ } else {
+ activeReasoningText2.text += chunk.textDelta;
+ }
+ break;
+ }
+ case "reasoning-signature": {
+ controller.enqueue(chunk);
+ if (activeReasoningText2 == null) {
+ throw new InvalidStreamPartError({
+ chunk,
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText2.signature = chunk.signature;
+ activeReasoningText2 = void 0;
 break;
 }
- case "source": {
+ case "redacted-reasoning": {
 controller.enqueue(chunk);
+ stepReasoning2.push({
+ type: "redacted",
+ data: chunk.data
+ });
 break;
 }
 case "tool-call": {
@@ -5319,7 +5475,7 @@ var DefaultStreamTextResult = class {
 }
 case "response-metadata": {
 stepResponse = {
- id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
+ id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
 timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
 modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
 };
@@ -5338,6 +5494,7 @@ var DefaultStreamTextResult = class {
 });
 break;
 }
+ case "source":
 case "tool-call-streaming-start":
 case "tool-call-delta": {
 controller.enqueue(chunk);
@@ -5455,6 +5612,7 @@ var DefaultStreamTextResult = class {
 responseMessages.push(
 ...toResponseMessages({
 text: stepText,
+ reasoning: stepReasoning2,
 tools: tools != null ? tools : {},
 toolCalls: stepToolCalls,
 toolResults: stepToolResults,
@@ -5528,6 +5686,9 @@ var DefaultStreamTextResult = class {
 get reasoning() {
 return this.reasoningPromise.value;
 }
+ get reasoningDetails() {
+ return this.reasoningDetailsPromise.value;
+ }
 get sources() {
 return this.sourcesPromise.value;
 }
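
Note on the new getter above: streaming results resolve `reasoningDetails` (like `reasoning`) from the last step once the stream finishes. A usage sketch, where `model` is a placeholder:

```ts
import { streamText } from "ai";

const result = streamText({ model, prompt: "Plan a weekend trip." });

for await (const delta of result.textStream) process.stdout.write(delta);

console.log(await result.reasoning);        // concatenated reasoning text
console.log(await result.reasoningDetails); // structured detail array
```
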
@@ -5628,6 +5789,26 @@ var DefaultStreamTextResult = class {
 }
 break;
 }
+ case "redacted-reasoning": {
+ if (sendReasoning) {
+ controller.enqueue(
+ (0, import_ui_utils8.formatDataStreamPart)("redacted_reasoning", {
+ data: chunk.data
+ })
+ );
+ }
+ break;
+ }
+ case "reasoning-signature": {
+ if (sendReasoning) {
+ controller.enqueue(
+ (0, import_ui_utils8.formatDataStreamPart)("reasoning_signature", {
+ signature: chunk.signature
+ })
+ );
+ }
+ break;
+ }
 case "source": {
 if (sendSources) {
 controller.enqueue(
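
Note on the data stream protocol above: when reasoning forwarding is enabled, the stream now also carries `redacted_reasoning` (`{ data }`) and `reasoning_signature` (`{ signature }`) parts, encoded by `formatDataStreamPart`. A sketch of opting in on the server; the exact option plumbing is an assumption based on the `sendReasoning` flag that already guards reasoning deltas in this file:

```ts
// In a route handler, with `result` a streamText result (assumed option):
return result.toDataStreamResponse({ sendReasoning: true });
// The client-side data stream will then include redacted_reasoning and
// reasoning_signature parts alongside reasoning text deltas.
```
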
@@ -5808,9 +5989,9 @@ var DefaultStreamTextResult = class {
 );
 }
 toTextStreamResponse(init) {
- var _a15;
+ var _a16;
 return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
 headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
 contentType: "text/plain; charset=utf-8"
 })
@@ -5945,7 +6126,7 @@ var doWrap = ({
 modelId,
 providerId
 }) => {
- var _a15;
+ var _a16;
 async function doTransform({
 params,
 type
@@ -5958,7 +6139,7 @@ var doWrap = ({
 modelId: modelId != null ? modelId : model.modelId,
 defaultObjectGenerationMode: model.defaultObjectGenerationMode,
 supportsImageUrls: model.supportsImageUrls,
- supportsUrl: (_a15 = model.supportsUrl) == null ? void 0 : _a15.bind(model),
+ supportsUrl: (_a16 = model.supportsUrl) == null ? void 0 : _a16.bind(model),
 supportsStructuredOutputs: model.supportsStructuredOutputs,
 async doGenerate(params) {
 const transformedParams = await doTransform({ params, type: "generate" });
@@ -5992,7 +6173,7 @@ function appendResponseMessages({
 responseMessages,
 _internal: { currentDate = () => /* @__PURE__ */ new Date() } = {}
 }) {
- var _a15, _b, _c, _d;
+ var _a16, _b, _c, _d;
 const clonedMessages = structuredClone(messages);
 for (const message of responseMessages) {
 const role = message.role;
@@ -6015,7 +6196,7 @@ function appendResponseMessages({
 const maxStep = (0, import_ui_utils9.extractMaxToolInvocationStep)(
 lastMessage.toolInvocations
 );
- (_a15 = lastMessage.parts) != null ? _a15 : lastMessage.parts = [];
+ (_a16 = lastMessage.parts) != null ? _a16 : lastMessage.parts = [];
 lastMessage.content = textContent;
 if (textContent.length > 0) {
 lastMessage.parts.push({
@@ -6093,7 +6274,7 @@ function appendResponseMessages({
 }

 // core/registry/custom-provider.ts
- var import_provider20 = require("@ai-sdk/provider");
+ var import_provider22 = require("@ai-sdk/provider");
 function customProvider({
 languageModels,
 textEmbeddingModels,
@@ -6108,7 +6289,7 @@ function customProvider({
 if (fallbackProvider) {
 return fallbackProvider.languageModel(modelId);
 }
- throw new import_provider20.NoSuchModelError({ modelId, modelType: "languageModel" });
+ throw new import_provider22.NoSuchModelError({ modelId, modelType: "languageModel" });
 },
 textEmbeddingModel(modelId) {
 if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -6117,7 +6298,7 @@ function customProvider({
 if (fallbackProvider) {
 return fallbackProvider.textEmbeddingModel(modelId);
 }
- throw new import_provider20.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider22.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
 },
 imageModel(modelId) {
 if (imageModels != null && modelId in imageModels) {
@@ -6126,19 +6307,19 @@ function customProvider({
 if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
 return fallbackProvider.imageModel(modelId);
 }
- throw new import_provider20.NoSuchModelError({ modelId, modelType: "imageModel" });
+ throw new import_provider22.NoSuchModelError({ modelId, modelType: "imageModel" });
 }
 };
 }
 var experimental_customProvider = customProvider;

 // core/registry/no-such-provider-error.ts
- var import_provider21 = require("@ai-sdk/provider");
- var name14 = "AI_NoSuchProviderError";
- var marker14 = `vercel.ai.error.${name14}`;
- var symbol14 = Symbol.for(marker14);
- var _a14;
- var NoSuchProviderError = class extends import_provider21.NoSuchModelError {
+ var import_provider23 = require("@ai-sdk/provider");
+ var name15 = "AI_NoSuchProviderError";
+ var marker15 = `vercel.ai.error.${name15}`;
+ var symbol15 = Symbol.for(marker15);
+ var _a15;
+ var NoSuchProviderError = class extends import_provider23.NoSuchModelError {
 constructor({
 modelId,
 modelType,
@@ -6146,19 +6327,19 @@ var NoSuchProviderError = class extends import_provider21.NoSuchModelError {
 availableProviders,
 message = `No such provider: ${providerId} (available providers: ${availableProviders.join()})`
 }) {
- super({ errorName: name14, modelId, modelType, message });
- this[_a14] = true;
+ super({ errorName: name15, modelId, modelType, message });
+ this[_a15] = true;
 this.providerId = providerId;
 this.availableProviders = availableProviders;
 }
 static isInstance(error) {
- return import_provider21.AISDKError.hasMarker(error, marker14);
+ return import_provider23.AISDKError.hasMarker(error, marker15);
 }
 };
- _a14 = symbol14;
+ _a15 = symbol15;

 // core/registry/provider-registry.ts
- var import_provider22 = require("@ai-sdk/provider");
+ var import_provider24 = require("@ai-sdk/provider");
 function experimental_createProviderRegistry(providers) {
 const registry = new DefaultProviderRegistry();
 for (const [id, provider] of Object.entries(providers)) {
@@ -6191,7 +6372,7 @@ var DefaultProviderRegistry = class {
 splitId(id, modelType) {
 const index = id.indexOf(":");
 if (index === -1) {
- throw new import_provider22.NoSuchModelError({
+ throw new import_provider24.NoSuchModelError({
 modelId: id,
 modelType,
 message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -6200,21 +6381,21 @@ var DefaultProviderRegistry = class {
 return [id.slice(0, index), id.slice(index + 1)];
 }
 languageModel(id) {
- var _a15, _b;
+ var _a16, _b;
 const [providerId, modelId] = this.splitId(id, "languageModel");
- const model = (_b = (_a15 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a15, modelId);
+ const model = (_b = (_a16 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a16, modelId);
 if (model == null) {
- throw new import_provider22.NoSuchModelError({ modelId: id, modelType: "languageModel" });
+ throw new import_provider24.NoSuchModelError({ modelId: id, modelType: "languageModel" });
 }
 return model;
 }
 textEmbeddingModel(id) {
- var _a15;
+ var _a16;
 const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
 const provider = this.getProvider(providerId);
- const model = (_a15 = provider.textEmbeddingModel) == null ? void 0 : _a15.call(provider, modelId);
+ const model = (_a16 = provider.textEmbeddingModel) == null ? void 0 : _a16.call(provider, modelId);
 if (model == null) {
- throw new import_provider22.NoSuchModelError({
+ throw new import_provider24.NoSuchModelError({
 modelId: id,
 modelType: "textEmbeddingModel"
 });
@@ -6222,12 +6403,12 @@ var DefaultProviderRegistry = class {
 return model;
 }
 imageModel(id) {
- var _a15;
+ var _a16;
 const [providerId, modelId] = this.splitId(id, "imageModel");
 const provider = this.getProvider(providerId);
- const model = (_a15 = provider.imageModel) == null ? void 0 : _a15.call(provider, modelId);
+ const model = (_a16 = provider.imageModel) == null ? void 0 : _a16.call(provider, modelId);
 if (model == null) {
- throw new import_provider22.NoSuchModelError({ modelId: id, modelType: "imageModel" });
+ throw new import_provider24.NoSuchModelError({ modelId: id, modelType: "imageModel" });
 }
 return model;
 }
@@ -6286,8 +6467,8 @@ function simulateReadableStream({
 chunkDelayInMs = 0,
 _internal
 }) {
- var _a15;
- const delay2 = (_a15 = _internal == null ? void 0 : _internal.delay) != null ? _a15 : import_provider_utils13.delay;
+ var _a16;
+ const delay2 = (_a16 = _internal == null ? void 0 : _internal.delay) != null ? _a16 : import_provider_utils13.delay;
 let index = 0;
 return new ReadableStream({
 async pull(controller) {
@@ -6306,7 +6487,7 @@ var import_ui_utils11 = require("@ai-sdk/ui-utils");
 function AssistantResponse({ threadId, messageId }, process2) {
 const stream = new ReadableStream({
 async start(controller) {
- var _a15;
+ var _a16;
 const textEncoder = new TextEncoder();
 const sendMessage = (message) => {
 controller.enqueue(
@@ -6328,7 +6509,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
 );
 };
 const forwardStream = async (stream2) => {
- var _a16, _b;
+ var _a17, _b;
 let result = void 0;
 for await (const value of stream2) {
 switch (value.event) {
@@ -6345,7 +6526,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
 break;
 }
 case "thread.message.delta": {
- const content = (_a16 = value.data.delta.content) == null ? void 0 : _a16[0];
+ const content = (_a17 = value.data.delta.content) == null ? void 0 : _a17[0];
 if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
 controller.enqueue(
 textEncoder.encode(
@@ -6379,7 +6560,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
 forwardStream
 });
 } catch (error) {
- sendError((_a15 = error.message) != null ? _a15 : `${error}`);
+ sendError((_a16 = error.message) != null ? _a16 : `${error}`);
 } finally {
 controller.close();
 }
@@ -6440,7 +6621,7 @@ function toDataStreamInternal(stream, callbacks) {
 return stream.pipeThrough(
 new TransformStream({
 transform: async (value, controller) => {
- var _a15;
+ var _a16;
 if (typeof value === "string") {
 controller.enqueue(value);
 return;
@@ -6448,7 +6629,7 @@ function toDataStreamInternal(stream, callbacks) {
 if ("event" in value) {
 if (value.event === "on_chat_model_stream") {
 forwardAIMessageChunk(
- (_a15 = value.data) == null ? void 0 : _a15.chunk,
+ (_a16 = value.data) == null ? void 0 : _a16.chunk,
 controller
 );
 }
@@ -6471,7 +6652,7 @@ function toDataStream(stream, callbacks) {
 );
 }
 function toDataStreamResponse(stream, options) {
- var _a15;
+ var _a16;
 const dataStream = toDataStreamInternal(
 stream,
 options == null ? void 0 : options.callbacks
@@ -6480,7 +6661,7 @@ function toDataStreamResponse(stream, options) {
 const init = options == null ? void 0 : options.init;
 const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
 return new Response(responseStream, {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
 statusText: init == null ? void 0 : init.statusText,
 headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
 contentType: "text/plain; charset=utf-8",
@@ -6535,14 +6716,14 @@ function toDataStream2(stream, callbacks) {
 );
 }
 function toDataStreamResponse2(stream, options = {}) {
- var _a15;
+ var _a16;
 const { init, data, callbacks } = options;
 const dataStream = toDataStreamInternal2(stream, callbacks).pipeThrough(
 new TextEncoderStream()
 );
 const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
 return new Response(responseStream, {
- status: (_a15 = init == null ? void 0 : init.status) != null ? _a15 : 200,
+ status: (_a16 = init == null ? void 0 : init.status) != null ? _a16 : 200,
 statusText: init == null ? void 0 : init.statusText,
 headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
 contentType: "text/plain; charset=utf-8",
@@ -6645,6 +6826,7 @@ var StreamData = class {
 InvalidMessageRoleError,
 InvalidPromptError,
 InvalidResponseDataError,
+ InvalidStreamPartError,
 InvalidToolArgumentsError,
 JSONParseError,
 LangChainAdapter,