ai 5.0.0-canary.12 → 5.0.0-canary.14

This diff compares the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
package/dist/index.mjs CHANGED
@@ -347,15 +347,15 @@ function fixJson(input) {
  }

  // core/util/parse-partial-json.ts
- function parsePartialJson(jsonText) {
+ async function parsePartialJson(jsonText) {
  if (jsonText === void 0) {
  return { value: void 0, state: "undefined-input" };
  }
- let result = safeParseJSON({ text: jsonText });
+ let result = await safeParseJSON({ text: jsonText });
  if (result.success) {
  return { value: result.value, state: "successful-parse" };
  }
- result = safeParseJSON({ text: fixJson(jsonText) });
+ result = await safeParseJSON({ text: fixJson(jsonText) });
  if (result.success) {
  return { value: result.value, state: "repaired-parse" };
  }
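`parsePartialJson` is now async because the underlying `safeParseJSON` from `@ai-sdk/provider-utils` returns a promise in this canary line. A minimal sketch of the new contract for a caller (the wrapper function is hypothetical):

```js
// Hypothetical wrapper around the internal parsePartialJson helper shown above.
// It now resolves to { value, state }, where state is "undefined-input",
// "successful-parse", "repaired-parse", or "failed-parse".
async function readPartialObject(jsonText) {
  const { value, state } = await parsePartialJson(jsonText);
  return state === "failed-parse" ? undefined : value;
}
```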
@@ -536,10 +536,18 @@ var reasoningStreamPart = {
  code: "g",
  name: "reasoning",
  parse: (value) => {
- if (typeof value !== "string") {
- throw new Error('"reasoning" parts expect a string value.');
+ if (value == null || typeof value !== "object" || !("text" in value) || typeof value.text !== "string" || "providerMetadata" in value && typeof value.providerMetadata !== "object") {
+ throw new Error(
+ '"reasoning" parts expect an object with a "text" property.'
+ );
  }
- return { type: "reasoning", value };
+ return {
+ type: "reasoning",
+ value: {
+ text: value.text,
+ providerMetadata: value.providerMetadata
+ }
+ };
  }
  };
  var sourcePart = {
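The `g` (reasoning) stream part changes shape: a bare string becomes an object with a required `text` and an optional `providerMetadata`. Illustrative wire payloads (the values are made up):

```js
// canary.12:
//   g:"thinking..."
// canary.14:
//   g:{"text":"thinking...","providerMetadata":{"someProvider":{"signature":"..."}}}
// parse() now rejects bare strings and objects whose text field is not a string.
```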
@@ -555,33 +563,6 @@ var sourcePart = {
  };
  }
  };
- var redactedReasoningStreamPart = {
- code: "i",
- name: "redacted_reasoning",
- parse: (value) => {
- if (value == null || typeof value !== "object" || !("data" in value) || typeof value.data !== "string") {
- throw new Error(
- '"redacted_reasoning" parts expect an object with a "data" property.'
- );
- }
- return { type: "redacted_reasoning", value: { data: value.data } };
- }
- };
- var reasoningSignatureStreamPart = {
- code: "j",
- name: "reasoning_signature",
- parse: (value) => {
- if (value == null || typeof value !== "object" || !("signature" in value) || typeof value.signature !== "string") {
- throw new Error(
- '"reasoning_signature" parts expect an object with a "signature" property.'
- );
- }
- return {
- type: "reasoning_signature",
- value: { signature: value.signature }
- };
- }
- };
  var fileStreamPart = {
  code: "k",
  name: "file",
@@ -594,6 +575,16 @@ var fileStreamPart = {
  return { type: "file", value };
  }
  };
+ var reasoningPartFinishStreamPart = {
+ code: "l",
+ name: "reasoning_part_finish",
+ parse: () => {
+ return {
+ type: "reasoning_part_finish",
+ value: {}
+ };
+ }
+ };
  var dataStreamParts = [
  textStreamPart,
  dataStreamPart,
@@ -608,8 +599,7 @@ var dataStreamParts = [
  startStepStreamPart,
  reasoningStreamPart,
  sourcePart,
- redactedReasoningStreamPart,
- reasoningSignatureStreamPart,
+ reasoningPartFinishStreamPart,
  fileStreamPart
  ];
  var dataStreamPartsByCode = Object.fromEntries(
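With `redacted_reasoning` (`i`) and `reasoning_signature` (`j`) dropped from `dataStreamParts`, those codes no longer round-trip: `dataStreamPartsByCode` is built from this list, so parsing a stream that still contains `i:`/`j:` lines fails. The new boundary marker is the empty `reasoning_part_finish` (`l`) part. A sketch, assuming `formatDataStreamPart` stays exported:

```js
import { formatDataStreamPart } from "ai"; // assumption: still public in this canary

formatDataStreamPart("reasoning_part_finish", {}); // -> 'l:{}\n'
formatDataStreamPart("redacted_reasoning", { data: "x" }); // throws: the part type no longer exists
```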
@@ -658,8 +648,7 @@ async function processDataStream({
  stream,
  onTextPart,
  onReasoningPart,
- onReasoningSignaturePart,
- onRedactedReasoningPart,
+ onReasoningPartFinish,
  onSourcePart,
  onFilePart,
  onDataPart,
@@ -700,11 +689,8 @@ async function processDataStream({
  case "reasoning":
  await (onReasoningPart == null ? void 0 : onReasoningPart(value2));
  break;
- case "reasoning_signature":
- await (onReasoningSignaturePart == null ? void 0 : onReasoningSignaturePart(value2));
- break;
- case "redacted_reasoning":
- await (onRedactedReasoningPart == null ? void 0 : onRedactedReasoningPart(value2));
+ case "reasoning_part_finish":
+ await (onReasoningPartFinish == null ? void 0 : onReasoningPartFinish(value2));
  break;
  case "file":
  await (onFilePart == null ? void 0 : onFilePart(value2));
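For `processDataStream` consumers, the two removed callbacks collapse into `onReasoningPartFinish`. A migration sketch (stream source and logging are illustrative):

```js
await processDataStream({
  stream: response.body,
  onReasoningPart({ text }) {
    process.stdout.write(text); // the value is now { text, providerMetadata? }
  },
  onReasoningPartFinish() {
    // replaces onReasoningSignaturePart / onRedactedReasoningPart;
    // called once per completed reasoning part, payload is {}
  },
});
```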
@@ -777,7 +763,6 @@ async function processChatResponse({
  };
  let currentTextPart = void 0;
  let currentReasoningPart = void 0;
- let currentReasoningTextDetail = void 0;
  function updateToolInvocationPart(toolCallId, invocation) {
  const part = message.parts.find(
  (part2) => part2.type === "tool-invocation" && part2.toolInvocation.toolCallId === toolCallId
@@ -839,48 +824,25 @@ async function processChatResponse({
  },
  onReasoningPart(value) {
  var _a18;
- if (currentReasoningTextDetail == null) {
- currentReasoningTextDetail = { type: "text", text: value };
- if (currentReasoningPart != null) {
- currentReasoningPart.details.push(currentReasoningTextDetail);
- }
- } else {
- currentReasoningTextDetail.text += value;
- }
  if (currentReasoningPart == null) {
  currentReasoningPart = {
  type: "reasoning",
- reasoning: value,
- details: [currentReasoningTextDetail]
+ reasoning: value.text,
+ providerMetadata: value.providerMetadata
  };
  message.parts.push(currentReasoningPart);
  } else {
- currentReasoningPart.reasoning += value;
+ currentReasoningPart.reasoning += value.text;
+ currentReasoningPart.providerMetadata = value.providerMetadata;
  }
- message.reasoning = ((_a18 = message.reasoning) != null ? _a18 : "") + value;
+ message.reasoning = ((_a18 = message.reasoning) != null ? _a18 : "") + value.text;
  execUpdate();
  },
- onReasoningSignaturePart(value) {
- if (currentReasoningTextDetail != null) {
- currentReasoningTextDetail.signature = value.signature;
+ onReasoningPartFinish(value) {
+ if (currentReasoningPart != null) {
+ currentReasoningPart = void 0;
  }
  },
- onRedactedReasoningPart(value) {
- if (currentReasoningPart == null) {
- currentReasoningPart = {
- type: "reasoning",
- reasoning: "",
- details: []
- };
- message.parts.push(currentReasoningPart);
- }
- currentReasoningPart.details.push({
- type: "redacted",
- data: value.data
- });
- currentReasoningTextDetail = void 0;
- execUpdate();
- },
  onFilePart(value) {
  message.parts.push({
  type: "file",
@@ -917,10 +879,12 @@ async function processChatResponse({
  updateToolInvocationPart(value.toolCallId, invocation);
  execUpdate();
  },
- onToolCallDeltaPart(value) {
+ async onToolCallDeltaPart(value) {
  const partialToolCall = partialToolCalls[value.toolCallId];
  partialToolCall.text += value.argsTextDelta;
- const { value: partialArgs } = parsePartialJson(partialToolCall.text);
+ const { value: partialArgs } = await parsePartialJson(
+ partialToolCall.text
+ );
  const invocation = {
  state: "partial-call",
  step: partialToolCall.step,
@@ -1001,7 +965,6 @@ async function processChatResponse({
  step += 1;
  currentTextPart = value.isContinued ? currentTextPart : void 0;
  currentReasoningPart = void 0;
- currentReasoningTextDetail = void 0;
  },
  onStartStepPart(value) {
  if (!replaceLastMessage) {
@@ -1298,8 +1261,7 @@ function getMessageParts(message) {
  ...message.reasoning ? [
  {
  type: "reasoning",
- reasoning: message.reasoning,
- details: [{ type: "text", text: message.reasoning }]
+ reasoning: message.reasoning
  }
  ] : [],
  ...message.content ? [{ type: "text", text: message.content }] : []
@@ -2141,8 +2103,8 @@ async function embedMany({
  }),
  tracer,
  fn: async (span) => {
- const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
- if (maxEmbeddingsPerCall == null) {
+ const maxEmbeddingsPerCall = await model.maxEmbeddingsPerCall;
+ if (maxEmbeddingsPerCall == null || maxEmbeddingsPerCall === Infinity) {
  const { embeddings: embeddings2, usage, response } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
@@ -2884,14 +2846,6 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  return {
  type: "reasoning",
  text: part.text,
- signature: part.signature,
- providerOptions
- };
- }
- case "redacted-reasoning": {
- return {
- type: "redacted-reasoning",
- data: part.data,
  providerOptions
  };
  }
@@ -3285,23 +3239,11 @@ function convertToCoreMessages(messages, options) {
  break;
  }
  case "reasoning": {
- for (const detail of part.details) {
- switch (detail.type) {
- case "text":
- content2.push({
- type: "reasoning",
- text: detail.text,
- signature: detail.signature
- });
- break;
- case "redacted":
- content2.push({
- type: "redacted-reasoning",
- data: detail.data
- });
- break;
- }
- }
+ content2.push({
+ type: "reasoning",
+ text: part.reasoning,
+ providerOptions: part.providerMetadata
+ });
  break;
  }
  case "tool-invocation":
@@ -3571,11 +3513,6 @@ var reasoningPartSchema = z5.object({
  text: z5.string(),
  providerOptions: providerMetadataSchema.optional()
  });
- var redactedReasoningPartSchema = z5.object({
- type: z5.literal("redacted-reasoning"),
- data: z5.string(),
- providerOptions: providerMetadataSchema.optional()
- });
  var toolCallPartSchema = z5.object({
  type: z5.literal("tool-call"),
  toolCallId: z5.string(),
@@ -3616,7 +3553,6 @@ var coreAssistantMessageSchema = z6.object({
  textPartSchema,
  filePartSchema,
  reasoningPartSchema,
- redactedReasoningPartSchema,
  toolCallPartSchema
  ])
  )
@@ -3636,7 +3572,7 @@ var coreMessageSchema = z6.union([
  ]);

  // core/prompt/standardize-prompt.ts
- function standardizePrompt({
+ async function standardizePrompt({
  prompt,
  tools
  }) {
@@ -3666,7 +3602,6 @@ function standardizePrompt({
  });
  }
  return {
- type: "prompt",
  system: prompt.system,
  messages: [
  {
@@ -3693,7 +3628,7 @@ function standardizePrompt({
  message: "messages must not be empty"
  });
  }
- const validationResult = safeValidateTypes({
+ const validationResult = await safeValidateTypes({
  value: messages,
  schema: z7.array(coreMessageSchema)
  });
@@ -3705,7 +3640,6 @@ function standardizePrompt({
  });
  }
  return {
- type: "messages",
  messages,
  system: prompt.system
  };
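`standardizePrompt` becomes async (its message validation now awaits `safeValidateTypes`) and its result loses the `type: "prompt" | "messages"` discriminator; the hunks below remove the matching `inputFormat` call option and `ai.prompt.format` telemetry attribute. The internal call shape changes accordingly:

```js
// Before: const standardizedPrompt = standardizePrompt({ prompt, tools });
//         standardizedPrompt.type; // "prompt" | "messages"
// After:
const standardizedPrompt = await standardizePrompt({
  prompt: { system, prompt, messages },
  tools: undefined,
});
// standardizedPrompt is now just { system, messages }
```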
@@ -3760,10 +3694,10 @@ function createAsyncIterableStream(source) {
  var noSchemaOutputStrategy = {
  type: "no-schema",
  jsonSchema: void 0,
- validatePartialResult({ value, textDelta }) {
+ async validatePartialResult({ value, textDelta }) {
  return { success: true, value: { partial: value, textDelta } };
  },
- validateFinalResult(value, context) {
+ async validateFinalResult(value, context) {
  return value === void 0 ? {
  success: false,
  error: new NoObjectGeneratedError({
@@ -3784,7 +3718,7 @@ var noSchemaOutputStrategy = {
  var objectOutputStrategy = (schema) => ({
  type: "object",
  jsonSchema: schema.jsonSchema,
- validatePartialResult({ value, textDelta }) {
+ async validatePartialResult({ value, textDelta }) {
  return {
  success: true,
  value: {
@@ -3794,7 +3728,7 @@ var objectOutputStrategy = (schema) => ({
  }
  };
  },
- validateFinalResult(value) {
+ async validateFinalResult(value) {
  return safeValidateTypes2({ value, schema });
  },
  createElementStream() {
@@ -3819,7 +3753,12 @@ var arrayOutputStrategy = (schema) => {
  required: ["elements"],
  additionalProperties: false
  },
- validatePartialResult({ value, latestObject, isFirstDelta, isFinalDelta }) {
+ async validatePartialResult({
+ value,
+ latestObject,
+ isFirstDelta,
+ isFinalDelta
+ }) {
  var _a17;
  if (!isJSONObject(value) || !isJSONArray(value.elements)) {
  return {
@@ -3834,7 +3773,7 @@ var arrayOutputStrategy = (schema) => {
  const resultArray = [];
  for (let i = 0; i < inputArray.length; i++) {
  const element = inputArray[i];
- const result = safeValidateTypes2({ value: element, schema });
+ const result = await safeValidateTypes2({ value: element, schema });
  if (i === inputArray.length - 1 && !isFinalDelta) {
  continue;
  }
@@ -3863,7 +3802,7 @@ var arrayOutputStrategy = (schema) => {
  }
  };
  },
- validateFinalResult(value) {
+ async validateFinalResult(value) {
  if (!isJSONObject(value) || !isJSONArray(value.elements)) {
  return {
  success: false,
@@ -3875,7 +3814,7 @@ var arrayOutputStrategy = (schema) => {
  }
  const inputArray = value.elements;
  for (const element of inputArray) {
- const result = safeValidateTypes2({ value: element, schema });
+ const result = await safeValidateTypes2({ value: element, schema });
  if (!result.success) {
  return result;
  }
@@ -3929,7 +3868,7 @@ var enumOutputStrategy = (enumValues) => {
  required: ["result"],
  additionalProperties: false
  },
- validateFinalResult(value) {
+ async validateFinalResult(value) {
  if (!isJSONObject(value) || typeof value.result !== "string") {
  return {
  success: false,
@@ -4101,29 +4040,31 @@ function validateObjectGenerationInput({

  // core/generate-object/generate-object.ts
  var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
- async function generateObject({
- model,
- enum: enumValues,
- // rename bc enum is reserved by typescript
- schema: inputSchema,
- schemaName,
- schemaDescription,
- output = "object",
- system,
- prompt,
- messages,
- maxRetries: maxRetriesArg,
- abortSignal,
- headers,
- experimental_repairText: repairText,
- experimental_telemetry: telemetry,
- providerOptions,
- _internal: {
- generateId: generateId3 = originalGenerateId,
- currentDate = () => /* @__PURE__ */ new Date()
- } = {},
- ...settings
- }) {
+ async function generateObject(options) {
+ const {
+ model,
+ output = "object",
+ system,
+ prompt,
+ messages,
+ maxRetries: maxRetriesArg,
+ abortSignal,
+ headers,
+ experimental_repairText: repairText,
+ experimental_telemetry: telemetry,
+ providerOptions,
+ _internal: {
+ generateId: generateId3 = originalGenerateId,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
+ ...settings
+ } = options;
+ const enumValues = "enum" in options ? options.enum : void 0;
+ const {
+ schema: inputSchema,
+ schemaDescription,
+ schemaName
+ } = "schema" in options ? options : {};
  validateObjectGenerationInput({
  output,
  schema: inputSchema,
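All output-strategy validators become async, and `generateObject` now takes a single `options` object, pulling `enum` and `schema` out behind `in` checks instead of destructuring them unconditionally. Public call sites are unchanged; both variants still look like this (schema and prompts illustrative):

```js
const { object } = await generateObject({
  model,
  schema: z.object({ name: z.string() }),
  prompt: "Generate a fake user.",
});

const { object: label } = await generateObject({
  model,
  output: "enum",
  enum: ["bug", "feature", "question"],
  prompt: "Classify this issue.",
});
```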
@@ -4175,7 +4116,7 @@ async function generateObject({
  let response;
  let request;
  let resultProviderMetadata;
- const standardizedPrompt = standardizePrompt({
+ const standardizedPrompt = await standardizePrompt({
  prompt: { system, prompt, messages },
  tools: void 0
  });
@@ -4194,9 +4135,6 @@ async function generateObject({
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => standardizedPrompt.type
- },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
@@ -4222,7 +4160,6 @@ async function generateObject({
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
  prompt: promptMessages,
  providerOptions,
  abortSignal,
@@ -4276,8 +4213,8 @@ async function generateObject({
  resultProviderMetadata = generateResult.providerMetadata;
  request = (_a17 = generateResult.request) != null ? _a17 : {};
  response = generateResult.responseData;
- function processResult(result2) {
- const parseResult = safeParseJSON2({ text: result2 });
+ async function processResult(result2) {
+ const parseResult = await safeParseJSON2({ text: result2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -4288,7 +4225,7 @@ async function generateObject({
  finishReason
  });
  }
- const validationResult = outputStrategy.validateFinalResult(
+ const validationResult = await outputStrategy.validateFinalResult(
  parseResult.value,
  {
  text: result2,
@@ -4310,7 +4247,7 @@ async function generateObject({
  }
  let object2;
  try {
- object2 = processResult(result);
+ object2 = await processResult(result);
  } catch (error) {
  if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError.isInstance(error.cause) || TypeValidationError2.isInstance(error.cause))) {
  const repairedText = await repairText({
@@ -4320,7 +4257,7 @@ async function generateObject({
  if (repairedText === null) {
  throw error;
  }
- object2 = processResult(repairedText);
+ object2 = await processResult(repairedText);
  } else {
  throw error;
  }
@@ -4518,29 +4455,32 @@ function now() {

  // core/generate-object/stream-object.ts
  var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
- function streamObject({
- model,
- schema: inputSchema,
- schemaName,
- schemaDescription,
- output = "object",
- system,
- prompt,
- messages,
- maxRetries,
- abortSignal,
- headers,
- experimental_telemetry: telemetry,
- providerOptions,
- onError,
- onFinish,
- _internal: {
- generateId: generateId3 = originalGenerateId2,
- currentDate = () => /* @__PURE__ */ new Date(),
- now: now2 = now
- } = {},
- ...settings
- }) {
+ function streamObject(options) {
+ const {
+ model,
+ output = "object",
+ system,
+ prompt,
+ messages,
+ maxRetries,
+ abortSignal,
+ headers,
+ experimental_telemetry: telemetry,
+ providerOptions,
+ onError,
+ onFinish,
+ _internal: {
+ generateId: generateId3 = originalGenerateId2,
+ currentDate = () => /* @__PURE__ */ new Date(),
+ now: now2 = now
+ } = {},
+ ...settings
+ } = options;
+ const {
+ schema: inputSchema,
+ schemaDescription,
+ schemaName
+ } = "schema" in options ? options : {};
  validateObjectGenerationInput({
  output,
  schema: inputSchema,
@@ -4641,7 +4581,7 @@ var DefaultStreamObjectResult = class {
  tracer,
  endWhenDone: false,
  fn: async (rootSpan) => {
- const standardizedPrompt = standardizePrompt({
+ const standardizedPrompt = await standardizePrompt({
  prompt: { system, prompt, messages },
  tools: void 0
  });
@@ -4653,7 +4593,6 @@ var DefaultStreamObjectResult = class {
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
  prompt: await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  supportedUrls: await model.getSupportedUrls()
@@ -4691,9 +4630,6 @@ var DefaultStreamObjectResult = class {
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => callOptions.inputFormat
- },
  "ai.prompt.messages": {
  input: () => JSON.stringify(callOptions.prompt)
  },
@@ -4756,9 +4692,9 @@ var DefaultStreamObjectResult = class {
  if (typeof chunk === "string") {
  accumulatedText += chunk;
  textDelta += chunk;
- const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
+ const { value: currentObjectJson, state: parseState } = await parsePartialJson(accumulatedText);
  if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
- const validationResult = outputStrategy.validatePartialResult({
+ const validationResult = await outputStrategy.validatePartialResult({
  value: currentObjectJson,
  textDelta,
  latestObject,
@@ -4812,7 +4748,7 @@ var DefaultStreamObjectResult = class {
  ...fullResponse,
  headers: response == null ? void 0 : response.headers
  });
- const validationResult = outputStrategy.validateFinalResult(
+ const validationResult = await outputStrategy.validateFinalResult(
  latestObjectJson,
  {
  text: accumulatedText,
@@ -5250,7 +5186,7 @@ async function doParseToolCall({
  });
  }
  const schema = asSchema(tool2.parameters);
- const parseResult = toolCall.args.trim() === "" ? safeValidateTypes3({ value: {}, schema }) : safeParseJSON3({ text: toolCall.args, schema });
+ const parseResult = toolCall.args.trim() === "" ? await safeValidateTypes3({ value: {}, schema }) : await safeParseJSON3({ text: toolCall.args, schema });
  if (parseResult.success === false) {
  throw new InvalidToolArgumentsError({
  toolName,
@@ -5267,10 +5203,17 @@ async function doParseToolCall({
  }

  // core/generate-text/reasoning.ts
- function asReasoningText(reasoning) {
- const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
+ function asReasoningText(reasoningParts) {
+ const reasoningText = reasoningParts.map((part) => part.text).join("");
  return reasoningText.length > 0 ? reasoningText : void 0;
  }
+ function convertReasoningContentToParts(content) {
+ return content.filter((part) => part.type === "reasoning").map((part) => ({
+ type: "reasoning",
+ text: part.text,
+ providerOptions: part.providerMetadata
+ }));
+ }

  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
@@ -5285,12 +5228,8 @@ function toResponseMessages({
  }) {
  const responseMessages = [];
  const content = [];
- if (reasoning.length > 0) {
- content.push(
- ...reasoning.map(
- (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
- )
- );
+ for (const part of reasoning) {
+ content.push(part);
  }
  if (files.length > 0) {
  content.push(
@@ -5366,6 +5305,7 @@ async function generateText({
  experimental_telemetry: telemetry,
  providerOptions,
  experimental_activeTools: activeTools,
+ experimental_prepareStep: prepareStep,
  experimental_repairToolCall: repairToolCall,
  _internal: {
  generateId: generateId3 = originalGenerateId3,
@@ -5389,7 +5329,7 @@ async function generateText({
  headers,
  settings: { ...callSettings, maxRetries }
  });
- const initialPrompt = standardizePrompt({
+ const initialPrompt = await standardizePrompt({
  prompt: { system, prompt, messages },
  tools
  });
@@ -5404,6 +5344,9 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": model.provider,
+ "ai.model.id": model.modelId,
  // specific settings that only make sense on the outer level:
  "ai.prompt": {
  input: () => JSON.stringify({ system, prompt, messages })
@@ -5413,14 +5356,12 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a17, _b, _c;
- const toolsAndToolChoice = {
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
+ var _a17, _b, _c, _d, _e, _f;
+ const callSettings2 = prepareCallSettings(settings);
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
- let currentReasoningDetails = [];
+ let currentReasoning = [];
  let stepCount = 0;
  const responseMessages = [];
  let text2 = "";
@@ -5433,19 +5374,29 @@ async function generateText({
  };
  let stepType = "initial";
  do {
- const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
  const stepInputMessages = [
  ...initialPrompt.messages,
  ...responseMessages
  ];
+ const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+ model,
+ steps,
+ maxSteps,
+ stepNumber: stepCount
+ }));
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
- type: promptFormat,
  system: initialPrompt.system,
  messages: stepInputMessages
  },
  supportedUrls: await model.getSupportedUrls()
  });
+ const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
+ const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+ tools,
+ toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
+ activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
+ });
  currentModelResponse = await retry(
  () => {
  var _a18;
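`generateText` gains an `experimental_prepareStep` hook: it runs before each step and may override the model, tool choice, and active tools for that step. A usage sketch built from the fields visible in this hunk (the models and tools are hypothetical):

```js
const result = await generateText({
  model: defaultModel,
  tools: { search, calculator },
  maxSteps: 5,
  experimental_prepareStep: async ({ model, stepNumber, steps, maxSteps }) => {
    if (stepNumber === 0) {
      return {
        model: routerModel, // per-step model override
        toolChoice: { type: "tool", toolName: "search" },
        experimental_activeTools: ["search"],
      };
    }
    // returning undefined keeps the defaults for this step
  },
});
```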
@@ -5459,23 +5410,23 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": { input: () => promptFormat },
+ // model:
+ "ai.model.provider": stepModel.provider,
+ "ai.model.id": stepModel.modelId,
+ // prompt:
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
  "ai.prompt.tools": {
  // convert the language model level tools:
- input: () => {
- var _a19;
- return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
- }
+ input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
  },
  "ai.prompt.toolChoice": {
- input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+ input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
+ "gen_ai.system": stepModel.provider,
+ "gen_ai.request.model": stepModel.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
  "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
@@ -5487,11 +5438,11 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a19, _b2, _c2, _d, _e, _f, _g, _h;
- const result = await model.doGenerate({
- ...callSettings,
- ...toolsAndToolChoice,
- inputFormat: promptFormat,
+ var _a19, _b2, _c2, _d2, _e2, _f2, _g, _h;
+ const result = await stepModel.doGenerate({
+ ...callSettings2,
+ tools: stepTools,
+ toolChoice: stepToolChoice,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -5500,8 +5451,8 @@ async function generateText({
  });
  const responseData = {
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
- timestamp: (_d = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d : currentDate(),
- modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+ modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId,
  headers: (_g = result.response) == null ? void 0 : _g.headers,
  body: (_h = result.response) == null ? void 0 : _h.body
  };
@@ -5577,12 +5528,12 @@ async function generateText({
  nextStepType = "tool-result";
  }
  }
- const originalText = (_a17 = extractContentText(currentModelResponse.content)) != null ? _a17 : "";
+ const originalText = (_d = extractContentText(currentModelResponse.content)) != null ? _d : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
  text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
- currentReasoningDetails = asReasoningDetails(
+ currentReasoning = convertReasoningContentToParts(
  currentModelResponse.content
  );
  sources.push(
@@ -5605,7 +5556,9 @@ async function generateText({
  ...toResponseMessages({
  text: text2,
  files: asFiles(currentModelResponse.content),
- reasoning: asReasoningDetails(currentModelResponse.content),
+ reasoning: convertReasoningContentToParts(
+ currentModelResponse.content
+ ),
  tools: tools != null ? tools : {},
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -5617,8 +5570,8 @@ async function generateText({
  const currentStepResult = {
  stepType,
  text: stepText,
- reasoningText: asReasoningText(currentReasoningDetails),
- reasoning: currentReasoningDetails,
+ reasoningText: asReasoningText(currentReasoning),
+ reasoning: currentReasoning,
  files: asFiles(currentModelResponse.content),
  sources: currentModelResponse.content.filter(
  (part) => part.type === "source"
@@ -5628,7 +5581,7 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
- request: (_b = currentModelResponse.request) != null ? _b : {},
+ request: (_e = currentModelResponse.request) != null ? _e : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5661,31 +5614,27 @@ async function generateText({
  }
  })
  );
+ const resolvedOutput = await (output == null ? void 0 : output.parseOutput(
+ { text: text2 },
+ {
+ response: currentModelResponse.response,
+ usage,
+ finishReason: currentModelResponse.finishReason
+ }
+ ));
  return new DefaultGenerateTextResult({
  text: text2,
  files: asFiles(currentModelResponse.content),
- reasoning: asReasoningText(currentReasoningDetails),
- reasoningDetails: currentReasoningDetails,
+ reasoning: asReasoningText(currentReasoning),
+ reasoningDetails: currentReasoning,
  sources,
- outputResolver: () => {
- if (output == null) {
- throw new NoOutputSpecifiedError();
- }
- return output.parseOutput(
- { text: text2 },
- {
- response: currentModelResponse.response,
- usage,
- finishReason: currentModelResponse.finishReason
- }
- );
- },
+ resolvedOutput,
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_c = currentModelResponse.request) != null ? _c : {},
+ request: (_f = currentModelResponse.request) != null ? _f : {},
  response: {
  ...currentModelResponse.response,
  messages: responseMessages
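The lazy `outputResolver` is replaced by `resolvedOutput`: the output is now parsed once, with `await`, before the result object is built. Reading `experimental_output` without configuring an output still throws, just from the getter instead of the resolver:

```js
// Sketch, assuming the public experimental_output / Output helpers:
const plain = await generateText({ model, prompt: "hi" });
plain.experimental_output; // throws NoOutputSpecifiedError

const withOutput = await generateText({
  model,
  prompt: "Give me a user.",
  experimental_output: Output.object({ schema: z.object({ name: z.string() }) }),
});
withOutput.experimental_output; // already-parsed value; parseOutput was awaited internally
```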
@@ -5786,41 +5735,16 @@ var DefaultGenerateTextResult = class {
  this.response = options.response;
  this.steps = options.steps;
  this.providerMetadata = options.providerMetadata;
- this.outputResolver = options.outputResolver;
+ this.resolvedOutput = options.resolvedOutput;
  this.sources = options.sources;
  }
  get experimental_output() {
- return this.outputResolver();
- }
- };
- function asReasoningDetails(content) {
- const reasoning = content.filter((part) => part.type === "reasoning");
- if (reasoning.length === 0) {
- return [];
- }
- const result = [];
- let activeReasoningText;
- for (const part of reasoning) {
- if (part.reasoningType === "text") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.text };
- result.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.text;
- }
- } else if (part.reasoningType === "signature") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: "" };
- result.push(activeReasoningText);
- }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- } else if (part.reasoningType === "redacted") {
- result.push({ type: "redacted", data: part.data });
+ if (this.resolvedOutput == null) {
+ throw new NoOutputSpecifiedError();
  }
+ return this.resolvedOutput;
  }
- return result;
- }
+ };
  function asFiles(content) {
  return content.filter((part) => part.type === "file").map((part) => new DefaultGeneratedFile(part));
  }
@@ -5908,10 +5832,10 @@ _a15 = symbol15;
  var text = () => ({
  type: "text",
  responseFormat: { type: "text" },
- parsePartial({ text: text2 }) {
+ async parsePartial({ text: text2 }) {
  return { partial: text2 };
  },
- parseOutput({ text: text2 }) {
+ async parseOutput({ text: text2 }) {
  return text2;
  }
  });
@@ -5925,8 +5849,8 @@ var object = ({
  type: "json",
  schema: schema.jsonSchema
  },
- parsePartial({ text: text2 }) {
- const result = parsePartialJson(text2);
+ async parsePartial({ text: text2 }) {
+ const result = await parsePartialJson(text2);
  switch (result.state) {
  case "failed-parse":
  case "undefined-input":
@@ -5943,8 +5867,8 @@ var object = ({
  }
  }
  },
- parseOutput({ text: text2 }, context) {
- const parseResult = safeParseJSON4({ text: text2 });
+ async parseOutput({ text: text2 }, context) {
+ const parseResult = await safeParseJSON4({ text: text2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -5955,7 +5879,7 @@ var object = ({
  finishReason: context.finishReason
  });
  }
- const validationResult = safeValidateTypes4({
+ const validationResult = await safeValidateTypes4({
  value: parseResult.value,
  schema
  });
@@ -6044,9 +5968,6 @@ function smoothStream({
  }

  // core/generate-text/stream-text.ts
- import {
- AISDKError as AISDKError18
- } from "@ai-sdk/provider";
  import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";

  // util/as-array.ts
@@ -6198,6 +6119,7 @@ function runToolsTransformation({
  case "stream-start":
  case "text":
  case "reasoning":
+ case "reasoning-part-finish":
  case "source":
  case "response-metadata":
  case "error": {
@@ -6455,7 +6377,7 @@ function createOutputTransformStream(output) {
  textChunk = "";
  }
  return new TransformStream({
- transform(chunk, controller) {
+ async transform(chunk, controller) {
  if (chunk.type === "step-finish") {
  publishTextChunk({ controller });
  }
@@ -6465,7 +6387,7 @@ function createOutputTransformStream(output) {
  }
  text2 += chunk.text;
  textChunk += chunk.text;
- const result = output.parsePartial({ text: text2 });
+ const result = await output.parsePartial({ text: text2 });
  if (result != null) {
  const currentJson = JSON.stringify(result.partial);
  if (currentJson !== lastPublishedJson) {
@@ -6538,7 +6460,7 @@ var DefaultStreamTextResult = class {
  let recordedFullText = "";
  let stepReasoning = [];
  let stepFiles = [];
- let activeReasoningText = void 0;
+ let activeReasoningPart = void 0;
  let recordedStepSources = [];
  const recordedSources = [];
  const recordedResponse = {
@@ -6570,26 +6492,21 @@ var DefaultStreamTextResult = class {
  recordedFullText += part.text;
  }
  if (part.type === "reasoning") {
- if (part.reasoningType === "text") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.text };
- stepReasoning.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.text;
- }
- } else if (part.reasoningType === "signature") {
- if (activeReasoningText == null) {
- throw new AISDKError18({
- name: "InvalidStreamPart",
- message: "reasoning-signature without reasoning"
- });
- }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- } else if (part.reasoningType === "redacted") {
- stepReasoning.push({ type: "redacted", data: part.data });
+ if (activeReasoningPart == null) {
+ activeReasoningPart = {
+ type: "reasoning",
+ text: part.text,
+ providerOptions: part.providerMetadata
+ };
+ stepReasoning.push(activeReasoningPart);
+ } else {
+ activeReasoningPart.text += part.text;
+ activeReasoningPart.providerOptions = part.providerMetadata;
  }
  }
+ if (part.type === "reasoning-part-finish") {
+ activeReasoningPart = void 0;
+ }
  if (part.type === "file") {
  stepFiles.push(part.file);
  }
@@ -6656,7 +6573,7 @@ var DefaultStreamTextResult = class {
  recordedStepSources = [];
  stepReasoning = [];
  stepFiles = [];
- activeReasoningText = void 0;
+ activeReasoningPart = void 0;
  if (nextStepType !== "done") {
  stepType = nextStepType;
  }
@@ -6767,10 +6684,6 @@ var DefaultStreamTextResult = class {
  headers,
  settings: { ...callSettings, maxRetries }
  });
- const initialPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools
- });
  const self = this;
  recordSpan({
  name: "ai.streamText",
@@ -6799,14 +6712,16 @@ var DefaultStreamTextResult = class {
  hasLeadingWhitespace,
  messageId
  }) {
- const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
+ const initialPrompt = await standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools
+ });
  const stepInputMessages = [
  ...initialPrompt.messages,
  ...responseMessages
  ];
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
- type: promptFormat,
  system: initialPrompt.system,
  messages: stepInputMessages
  },
@@ -6830,9 +6745,6 @@ var DefaultStreamTextResult = class {
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => promptFormat
- },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
@@ -6870,7 +6782,6 @@ var DefaultStreamTextResult = class {
  result: await model.doStream({
  ...callSettings,
  ...toolsAndToolChoice,
- inputFormat: promptFormat,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -6898,7 +6809,7 @@ var DefaultStreamTextResult = class {
  let warnings;
  const stepReasoning2 = [];
  const stepFiles2 = [];
- let activeReasoningText2 = void 0;
+ let activeReasoningPart2 = void 0;
  let stepFinishReason = "unknown";
  let stepUsage = {
  promptTokens: 0,
@@ -6984,33 +6895,24 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  controller.enqueue(chunk);
- if (chunk.reasoningType === "text") {
- if (activeReasoningText2 == null) {
- activeReasoningText2 = {
- type: "text",
- text: chunk.text
- };
- stepReasoning2.push(activeReasoningText2);
- } else {
- activeReasoningText2.text += chunk.text;
- }
- } else if (chunk.reasoningType === "signature") {
- if (activeReasoningText2 == null) {
- throw new InvalidStreamPartError({
- chunk,
- message: "reasoning-signature without reasoning"
- });
- }
- activeReasoningText2.signature = chunk.signature;
- activeReasoningText2 = void 0;
- } else if (chunk.reasoningType === "redacted") {
- stepReasoning2.push({
- type: "redacted",
- data: chunk.data
- });
+ if (activeReasoningPart2 == null) {
+ activeReasoningPart2 = {
+ type: "reasoning",
+ text: chunk.text,
+ providerOptions: chunk.providerMetadata
+ };
+ stepReasoning2.push(activeReasoningPart2);
+ } else {
+ activeReasoningPart2.text += chunk.text;
+ activeReasoningPart2.providerOptions = chunk.providerMetadata;
  }
  break;
  }
+ case "reasoning-part-finish": {
+ activeReasoningPart2 = void 0;
+ controller.enqueue(chunk);
+ break;
+ }
  case "tool-call": {
  controller.enqueue(chunk);
  stepToolCalls.push(chunk);
@@ -7336,23 +7238,15 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  if (sendReasoning) {
- if (chunk.reasoningType === "text") {
- controller.enqueue(
- formatDataStreamPart("reasoning", chunk.text)
- );
- } else if (chunk.reasoningType === "signature") {
- controller.enqueue(
- formatDataStreamPart("reasoning_signature", {
- signature: chunk.signature
- })
- );
- } else if (chunk.reasoningType === "redacted") {
- controller.enqueue(
- formatDataStreamPart("redacted_reasoning", {
- data: chunk.data
- })
- );
- }
+ controller.enqueue(formatDataStreamPart("reasoning", chunk));
+ }
+ break;
+ }
+ case "reasoning-part-finish": {
+ if (sendReasoning) {
+ controller.enqueue(
+ formatDataStreamPart("reasoning_part_finish", {})
+ );
  }
  break;
  }
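Net effect on the data stream response when `sendReasoning` is enabled (payloads illustrative):

```js
// canary.12:
//   g:"thinking step"          // reasoning text delta
//   j:{"signature":"abc"}      // reasoning_signature
//   i:{"data":"redacted"}      // redacted_reasoning
// canary.14:
//   g:{"text":"thinking step","providerMetadata":{...}}
//   l:{}                       // reasoning_part_finish boundary
```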
@@ -7563,8 +7457,8 @@ var DefaultStreamTextResult = class {
  };

  // errors/no-speech-generated-error.ts
- import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
- var NoSpeechGeneratedError = class extends AISDKError19 {
+ import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
+ var NoSpeechGeneratedError = class extends AISDKError18 {
  constructor(options) {
  super({
  name: "AI_NoSpeechGeneratedError",
@@ -7653,8 +7547,8 @@ var DefaultSpeechResult = class {
  };

  // errors/no-transcript-generated-error.ts
- import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
- var NoTranscriptGeneratedError = class extends AISDKError20 {
+ import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+ var NoTranscriptGeneratedError = class extends AISDKError19 {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -7842,7 +7736,6 @@ function extractReasoningMiddleware({
  }
  transformedContent.push({
  type: "reasoning",
- reasoningType: "text",
  text: reasoningText
  });
  transformedContent.push({
@@ -7874,7 +7767,6 @@ function extractReasoningMiddleware({
  controller.enqueue(
  isReasoning ? {
  type: "reasoning",
- reasoningType: "text",
  text: prefix + text2
  } : {
  type: "text",
@@ -7901,6 +7793,9 @@ function extractReasoningMiddleware({
  const foundFullMatch = startIndex + nextTag.length <= buffer.length;
  if (foundFullMatch) {
  buffer = buffer.slice(startIndex + nextTag.length);
+ if (isReasoning) {
+ controller.enqueue({ type: "reasoning-part-finish" });
+ }
  isReasoning = !isReasoning;
  afterSwitch = true;
  } else {
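`extractReasoningMiddleware` now emits a `reasoning-part-finish` chunk when a closing reasoning tag is found. For `extractReasoningMiddleware({ tagName: "think" })`, a raw stream like `<think>plan...</think>answer` roughly yields:

```js
// { type: "reasoning", text: "plan..." }
// { type: "reasoning-part-finish" }   // new in canary.14, emitted at </think>
// { type: "text", text: "answer" }
```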
@@ -8014,7 +7909,7 @@ function appendClientMessage({
  }

  // core/prompt/append-response-messages.ts
- import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
  function appendResponseMessages({
  messages,
  responseMessages,
@@ -8064,40 +7959,20 @@ function appendResponseMessages({
  if (reasoningPart == null) {
  reasoningPart = {
  type: "reasoning",
- reasoning: "",
- details: []
+ reasoning: ""
  };
  parts.push(reasoningPart);
  }
  reasoningTextContent = (reasoningTextContent != null ? reasoningTextContent : "") + part.text;
  reasoningPart.reasoning += part.text;
- reasoningPart.details.push({
- type: "text",
- text: part.text,
- signature: part.signature
- });
- break;
- }
- case "redacted-reasoning": {
- if (reasoningPart == null) {
- reasoningPart = {
- type: "reasoning",
- reasoning: "",
- details: []
- };
- parts.push(reasoningPart);
- }
- reasoningPart.details.push({
- type: "redacted",
- data: part.data
- });
+ reasoningPart.providerMetadata = part.providerOptions;
  break;
  }
  case "tool-call":
  break;
  case "file":
  if (part.data instanceof URL) {
- throw new AISDKError21({
+ throw new AISDKError20({
  name: "InvalidAssistantFileData",
  message: "File data cannot be a URL"
  });
@@ -8231,7 +8106,7 @@ function customProvider({
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- import { AISDKError as AISDKError22, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError21, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
@@ -8250,7 +8125,7 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return AISDKError22.hasMarker(error, marker16);
+ return AISDKError21.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;
@@ -8941,172 +8816,6 @@ function simulateReadableStream({
  });
  }

- // streams/langchain-adapter.ts
- var langchain_adapter_exports = {};
- __export(langchain_adapter_exports, {
- mergeIntoDataStream: () => mergeIntoDataStream,
- toDataStream: () => toDataStream,
- toDataStreamResponse: () => toDataStreamResponse
- });
-
- // streams/stream-callbacks.ts
- function createCallbacksTransformer(callbacks = {}) {
- const textEncoder = new TextEncoder();
- let aggregatedResponse = "";
- return new TransformStream({
- async start() {
- if (callbacks.onStart)
- await callbacks.onStart();
- },
- async transform(message, controller) {
- controller.enqueue(textEncoder.encode(message));
- aggregatedResponse += message;
- if (callbacks.onToken)
- await callbacks.onToken(message);
- if (callbacks.onText && typeof message === "string") {
- await callbacks.onText(message);
- }
- },
- async flush() {
- if (callbacks.onCompletion) {
- await callbacks.onCompletion(aggregatedResponse);
- }
- if (callbacks.onFinal) {
- await callbacks.onFinal(aggregatedResponse);
- }
- }
- });
- }
-
- // streams/langchain-adapter.ts
- function toDataStreamInternal(stream, callbacks) {
- return stream.pipeThrough(
- new TransformStream({
- transform: async (value, controller) => {
- var _a17;
- if (typeof value === "string") {
- controller.enqueue(value);
- return;
- }
- if ("event" in value) {
- if (value.event === "on_chat_model_stream") {
- forwardAIMessageChunk(
- (_a17 = value.data) == null ? void 0 : _a17.chunk,
- controller
- );
- }
- return;
- }
- forwardAIMessageChunk(value, controller);
- }
- })
- ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
- new TransformStream({
- transform: async (chunk, controller) => {
- controller.enqueue(formatDataStreamPart("text", chunk));
- }
- })
- );
- }
- function toDataStream(stream, callbacks) {
- return toDataStreamInternal(stream, callbacks).pipeThrough(
- new TextEncoderStream()
- );
- }
- function toDataStreamResponse(stream, options) {
- var _a17;
- const dataStream = toDataStreamInternal(
- stream,
- options == null ? void 0 : options.callbacks
- ).pipeThrough(new TextEncoderStream());
- const data = options == null ? void 0 : options.data;
- const init = options == null ? void 0 : options.init;
- const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
- return new Response(responseStream, {
- status: (_a17 = init == null ? void 0 : init.status) != null ? _a17 : 200,
- statusText: init == null ? void 0 : init.statusText,
- headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
- contentType: "text/plain; charset=utf-8",
- dataStreamVersion: "v1"
- })
- });
- }
- function mergeIntoDataStream(stream, options) {
- options.dataStream.merge(toDataStreamInternal(stream, options.callbacks));
- }
- function forwardAIMessageChunk(chunk, controller) {
- if (typeof chunk.content === "string") {
- controller.enqueue(chunk.content);
- } else {
- const content = chunk.content;
- for (const item of content) {
- if (item.type === "text") {
- controller.enqueue(item.text);
- }
- }
- }
- }
-
- // streams/llamaindex-adapter.ts
- var llamaindex_adapter_exports = {};
- __export(llamaindex_adapter_exports, {
- mergeIntoDataStream: () => mergeIntoDataStream2,
- toDataStream: () => toDataStream2,
- toDataStreamResponse: () => toDataStreamResponse2
- });
- import { convertAsyncIteratorToReadableStream } from "@ai-sdk/provider-utils";
- function toDataStreamInternal2(stream, callbacks) {
- const trimStart = trimStartOfStream();
- return convertAsyncIteratorToReadableStream(stream[Symbol.asyncIterator]()).pipeThrough(
- new TransformStream({
- async transform(message, controller) {
- controller.enqueue(trimStart(message.delta));
- }
- })
- ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
- new TransformStream({
- transform: async (chunk, controller) => {
- controller.enqueue(formatDataStreamPart("text", chunk));
- }
- })
- );
- }
- function toDataStream2(stream, callbacks) {
- return toDataStreamInternal2(stream, callbacks).pipeThrough(
- new TextEncoderStream()
- );
- }
- function toDataStreamResponse2(stream, options = {}) {
- var _a17;
- const { init, data, callbacks } = options;
- const dataStream = toDataStreamInternal2(stream, callbacks).pipeThrough(
- new TextEncoderStream()
- );
- const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
- return new Response(responseStream, {
- status: (_a17 = init == null ? void 0 : init.status) != null ? _a17 : 200,
- statusText: init == null ? void 0 : init.statusText,
- headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
- contentType: "text/plain; charset=utf-8",
- dataStreamVersion: "v1"
- })
- });
- }
- function mergeIntoDataStream2(stream, options) {
- options.dataStream.merge(toDataStreamInternal2(stream, options.callbacks));
- }
- function trimStartOfStream() {
- let isStreamStart = true;
- return (text2) => {
- if (isStreamStart) {
- text2 = text2.trimStart();
- if (text2)
- isStreamStart = false;
- }
- return text2;
- };
- }
-
  // util/constants.ts
  var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;
@@ -9185,8 +8894,6 @@ export {
  InvalidStreamPartError,
  InvalidToolArgumentsError,
  JSONParseError2 as JSONParseError,
- langchain_adapter_exports as LangChainAdapter,
- llamaindex_adapter_exports as LlamaIndexAdapter,
  LoadAPIKeyError,
  MCPClientError,
  MessageConversionError,
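The largest removal in this release: the LangChain and LlamaIndex stream adapters (plus the shared `createCallbacksTransformer` plumbing) are deleted, along with their `LangChainAdapter`/`LlamaIndexAdapter` exports. Downstream impact, illustratively:

```js
// Resolves in 5.0.0-canary.12, fails in canary.14:
import { LangChainAdapter, LlamaIndexAdapter } from "ai";
// No replacement ships in this package version; projects that need the adapters
// must stay on an earlier version or convert the framework streams themselves.
```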