ai 5.0.0-canary.12 → 5.0.0-canary.14

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -42,8 +42,6 @@ __export(ai_exports, {
  InvalidStreamPartError: () => InvalidStreamPartError,
  InvalidToolArgumentsError: () => InvalidToolArgumentsError,
  JSONParseError: () => import_provider21.JSONParseError,
- LangChainAdapter: () => langchain_adapter_exports,
- LlamaIndexAdapter: () => llamaindex_adapter_exports,
  LoadAPIKeyError: () => import_provider21.LoadAPIKeyError,
  MCPClientError: () => MCPClientError,
  MessageConversionError: () => MessageConversionError,
@@ -461,15 +459,15 @@ function fixJson(input) {
  }

  // core/util/parse-partial-json.ts
- function parsePartialJson(jsonText) {
+ async function parsePartialJson(jsonText) {
  if (jsonText === void 0) {
  return { value: void 0, state: "undefined-input" };
  }
- let result = (0, import_provider_utils.safeParseJSON)({ text: jsonText });
+ let result = await (0, import_provider_utils.safeParseJSON)({ text: jsonText });
  if (result.success) {
  return { value: result.value, state: "successful-parse" };
  }
- result = (0, import_provider_utils.safeParseJSON)({ text: fixJson(jsonText) });
+ result = await (0, import_provider_utils.safeParseJSON)({ text: fixJson(jsonText) });
  if (result.success) {
  return { value: result.value, state: "repaired-parse" };
  }
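Note: `parsePartialJson` becomes async here because `safeParseJSON` from `@ai-sdk/provider-utils` now returns a promise; every caller in the hunks below (`onToolCallDeltaPart`, `streamObject`, `Output.object`) picks up an `await` as a ripple effect. A minimal sketch of the new calling convention (internal helper, import path omitted on purpose):

```ts
// parsePartialJson must now be awaited; its result shape is unchanged:
const { value, state } = await parsePartialJson(accumulatedText);
// state: "undefined-input" | "successful-parse" | "repaired-parse" | "failed-parse"
```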
@@ -650,10 +648,18 @@ var reasoningStreamPart = {
  code: "g",
  name: "reasoning",
  parse: (value) => {
- if (typeof value !== "string") {
- throw new Error('"reasoning" parts expect a string value.');
+ if (value == null || typeof value !== "object" || !("text" in value) || typeof value.text !== "string" || "providerMetadata" in value && typeof value.providerMetadata !== "object") {
+ throw new Error(
+ '"reasoning" parts expect an object with a "text" property.'
+ );
  }
- return { type: "reasoning", value };
+ return {
+ type: "reasoning",
+ value: {
+ text: value.text,
+ providerMetadata: value.providerMetadata
+ }
+ };
  }
  };
  var sourcePart = {
@@ -669,33 +675,6 @@ var sourcePart = {
  };
  }
  };
- var redactedReasoningStreamPart = {
- code: "i",
- name: "redacted_reasoning",
- parse: (value) => {
- if (value == null || typeof value !== "object" || !("data" in value) || typeof value.data !== "string") {
- throw new Error(
- '"redacted_reasoning" parts expect an object with a "data" property.'
- );
- }
- return { type: "redacted_reasoning", value: { data: value.data } };
- }
- };
- var reasoningSignatureStreamPart = {
- code: "j",
- name: "reasoning_signature",
- parse: (value) => {
- if (value == null || typeof value !== "object" || !("signature" in value) || typeof value.signature !== "string") {
- throw new Error(
- '"reasoning_signature" parts expect an object with a "signature" property.'
- );
- }
- return {
- type: "reasoning_signature",
- value: { signature: value.signature }
- };
- }
- };
  var fileStreamPart = {
  code: "k",
  name: "file",
@@ -708,6 +687,16 @@ var fileStreamPart = {
  return { type: "file", value };
  }
  };
+ var reasoningPartFinishStreamPart = {
+ code: "l",
+ name: "reasoning_part_finish",
+ parse: () => {
+ return {
+ type: "reasoning_part_finish",
+ value: {}
+ };
+ }
+ };
  var dataStreamParts = [
  textStreamPart,
  dataStreamPart,
@@ -722,8 +711,7 @@ var dataStreamParts = [
  startStepStreamPart,
  reasoningStreamPart,
  sourcePart,
- redactedReasoningStreamPart,
- reasoningSignatureStreamPart,
+ reasoningPartFinishStreamPart,
  fileStreamPart
  ];
  var dataStreamPartsByCode = Object.fromEntries(
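Note: this reshapes the data stream protocol. The reasoning part (code `g`) now carries a structured payload instead of a bare string, codes `i` (`redacted_reasoning`) and `j` (`reasoning_signature`) are gone, and a new code `l` marks the end of a reasoning part. Roughly, the wire chunks now look like this (payloads illustrative):

```ts
// g:{"text":"thinking...","providerMetadata":{}}  <- reasoning delta (object, was a string)
// l:{}                                            <- reasoning_part_finish (new, code "l")
// codes "i" and "j" are no longer emitted or parsed
```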
@@ -772,8 +760,7 @@ async function processDataStream({
  stream,
  onTextPart,
  onReasoningPart,
- onReasoningSignaturePart,
- onRedactedReasoningPart,
+ onReasoningPartFinish,
  onSourcePart,
  onFilePart,
  onDataPart,
@@ -814,11 +801,8 @@ async function processDataStream({
  case "reasoning":
  await (onReasoningPart == null ? void 0 : onReasoningPart(value2));
  break;
- case "reasoning_signature":
- await (onReasoningSignaturePart == null ? void 0 : onReasoningSignaturePart(value2));
- break;
- case "redacted_reasoning":
- await (onRedactedReasoningPart == null ? void 0 : onRedactedReasoningPart(value2));
+ case "reasoning_part_finish":
+ await (onReasoningPartFinish == null ? void 0 : onReasoningPartFinish(value2));
  break;
  case "file":
  await (onFilePart == null ? void 0 : onFilePart(value2));
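Note: callers of `processDataStream` swap the two removed callbacks for one. A sketch of the updated surface, using only the callbacks visible in this diff:

```ts
await processDataStream({
  stream,
  // reasoning deltas now arrive as objects, not strings:
  onReasoningPart(value) {
    console.log(value.text, value.providerMetadata);
  },
  // replaces onReasoningSignaturePart / onRedactedReasoningPart:
  onReasoningPartFinish() {},
});
```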
@@ -891,7 +875,6 @@ async function processChatResponse({
  };
  let currentTextPart = void 0;
  let currentReasoningPart = void 0;
- let currentReasoningTextDetail = void 0;
  function updateToolInvocationPart(toolCallId, invocation) {
  const part = message.parts.find(
  (part2) => part2.type === "tool-invocation" && part2.toolInvocation.toolCallId === toolCallId
@@ -953,48 +936,25 @@ async function processChatResponse({
  },
  onReasoningPart(value) {
  var _a18;
- if (currentReasoningTextDetail == null) {
- currentReasoningTextDetail = { type: "text", text: value };
- if (currentReasoningPart != null) {
- currentReasoningPart.details.push(currentReasoningTextDetail);
- }
- } else {
- currentReasoningTextDetail.text += value;
- }
  if (currentReasoningPart == null) {
  currentReasoningPart = {
  type: "reasoning",
- reasoning: value,
- details: [currentReasoningTextDetail]
+ reasoning: value.text,
+ providerMetadata: value.providerMetadata
  };
  message.parts.push(currentReasoningPart);
  } else {
- currentReasoningPart.reasoning += value;
+ currentReasoningPart.reasoning += value.text;
+ currentReasoningPart.providerMetadata = value.providerMetadata;
  }
- message.reasoning = ((_a18 = message.reasoning) != null ? _a18 : "") + value;
+ message.reasoning = ((_a18 = message.reasoning) != null ? _a18 : "") + value.text;
  execUpdate();
  },
- onReasoningSignaturePart(value) {
- if (currentReasoningTextDetail != null) {
- currentReasoningTextDetail.signature = value.signature;
+ onReasoningPartFinish(value) {
+ if (currentReasoningPart != null) {
+ currentReasoningPart = void 0;
  }
  },
- onRedactedReasoningPart(value) {
- if (currentReasoningPart == null) {
- currentReasoningPart = {
- type: "reasoning",
- reasoning: "",
- details: []
- };
- message.parts.push(currentReasoningPart);
- }
- currentReasoningPart.details.push({
- type: "redacted",
- data: value.data
- });
- currentReasoningTextDetail = void 0;
- execUpdate();
- },
  onFilePart(value) {
  message.parts.push({
  type: "file",
@@ -1031,10 +991,12 @@ async function processChatResponse({
  updateToolInvocationPart(value.toolCallId, invocation);
  execUpdate();
  },
- onToolCallDeltaPart(value) {
+ async onToolCallDeltaPart(value) {
  const partialToolCall = partialToolCalls[value.toolCallId];
  partialToolCall.text += value.argsTextDelta;
- const { value: partialArgs } = parsePartialJson(partialToolCall.text);
+ const { value: partialArgs } = await parsePartialJson(
+ partialToolCall.text
+ );
  const invocation = {
  state: "partial-call",
  step: partialToolCall.step,
@@ -1115,7 +1077,6 @@ async function processChatResponse({
  step += 1;
  currentTextPart = value.isContinued ? currentTextPart : void 0;
  currentReasoningPart = void 0;
- currentReasoningTextDetail = void 0;
  },
  onStartStepPart(value) {
  if (!replaceLastMessage) {
@@ -1412,8 +1373,7 @@ function getMessageParts(message) {
  ...message.reasoning ? [
  {
  type: "reasoning",
- reasoning: message.reasoning,
- details: [{ type: "text", text: message.reasoning }]
+ reasoning: message.reasoning
  }
  ] : [],
  ...message.content ? [{ type: "text", text: message.content }] : []
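Note: the `details` array disappears from UI reasoning parts throughout these hunks. The simplified shape, as reconstructed from this diff (field optionality is an assumption):

```ts
type ReasoningUIPart = {
  type: "reasoning";
  reasoning: string;                          // accumulated reasoning text
  providerMetadata?: Record<string, unknown>; // replaces per-detail signatures / redacted data
};
```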
@@ -2255,8 +2215,8 @@ async function embedMany({
  }),
  tracer,
  fn: async (span) => {
- const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
- if (maxEmbeddingsPerCall == null) {
+ const maxEmbeddingsPerCall = await model.maxEmbeddingsPerCall;
+ if (maxEmbeddingsPerCall == null || maxEmbeddingsPerCall === Infinity) {
  const { embeddings: embeddings2, usage, response } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
@@ -2989,14 +2949,6 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
  return {
  type: "reasoning",
  text: part.text,
- signature: part.signature,
- providerOptions
- };
- }
- case "redacted-reasoning": {
- return {
- type: "redacted-reasoning",
- data: part.data,
  providerOptions
  };
  }
@@ -3390,23 +3342,11 @@ function convertToCoreMessages(messages, options) {
  break;
  }
  case "reasoning": {
- for (const detail of part.details) {
- switch (detail.type) {
- case "text":
- content2.push({
- type: "reasoning",
- text: detail.text,
- signature: detail.signature
- });
- break;
- case "redacted":
- content2.push({
- type: "redacted-reasoning",
- data: detail.data
- });
- break;
- }
- }
+ content2.push({
+ type: "reasoning",
+ text: part.reasoning,
+ providerOptions: part.providerMetadata
+ });
  break;
  }
  case "tool-invocation":
@@ -3676,11 +3616,6 @@ var reasoningPartSchema = import_zod5.z.object({
  text: import_zod5.z.string(),
  providerOptions: providerMetadataSchema.optional()
  });
- var redactedReasoningPartSchema = import_zod5.z.object({
- type: import_zod5.z.literal("redacted-reasoning"),
- data: import_zod5.z.string(),
- providerOptions: providerMetadataSchema.optional()
- });
  var toolCallPartSchema = import_zod5.z.object({
  type: import_zod5.z.literal("tool-call"),
  toolCallId: import_zod5.z.string(),
@@ -3721,7 +3656,6 @@ var coreAssistantMessageSchema = import_zod6.z.object({
  textPartSchema,
  filePartSchema,
  reasoningPartSchema,
- redactedReasoningPartSchema,
  toolCallPartSchema
  ])
  )
@@ -3741,7 +3675,7 @@ var coreMessageSchema = import_zod6.z.union([
  ]);

  // core/prompt/standardize-prompt.ts
- function standardizePrompt({
+ async function standardizePrompt({
  prompt,
  tools
  }) {
@@ -3771,7 +3705,6 @@ function standardizePrompt({
  });
  }
  return {
- type: "prompt",
  system: prompt.system,
  messages: [
  {
@@ -3798,7 +3731,7 @@ function standardizePrompt({
  message: "messages must not be empty"
  });
  }
- const validationResult = (0, import_provider_utils11.safeValidateTypes)({
+ const validationResult = await (0, import_provider_utils11.safeValidateTypes)({
  value: messages,
  schema: import_zod7.z.array(coreMessageSchema)
  });
@@ -3810,7 +3743,6 @@ function standardizePrompt({
  });
  }
  return {
- type: "messages",
  messages,
  system: prompt.system
  };
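Note: `standardizePrompt` is now async (it awaits `safeValidateTypes`) and no longer tags its result with a `type: "prompt" | "messages"` discriminator; both branches return the same shape, which is why the `inputFormat` / `ai.prompt.format` plumbing disappears in the hunks below. Sketch of the unified result:

```ts
const standardizedPrompt = await standardizePrompt({
  prompt: { system, prompt, messages },
  tools,
});
// => { system?: string; messages: CoreMessage[] }  (no more type field)
```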
@@ -3860,10 +3792,10 @@ function createAsyncIterableStream(source) {
  var noSchemaOutputStrategy = {
  type: "no-schema",
  jsonSchema: void 0,
- validatePartialResult({ value, textDelta }) {
+ async validatePartialResult({ value, textDelta }) {
  return { success: true, value: { partial: value, textDelta } };
  },
- validateFinalResult(value, context) {
+ async validateFinalResult(value, context) {
  return value === void 0 ? {
  success: false,
  error: new NoObjectGeneratedError({
@@ -3884,7 +3816,7 @@ var noSchemaOutputStrategy = {
  var objectOutputStrategy = (schema) => ({
  type: "object",
  jsonSchema: schema.jsonSchema,
- validatePartialResult({ value, textDelta }) {
+ async validatePartialResult({ value, textDelta }) {
  return {
  success: true,
  value: {
@@ -3894,7 +3826,7 @@ var objectOutputStrategy = (schema) => ({
  }
  };
  },
- validateFinalResult(value) {
+ async validateFinalResult(value) {
  return (0, import_provider_utils12.safeValidateTypes)({ value, schema });
  },
  createElementStream() {
@@ -3919,7 +3851,12 @@ var arrayOutputStrategy = (schema) => {
  required: ["elements"],
  additionalProperties: false
  },
- validatePartialResult({ value, latestObject, isFirstDelta, isFinalDelta }) {
+ async validatePartialResult({
+ value,
+ latestObject,
+ isFirstDelta,
+ isFinalDelta
+ }) {
  var _a17;
  if (!(0, import_provider12.isJSONObject)(value) || !(0, import_provider12.isJSONArray)(value.elements)) {
  return {
@@ -3934,7 +3871,7 @@ var arrayOutputStrategy = (schema) => {
  const resultArray = [];
  for (let i = 0; i < inputArray.length; i++) {
  const element = inputArray[i];
- const result = (0, import_provider_utils12.safeValidateTypes)({ value: element, schema });
+ const result = await (0, import_provider_utils12.safeValidateTypes)({ value: element, schema });
  if (i === inputArray.length - 1 && !isFinalDelta) {
  continue;
  }
@@ -3963,7 +3900,7 @@ var arrayOutputStrategy = (schema) => {
  }
  };
  },
- validateFinalResult(value) {
+ async validateFinalResult(value) {
  if (!(0, import_provider12.isJSONObject)(value) || !(0, import_provider12.isJSONArray)(value.elements)) {
  return {
  success: false,
@@ -3975,7 +3912,7 @@ var arrayOutputStrategy = (schema) => {
  }
  const inputArray = value.elements;
  for (const element of inputArray) {
- const result = (0, import_provider_utils12.safeValidateTypes)({ value: element, schema });
+ const result = await (0, import_provider_utils12.safeValidateTypes)({ value: element, schema });
  if (!result.success) {
  return result;
  }
@@ -4029,7 +3966,7 @@ var enumOutputStrategy = (enumValues) => {
  required: ["result"],
  additionalProperties: false
  },
- validateFinalResult(value) {
+ async validateFinalResult(value) {
  if (!(0, import_provider12.isJSONObject)(value) || typeof value.result !== "string") {
  return {
  success: false,
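Note: every output-strategy method becomes async so that the promise-based `safeValidateTypes` can be awaited uniformly. A hedged sketch of the internal contract these hunks imply (names reconstructed from the diff; not a public API):

```ts
interface OutputStrategy {
  type: string;
  jsonSchema: unknown;
  validatePartialResult(args: {
    value: unknown;
    textDelta: string;
    latestObject?: unknown;
    isFirstDelta?: boolean;
    isFinalDelta?: boolean;
  }): Promise<{ success: boolean; value?: unknown; error?: unknown }>;
  validateFinalResult(
    value: unknown,
    context: unknown,
  ): Promise<{ success: boolean; value?: unknown; error?: unknown }>;
}
```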
@@ -4201,29 +4138,31 @@ function validateObjectGenerationInput({

  // core/generate-object/generate-object.ts
  var originalGenerateId = (0, import_provider_utils13.createIdGenerator)({ prefix: "aiobj", size: 24 });
- async function generateObject({
- model,
- enum: enumValues,
- // rename bc enum is reserved by typescript
- schema: inputSchema,
- schemaName,
- schemaDescription,
- output = "object",
- system,
- prompt,
- messages,
- maxRetries: maxRetriesArg,
- abortSignal,
- headers,
- experimental_repairText: repairText,
- experimental_telemetry: telemetry,
- providerOptions,
- _internal: {
- generateId: generateId3 = originalGenerateId,
- currentDate = () => /* @__PURE__ */ new Date()
- } = {},
- ...settings
- }) {
+ async function generateObject(options) {
+ const {
+ model,
+ output = "object",
+ system,
+ prompt,
+ messages,
+ maxRetries: maxRetriesArg,
+ abortSignal,
+ headers,
+ experimental_repairText: repairText,
+ experimental_telemetry: telemetry,
+ providerOptions,
+ _internal: {
+ generateId: generateId3 = originalGenerateId,
+ currentDate = () => /* @__PURE__ */ new Date()
+ } = {},
+ ...settings
+ } = options;
+ const enumValues = "enum" in options ? options.enum : void 0;
+ const {
+ schema: inputSchema,
+ schemaDescription,
+ schemaName
+ } = "schema" in options ? options : {};
  validateObjectGenerationInput({
  output,
  schema: inputSchema,
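Note: `generateObject` now takes a single `options` object and probes for `enum` / `schema` with the `in` operator instead of destructuring them unconditionally; `streamObject` gets the same restructuring in a later hunk. Call sites are unchanged; for example (model and prompt are placeholders):

```ts
const { object } = await generateObject({
  model,
  output: "enum",
  enum: ["red", "green", "blue"], // read via `"enum" in options`
  prompt: "Pick the color of the sky.",
});
```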
@@ -4275,7 +4214,7 @@ async function generateObject({
  let response;
  let request;
  let resultProviderMetadata;
- const standardizedPrompt = standardizePrompt({
+ const standardizedPrompt = await standardizePrompt({
  prompt: { system, prompt, messages },
  tools: void 0
  });
@@ -4294,9 +4233,6 @@ async function generateObject({
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => standardizedPrompt.type
- },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
@@ -4322,7 +4258,6 @@ async function generateObject({
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
  prompt: promptMessages,
  providerOptions,
  abortSignal,
@@ -4376,8 +4311,8 @@ async function generateObject({
  resultProviderMetadata = generateResult.providerMetadata;
  request = (_a17 = generateResult.request) != null ? _a17 : {};
  response = generateResult.responseData;
- function processResult(result2) {
- const parseResult = (0, import_provider_utils13.safeParseJSON)({ text: result2 });
+ async function processResult(result2) {
+ const parseResult = await (0, import_provider_utils13.safeParseJSON)({ text: result2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -4388,7 +4323,7 @@ async function generateObject({
  finishReason
  });
  }
- const validationResult = outputStrategy.validateFinalResult(
+ const validationResult = await outputStrategy.validateFinalResult(
  parseResult.value,
  {
  text: result2,
@@ -4410,7 +4345,7 @@ async function generateObject({
  }
  let object2;
  try {
- object2 = processResult(result);
+ object2 = await processResult(result);
  } catch (error) {
  if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider13.JSONParseError.isInstance(error.cause) || import_provider13.TypeValidationError.isInstance(error.cause))) {
  const repairedText = await repairText({
@@ -4420,7 +4355,7 @@ async function generateObject({
  if (repairedText === null) {
  throw error;
  }
- object2 = processResult(repairedText);
+ object2 = await processResult(repairedText);
  } else {
  throw error;
  }
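Note: `processResult` is now async, so the `experimental_repairText` retry path awaits it on both attempts. The repair hook itself still returns repaired text or `null` to rethrow; a hedged sketch (the argument names follow the documented hook, not this diff, which truncates the call):

```ts
experimental_repairText: async ({ text, error }) => {
  // naive illustration: strip trailing commas and retry, or give up
  const repaired = text.replace(/,\s*([}\]])/g, "$1");
  return repaired === text ? null : repaired;
},
```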
@@ -4618,29 +4553,32 @@ function now() {

  // core/generate-object/stream-object.ts
  var originalGenerateId2 = (0, import_provider_utils14.createIdGenerator)({ prefix: "aiobj", size: 24 });
- function streamObject({
- model,
- schema: inputSchema,
- schemaName,
- schemaDescription,
- output = "object",
- system,
- prompt,
- messages,
- maxRetries,
- abortSignal,
- headers,
- experimental_telemetry: telemetry,
- providerOptions,
- onError,
- onFinish,
- _internal: {
- generateId: generateId3 = originalGenerateId2,
- currentDate = () => /* @__PURE__ */ new Date(),
- now: now2 = now
- } = {},
- ...settings
- }) {
+ function streamObject(options) {
+ const {
+ model,
+ output = "object",
+ system,
+ prompt,
+ messages,
+ maxRetries,
+ abortSignal,
+ headers,
+ experimental_telemetry: telemetry,
+ providerOptions,
+ onError,
+ onFinish,
+ _internal: {
+ generateId: generateId3 = originalGenerateId2,
+ currentDate = () => /* @__PURE__ */ new Date(),
+ now: now2 = now
+ } = {},
+ ...settings
+ } = options;
+ const {
+ schema: inputSchema,
+ schemaDescription,
+ schemaName
+ } = "schema" in options ? options : {};
  validateObjectGenerationInput({
  output,
  schema: inputSchema,
@@ -4741,7 +4679,7 @@ var DefaultStreamObjectResult = class {
  tracer,
  endWhenDone: false,
  fn: async (rootSpan) => {
- const standardizedPrompt = standardizePrompt({
+ const standardizedPrompt = await standardizePrompt({
  prompt: { system, prompt, messages },
  tools: void 0
  });
@@ -4753,7 +4691,6 @@ var DefaultStreamObjectResult = class {
  description: schemaDescription
  },
  ...prepareCallSettings(settings),
- inputFormat: standardizedPrompt.type,
  prompt: await convertToLanguageModelPrompt({
  prompt: standardizedPrompt,
  supportedUrls: await model.getSupportedUrls()
@@ -4791,9 +4728,6 @@ var DefaultStreamObjectResult = class {
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => callOptions.inputFormat
- },
  "ai.prompt.messages": {
  input: () => JSON.stringify(callOptions.prompt)
  },
@@ -4856,9 +4790,9 @@ var DefaultStreamObjectResult = class {
  if (typeof chunk === "string") {
  accumulatedText += chunk;
  textDelta += chunk;
- const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
+ const { value: currentObjectJson, state: parseState } = await parsePartialJson(accumulatedText);
  if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
- const validationResult = outputStrategy.validatePartialResult({
+ const validationResult = await outputStrategy.validatePartialResult({
  value: currentObjectJson,
  textDelta,
  latestObject,
@@ -4912,7 +4846,7 @@ var DefaultStreamObjectResult = class {
  ...fullResponse,
  headers: response == null ? void 0 : response.headers
  });
- const validationResult = outputStrategy.validateFinalResult(
+ const validationResult = await outputStrategy.validateFinalResult(
  latestObjectJson,
  {
  text: accumulatedText,
@@ -5350,7 +5284,7 @@ async function doParseToolCall({
  });
  }
  const schema = asSchema(tool2.parameters);
- const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils15.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils15.safeParseJSON)({ text: toolCall.args, schema });
+ const parseResult = toolCall.args.trim() === "" ? await (0, import_provider_utils15.safeValidateTypes)({ value: {}, schema }) : await (0, import_provider_utils15.safeParseJSON)({ text: toolCall.args, schema });
  if (parseResult.success === false) {
  throw new InvalidToolArgumentsError({
  toolName,
@@ -5367,10 +5301,17 @@ async function doParseToolCall({
  }

  // core/generate-text/reasoning.ts
- function asReasoningText(reasoning) {
- const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
+ function asReasoningText(reasoningParts) {
+ const reasoningText = reasoningParts.map((part) => part.text).join("");
  return reasoningText.length > 0 ? reasoningText : void 0;
  }
+ function convertReasoningContentToParts(content) {
+ return content.filter((part) => part.type === "reasoning").map((part) => ({
+ type: "reasoning",
+ text: part.text,
+ providerOptions: part.providerMetadata
+ }));
+ }

  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
@@ -5385,12 +5326,8 @@ function toResponseMessages({
  }) {
  const responseMessages = [];
  const content = [];
- if (reasoning.length > 0) {
- content.push(
- ...reasoning.map(
- (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
- )
- );
+ for (const part of reasoning) {
+ content.push(part);
  }
  if (files.length > 0) {
  content.push(
@@ -5466,6 +5403,7 @@ async function generateText({
  experimental_telemetry: telemetry,
  providerOptions,
  experimental_activeTools: activeTools,
+ experimental_prepareStep: prepareStep,
  experimental_repairToolCall: repairToolCall,
  _internal: {
  generateId: generateId3 = originalGenerateId3,
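Note: this hunk introduces `experimental_prepareStep`, a per-step hook whose call shape and fallback semantics appear in the hunks below. A sketch assembled from them (the alternate model and tool name are hypothetical):

```ts
await generateText({
  model,
  tools,
  maxSteps: 3,
  experimental_prepareStep: async ({ model, steps, maxSteps, stepNumber }) => ({
    // every field is optional; omitted fields fall back to the outer call:
    model: stepNumber === 0 ? plannerModel : model,
    toolChoice: "auto",
    experimental_activeTools: ["search"],
  }),
  prompt,
});
```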
@@ -5489,7 +5427,7 @@ async function generateText({
  headers,
  settings: { ...callSettings, maxRetries }
  });
- const initialPrompt = standardizePrompt({
+ const initialPrompt = await standardizePrompt({
  prompt: { system, prompt, messages },
  tools
  });
@@ -5504,6 +5442,9 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": model.provider,
+ "ai.model.id": model.modelId,
  // specific settings that only make sense on the outer level:
  "ai.prompt": {
  input: () => JSON.stringify({ system, prompt, messages })
@@ -5513,14 +5454,12 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a17, _b, _c;
- const toolsAndToolChoice = {
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
+ var _a17, _b, _c, _d, _e, _f;
+ const callSettings2 = prepareCallSettings(settings);
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
- let currentReasoningDetails = [];
+ let currentReasoning = [];
  let stepCount = 0;
  const responseMessages = [];
  let text2 = "";
@@ -5533,19 +5472,29 @@ async function generateText({
  };
  let stepType = "initial";
  do {
- const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
  const stepInputMessages = [
  ...initialPrompt.messages,
  ...responseMessages
  ];
+ const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+ model,
+ steps,
+ maxSteps,
+ stepNumber: stepCount
+ }));
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
- type: promptFormat,
  system: initialPrompt.system,
  messages: stepInputMessages
  },
  supportedUrls: await model.getSupportedUrls()
  });
+ const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
+ const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+ tools,
+ toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
+ activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
+ });
  currentModelResponse = await retry(
  () => {
  var _a18;
@@ -5559,23 +5508,23 @@ async function generateText({
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": { input: () => promptFormat },
+ // model:
+ "ai.model.provider": stepModel.provider,
+ "ai.model.id": stepModel.modelId,
+ // prompt:
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
  "ai.prompt.tools": {
  // convert the language model level tools:
- input: () => {
- var _a19;
- return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
- }
+ input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
  },
  "ai.prompt.toolChoice": {
- input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+ input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
+ "gen_ai.system": stepModel.provider,
+ "gen_ai.request.model": stepModel.modelId,
  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
  "gen_ai.request.max_tokens": settings.maxOutputTokens,
  "gen_ai.request.presence_penalty": settings.presencePenalty,
@@ -5587,11 +5536,11 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a19, _b2, _c2, _d, _e, _f, _g, _h;
- const result = await model.doGenerate({
- ...callSettings,
- ...toolsAndToolChoice,
- inputFormat: promptFormat,
+ var _a19, _b2, _c2, _d2, _e2, _f2, _g, _h;
+ const result = await stepModel.doGenerate({
+ ...callSettings2,
+ tools: stepTools,
+ toolChoice: stepToolChoice,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -5600,8 +5549,8 @@ async function generateText({
  });
  const responseData = {
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
- timestamp: (_d = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d : currentDate(),
- modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+ modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId,
  headers: (_g = result.response) == null ? void 0 : _g.headers,
  body: (_h = result.response) == null ? void 0 : _h.body
  };
@@ -5677,12 +5626,12 @@ async function generateText({
  nextStepType = "tool-result";
  }
  }
- const originalText = (_a17 = extractContentText(currentModelResponse.content)) != null ? _a17 : "";
+ const originalText = (_d = extractContentText(currentModelResponse.content)) != null ? _d : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
  text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
- currentReasoningDetails = asReasoningDetails(
+ currentReasoning = convertReasoningContentToParts(
  currentModelResponse.content
  );
  sources.push(
@@ -5705,7 +5654,9 @@ async function generateText({
  ...toResponseMessages({
  text: text2,
  files: asFiles(currentModelResponse.content),
- reasoning: asReasoningDetails(currentModelResponse.content),
+ reasoning: convertReasoningContentToParts(
+ currentModelResponse.content
+ ),
  tools: tools != null ? tools : {},
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -5717,8 +5668,8 @@ async function generateText({
  const currentStepResult = {
  stepType,
  text: stepText,
- reasoningText: asReasoningText(currentReasoningDetails),
- reasoning: currentReasoningDetails,
+ reasoningText: asReasoningText(currentReasoning),
+ reasoning: currentReasoning,
  files: asFiles(currentModelResponse.content),
  sources: currentModelResponse.content.filter(
  (part) => part.type === "source"
@@ -5728,7 +5679,7 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
- request: (_b = currentModelResponse.request) != null ? _b : {},
+ request: (_e = currentModelResponse.request) != null ? _e : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5761,31 +5712,27 @@ async function generateText({
  }
  })
  );
+ const resolvedOutput = await (output == null ? void 0 : output.parseOutput(
+ { text: text2 },
+ {
+ response: currentModelResponse.response,
+ usage,
+ finishReason: currentModelResponse.finishReason
+ }
+ ));
  return new DefaultGenerateTextResult({
  text: text2,
  files: asFiles(currentModelResponse.content),
- reasoning: asReasoningText(currentReasoningDetails),
- reasoningDetails: currentReasoningDetails,
+ reasoning: asReasoningText(currentReasoning),
+ reasoningDetails: currentReasoning,
  sources,
- outputResolver: () => {
- if (output == null) {
- throw new NoOutputSpecifiedError();
- }
- return output.parseOutput(
- { text: text2 },
- {
- response: currentModelResponse.response,
- usage,
- finishReason: currentModelResponse.finishReason
- }
- );
- },
+ resolvedOutput,
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_c = currentModelResponse.request) != null ? _c : {},
+ request: (_f = currentModelResponse.request) != null ? _f : {},
  response: {
  ...currentModelResponse.response,
  messages: responseMessages
@@ -5886,41 +5833,16 @@ var DefaultGenerateTextResult = class {
  this.response = options.response;
  this.steps = options.steps;
  this.providerMetadata = options.providerMetadata;
- this.outputResolver = options.outputResolver;
+ this.resolvedOutput = options.resolvedOutput;
  this.sources = options.sources;
  }
  get experimental_output() {
- return this.outputResolver();
- }
- };
- function asReasoningDetails(content) {
- const reasoning = content.filter((part) => part.type === "reasoning");
- if (reasoning.length === 0) {
- return [];
- }
- const result = [];
- let activeReasoningText;
- for (const part of reasoning) {
- if (part.reasoningType === "text") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.text };
- result.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.text;
- }
- } else if (part.reasoningType === "signature") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: "" };
- result.push(activeReasoningText);
- }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- } else if (part.reasoningType === "redacted") {
- result.push({ type: "redacted", data: part.data });
+ if (this.resolvedOutput == null) {
+ throw new NoOutputSpecifiedError();
  }
+ return this.resolvedOutput;
  }
- return result;
- }
+ };
  function asFiles(content) {
  return content.filter((part) => part.type === "file").map((part) => new DefaultGeneratedFile(part));
  }
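Note: `experimental_output` is now resolved eagerly during generation (`resolvedOutput`) instead of lazily in the getter; the getter still throws `NoOutputSpecifiedError` when no output spec was passed. A hedged usage sketch (assuming the public `Output` helpers, which this diff only shows as compiled internals):

```ts
const result = await generateText({
  model,
  prompt,
  experimental_output: Output.object({ schema }),
});
result.experimental_output; // parsed object; throws NoOutputSpecifiedError without an output spec
```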
@@ -5996,10 +5918,10 @@ _a15 = symbol15;
  var text = () => ({
  type: "text",
  responseFormat: { type: "text" },
- parsePartial({ text: text2 }) {
+ async parsePartial({ text: text2 }) {
  return { partial: text2 };
  },
- parseOutput({ text: text2 }) {
+ async parseOutput({ text: text2 }) {
  return text2;
  }
  });
@@ -6013,8 +5935,8 @@ var object = ({
  type: "json",
  schema: schema.jsonSchema
  },
- parsePartial({ text: text2 }) {
- const result = parsePartialJson(text2);
+ async parsePartial({ text: text2 }) {
+ const result = await parsePartialJson(text2);
  switch (result.state) {
  case "failed-parse":
  case "undefined-input":
@@ -6031,8 +5953,8 @@ var object = ({
  }
  }
  },
- parseOutput({ text: text2 }, context) {
- const parseResult = (0, import_provider_utils17.safeParseJSON)({ text: text2 });
+ async parseOutput({ text: text2 }, context) {
+ const parseResult = await (0, import_provider_utils17.safeParseJSON)({ text: text2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -6043,7 +5965,7 @@ var object = ({
  finishReason: context.finishReason
  });
  }
- const validationResult = (0, import_provider_utils17.safeValidateTypes)({
+ const validationResult = await (0, import_provider_utils17.safeValidateTypes)({
  value: parseResult.value,
  schema
  });
@@ -6132,7 +6054,6 @@ function smoothStream({
  }

  // core/generate-text/stream-text.ts
- var import_provider23 = require("@ai-sdk/provider");
  var import_provider_utils19 = require("@ai-sdk/provider-utils");

  // util/as-array.ts
@@ -6284,6 +6205,7 @@ function runToolsTransformation({
  case "stream-start":
  case "text":
  case "reasoning":
+ case "reasoning-part-finish":
  case "source":
  case "response-metadata":
  case "error": {
@@ -6541,7 +6463,7 @@ function createOutputTransformStream(output) {
  textChunk = "";
  }
  return new TransformStream({
- transform(chunk, controller) {
+ async transform(chunk, controller) {
  if (chunk.type === "step-finish") {
  publishTextChunk({ controller });
  }
@@ -6551,7 +6473,7 @@ function createOutputTransformStream(output) {
  }
  text2 += chunk.text;
  textChunk += chunk.text;
- const result = output.parsePartial({ text: text2 });
+ const result = await output.parsePartial({ text: text2 });
  if (result != null) {
  const currentJson = JSON.stringify(result.partial);
  if (currentJson !== lastPublishedJson) {
@@ -6624,7 +6546,7 @@ var DefaultStreamTextResult = class {
  let recordedFullText = "";
  let stepReasoning = [];
  let stepFiles = [];
- let activeReasoningText = void 0;
+ let activeReasoningPart = void 0;
  let recordedStepSources = [];
  const recordedSources = [];
  const recordedResponse = {
@@ -6656,26 +6578,21 @@ var DefaultStreamTextResult = class {
  recordedFullText += part.text;
  }
  if (part.type === "reasoning") {
- if (part.reasoningType === "text") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.text };
- stepReasoning.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.text;
- }
- } else if (part.reasoningType === "signature") {
- if (activeReasoningText == null) {
- throw new import_provider23.AISDKError({
- name: "InvalidStreamPart",
- message: "reasoning-signature without reasoning"
- });
- }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- } else if (part.reasoningType === "redacted") {
- stepReasoning.push({ type: "redacted", data: part.data });
+ if (activeReasoningPart == null) {
+ activeReasoningPart = {
+ type: "reasoning",
+ text: part.text,
+ providerOptions: part.providerMetadata
+ };
+ stepReasoning.push(activeReasoningPart);
+ } else {
+ activeReasoningPart.text += part.text;
+ activeReasoningPart.providerOptions = part.providerMetadata;
  }
  }
+ if (part.type === "reasoning-part-finish") {
+ activeReasoningPart = void 0;
+ }
  if (part.type === "file") {
  stepFiles.push(part.file);
  }
@@ -6742,7 +6659,7 @@ var DefaultStreamTextResult = class {
  recordedStepSources = [];
  stepReasoning = [];
  stepFiles = [];
- activeReasoningText = void 0;
+ activeReasoningPart = void 0;
  if (nextStepType !== "done") {
  stepType = nextStepType;
  }
@@ -6853,10 +6770,6 @@ var DefaultStreamTextResult = class {
  headers,
  settings: { ...callSettings, maxRetries }
  });
- const initialPrompt = standardizePrompt({
- prompt: { system, prompt, messages },
- tools
- });
  const self = this;
  recordSpan({
  name: "ai.streamText",
@@ -6885,14 +6798,16 @@ var DefaultStreamTextResult = class {
  hasLeadingWhitespace,
  messageId
  }) {
- const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
+ const initialPrompt = await standardizePrompt({
+ prompt: { system, prompt, messages },
+ tools
+ });
  const stepInputMessages = [
  ...initialPrompt.messages,
  ...responseMessages
  ];
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
- type: promptFormat,
  system: initialPrompt.system,
  messages: stepInputMessages
  },
@@ -6916,9 +6831,6 @@ var DefaultStreamTextResult = class {
  telemetry
  }),
  ...baseTelemetryAttributes,
- "ai.prompt.format": {
- input: () => promptFormat
- },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
@@ -6956,7 +6868,6 @@ var DefaultStreamTextResult = class {
  result: await model.doStream({
  ...callSettings,
  ...toolsAndToolChoice,
- inputFormat: promptFormat,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -6984,7 +6895,7 @@ var DefaultStreamTextResult = class {
  let warnings;
  const stepReasoning2 = [];
  const stepFiles2 = [];
- let activeReasoningText2 = void 0;
+ let activeReasoningPart2 = void 0;
  let stepFinishReason = "unknown";
  let stepUsage = {
  promptTokens: 0,
@@ -7070,33 +6981,24 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  controller.enqueue(chunk);
- if (chunk.reasoningType === "text") {
- if (activeReasoningText2 == null) {
- activeReasoningText2 = {
- type: "text",
- text: chunk.text
- };
- stepReasoning2.push(activeReasoningText2);
- } else {
- activeReasoningText2.text += chunk.text;
- }
- } else if (chunk.reasoningType === "signature") {
- if (activeReasoningText2 == null) {
- throw new InvalidStreamPartError({
- chunk,
- message: "reasoning-signature without reasoning"
- });
- }
- activeReasoningText2.signature = chunk.signature;
- activeReasoningText2 = void 0;
- } else if (chunk.reasoningType === "redacted") {
- stepReasoning2.push({
- type: "redacted",
- data: chunk.data
- });
+ if (activeReasoningPart2 == null) {
+ activeReasoningPart2 = {
+ type: "reasoning",
+ text: chunk.text,
+ providerOptions: chunk.providerMetadata
+ };
+ stepReasoning2.push(activeReasoningPart2);
+ } else {
+ activeReasoningPart2.text += chunk.text;
+ activeReasoningPart2.providerOptions = chunk.providerMetadata;
  }
  break;
  }
+ case "reasoning-part-finish": {
+ activeReasoningPart2 = void 0;
+ controller.enqueue(chunk);
+ break;
+ }
  case "tool-call": {
  controller.enqueue(chunk);
  stepToolCalls.push(chunk);
@@ -7422,23 +7324,15 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  if (sendReasoning) {
- if (chunk.reasoningType === "text") {
- controller.enqueue(
- formatDataStreamPart("reasoning", chunk.text)
- );
- } else if (chunk.reasoningType === "signature") {
- controller.enqueue(
- formatDataStreamPart("reasoning_signature", {
- signature: chunk.signature
- })
- );
- } else if (chunk.reasoningType === "redacted") {
- controller.enqueue(
- formatDataStreamPart("redacted_reasoning", {
- data: chunk.data
- })
- );
- }
+ controller.enqueue(formatDataStreamPart("reasoning", chunk));
+ }
+ break;
+ }
+ case "reasoning-part-finish": {
+ if (sendReasoning) {
+ controller.enqueue(
+ formatDataStreamPart("reasoning_part_finish", {})
+ );
  }
  break;
  }
@@ -7649,8 +7543,8 @@ var DefaultStreamTextResult = class {
  };

  // errors/no-speech-generated-error.ts
- var import_provider24 = require("@ai-sdk/provider");
- var NoSpeechGeneratedError = class extends import_provider24.AISDKError {
+ var import_provider23 = require("@ai-sdk/provider");
+ var NoSpeechGeneratedError = class extends import_provider23.AISDKError {
  constructor(options) {
  super({
  name: "AI_NoSpeechGeneratedError",
@@ -7739,8 +7633,8 @@ var DefaultSpeechResult = class {
  };

  // errors/no-transcript-generated-error.ts
- var import_provider25 = require("@ai-sdk/provider");
- var NoTranscriptGeneratedError = class extends import_provider25.AISDKError {
+ var import_provider24 = require("@ai-sdk/provider");
+ var NoTranscriptGeneratedError = class extends import_provider24.AISDKError {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -7928,7 +7822,6 @@ function extractReasoningMiddleware({
  }
  transformedContent.push({
  type: "reasoning",
- reasoningType: "text",
  text: reasoningText
  });
  transformedContent.push({
@@ -7960,7 +7853,6 @@ function extractReasoningMiddleware({
  controller.enqueue(
  isReasoning ? {
  type: "reasoning",
- reasoningType: "text",
  text: prefix + text2
  } : {
  type: "text",
@@ -7987,6 +7879,9 @@ function extractReasoningMiddleware({
  const foundFullMatch = startIndex + nextTag.length <= buffer.length;
  if (foundFullMatch) {
  buffer = buffer.slice(startIndex + nextTag.length);
+ if (isReasoning) {
+ controller.enqueue({ type: "reasoning-part-finish" });
+ }
  isReasoning = !isReasoning;
  afterSwitch = true;
  } else {
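Note: `extractReasoningMiddleware` drops the `reasoningType` discriminator and instead emits a dedicated boundary chunk whenever a closing reasoning tag is matched:

```ts
// chunks emitted by the middleware after this change:
// { type: "reasoning", text: "..." }   // delta (no reasoningType field)
// { type: "reasoning-part-finish" }    // boundary, new in this version
```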
@@ -8100,7 +7995,7 @@ function appendClientMessage({
  }

  // core/prompt/append-response-messages.ts
- var import_provider26 = require("@ai-sdk/provider");
+ var import_provider25 = require("@ai-sdk/provider");
  function appendResponseMessages({
  messages,
  responseMessages,
@@ -8150,40 +8045,20 @@ function appendResponseMessages({
  if (reasoningPart == null) {
  reasoningPart = {
  type: "reasoning",
- reasoning: "",
- details: []
+ reasoning: ""
  };
  parts.push(reasoningPart);
  }
  reasoningTextContent = (reasoningTextContent != null ? reasoningTextContent : "") + part.text;
  reasoningPart.reasoning += part.text;
- reasoningPart.details.push({
- type: "text",
- text: part.text,
- signature: part.signature
- });
- break;
- }
- case "redacted-reasoning": {
- if (reasoningPart == null) {
- reasoningPart = {
- type: "reasoning",
- reasoning: "",
- details: []
- };
- parts.push(reasoningPart);
- }
- reasoningPart.details.push({
- type: "redacted",
- data: part.data
- });
+ reasoningPart.providerMetadata = part.providerOptions;
  break;
  }
  case "tool-call":
  break;
  case "file":
  if (part.data instanceof URL) {
- throw new import_provider26.AISDKError({
+ throw new import_provider25.AISDKError({
  name: "InvalidAssistantFileData",
  message: "File data cannot be a URL"
  });
@@ -8277,7 +8152,7 @@ function appendResponseMessages({
  }

  // core/registry/custom-provider.ts
- var import_provider27 = require("@ai-sdk/provider");
+ var import_provider26 = require("@ai-sdk/provider");
  function customProvider({
  languageModels,
  textEmbeddingModels,
@@ -8292,7 +8167,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.languageModel(modelId);
  }
- throw new import_provider27.NoSuchModelError({ modelId, modelType: "languageModel" });
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "languageModel" });
  },
  textEmbeddingModel(modelId) {
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -8301,7 +8176,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.textEmbeddingModel(modelId);
  }
- throw new import_provider27.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  },
  imageModel(modelId) {
  if (imageModels != null && modelId in imageModels) {
@@ -8310,19 +8185,19 @@ function customProvider({
  if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
  return fallbackProvider.imageModel(modelId);
  }
- throw new import_provider27.NoSuchModelError({ modelId, modelType: "imageModel" });
+ throw new import_provider26.NoSuchModelError({ modelId, modelType: "imageModel" });
  }
  };
  }
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- var import_provider28 = require("@ai-sdk/provider");
+ var import_provider27 = require("@ai-sdk/provider");
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
  var _a16;
- var NoSuchProviderError = class extends import_provider28.NoSuchModelError {
+ var NoSuchProviderError = class extends import_provider27.NoSuchModelError {
  constructor({
  modelId,
  modelType,
@@ -8336,13 +8211,13 @@ var NoSuchProviderError = class extends import_provider28.NoSuchModelError {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return import_provider28.AISDKError.hasMarker(error, marker16);
+ return import_provider27.AISDKError.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;

  // core/registry/provider-registry.ts
- var import_provider29 = require("@ai-sdk/provider");
+ var import_provider28 = require("@ai-sdk/provider");
  function createProviderRegistry(providers, {
  separator = ":"
  } = {}) {
@@ -8381,7 +8256,7 @@ var DefaultProviderRegistry = class {
  splitId(id, modelType) {
  const index = id.indexOf(this.separator);
  if (index === -1) {
- throw new import_provider29.NoSuchModelError({
+ throw new import_provider28.NoSuchModelError({
  modelId: id,
  modelType,
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
@@ -8394,7 +8269,7 @@ var DefaultProviderRegistry = class {
  const [providerId, modelId] = this.splitId(id, "languageModel");
  const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
  if (model == null) {
- throw new import_provider29.NoSuchModelError({ modelId: id, modelType: "languageModel" });
+ throw new import_provider28.NoSuchModelError({ modelId: id, modelType: "languageModel" });
  }
  return model;
  }
@@ -8404,7 +8279,7 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
  if (model == null) {
- throw new import_provider29.NoSuchModelError({
+ throw new import_provider28.NoSuchModelError({
  modelId: id,
  modelType: "textEmbeddingModel"
  });
@@ -8417,7 +8292,7 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
  if (model == null) {
- throw new import_provider29.NoSuchModelError({ modelId: id, modelType: "imageModel" });
+ throw new import_provider28.NoSuchModelError({ modelId: id, modelType: "imageModel" });
  }
  return model;
  }
@@ -9027,172 +8902,6 @@ function simulateReadableStream({
  });
  }

- // streams/langchain-adapter.ts
- var langchain_adapter_exports = {};
- __export(langchain_adapter_exports, {
- mergeIntoDataStream: () => mergeIntoDataStream,
- toDataStream: () => toDataStream,
- toDataStreamResponse: () => toDataStreamResponse
- });
-
- // streams/stream-callbacks.ts
- function createCallbacksTransformer(callbacks = {}) {
- const textEncoder = new TextEncoder();
- let aggregatedResponse = "";
- return new TransformStream({
- async start() {
- if (callbacks.onStart)
- await callbacks.onStart();
- },
- async transform(message, controller) {
- controller.enqueue(textEncoder.encode(message));
- aggregatedResponse += message;
- if (callbacks.onToken)
- await callbacks.onToken(message);
- if (callbacks.onText && typeof message === "string") {
- await callbacks.onText(message);
- }
- },
- async flush() {
- if (callbacks.onCompletion) {
- await callbacks.onCompletion(aggregatedResponse);
- }
- if (callbacks.onFinal) {
- await callbacks.onFinal(aggregatedResponse);
- }
- }
- });
- }
-
- // streams/langchain-adapter.ts
- function toDataStreamInternal(stream, callbacks) {
- return stream.pipeThrough(
- new TransformStream({
- transform: async (value, controller) => {
- var _a17;
- if (typeof value === "string") {
- controller.enqueue(value);
- return;
- }
- if ("event" in value) {
- if (value.event === "on_chat_model_stream") {
- forwardAIMessageChunk(
- (_a17 = value.data) == null ? void 0 : _a17.chunk,
- controller
- );
- }
- return;
- }
- forwardAIMessageChunk(value, controller);
- }
- })
- ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
- new TransformStream({
- transform: async (chunk, controller) => {
- controller.enqueue(formatDataStreamPart("text", chunk));
- }
- })
- );
- }
- function toDataStream(stream, callbacks) {
- return toDataStreamInternal(stream, callbacks).pipeThrough(
- new TextEncoderStream()
- );
- }
- function toDataStreamResponse(stream, options) {
- var _a17;
- const dataStream = toDataStreamInternal(
- stream,
- options == null ? void 0 : options.callbacks
- ).pipeThrough(new TextEncoderStream());
- const data = options == null ? void 0 : options.data;
- const init = options == null ? void 0 : options.init;
- const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
- return new Response(responseStream, {
- status: (_a17 = init == null ? void 0 : init.status) != null ? _a17 : 200,
- statusText: init == null ? void 0 : init.statusText,
- headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
- contentType: "text/plain; charset=utf-8",
- dataStreamVersion: "v1"
- })
- });
- }
- function mergeIntoDataStream(stream, options) {
- options.dataStream.merge(toDataStreamInternal(stream, options.callbacks));
- }
- function forwardAIMessageChunk(chunk, controller) {
- if (typeof chunk.content === "string") {
- controller.enqueue(chunk.content);
- } else {
- const content = chunk.content;
- for (const item of content) {
- if (item.type === "text") {
- controller.enqueue(item.text);
- }
- }
- }
- }
-
- // streams/llamaindex-adapter.ts
- var llamaindex_adapter_exports = {};
- __export(llamaindex_adapter_exports, {
- mergeIntoDataStream: () => mergeIntoDataStream2,
- toDataStream: () => toDataStream2,
- toDataStreamResponse: () => toDataStreamResponse2
- });
- var import_provider_utils23 = require("@ai-sdk/provider-utils");
- function toDataStreamInternal2(stream, callbacks) {
- const trimStart = trimStartOfStream();
- return (0, import_provider_utils23.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
- new TransformStream({
- async transform(message, controller) {
- controller.enqueue(trimStart(message.delta));
- }
- })
- ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
- new TransformStream({
- transform: async (chunk, controller) => {
- controller.enqueue(formatDataStreamPart("text", chunk));
- }
- })
- );
- }
- function toDataStream2(stream, callbacks) {
- return toDataStreamInternal2(stream, callbacks).pipeThrough(
- new TextEncoderStream()
- );
- }
- function toDataStreamResponse2(stream, options = {}) {
- var _a17;
- const { init, data, callbacks } = options;
- const dataStream = toDataStreamInternal2(stream, callbacks).pipeThrough(
- new TextEncoderStream()
- );
- const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
- return new Response(responseStream, {
- status: (_a17 = init == null ? void 0 : init.status) != null ? _a17 : 200,
- statusText: init == null ? void 0 : init.statusText,
- headers: prepareResponseHeaders(init == null ? void 0 : init.headers, {
- contentType: "text/plain; charset=utf-8",
- dataStreamVersion: "v1"
- })
- });
- }
- function mergeIntoDataStream2(stream, options) {
- options.dataStream.merge(toDataStreamInternal2(stream, options.callbacks));
- }
- function trimStartOfStream() {
- let isStreamStart = true;
- return (text2) => {
- if (isStreamStart) {
- text2 = text2.trimStart();
- if (text2)
- isStreamStart = false;
- }
- return text2;
- };
- }
-
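Note: the LangChain and LlamaIndex adapters are deleted from the `ai` package entirely here, and their export entries disappear in the final hunk below. Imports of them now fail at build time:

```ts
// no longer available from "ai" as of 5.0.0-canary.14:
// import { LangChainAdapter, LlamaIndexAdapter } from "ai"; // -> error
```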
  // util/constants.ts
  var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;

@@ -9272,8 +8981,6 @@ var StreamData = class {
  InvalidStreamPartError,
  InvalidToolArgumentsError,
  JSONParseError,
- LangChainAdapter,
- LlamaIndexAdapter,
  LoadAPIKeyError,
  MCPClientError,
  MessageConversionError,