@ai-sdk/openai 2.0.51 → 2.0.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -2348,6 +2348,9 @@ async function convertToOpenAIResponsesInput({
           input.push(reasoningMessages[reasoningId]);
         } else {
           reasoningMessage.summary.push(...summaryParts);
+          if ((providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent) != null) {
+            reasoningMessage.encrypted_content = providerOptions.reasoningEncryptedContent;
+          }
         }
       }
     } else {
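Note: with this change, a reasoning message whose provider options carry `reasoningEncryptedContent` gets that value copied onto the outgoing Responses API item as `encrypted_content`, which is what lets stateless (`store: false`) multi-step conversations replay reasoning. A minimal consumer-side sketch, assuming the `include: ['reasoning.encrypted_content']` provider option and the `openai.responses(...)` model helper (both assumptions, not shown in this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: round-tripping encrypted reasoning with store: false.
const result = await generateText({
  model: openai.responses('o4-mini'), // illustrative model id
  providerOptions: {
    openai: {
      store: false,
      include: ['reasoning.encrypted_content'], // assumed option for getting encrypted reasoning back
    },
  },
  prompt: 'Plan the first step.',
});

// Reasoning parts in result.response.messages should now carry
// `reasoningEncryptedContent` in their provider metadata; passing those
// messages back in lets the converter above re-attach `encrypted_content`.
```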
@@ -2693,6 +2696,11 @@ var openaiResponsesChunkSchema = lazyValidator8(
       summary_index: z16.number(),
       delta: z16.string()
     }),
+    z16.object({
+      type: z16.literal("response.reasoning_summary_part.done"),
+      item_id: z16.string(),
+      summary_index: z16.number()
+    }),
     z16.object({
       type: z16.literal("error"),
       code: z16.string(),
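The chunk schema now accepts the `response.reasoning_summary_part.done` server-sent event. Spelled out as a type, the accepted shape is:

```ts
// Event shape implied by the zod schema above.
type ReasoningSummaryPartDoneChunk = {
  type: 'response.reasoning_summary_part.done';
  item_id: string;       // id of the reasoning output item
  summary_index: number; // index of the summary part that finished
};
```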
@@ -2973,6 +2981,7 @@ var openaiResponsesProviderOptionsSchema = lazyValidator9(
     store: z17.boolean().nullish(),
     strictJsonSchema: z17.boolean().nullish(),
     textVerbosity: z17.enum(["low", "medium", "high"]).nullish(),
+    truncation: z17.enum(["auto", "disabled"]).nullish(),
     user: z17.string().nullish()
   })
 )
@@ -3252,6 +3261,7 @@ var OpenAIResponsesLanguageModel = class {
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
+      truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
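Together with the schema addition above, this surfaces the Responses API `truncation` parameter as a user-facing provider option: `"auto"` lets the API drop items from the middle of the context when it overflows, while `"disabled"` (the API default) fails the request instead. A usage sketch; the model id is illustrative:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.responses('gpt-5-mini'), // illustrative model id
  providerOptions: {
    openai: { truncation: 'auto' }, // truncate instead of erroring on context overflow
  },
  prompt: 'Summarize the conversation so far.',
});
```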
@@ -3262,9 +3272,6 @@ var OpenAIResponsesLanguageModel = class {
           summary: openaiOptions.reasoningSummary
         }
       }
-      },
-      ...modelConfig.requiredAutoTruncation && {
-        truncation: "auto"
       }
     };
     if (modelConfig.isReasoningModel) {
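With `truncation` now user-controlled, the code path that force-set `truncation: "auto"` for models flagged with `requiredAutoTruncation` is removed here; the flag itself is dropped from the `getResponsesModelConfig` defaults further down in this diff.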
@@ -3332,7 +3339,8 @@ var OpenAIResponsesLanguageModel = class {
         tools: openaiTools2,
         tool_choice: openaiToolChoice
       },
-      warnings: [...warnings, ...toolWarnings]
+      warnings: [...warnings, ...toolWarnings],
+      store
     };
   }
   async doGenerate(options) {
@@ -3607,7 +3615,8 @@ var OpenAIResponsesLanguageModel = class {
     const {
       args: body,
       warnings,
-      webSearchToolName
+      webSearchToolName,
+      store
     } = await this.getArgs(options);
     const { responseHeaders, value: response } = await postJsonToApi5({
       url: this.config.url({
@@ -3646,7 +3655,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -3745,10 +3754,10 @@ var OpenAIResponsesLanguageModel = class {
             }
           }
         });
-      } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+      } else if (isResponseOutputItemAddedChunk(value) && value.item.type === "reasoning") {
         activeReasoning[value.item.id] = {
           encryptedContent: value.item.encrypted_content,
-          summaryParts: [0]
+          summaryParts: { 0: "active" }
         };
         controller.enqueue({
           type: "reasoning-start",
@@ -3876,9 +3885,14 @@ var OpenAIResponsesLanguageModel = class {
           type: "text-end",
           id: value.item.id
         });
-      } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+      } else if (value.item.type === "reasoning") {
         const activeReasoningPart = activeReasoning[value.item.id];
-        for (const summaryIndex of activeReasoningPart.summaryParts) {
+        const summaryPartIndices = Object.entries(
+          activeReasoningPart.summaryParts
+        ).filter(
+          ([_, status]) => status === "active" || status === "can-conclude"
+        ).map(([summaryIndex]) => summaryIndex);
+        for (const summaryIndex of summaryPartIndices) {
           controller.enqueue({
             type: "reasoning-end",
             id: `${value.item.id}:${summaryIndex}`,
@@ -3952,23 +3966,34 @@ var OpenAIResponsesLanguageModel = class {
         if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
           logprobs.push(value.logprobs);
         }
-      } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+      } else if (value.type === "response.reasoning_summary_part.added") {
         if (value.summary_index > 0) {
-          (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.summaryParts.push(
-            value.summary_index
-          );
+          const activeReasoningPart = activeReasoning[value.item_id];
+          activeReasoningPart.summaryParts[value.summary_index] = "active";
+          for (const summaryIndex of Object.keys(
+            activeReasoningPart.summaryParts
+          )) {
+            if (activeReasoningPart.summaryParts[summaryIndex] === "can-conclude") {
+              controller.enqueue({
+                type: "reasoning-end",
+                id: `${value.item_id}:${summaryIndex}`,
+                providerMetadata: { openai: { itemId: value.item_id } }
+              });
+              activeReasoningPart.summaryParts[summaryIndex] = "concluded";
+            }
+          }
           controller.enqueue({
             type: "reasoning-start",
             id: `${value.item_id}:${value.summary_index}`,
             providerMetadata: {
               openai: {
                 itemId: value.item_id,
-                reasoningEncryptedContent: (_i = (_h = activeReasoning[value.item_id]) == null ? void 0 : _h.encryptedContent) != null ? _i : null
+                reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
               }
             }
           });
         }
-      } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+      } else if (value.type === "response.reasoning_summary_text.delta") {
         controller.enqueue({
           type: "reasoning-delta",
           id: `${value.item_id}:${value.summary_index}`,
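When a later summary part is added, any earlier part parked in `can-conclude` is first closed out with a `reasoning-end` event and marked `concluded`, so parts always conclude in order before the new part's `reasoning-start`. The handler added in the next hunk is where parts enter the `can-conclude` state.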
@@ -3979,16 +4004,29 @@ var OpenAIResponsesLanguageModel = class {
             }
           }
         });
+      } else if (value.type === "response.reasoning_summary_part.done") {
+        if (store) {
+          controller.enqueue({
+            type: "reasoning-end",
+            id: `${value.item_id}:${value.summary_index}`,
+            providerMetadata: {
+              openai: { itemId: value.item_id }
+            }
+          });
+          activeReasoning[value.item_id].summaryParts[value.summary_index] = "concluded";
+        } else {
+          activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
+        }
       } else if (isResponseFinishedChunk(value)) {
         finishReason = mapOpenAIResponseFinishReason({
-          finishReason: (_j = value.response.incomplete_details) == null ? void 0 : _j.reason,
+          finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
           hasFunctionCall
         });
         usage.inputTokens = value.response.usage.input_tokens;
         usage.outputTokens = value.response.usage.output_tokens;
         usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-        usage.reasoningTokens = (_l = (_k = value.response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0;
-        usage.cachedInputTokens = (_n = (_m = value.response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0;
+        usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
+        usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
         if (typeof value.response.service_tier === "string") {
           serviceTier = value.response.service_tier;
         }
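This new branch is the behavioral core of the release: with `store: true` a summary part is concluded as soon as its `part.done` event arrives, but with `store: false` the `reasoning-end` is deferred via `can-conclude` and flushed later (on the next `part.added` or on the item's `output_item.done`), plausibly so the part can be finalized together with the encrypted reasoning content needed for stateless replay. A condensed sketch of the transition, with a hypothetical helper name:

```ts
type SummaryPartStatus = 'active' | 'can-conclude' | 'concluded';

// Hypothetical condensation of the branch above; not the package's API.
function onSummaryPartDone(
  parts: Record<number, SummaryPartStatus>,
  summaryIndex: number,
  store: boolean,
  emitReasoningEnd: (summaryIndex: number) => void,
): void {
  if (store) {
    emitReasoningEnd(summaryIndex); // stored server-side: safe to close now
    parts[summaryIndex] = 'concluded';
  } else {
    parts[summaryIndex] = 'can-conclude'; // hold open until flushed in order
  }
}
```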
@@ -3997,7 +4035,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "source",
           sourceType: "url",
-          id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : generateId2(),
+          id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : generateId2(),
           url: value.annotation.url,
           title: value.annotation.title
         });
@@ -4005,10 +4043,10 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "source",
           sourceType: "document",
-          id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : generateId2(),
+          id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : generateId2(),
           mediaType: "text/plain",
-          title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
-          filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
+          title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
+          filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id
         });
       }
     } else if (isErrorChunk(value)) {
@@ -4047,9 +4085,6 @@ function isTextDeltaChunk(chunk) {
 function isResponseOutputItemDoneChunk(chunk) {
   return chunk.type === "response.output_item.done";
 }
-function isResponseOutputItemDoneReasoningChunk(chunk) {
-  return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
-}
 function isResponseFinishedChunk(chunk) {
   return chunk.type === "response.completed" || chunk.type === "response.incomplete";
 }
@@ -4068,18 +4103,9 @@ function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
 function isResponseOutputItemAddedChunk(chunk) {
   return chunk.type === "response.output_item.added";
 }
-function isResponseOutputItemAddedReasoningChunk(chunk) {
-  return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
-}
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
-function isResponseReasoningSummaryPartAddedChunk(chunk) {
-  return chunk.type === "response.reasoning_summary_part.added";
-}
-function isResponseReasoningSummaryTextDeltaChunk(chunk) {
-  return chunk.type === "response.reasoning_summary_text.delta";
-}
 function isErrorChunk(chunk) {
   return chunk.type === "error";
 }
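Since the stream transform now matches on `value.type` and `value.item.type` inline, these single-use type-guard helpers are no longer needed and are deleted.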
@@ -4087,7 +4113,6 @@ function getResponsesModelConfig(modelId) {
   const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
   const defaults = {
-    requiredAutoTruncation: false,
     systemMessageMode: "system",
     supportsFlexProcessing: supportsFlexProcessing2,
     supportsPriorityProcessing: supportsPriorityProcessing2
@@ -4502,7 +4527,7 @@ var OpenAITranscriptionModel = class {
 };

 // src/version.ts
-var VERSION = true ? "2.0.51" : "0.0.0-test";
+var VERSION = true ? "2.0.53" : "0.0.0-test";

 // src/openai-provider.ts
 function createOpenAI(options = {}) {