@ai-sdk/openai 2.0.51 → 2.0.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/openai
 
+ ## 2.0.53
+
+ ### Patch Changes
+
+ - 5464bf0: fix(provider/openai): add truncation parameter support for Responses API
+
+ ## 2.0.52
+
+ ### Patch Changes
+
+ - 8de8de5: fix(provider/openai): end reasoning parts earlier
+
  ## 2.0.51
 
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -322,6 +322,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyV
  store?: boolean | null | undefined;
  strictJsonSchema?: boolean | null | undefined;
  textVerbosity?: "low" | "medium" | "high" | null | undefined;
+ truncation?: "auto" | "disabled" | null | undefined;
  user?: string | null | undefined;
  }>;
  type OpenAIResponsesProviderOptions = InferValidator<typeof openaiResponsesProviderOptionsSchema>;
package/dist/index.d.ts CHANGED
@@ -322,6 +322,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyV
  store?: boolean | null | undefined;
  strictJsonSchema?: boolean | null | undefined;
  textVerbosity?: "low" | "medium" | "high" | null | undefined;
+ truncation?: "auto" | "disabled" | null | undefined;
  user?: string | null | undefined;
  }>;
  type OpenAIResponsesProviderOptions = InferValidator<typeof openaiResponsesProviderOptionsSchema>;
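
The `truncation` option added in 2.0.53 maps straight onto the Responses API `truncation` request field; the same release drops the internal `requiredAutoTruncation` model default (see the `getResponsesModelConfig` hunk further down), so truncation is now entirely caller-controlled. A minimal usage sketch, assuming AI SDK 5's `providerOptions` pass-through; the model id and prompt are placeholders:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Opt in to automatic context-window truncation on the Responses API.
// With 'auto', OpenAI drops items from the middle of the conversation
// when the context window overflows instead of failing the request.
const { text } = await generateText({
  model: openai.responses('gpt-4.1-mini'), // placeholder model id
  prompt: 'Summarize the conversation so far.',
  providerOptions: {
    openai: {
      truncation: 'auto', // 'auto' | 'disabled' (OpenAI defaults to 'disabled')
    },
  },
});

console.log(text);
```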
package/dist/index.js CHANGED
@@ -2283,6 +2283,9 @@ async function convertToOpenAIResponsesInput({
  input.push(reasoningMessages[reasoningId]);
  } else {
  reasoningMessage.summary.push(...summaryParts);
+ if ((providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent) != null) {
+ reasoningMessage.encrypted_content = providerOptions.reasoningEncryptedContent;
+ }
  }
  }
  } else {
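
This hunk re-attaches `reasoningEncryptedContent` to a merged reasoning message when converting prompt messages back into Responses API input, so encrypted reasoning survives the round trip when responses are not stored server-side. A hedged sketch of the calling pattern this serves; the `store`/`include` values follow OpenAI's documented `reasoning.encrypted_content` mechanism, but treat the exact option shapes as assumptions:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText, type ModelMessage } from 'ai';

// Stateless multi-turn usage: nothing is persisted server-side, so the
// caller must round-trip the encrypted reasoning content itself.
const messages: ModelMessage[] = [
  { role: 'user', content: 'Think step by step: what is 17 * 24?' },
];

const first = await generateText({
  model: openai.responses('o4-mini'), // placeholder reasoning model id
  messages,
  providerOptions: {
    openai: {
      store: false, // stateless request
      include: ['reasoning.encrypted_content'], // return encrypted reasoning
    },
  },
});

// The returned assistant messages carry reasoning parts whose provider
// metadata holds reasoningEncryptedContent; feeding them back in lets the
// converter above restore encrypted_content on the follow-up request.
messages.push(...first.response.messages);
messages.push({ role: 'user', content: 'Now double it.' });
```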
@@ -2625,6 +2628,11 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
  summary_index: import_v416.z.number(),
  delta: import_v416.z.string()
  }),
+ import_v416.z.object({
+ type: import_v416.z.literal("response.reasoning_summary_part.done"),
+ item_id: import_v416.z.string(),
+ summary_index: import_v416.z.number()
+ }),
  import_v416.z.object({
  type: import_v416.z.literal("error"),
  code: import_v416.z.string(),
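
The chunk schema gains a union branch for the `response.reasoning_summary_part.done` stream event, which previously went unrecognized. A standalone sketch of the equivalent validator in plain `zod` (the bundle wraps it in `@ai-sdk/provider-utils` lazy validators):

```ts
import { z } from 'zod';

// Standalone equivalent of the branch added to openaiResponsesChunkSchema:
// the event that marks one part of a reasoning summary as complete.
const reasoningSummaryPartDone = z.object({
  type: z.literal('response.reasoning_summary_part.done'),
  item_id: z.string(),
  summary_index: z.number(),
});

// Example event as it arrives on the Responses API stream:
const chunk = reasoningSummaryPartDone.parse({
  type: 'response.reasoning_summary_part.done',
  item_id: 'rs_abc123',
  summary_index: 0,
});
console.log(chunk.summary_index); // 0
```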
@@ -2902,6 +2910,7 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValid
  store: import_v417.z.boolean().nullish(),
  strictJsonSchema: import_v417.z.boolean().nullish(),
  textVerbosity: import_v417.z.enum(["low", "medium", "high"]).nullish(),
+ truncation: import_v417.z.enum(["auto", "disabled"]).nullish(),
  user: import_v417.z.string().nullish()
  })
  )
@@ -3179,6 +3188,7 @@ var OpenAIResponsesLanguageModel = class {
  prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
  safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
  top_logprobs: topLogprobs,
+ truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
  // model-specific settings:
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
@@ -3189,9 +3199,6 @@
  summary: openaiOptions.reasoningSummary
  }
  }
- },
- ...modelConfig.requiredAutoTruncation && {
- truncation: "auto"
  }
  };
  if (modelConfig.isReasoningModel) {
@@ -3259,7 +3266,8 @@
  tools: openaiTools2,
  tool_choice: openaiToolChoice
  },
- warnings: [...warnings, ...toolWarnings]
+ warnings: [...warnings, ...toolWarnings],
+ store
  };
  }
  async doGenerate(options) {
@@ -3534,7 +3542,8 @@
  const {
  args: body,
  warnings,
- webSearchToolName
+ webSearchToolName,
+ store
  } = await this.getArgs(options);
  const { responseHeaders, value: response } = await (0, import_provider_utils24.postJsonToApi)({
  url: this.config.url({
@@ -3573,7 +3582,7 @@
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -3672,10 +3681,10 @@
  }
  }
  });
- } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+ } else if (isResponseOutputItemAddedChunk(value) && value.item.type === "reasoning") {
  activeReasoning[value.item.id] = {
  encryptedContent: value.item.encrypted_content,
- summaryParts: [0]
+ summaryParts: { 0: "active" }
  };
  controller.enqueue({
  type: "reasoning-start",
@@ -3803,9 +3812,14 @@
  type: "text-end",
  id: value.item.id
  });
- } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+ } else if (value.item.type === "reasoning") {
  const activeReasoningPart = activeReasoning[value.item.id];
- for (const summaryIndex of activeReasoningPart.summaryParts) {
+ const summaryPartIndices = Object.entries(
+ activeReasoningPart.summaryParts
+ ).filter(
+ ([_, status]) => status === "active" || status === "can-conclude"
+ ).map(([summaryIndex]) => summaryIndex);
+ for (const summaryIndex of summaryPartIndices) {
  controller.enqueue({
  type: "reasoning-end",
  id: `${value.item.id}:${summaryIndex}`,
@@ -3879,23 +3893,34 @@
  if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
  logprobs.push(value.logprobs);
  }
- } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+ } else if (value.type === "response.reasoning_summary_part.added") {
  if (value.summary_index > 0) {
- (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.summaryParts.push(
- value.summary_index
- );
+ const activeReasoningPart = activeReasoning[value.item_id];
+ activeReasoningPart.summaryParts[value.summary_index] = "active";
+ for (const summaryIndex of Object.keys(
+ activeReasoningPart.summaryParts
+ )) {
+ if (activeReasoningPart.summaryParts[summaryIndex] === "can-conclude") {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item_id}:${summaryIndex}`,
+ providerMetadata: { openai: { itemId: value.item_id } }
+ });
+ activeReasoningPart.summaryParts[summaryIndex] = "concluded";
+ }
+ }
  controller.enqueue({
  type: "reasoning-start",
  id: `${value.item_id}:${value.summary_index}`,
  providerMetadata: {
  openai: {
  itemId: value.item_id,
- reasoningEncryptedContent: (_i = (_h = activeReasoning[value.item_id]) == null ? void 0 : _h.encryptedContent) != null ? _i : null
+ reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
  }
  }
  });
  }
- } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ } else if (value.type === "response.reasoning_summary_text.delta") {
  controller.enqueue({
  type: "reasoning-delta",
  id: `${value.item_id}:${value.summary_index}`,
@@ -3906,16 +3931,29 @@
  }
  }
  });
+ } else if (value.type === "response.reasoning_summary_part.done") {
+ if (store) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item_id}:${value.summary_index}`,
+ providerMetadata: {
+ openai: { itemId: value.item_id }
+ }
+ });
+ activeReasoning[value.item_id].summaryParts[value.summary_index] = "concluded";
+ } else {
+ activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
+ }
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_j = value.response.incomplete_details) == null ? void 0 : _j.reason,
+ finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
  hasFunctionCall
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_l = (_k = value.response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0;
- usage.cachedInputTokens = (_n = (_m = value.response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0;
+ usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
+ usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
  if (typeof value.response.service_tier === "string") {
  serviceTier = value.response.service_tier;
  }
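
Taken together, the streaming hunks replace the old array of summary indices with a small per-part state machine, which is what "end reasoning parts earlier" (2.0.52) means in practice: a part is `active` while streaming; on `response.reasoning_summary_part.done` it ends immediately (`concluded`) when `store` is true, but is only marked `can-conclude` when `store` is false, so the final part stays open to receive `reasoningEncryptedContent` before the reasoning item completes. A self-contained sketch of that lifecycle, with stream parts reduced to strings:

```ts
// Simplified model of the summary-part lifecycle in the bundle above.
type PartStatus = 'active' | 'can-conclude' | 'concluded';

interface ReasoningState {
  summaryParts: Record<number, PartStatus>;
}

const activeReasoning: Record<string, ReasoningState> = {};
const emit = (part: string) => console.log(part);

// response.output_item.added (type "reasoning"): part 0 starts right away.
function onReasoningItemAdded(itemId: string) {
  activeReasoning[itemId] = { summaryParts: { 0: 'active' } };
  emit(`reasoning-start ${itemId}:0`);
}

// response.reasoning_summary_part.added: a new part proves that any
// 'can-conclude' parts are finished, so they are ended now.
function onSummaryPartAdded(itemId: string, summaryIndex: number) {
  if (summaryIndex === 0) return; // part 0 was handled at item added
  const state = activeReasoning[itemId];
  state.summaryParts[summaryIndex] = 'active';
  for (const [index, status] of Object.entries(state.summaryParts)) {
    if (status === 'can-conclude') {
      emit(`reasoning-end ${itemId}:${index}`);
      state.summaryParts[Number(index)] = 'concluded';
    }
  }
  emit(`reasoning-start ${itemId}:${summaryIndex}`);
}

// response.reasoning_summary_part.done: end eagerly only for stored
// responses; stateless ones hold the part open for encrypted content.
function onSummaryPartDone(itemId: string, summaryIndex: number, store: boolean) {
  const state = activeReasoning[itemId];
  if (store) {
    emit(`reasoning-end ${itemId}:${summaryIndex}`);
    state.summaryParts[summaryIndex] = 'concluded';
  } else {
    state.summaryParts[summaryIndex] = 'can-conclude';
  }
}

// response.output_item.done (type "reasoning"): flush whatever is open.
function onReasoningItemDone(itemId: string) {
  const state = activeReasoning[itemId];
  for (const [index, status] of Object.entries(state.summaryParts)) {
    if (status === 'active' || status === 'can-conclude') {
      emit(`reasoning-end ${itemId}:${index}`);
    }
  }
  delete activeReasoning[itemId];
}
```

With `store: true`, each part's `reasoning-end` now fires as soon as its `part.done` event arrives instead of all at once when the item closes, which is the behavioral fix shipped in 2.0.52.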
@@ -3924,7 +3962,7 @@
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils24.generateId)(),
+ id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils24.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
@@ -3932,10 +3970,10 @@
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils24.generateId)(),
+ id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : (0, import_provider_utils24.generateId)(),
  mediaType: "text/plain",
- title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
- filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
+ title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
+ filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id
  });
  }
  } else if (isErrorChunk(value)) {
@@ -3974,9 +4012,6 @@ function isTextDeltaChunk(chunk) {
  function isResponseOutputItemDoneChunk(chunk) {
  return chunk.type === "response.output_item.done";
  }
- function isResponseOutputItemDoneReasoningChunk(chunk) {
- return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
- }
  function isResponseFinishedChunk(chunk) {
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
  }
@@ -3995,18 +4030,9 @@ function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
  function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
  }
- function isResponseOutputItemAddedReasoningChunk(chunk) {
- return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
- }
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
- function isResponseReasoningSummaryPartAddedChunk(chunk) {
- return chunk.type === "response.reasoning_summary_part.added";
- }
- function isResponseReasoningSummaryTextDeltaChunk(chunk) {
- return chunk.type === "response.reasoning_summary_text.delta";
- }
  function isErrorChunk(chunk) {
  return chunk.type === "error";
  }
@@ -4014,7 +4040,6 @@ function getResponsesModelConfig(modelId) {
  const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  const defaults = {
- requiredAutoTruncation: false,
  systemMessageMode: "system",
  supportsFlexProcessing: supportsFlexProcessing2,
  supportsPriorityProcessing: supportsPriorityProcessing2
@@ -4411,7 +4436,7 @@ var OpenAITranscriptionModel = class {
  };
 
  // src/version.ts
- var VERSION = true ? "2.0.51" : "0.0.0-test";
+ var VERSION = true ? "2.0.53" : "0.0.0-test";
 
  // src/openai-provider.ts
  function createOpenAI(options = {}) {