@ai-sdk/openai 2.0.51 → 2.0.53

This diff shows the changes between publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only.
@@ -2356,6 +2356,9 @@ async function convertToOpenAIResponsesInput({
         input.push(reasoningMessages[reasoningId]);
       } else {
         reasoningMessage.summary.push(...summaryParts);
+        if ((providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent) != null) {
+          reasoningMessage.encrypted_content = providerOptions.reasoningEncryptedContent;
+        }
       }
     }
   } else {
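
Note: this hunk closes a round-trip gap in convertToOpenAIResponsesInput. When summary parts are merged into an existing reasoning message, the encrypted reasoning content captured in provider metadata is now written back onto the reasoning item as encrypted_content. A minimal typed sketch of the merge step (the interface is an assumption for illustration; the bundle itself is untyped JavaScript):

// Sketch only: mirrors the merge step in convertToOpenAIResponsesInput.
interface OpenAIReasoningItem {
  type: "reasoning";
  id: string;
  summary: Array<{ type: "summary_text"; text: string }>;
  encrypted_content?: string | null; // assumed field shape
}

function mergeReasoningPart(
  item: OpenAIReasoningItem,
  summaryParts: Array<{ type: "summary_text"; text: string }>,
  providerOptions?: { reasoningEncryptedContent?: string | null }
): void {
  item.summary.push(...summaryParts);
  // New in this release: carry the encrypted payload back to the API so
  // reasoning state can be restored on a follow-up request.
  if (providerOptions?.reasoningEncryptedContent != null) {
    item.encrypted_content = providerOptions.reasoningEncryptedContent;
  }
}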
@@ -2698,6 +2701,11 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
       summary_index: import_v414.z.number(),
       delta: import_v414.z.string()
     }),
+    import_v414.z.object({
+      type: import_v414.z.literal("response.reasoning_summary_part.done"),
+      item_id: import_v414.z.string(),
+      summary_index: import_v414.z.number()
+    }),
     import_v414.z.object({
       type: import_v414.z.literal("error"),
       code: import_v414.z.string(),
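
Note: the stream chunk schema now accepts the response.reasoning_summary_part.done event; its handling appears in the transform hunks further down. As a plain type, the new variant is equivalent to:

// Equivalent TypeScript shape of the new zod variant above.
type ReasoningSummaryPartDoneEvent = {
  type: "response.reasoning_summary_part.done";
  item_id: string;
  summary_index: number;
};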
@@ -2975,6 +2983,7 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValid
     store: import_v415.z.boolean().nullish(),
     strictJsonSchema: import_v415.z.boolean().nullish(),
     textVerbosity: import_v415.z.enum(["low", "medium", "high"]).nullish(),
+    truncation: import_v415.z.enum(["auto", "disabled"]).nullish(),
     user: import_v415.z.string().nullish()
   })
 )
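
Note: the provider options schema gains a user-facing truncation option; the hunks below forward it to the request body and drop the old per-model requiredAutoTruncation default, making truncation an explicit per-call choice. A usage sketch, assuming the standard AI SDK entry points (on the OpenAI Responses API, "auto" lets the server drop context to fit the model's window, while "disabled" fails the request instead):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: opt in to automatic context truncation on the Responses API.
const result = await generateText({
  model: openai.responses("gpt-5"),
  prompt: "Summarize the attached transcript.",
  providerOptions: {
    openai: { truncation: "auto" },
  },
});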
@@ -3480,6 +3489,7 @@ var OpenAIResponsesLanguageModel = class {
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
+      truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
@@ -3490,9 +3500,6 @@ var OpenAIResponsesLanguageModel = class {
             summary: openaiOptions.reasoningSummary
           }
         }
-      },
-      ...modelConfig.requiredAutoTruncation && {
-        truncation: "auto"
       }
     };
     if (modelConfig.isReasoningModel) {
@@ -3560,7 +3567,8 @@ var OpenAIResponsesLanguageModel = class {
         tools: openaiTools,
         tool_choice: openaiToolChoice
       },
-      warnings: [...warnings, ...toolWarnings]
+      warnings: [...warnings, ...toolWarnings],
+      store
     };
   }
   async doGenerate(options) {
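
Note: getArgs now also returns the resolved store flag so that the streaming path (next hunks) can branch on it when concluding reasoning summary parts. A rough sketch of the widened return shape (field names taken from the hunks; the real return type has more members than shown):

// Partial, illustrative sketch of what getArgs() resolves to after this change.
interface ResolvedArgs {
  args: Record<string, unknown>; // the request body
  warnings: unknown[];
  webSearchToolName?: string;
  store: boolean | undefined;    // echoed back for the stream transformer
}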
@@ -3835,7 +3843,8 @@ var OpenAIResponsesLanguageModel = class {
     const {
       args: body,
       warnings,
-      webSearchToolName
+      webSearchToolName,
+      store
     } = await this.getArgs(options);
     const { responseHeaders, value: response } = await (0, import_provider_utils29.postJsonToApi)({
       url: this.config.url({
@@ -3874,7 +3883,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
           if (options.includeRawChunks) {
             controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
           }
@@ -3973,10 +3982,10 @@ var OpenAIResponsesLanguageModel = class {
                 }
               }
             });
-          } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+          } else if (isResponseOutputItemAddedChunk(value) && value.item.type === "reasoning") {
            activeReasoning[value.item.id] = {
              encryptedContent: value.item.encrypted_content,
-              summaryParts: [0]
+              summaryParts: { 0: "active" }
            };
            controller.enqueue({
              type: "reasoning-start",
@@ -4104,9 +4113,14 @@ var OpenAIResponsesLanguageModel = class {
              type: "text-end",
              id: value.item.id
            });
-          } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+          } else if (value.item.type === "reasoning") {
            const activeReasoningPart = activeReasoning[value.item.id];
-            for (const summaryIndex of activeReasoningPart.summaryParts) {
+            const summaryPartIndices = Object.entries(
+              activeReasoningPart.summaryParts
+            ).filter(
+              ([_, status]) => status === "active" || status === "can-conclude"
+            ).map(([summaryIndex]) => summaryIndex);
+            for (const summaryIndex of summaryPartIndices) {
              controller.enqueue({
                type: "reasoning-end",
                id: `${value.item.id}:${summaryIndex}`,
@@ -4180,23 +4194,34 @@ var OpenAIResponsesLanguageModel = class {
            if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
              logprobs.push(value.logprobs);
            }
-          } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+          } else if (value.type === "response.reasoning_summary_part.added") {
            if (value.summary_index > 0) {
-              (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.summaryParts.push(
-                value.summary_index
-              );
+              const activeReasoningPart = activeReasoning[value.item_id];
+              activeReasoningPart.summaryParts[value.summary_index] = "active";
+              for (const summaryIndex of Object.keys(
+                activeReasoningPart.summaryParts
+              )) {
+                if (activeReasoningPart.summaryParts[summaryIndex] === "can-conclude") {
+                  controller.enqueue({
+                    type: "reasoning-end",
+                    id: `${value.item_id}:${summaryIndex}`,
+                    providerMetadata: { openai: { itemId: value.item_id } }
+                  });
+                  activeReasoningPart.summaryParts[summaryIndex] = "concluded";
+                }
+              }
              controller.enqueue({
                type: "reasoning-start",
                id: `${value.item_id}:${value.summary_index}`,
                providerMetadata: {
                  openai: {
                    itemId: value.item_id,
-                    reasoningEncryptedContent: (_i = (_h = activeReasoning[value.item_id]) == null ? void 0 : _h.encryptedContent) != null ? _i : null
+                    reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
                  }
                }
              });
            }
-          } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+          } else if (value.type === "response.reasoning_summary_text.delta") {
            controller.enqueue({
              type: "reasoning-delta",
              id: `${value.item_id}:${value.summary_index}`,
@@ -4207,16 +4232,29 @@ var OpenAIResponsesLanguageModel = class {
                }
              }
            });
+          } else if (value.type === "response.reasoning_summary_part.done") {
+            if (store) {
+              controller.enqueue({
+                type: "reasoning-end",
+                id: `${value.item_id}:${value.summary_index}`,
+                providerMetadata: {
+                  openai: { itemId: value.item_id }
+                }
+              });
+              activeReasoning[value.item_id].summaryParts[value.summary_index] = "concluded";
+            } else {
+              activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
+            }
          } else if (isResponseFinishedChunk(value)) {
            finishReason = mapOpenAIResponseFinishReason({
-              finishReason: (_j = value.response.incomplete_details) == null ? void 0 : _j.reason,
+              finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
              hasFunctionCall
            });
            usage.inputTokens = value.response.usage.input_tokens;
            usage.outputTokens = value.response.usage.output_tokens;
            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-            usage.reasoningTokens = (_l = (_k = value.response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0;
-            usage.cachedInputTokens = (_n = (_m = value.response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0;
+            usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
+            usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
            if (typeof value.response.service_tier === "string") {
              serviceTier = value.response.service_tier;
            }
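
Note: this new branch is where the store flag pays off. With store enabled, a summary part is concluded as soon as its response.reasoning_summary_part.done event arrives; without server-side storage it is only marked "can-conclude", and the actual reasoning-end is emitted later, either when the next summary part starts (previous hunk) or when the whole reasoning item completes (the output_item.done hunk above). A condensed sketch of the branch, with emit standing in for controller.enqueue:

// Condensed, illustrative version of the store-dependent conclude logic.
type PartStatus = "active" | "can-conclude" | "concluded";

function onSummaryPartDone(
  itemId: string,
  summaryIndex: number,
  store: boolean | undefined,
  summaryParts: Record<number, PartStatus>,
  emit: (event: { type: "reasoning-end"; id: string }) => void
): void {
  if (store) {
    // Stored responses: close the part immediately.
    emit({ type: "reasoning-end", id: `${itemId}:${summaryIndex}` });
    summaryParts[summaryIndex] = "concluded";
  } else {
    // Unstored responses: defer; flushed on the next part.added or on
    // output_item.done for the reasoning item.
    summaryParts[summaryIndex] = "can-conclude";
  }
}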
@@ -4225,7 +4263,7 @@ var OpenAIResponsesLanguageModel = class {
              controller.enqueue({
                type: "source",
                sourceType: "url",
-                id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils29.generateId)(),
+                id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils29.generateId)(),
                url: value.annotation.url,
                title: value.annotation.title
              });
@@ -4233,10 +4271,10 @@ var OpenAIResponsesLanguageModel = class {
              controller.enqueue({
                type: "source",
                sourceType: "document",
-                id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : (0, import_provider_utils29.generateId)(),
+                id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : (0, import_provider_utils29.generateId)(),
                mediaType: "text/plain",
-                title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
-                filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id
+                title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
+                filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id
              });
            }
          } else if (isErrorChunk(value)) {
@@ -4275,9 +4313,6 @@ function isTextDeltaChunk(chunk) {
 function isResponseOutputItemDoneChunk(chunk) {
   return chunk.type === "response.output_item.done";
 }
-function isResponseOutputItemDoneReasoningChunk(chunk) {
-  return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
-}
 function isResponseFinishedChunk(chunk) {
   return chunk.type === "response.completed" || chunk.type === "response.incomplete";
 }
@@ -4296,18 +4331,9 @@ function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
 function isResponseOutputItemAddedChunk(chunk) {
   return chunk.type === "response.output_item.added";
 }
-function isResponseOutputItemAddedReasoningChunk(chunk) {
-  return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
-}
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
-function isResponseReasoningSummaryPartAddedChunk(chunk) {
-  return chunk.type === "response.reasoning_summary_part.added";
-}
-function isResponseReasoningSummaryTextDeltaChunk(chunk) {
-  return chunk.type === "response.reasoning_summary_text.delta";
-}
 function isErrorChunk(chunk) {
   return chunk.type === "error";
 }
@@ -4315,7 +4341,6 @@ function getResponsesModelConfig(modelId) {
   const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
   const defaults = {
-    requiredAutoTruncation: false,
     systemMessageMode: "system",
     supportsFlexProcessing: supportsFlexProcessing2,
     supportsPriorityProcessing: supportsPriorityProcessing2