@ai-sdk/openai 2.0.14 → 2.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2606,7 +2606,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -2614,7 +2614,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  delete baseArgs.service_tier;
  }
- if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -2641,7 +2641,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  const { args: body, warnings } = await this.getArgs(options);
  const url = this.config.url({
  path: "/responses",
@@ -2707,7 +2707,11 @@ var OpenAIResponsesLanguageModel = class {
  import_v416.z.object({
  type: import_v416.z.literal("web_search_call"),
  id: import_v416.z.string(),
- status: import_v416.z.string().optional()
+ status: import_v416.z.string().optional(),
+ action: import_v416.z.object({
+ type: import_v416.z.literal("search"),
+ query: import_v416.z.string().optional()
+ }).nullish()
  }),
  import_v416.z.object({
  type: import_v416.z.literal("computer_call"),
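
In plain zod (the bundle refers to zod through the `import_v416` alias), the `web_search_call` output item gains an optional `action` object describing the search that was executed. A sketch of the item shape after this release:

```ts
import { z } from "zod";

const webSearchCallSchema = z.object({
  type: z.literal("web_search_call"),
  id: z.string(),
  status: z.string().optional(),
  // New in 2.0.15: the performed action, including the query text.
  // .nullish() accepts payloads where the field is null or absent.
  action: z
    .object({
      type: z.literal("search"),
      query: z.string().optional(),
    })
    .nullish(),
});

type WebSearchCall = z.infer<typeof webSearchCallSchema>;
```
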
@@ -2839,14 +2843,17 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: part.id,
  toolName: "web_search_preview",
- input: "",
+ input: (_k = (_j = part.action) == null ? void 0 : _j.query) != null ? _k : "",
  providerExecuted: true
  });
  content.push({
  type: "tool-result",
  toolCallId: part.id,
  toolName: "web_search_preview",
- result: { status: part.status || "completed" },
+ result: {
+ status: part.status || "completed",
+ ...((_l = part.action) == null ? void 0 : _l.query) && { query: part.action.query }
+ },
  providerExecuted: true
  });
  break;
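
The `_j`/`_k`/`_l` temporaries are esbuild's expansion of optional chaining. De-minified, this hunk threads the executed query into both the tool-call input (previously hardcoded to `""`) and the tool result. A sketch, with `pushWebSearchParts` and the loose `OutputPart` type as illustrative stand-ins:

```ts
type OutputPart = Record<string, unknown>;

// De-minified equivalent of the doGenerate branch above, using the
// WebSearchCall type sketched after the previous hunk.
function pushWebSearchParts(part: WebSearchCall, content: OutputPart[]): void {
  content.push({
    type: "tool-call",
    toolCallId: part.id,
    toolName: "web_search_preview",
    input: part.action?.query ?? "", // was always "" in 2.0.14
    providerExecuted: true,
  });
  content.push({
    type: "tool-result",
    toolCallId: part.id,
    toolName: "web_search_preview",
    result: {
      status: part.status || "completed",
      // the spread adds a `query` key only when a non-empty query exists
      ...(part.action?.query && { query: part.action.query }),
    },
    providerExecuted: true,
  });
}
```
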
@@ -2904,15 +2911,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
+ finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_l = (_k = response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0,
- cachedInputTokens: (_n = (_m = response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
+ reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
+ cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
  },
  request: { body },
  response: {
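
Nothing functional changes in this hunk: the jump from `_j.._n` to `_m.._q` is esbuild renumbering its generated temporaries because the new `part.action` optional-chaining sites were added earlier in `doGenerate`. Shorn of the temporaries, the mapping reads the same in both versions; `ResponsesUsage` and `mapUsage` below are illustrative names:

```ts
interface ResponsesUsage {
  input_tokens: number;
  output_tokens: number;
  output_tokens_details?: { reasoning_tokens?: number } | null;
  input_tokens_details?: { cached_tokens?: number } | null;
}

// The unchanged usage mapping, without the minifier temporaries.
function mapUsage(u: ResponsesUsage) {
  return {
    inputTokens: u.input_tokens,
    outputTokens: u.output_tokens,
    totalTokens: u.input_tokens + u.output_tokens,
    reasoningTokens: u.output_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: u.input_tokens_details?.cached_tokens ?? undefined,
  };
}
```
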
@@ -2964,7 +2971,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -3071,7 +3078,7 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: value.item.id,
  toolName: "web_search_preview",
- input: "",
+ input: (_c = (_b = value.item.action) == null ? void 0 : _b.query) != null ? _c : "",
  providerExecuted: true
  });
  controller.enqueue({
@@ -3080,7 +3087,10 @@ var OpenAIResponsesLanguageModel = class {
  toolName: "web_search_preview",
  result: {
  type: "web_search_tool_result",
- status: value.item.status || "completed"
+ status: value.item.status || "completed",
+ ...((_d = value.item.action) == null ? void 0 : _d.query) && {
+ query: value.item.action.query
+ }
  },
  providerExecuted: true
  });
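
The streaming path mirrors the `doGenerate` change: when the finished `web_search_call` item arrives in the stream transform, the enqueued tool-call and tool-result now carry the query, and the streamed result additionally keeps its `type: "web_search_tool_result"` discriminator. A sketch, with `emitWebSearchParts` as an illustrative wrapper around `controller.enqueue` and `WebSearchCall` from the earlier schema sketch:

```ts
function emitWebSearchParts(
  item: WebSearchCall,
  enqueue: (part: Record<string, unknown>) => void
): void {
  enqueue({
    type: "tool-call",
    toolCallId: item.id,
    toolName: "web_search_preview",
    input: item.action?.query ?? "",
    providerExecuted: true,
  });
  enqueue({
    type: "tool-result",
    toolCallId: item.id,
    toolName: "web_search_preview",
    result: {
      type: "web_search_tool_result",
      status: item.status || "completed",
      ...(item.action?.query && { query: item.action.query }),
    },
    providerExecuted: true,
  });
}
```
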
@@ -3148,7 +3158,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata: {
  openai: {
  itemId: value.item.id,
- reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ reasoningEncryptedContent: (_e = value.item.encrypted_content) != null ? _e : null
  }
  }
  });
@@ -3183,7 +3193,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
  if (value.summary_index > 0) {
- (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ (_f = activeReasoning[value.item_id]) == null ? void 0 : _f.summaryParts.push(
  value.summary_index
  );
  controller.enqueue({
@@ -3192,7 +3202,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata: {
  openai: {
  itemId: value.item_id,
- reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
  }
  }
  });
@@ -3210,20 +3220,20 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
+ finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
- usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
+ usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
+ usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  if (value.annotation.type === "url_citation") {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils14.generateId)(),
+ id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils14.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
@@ -3231,7 +3241,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils14.generateId)(),
+ id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : (0, import_provider_utils14.generateId)(),
  mediaType: "text/plain",
  title: value.annotation.quote,
  filename: value.annotation.file_id
@@ -3321,7 +3331,11 @@ var responseOutputItemAddedSchema = import_v416.z.object({
  import_v416.z.object({
  type: import_v416.z.literal("web_search_call"),
  id: import_v416.z.string(),
- status: import_v416.z.string()
+ status: import_v416.z.string(),
+ action: import_v416.z.object({
+ type: import_v416.z.literal("search"),
+ query: import_v416.z.string().optional()
+ }).nullish()
  }),
  import_v416.z.object({
  type: import_v416.z.literal("computer_call"),
@@ -3370,7 +3384,11 @@ var responseOutputItemDoneSchema = import_v416.z.object({
  import_v416.z.object({
  type: import_v416.z.literal("web_search_call"),
  id: import_v416.z.string(),
- status: import_v416.z.literal("completed")
+ status: import_v416.z.literal("completed"),
+ action: import_v416.z.object({
+ type: import_v416.z.literal("search"),
+ query: import_v416.z.string().optional()
+ }).nullish()
  }),
  import_v416.z.object({
  type: import_v416.z.literal("computer_call"),
@@ -3478,39 +3496,39 @@ function isErrorChunk(chunk) {
  return chunk.type === "error";
  }
  function getResponsesModelConfig(modelId) {
+ const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+ const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ const defaults = {
+ requiredAutoTruncation: false,
+ systemMessageMode: "system",
+ supportsFlexProcessing: supportsFlexProcessing2,
+ supportsPriorityProcessing: supportsPriorityProcessing2
+ };
  if (modelId.startsWith("gpt-5-chat")) {
  return {
- isReasoningModel: false,
- systemMessageMode: "system",
- requiredAutoTruncation: false
+ ...defaults,
+ isReasoningModel: false
  };
  }
  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
  return {
+ ...defaults,
  isReasoningModel: true,
- systemMessageMode: "remove",
- requiredAutoTruncation: false
+ systemMessageMode: "remove"
  };
  }
  return {
+ ...defaults,
  isReasoningModel: true,
- systemMessageMode: "developer",
- requiredAutoTruncation: false
+ systemMessageMode: "developer"
  };
  }
  return {
- isReasoningModel: false,
- systemMessageMode: "system",
- requiredAutoTruncation: false
+ ...defaults,
+ isReasoningModel: false
  };
  }
- function supportsFlexProcessing2(modelId) {
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
- }
- function supportsPriorityProcessing2(modelId) {
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
- }
  var openaiResponsesProviderOptionsSchema = import_v416.z.object({
  metadata: import_v416.z.any().nullish(),
  parallelToolCalls: import_v416.z.boolean().nullish(),
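
This last hunk is the refactor the service-tier hunks depend on: the standalone `supportsFlexProcessing2`/`supportsPriorityProcessing2` helpers are deleted, their predicates are evaluated once per model, and the results travel on the returned config. A de-minified sketch, typed against the `ResponsesModelConfig` interface sketched after the first hunk:

```ts
function getResponsesModelConfig(modelId: string): ResponsesModelConfig {
  // Predicates formerly housed in supportsFlexProcessing2 / supportsPriorityProcessing2.
  const supportsFlexProcessing =
    modelId.startsWith("o3") ||
    modelId.startsWith("o4-mini") ||
    (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"));
  const supportsPriorityProcessing =
    modelId.startsWith("gpt-4") ||
    modelId.startsWith("gpt-5-mini") ||
    (modelId.startsWith("gpt-5") &&
      !modelId.startsWith("gpt-5-nano") &&
      !modelId.startsWith("gpt-5-chat")) ||
    modelId.startsWith("o3") ||
    modelId.startsWith("o4-mini");

  const defaults = {
    requiredAutoTruncation: false,
    systemMessageMode: "system" as const,
    supportsFlexProcessing,
    supportsPriorityProcessing,
  };

  if (modelId.startsWith("gpt-5-chat")) {
    return { ...defaults, isReasoningModel: false };
  }
  if (
    modelId.startsWith("o") ||
    modelId.startsWith("gpt-5") ||
    modelId.startsWith("codex-") ||
    modelId.startsWith("computer-use")
  ) {
    // For o1-mini / o1-preview, system messages are stripped ("remove").
    if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
      return { ...defaults, isReasoningModel: true, systemMessageMode: "remove" };
    }
    // Other reasoning models receive them via the "developer" role.
    return { ...defaults, isReasoningModel: true, systemMessageMode: "developer" };
  }
  return { ...defaults, isReasoningModel: false };
}
```
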