@ai-sdk/openai 2.0.21 → 2.0.23

This diff compares publicly released versions of the package as they appear in their public registry and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
  # @ai-sdk/openai
 
+ ## 2.0.23
+
+ ### Patch Changes
+
+ - a9a61b7: Add serviceTier to provider metadata for OpenAI responses
+
+ ## 2.0.22
+
+ ### Patch Changes
+
+ - 0e272ae: fix(provider/openai): make file_citation annotation fields optional for responses api compatibility
+ - Updated dependencies [886e7cd]
+   - @ai-sdk/provider-utils@3.0.7
+
  ## 2.0.21
 
  ### Patch Changes
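Taken together, the two patches surface at the call site roughly as follows (a minimal sketch against AI SDK v5; the model id and prompt are placeholders, and `serviceTier` only appears when the Responses API reports one):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('gpt-4o-mini'), // placeholder model id
  prompt: 'Hello!',
});

// New in 2.0.23: the tier the request was processed under
// (e.g. "default" or "flex"), exposed as provider metadata.
console.log(result.providerMetadata?.openai?.serviceTier);
```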
package/dist/index.js CHANGED
@@ -2360,7 +2360,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  const { args: body, warnings } = await this.getArgs(options);
  const url = this.config.url({
  path: "/responses",
@@ -2406,10 +2406,12 @@ var OpenAIResponsesLanguageModel = class {
  }),
  import_v413.z.object({
  type: import_v413.z.literal("file_citation"),
- start_index: import_v413.z.number(),
- end_index: import_v413.z.number(),
  file_id: import_v413.z.string(),
- quote: import_v413.z.string()
+ filename: import_v413.z.string().nullish(),
+ index: import_v413.z.number().nullish(),
+ start_index: import_v413.z.number().nullish(),
+ end_index: import_v413.z.number().nullish(),
+ quote: import_v413.z.string().nullish()
  })
  ])
  )
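The effect of the relaxed annotation schema is easiest to see in isolation. A standalone sketch with plain `zod` (standing in for the bundled `import_v413` alias; the file id is illustrative):

```ts
import { z } from 'zod';

// Mirrors the relaxed schema above: only type and file_id remain
// required; every other field may be absent or null.
const fileCitation = z.object({
  type: z.literal('file_citation'),
  file_id: z.string(),
  filename: z.string().nullish(),
  index: z.number().nullish(),
  start_index: z.number().nullish(),
  end_index: z.number().nullish(),
  quote: z.string().nullish(),
});

// A minimal payload like this failed the 2.0.21 schema for lacking
// start_index/end_index/quote; it now parses cleanly.
fileCitation.parse({ type: 'file_citation', file_id: 'file-abc123' });
```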
@@ -2458,6 +2460,7 @@ var OpenAIResponsesLanguageModel = class {
  })
  ])
  ),
+ service_tier: import_v413.z.string().nullish(),
  incomplete_details: import_v413.z.object({ reason: import_v413.z.string() }).nullable(),
  usage: usageSchema2
  })
@@ -2527,8 +2530,8 @@ var OpenAIResponsesLanguageModel = class {
  sourceType: "document",
  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils12.generateId)(),
  mediaType: "text/plain",
- title: annotation.quote,
- filename: annotation.file_id
+ title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id
  });
  }
  }
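The `_j`..`_l` temporaries are just the minifier's spelling of nullish coalescing; reconstructed for readability (the type and helper below are illustrative, not package API):

```ts
type FileCitation = {
  file_id: string;
  filename?: string | null;
  quote?: string | null;
};

function toDocumentSource(annotation: FileCitation) {
  return {
    sourceType: 'document' as const,
    mediaType: 'text/plain',
    // Prefer the quote as a title, then the new optional filename,
    // then a generic label.
    title: annotation.quote ?? annotation.filename ?? 'Document',
    // Prefer the human-readable filename over the raw file id.
    filename: annotation.filename ?? annotation.file_id,
  };
}
```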
@@ -2616,18 +2619,21 @@ var OpenAIResponsesLanguageModel = class {
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
  }
+ if (typeof response.service_tier === "string") {
+ providerMetadata.openai.serviceTier = response.service_tier;
+ }
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
+ finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_l = (_k = response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0,
- cachedInputTokens: (_n = (_m = response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
+ reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
+ cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
  },
  request: { body },
  response: {
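Aside from the new `service_tier` guard, the usage mapping is semantically unchanged; only the temporaries shifted (`_k`/`_l` → `_n`/`_o`, `_m`/`_n` → `_p`/`_q`). De-minified, it amounts to (type and helper name illustrative):

```ts
type ResponsesUsage = {
  input_tokens: number;
  output_tokens: number;
  output_tokens_details?: { reasoning_tokens?: number } | null;
  input_tokens_details?: { cached_tokens?: number } | null;
};

function mapUsage(usage: ResponsesUsage) {
  return {
    inputTokens: usage.input_tokens,
    outputTokens: usage.output_tokens,
    totalTokens: usage.input_tokens + usage.output_tokens,
    // Optional-chain through the detail objects, which the API may omit.
    reasoningTokens: usage.output_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: usage.input_tokens_details?.cached_tokens ?? undefined,
  };
}
```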
@@ -2672,6 +2678,7 @@ var OpenAIResponsesLanguageModel = class {
  const ongoingToolCalls = {};
  let hasToolCalls = false;
  const activeReasoning = {};
+ let serviceTier;
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -2679,7 +2686,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -2930,6 +2937,9 @@ var OpenAIResponsesLanguageModel = class {
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
  usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
  usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
+ if (typeof value.response.service_tier === "string") {
+ serviceTier = value.response.service_tier;
+ }
  } else if (isResponseAnnotationAddedChunk(value)) {
  if (value.annotation.type === "url_citation") {
  controller.enqueue({
@@ -2945,8 +2955,8 @@ var OpenAIResponsesLanguageModel = class {
  sourceType: "document",
  id: (_r = (_q = (_p = self.config).generateId) == null ? void 0 : _q.call(_p)) != null ? _r : (0, import_provider_utils12.generateId)(),
  mediaType: "text/plain",
- title: value.annotation.quote,
- filename: value.annotation.file_id
+ title: (_t = (_s = value.annotation.quote) != null ? _s : value.annotation.filename) != null ? _t : "Document",
+ filename: (_u = value.annotation.filename) != null ? _u : value.annotation.file_id
  });
  }
  } else if (isErrorChunk(value)) {
@@ -2962,6 +2972,9 @@ var OpenAIResponsesLanguageModel = class {
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
  }
+ if (serviceTier !== void 0) {
+ providerMetadata.openai.serviceTier = serviceTier;
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
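On the streaming path the tier is captured from the `response.completed` / `response.incomplete` chunk and attached to the final `finish` part, so it is read once the stream settles (a sketch; model id and prompt are placeholders):

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('gpt-4o-mini'), // placeholder model id
  prompt: 'Hello!',
});

for await (const text of result.textStream) {
  process.stdout.write(text);
}

// Resolves after the finish part is emitted; serviceTier is set only
// when the API reported a tier for this response.
const metadata = await result.providerMetadata;
console.log(metadata?.openai?.serviceTier);
```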
@@ -2999,7 +3012,8 @@ var responseFinishedChunkSchema = import_v413.z.object({
  type: import_v413.z.enum(["response.completed", "response.incomplete"]),
  response: import_v413.z.object({
  incomplete_details: import_v413.z.object({ reason: import_v413.z.string() }).nullish(),
- usage: usageSchema2
+ usage: usageSchema2,
+ service_tier: import_v413.z.string().nullish()
  })
  });
  var responseCreatedChunkSchema = import_v413.z.object({
@@ -3007,7 +3021,8 @@ var responseCreatedChunkSchema = import_v413.z.object({
  response: import_v413.z.object({
  id: import_v413.z.string(),
  created_at: import_v413.z.number(),
- model: import_v413.z.string()
+ model: import_v413.z.string(),
+ service_tier: import_v413.z.string().nullish()
  })
  });
  var responseOutputItemAddedSchema = import_v413.z.object({
@@ -3124,7 +3139,11 @@ var responseAnnotationAddedSchema = import_v413.z.object({
  import_v413.z.object({
  type: import_v413.z.literal("file_citation"),
  file_id: import_v413.z.string(),
- quote: import_v413.z.string()
+ filename: import_v413.z.string().nullish(),
+ index: import_v413.z.number().nullish(),
+ start_index: import_v413.z.number().nullish(),
+ end_index: import_v413.z.number().nullish(),
+ quote: import_v413.z.string().nullish()
  })
  ])
  });
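For context, the relaxed streaming schema accepts annotation events that omit the offsets and quote, which the Responses API emits for some file citations. An illustrative payload (event name per the Responses API; all field values made up):

```ts
// An annotation chunk of this shape was rejected by the 2.0.21 schema
// (no quote/start_index/end_index) and validates from 2.0.22 on.
const chunk = {
  type: 'response.output_text.annotation.added',
  annotation: {
    type: 'file_citation',
    file_id: 'file-abc123',
    filename: 'notes.txt',
    index: 0,
  },
};
```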