@ai-sdk/openai 2.0.22 → 2.0.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # @ai-sdk/openai
 
+ ## 2.0.23
+
+ ### Patch Changes
+
+ - a9a61b7: Add serviceTier to provider metadata for OpenAI responses
+
  ## 2.0.22
 
  ### Patch Changes
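For orientation, a minimal sketch of how a consumer might read the new metadata; the `generateText` result shape follows the AI SDK v5 docs, and the model id is illustrative:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// After this patch, the Responses API's `service_tier` field is surfaced
// as `serviceTier` under the `openai` provider metadata key.
const { text, providerMetadata } = await generateText({
  model: openai.responses('gpt-4o'), // model id is illustrative
  prompt: 'Hello!',
});

// Only present when the API returned a string value for service_tier.
console.log(providerMetadata?.openai?.serviceTier); // e.g. "default"
```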
package/dist/index.js CHANGED
@@ -2460,6 +2460,7 @@ var OpenAIResponsesLanguageModel = class {
  })
  ])
  ),
+ service_tier: import_v413.z.string().nullish(),
  incomplete_details: import_v413.z.object({ reason: import_v413.z.string() }).nullable(),
  usage: usageSchema2
  })
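The field is declared with zod's `nullish()`, so it validates a string, `null`, or a missing key. A standalone sketch of that behavior (the `zod/v4` import path is an assumption based on the bundle's `import_v413` alias):

```ts
import { z } from 'zod/v4'; // import path is an assumption

// Mirrors the shape added above: service_tier may be a string,
// null, or absent entirely.
const responseSchema = z.object({
  service_tier: z.string().nullish(),
});

responseSchema.parse({ service_tier: 'flex' }); // ok: string
responseSchema.parse({ service_tier: null });   // ok: null
responseSchema.parse({});                       // ok: undefined
```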
@@ -2618,6 +2619,9 @@ var OpenAIResponsesLanguageModel = class {
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
  }
+ if (typeof response.service_tier === "string") {
+ providerMetadata.openai.serviceTier = response.service_tier;
+ }
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
@@ -2674,6 +2678,7 @@ var OpenAIResponsesLanguageModel = class {
  const ongoingToolCalls = {};
  let hasToolCalls = false;
  const activeReasoning = {};
+ let serviceTier;
  return {
  stream: response.pipeThrough(
  new TransformStream({
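The new `let serviceTier;` declaration sits in the closure around the `TransformStream`, so a value observed on an intermediate chunk survives until the finish step. A minimal standalone sketch of that pattern:

```ts
// Closure state written during transform() and read at flush time,
// analogous to how serviceTier is captured in the hunk above.
let lastSeen: string | undefined;

const stream = new TransformStream<string, string>({
  transform(chunk, controller) {
    lastSeen = chunk;          // record state as chunks pass through
    controller.enqueue(chunk); // forward the chunk unchanged
  },
  flush(controller) {
    controller.enqueue(`last seen: ${lastSeen ?? 'none'}`);
  },
});
```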
@@ -2932,6 +2937,9 @@ var OpenAIResponsesLanguageModel = class {
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
  usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
  usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
+ if (typeof value.response.service_tier === "string") {
+ serviceTier = value.response.service_tier;
+ }
  } else if (isResponseAnnotationAddedChunk(value)) {
  if (value.annotation.type === "url_citation") {
  controller.enqueue({
@@ -2964,6 +2972,9 @@ var OpenAIResponsesLanguageModel = class {
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
  }
+ if (serviceTier !== void 0) {
+ providerMetadata.openai.serviceTier = serviceTier;
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
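In the streaming path the captured tier is attached to the terminal finish event, so it should surface wherever finish metadata is exposed; a hedged sketch using `streamText`'s `onFinish` callback (callback shape per the AI SDK docs, model id illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('gpt-4o'), // model id is illustrative
  prompt: 'Hello!',
  onFinish: ({ providerMetadata }) => {
    // Set only when the stream's final response chunk carried a service_tier.
    console.log(providerMetadata?.openai?.serviceTier);
  },
});

for await (const part of result.textStream) {
  process.stdout.write(part);
}
```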
@@ -3001,7 +3012,8 @@ var responseFinishedChunkSchema = import_v413.z.object({
  type: import_v413.z.enum(["response.completed", "response.incomplete"]),
  response: import_v413.z.object({
  incomplete_details: import_v413.z.object({ reason: import_v413.z.string() }).nullish(),
- usage: usageSchema2
+ usage: usageSchema2,
+ service_tier: import_v413.z.string().nullish()
  })
  });
  var responseCreatedChunkSchema = import_v413.z.object({
@@ -3009,7 +3021,8 @@ var responseCreatedChunkSchema = import_v413.z.object({
  response: import_v413.z.object({
  id: import_v413.z.string(),
  created_at: import_v413.z.number(),
- model: import_v413.z.string()
+ model: import_v413.z.string(),
+ service_tier: import_v413.z.string().nullish()
  })
  });
  var responseOutputItemAddedSchema = import_v413.z.object({
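Both stream chunk schemas (`response.created` and `response.completed`/`response.incomplete`) now tolerate the field, so the metadata is captured regardless of which chunk carries it. If the request-side `serviceTier` provider option is available in this provider version (an assumption; this diff only covers the response side), the round trip might look like:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { providerMetadata } = await generateText({
  model: openai.responses('gpt-4o'), // model id is illustrative
  prompt: 'Hello!',
  providerOptions: {
    // Assumed request-side option; this patch only adds the response metadata.
    openai: { serviceTier: 'flex' },
  },
});

// The tier the API actually served the request under.
console.log(providerMetadata?.openai?.serviceTier);
```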