@ai-sdk/openai 2.0.22 → 2.0.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
1
1
  # @ai-sdk/openai
2
2
 
3
+ ## 2.0.24
4
+
5
+ ### Patch Changes
6
+
7
+ - ad57512: fix(provider/openai): include filename and file extension (safe practice) so that `experimental_transcribe` no longer fails with a valid Buffer
8
+ - Updated dependencies [99964ed]
9
+ - @ai-sdk/provider-utils@3.0.8
10
+
11
+ ## 2.0.23
12
+
13
+ ### Patch Changes
14
+
15
+ - a9a61b7: Add serviceTier to provider metadata for OpenAI responses
16
+
3
17
  ## 2.0.22
4
18
 
5
19
  ### Patch Changes
package/dist/index.js CHANGED
@@ -2460,6 +2460,7 @@ var OpenAIResponsesLanguageModel = class {
2460
2460
  })
2461
2461
  ])
2462
2462
  ),
2463
+ service_tier: import_v413.z.string().nullish(),
2463
2464
  incomplete_details: import_v413.z.object({ reason: import_v413.z.string() }).nullable(),
2464
2465
  usage: usageSchema2
2465
2466
  })
@@ -2618,6 +2619,9 @@ var OpenAIResponsesLanguageModel = class {
2618
2619
  if (logprobs.length > 0) {
2619
2620
  providerMetadata.openai.logprobs = logprobs;
2620
2621
  }
2622
+ if (typeof response.service_tier === "string") {
2623
+ providerMetadata.openai.serviceTier = response.service_tier;
2624
+ }
2621
2625
  return {
2622
2626
  content,
2623
2627
  finishReason: mapOpenAIResponseFinishReason({
@@ -2674,6 +2678,7 @@ var OpenAIResponsesLanguageModel = class {
2674
2678
  const ongoingToolCalls = {};
2675
2679
  let hasToolCalls = false;
2676
2680
  const activeReasoning = {};
2681
+ let serviceTier;
2677
2682
  return {
2678
2683
  stream: response.pipeThrough(
2679
2684
  new TransformStream({
@@ -2932,6 +2937,9 @@ var OpenAIResponsesLanguageModel = class {
2932
2937
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
2933
2938
  usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
2934
2939
  usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
2940
+ if (typeof value.response.service_tier === "string") {
2941
+ serviceTier = value.response.service_tier;
2942
+ }
2935
2943
  } else if (isResponseAnnotationAddedChunk(value)) {
2936
2944
  if (value.annotation.type === "url_citation") {
2937
2945
  controller.enqueue({
@@ -2964,6 +2972,9 @@ var OpenAIResponsesLanguageModel = class {
2964
2972
  if (logprobs.length > 0) {
2965
2973
  providerMetadata.openai.logprobs = logprobs;
2966
2974
  }
2975
+ if (serviceTier !== void 0) {
2976
+ providerMetadata.openai.serviceTier = serviceTier;
2977
+ }
2967
2978
  controller.enqueue({
2968
2979
  type: "finish",
2969
2980
  finishReason,
@@ -3001,7 +3012,8 @@ var responseFinishedChunkSchema = import_v413.z.object({
3001
3012
  type: import_v413.z.enum(["response.completed", "response.incomplete"]),
3002
3013
  response: import_v413.z.object({
3003
3014
  incomplete_details: import_v413.z.object({ reason: import_v413.z.string() }).nullish(),
3004
- usage: usageSchema2
3015
+ usage: usageSchema2,
3016
+ service_tier: import_v413.z.string().nullish()
3005
3017
  })
3006
3018
  });
3007
3019
  var responseCreatedChunkSchema = import_v413.z.object({
@@ -3009,7 +3021,8 @@ var responseCreatedChunkSchema = import_v413.z.object({
3009
3021
  response: import_v413.z.object({
3010
3022
  id: import_v413.z.string(),
3011
3023
  created_at: import_v413.z.number(),
3012
- model: import_v413.z.string()
3024
+ model: import_v413.z.string(),
3025
+ service_tier: import_v413.z.string().nullish()
3013
3026
  })
3014
3027
  });
3015
3028
  var responseOutputItemAddedSchema = import_v413.z.object({
@@ -3486,7 +3499,12 @@ var OpenAITranscriptionModel = class {
3486
3499
  const formData = new FormData();
3487
3500
  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils14.convertBase64ToUint8Array)(audio)]);
3488
3501
  formData.append("model", this.modelId);
3489
- formData.append("file", new File([blob], "audio", { type: mediaType }));
3502
+ const fileExtension = (0, import_provider_utils14.mediaTypeToExtension)(mediaType);
3503
+ formData.append(
3504
+ "file",
3505
+ new File([blob], "audio", { type: mediaType }),
3506
+ `audio.${fileExtension}`
3507
+ );
3490
3508
  if (openAIOptions) {
3491
3509
  const transcriptionModelOptions = {
3492
3510
  include: openAIOptions.include,