@ai-sdk/openai 2.0.21 → 2.0.23

package/dist/index.mjs CHANGED
@@ -2383,7 +2383,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  const { args: body, warnings } = await this.getArgs(options);
  const url = this.config.url({
  path: "/responses",
@@ -2429,10 +2429,12 @@ var OpenAIResponsesLanguageModel = class {
  }),
  z13.object({
  type: z13.literal("file_citation"),
- start_index: z13.number(),
- end_index: z13.number(),
  file_id: z13.string(),
- quote: z13.string()
+ filename: z13.string().nullish(),
+ index: z13.number().nullish(),
+ start_index: z13.number().nullish(),
+ end_index: z13.number().nullish(),
+ quote: z13.string().nullish()
  })
  ])
  )
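The 2.0.21 schema required start_index, end_index, and quote on every file_citation annotation, so Responses API payloads that omit them (and carry the newer filename/index fields instead) failed validation. A standalone sketch of the loosened schema using plain zod (z13 is the bundle's local alias for zod):

    import { z } from "zod";

    // Mirror of the loosened file_citation schema above.
    const fileCitationSchema = z.object({
      type: z.literal("file_citation"),
      file_id: z.string(),
      filename: z.string().nullish(),
      index: z.number().nullish(),
      start_index: z.number().nullish(),
      end_index: z.number().nullish(),
      quote: z.string().nullish(),
    });

    // Hypothetical annotation payload without quote/start_index/end_index:
    // rejected by the 2.0.21 schema, accepted after this change.
    const parsed = fileCitationSchema.safeParse({
      type: "file_citation",
      file_id: "file-abc123",
      filename: "report.pdf",
      index: 0,
    });
    console.log(parsed.success); // true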
@@ -2481,6 +2483,7 @@ var OpenAIResponsesLanguageModel = class {
  })
  ])
  ),
+ service_tier: z13.string().nullish(),
  incomplete_details: z13.object({ reason: z13.string() }).nullable(),
  usage: usageSchema2
  })
@@ -2550,8 +2553,8 @@ var OpenAIResponsesLanguageModel = class {
  sourceType: "document",
  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId2(),
  mediaType: "text/plain",
- title: annotation.quote,
- filename: annotation.file_id
+ title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id
  });
  }
  }
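The _j/_k/_l chains are down-leveled nullish coalescing. A hypothetical helper showing the source-level logic the new code implements: prefer the quote for the title, then the filename, then a generic label, and prefer the filename over the raw file id:

    // Hypothetical helper; names are illustrative, not from the package.
    interface FileCitation {
      file_id: string;
      filename?: string | null;
      quote?: string | null;
    }

    function toDocumentSource(annotation: FileCitation) {
      return {
        sourceType: "document" as const,
        mediaType: "text/plain",
        // title: quote, else filename, else "Document"
        title: annotation.quote ?? annotation.filename ?? "Document",
        // filename: filename, else the raw file id
        filename: annotation.filename ?? annotation.file_id,
      };
    }

    console.log(toDocumentSource({ file_id: "file-abc123", filename: "report.pdf" }));
    // { sourceType: "document", mediaType: "text/plain", title: "report.pdf", filename: "report.pdf" }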
@@ -2639,18 +2642,21 @@ var OpenAIResponsesLanguageModel = class {
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
  }
+ if (typeof response.service_tier === "string") {
+ providerMetadata.openai.serviceTier = response.service_tier;
+ }
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
+ finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_l = (_k = response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0,
- cachedInputTokens: (_n = (_m = response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
+ reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
+ cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
  },
  request: { body },
  response: {
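Beyond the temp-variable renames, the substantive change is that doGenerate now copies the service_tier reported by the Responses API into providerMetadata.openai.serviceTier. A usage sketch, assuming AI SDK v5's generateText; the serviceTier provider option shown in providerOptions is an assumption, and the metadata field is populated whenever the API reports a tier, whether or not one was requested:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = await generateText({
      model: openai.responses("o4-mini"),
      prompt: "Summarize the plot of Hamlet in one sentence.",
      // Assumption: requesting flex processing via provider options.
      providerOptions: { openai: { serviceTier: "flex" } },
    });

    // New in this diff: the tier the request was actually served under.
    console.log(result.providerMetadata?.openai?.serviceTier);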
@@ -2695,6 +2701,7 @@ var OpenAIResponsesLanguageModel = class {
  const ongoingToolCalls = {};
  let hasToolCalls = false;
  const activeReasoning = {};
+ let serviceTier;
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -2702,7 +2709,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -2953,6 +2960,9 @@ var OpenAIResponsesLanguageModel = class {
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
  usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
  usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
+ if (typeof value.response.service_tier === "string") {
+ serviceTier = value.response.service_tier;
+ }
  } else if (isResponseAnnotationAddedChunk(value)) {
  if (value.annotation.type === "url_citation") {
  controller.enqueue({
@@ -2968,8 +2978,8 @@ var OpenAIResponsesLanguageModel = class {
  sourceType: "document",
  id: (_r = (_q = (_p = self.config).generateId) == null ? void 0 : _q.call(_p)) != null ? _r : generateId2(),
  mediaType: "text/plain",
- title: value.annotation.quote,
- filename: value.annotation.file_id
+ title: (_t = (_s = value.annotation.quote) != null ? _s : value.annotation.filename) != null ? _t : "Document",
+ filename: (_u = value.annotation.filename) != null ? _u : value.annotation.file_id
  });
  }
  } else if (isErrorChunk(value)) {
@@ -2985,6 +2995,9 @@ var OpenAIResponsesLanguageModel = class {
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
  }
+ if (serviceTier !== void 0) {
+ providerMetadata.openai.serviceTier = serviceTier;
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
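In the streaming path, the tier is captured from the response.completed / response.incomplete chunk into the serviceTier local earlier in this transform and attached to the finish event's provider metadata here. A sketch of reading it once the stream ends, assuming AI SDK v5's streamText, whose providerMetadata promise resolves with the finish event's metadata:

    import { streamText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = streamText({
      model: openai.responses("o4-mini"),
      prompt: "Name three prime numbers.",
    });

    for await (const textPart of result.textStream) {
      process.stdout.write(textPart);
    }

    // Carries the serviceTier captured from the response.completed chunk.
    const metadata = await result.providerMetadata;
    console.log("\nserved on tier:", metadata?.openai?.serviceTier);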
@@ -3022,7 +3035,8 @@ var responseFinishedChunkSchema = z13.object({
  type: z13.enum(["response.completed", "response.incomplete"]),
  response: z13.object({
  incomplete_details: z13.object({ reason: z13.string() }).nullish(),
- usage: usageSchema2
+ usage: usageSchema2,
+ service_tier: z13.string().nullish()
  })
  });
  var responseCreatedChunkSchema = z13.object({
@@ -3030,7 +3044,8 @@ var responseCreatedChunkSchema = z13.object({
  response: z13.object({
  id: z13.string(),
  created_at: z13.number(),
- model: z13.string()
+ model: z13.string(),
+ service_tier: z13.string().nullish()
  })
  });
  var responseOutputItemAddedSchema = z13.object({
@@ -3147,7 +3162,11 @@ var responseAnnotationAddedSchema = z13.object({
  z13.object({
  type: z13.literal("file_citation"),
  file_id: z13.string(),
- quote: z13.string()
+ filename: z13.string().nullish(),
+ index: z13.number().nullish(),
+ start_index: z13.number().nullish(),
+ end_index: z13.number().nullish(),
+ quote: z13.string().nullish()
  })
  ])
  });