@ai-sdk/openai 2.0.21 → 2.0.23

This diff shows the changes between publicly available versions of the package, as released to a supported public registry. It is provided for informational purposes only.
@@ -2683,7 +2683,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  const { args: body, warnings } = await this.getArgs(options);
  const url = this.config.url({
  path: "/responses",
@@ -2729,10 +2729,12 @@ var OpenAIResponsesLanguageModel = class {
  }),
  import_v416.z.object({
  type: import_v416.z.literal("file_citation"),
- start_index: import_v416.z.number(),
- end_index: import_v416.z.number(),
  file_id: import_v416.z.string(),
- quote: import_v416.z.string()
+ filename: import_v416.z.string().nullish(),
+ index: import_v416.z.number().nullish(),
+ start_index: import_v416.z.number().nullish(),
+ end_index: import_v416.z.number().nullish(),
+ quote: import_v416.z.string().nullish()
  })
  ])
  )
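The file_citation annotation schema above is the behavior-relevant change here: every positional field is now optional, and filename/index are accepted. A standalone sketch of the loosened shape, using plain zod instead of the bundled import_v416 alias (the sample payload is hypothetical):

import { z } from "zod";

// Mirrors the 2.0.23 file_citation shape from the diff above.
const fileCitation = z.object({
  type: z.literal("file_citation"),
  file_id: z.string(),
  filename: z.string().nullish(),
  index: z.number().nullish(),
  start_index: z.number().nullish(),
  end_index: z.number().nullish(),
  quote: z.string().nullish(),
});

// 2.0.21 required start_index, end_index, and quote, so a payload like this
// would have failed validation; it parses cleanly against the 2.0.23 shape.
fileCitation.parse({
  type: "file_citation",
  file_id: "file-abc123", // hypothetical id
  filename: "report.pdf", // hypothetical filename
  index: 0,
});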
@@ -2781,6 +2783,7 @@ var OpenAIResponsesLanguageModel = class {
  })
  ])
  ),
+ service_tier: import_v416.z.string().nullish(),
  incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullable(),
  usage: usageSchema2
  })
@@ -2850,8 +2853,8 @@ var OpenAIResponsesLanguageModel = class {
  sourceType: "document",
  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils14.generateId)(),
  mediaType: "text/plain",
- title: annotation.quote,
- filename: annotation.file_id
+ title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id
  });
  }
  }
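De-minified, the new title/filename fallbacks are plain nullish-coalescing chains (`!= null ? x : y` is what bundlers emit for `??`). A readable equivalent; the annotation interface below is an assumption reconstructed from the schema fields in this diff:

// Sketch of the fallback logic above, de-sugared.
interface FileCitationAnnotation {
  file_id: string;
  filename?: string | null;
  quote?: string | null;
}

function toDocumentSource(annotation: FileCitationAnnotation) {
  return {
    sourceType: "document" as const,
    mediaType: "text/plain",
    // quote is preferred, then filename, then a generic placeholder
    title: annotation.quote ?? annotation.filename ?? "Document",
    // the human-readable filename now wins over the opaque file_id
    filename: annotation.filename ?? annotation.file_id,
  };
}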
@@ -2939,18 +2942,21 @@ var OpenAIResponsesLanguageModel = class {
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
  }
+ if (typeof response.service_tier === "string") {
+ providerMetadata.openai.serviceTier = response.service_tier;
+ }
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
+ finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
- cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
+ reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
+ cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
  },
  request: { body },
  response: {
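With this hunk, doGenerate copies the response's service_tier onto provider metadata. A minimal consumer sketch, assuming AI SDK v5's generateText and a placeholder model id; the actual tier values (e.g. "default", "flex") are determined by OpenAI, and the schema only guarantees a string:

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("gpt-4o"), // placeholder model id
  prompt: "Hello!",
});

// New in 2.0.23: present when OpenAI returns a service tier, else undefined.
console.log(result.providerMetadata?.openai?.serviceTier);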
@@ -2995,6 +3001,7 @@ var OpenAIResponsesLanguageModel = class {
  const ongoingToolCalls = {};
  let hasToolCalls = false;
  const activeReasoning = {};
+ let serviceTier;
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -3002,7 +3009,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -3253,6 +3260,9 @@ var OpenAIResponsesLanguageModel = class {
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
  usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
  usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
+ if (typeof value.response.service_tier === "string") {
+ serviceTier = value.response.service_tier;
+ }
  } else if (isResponseAnnotationAddedChunk(value)) {
  if (value.annotation.type === "url_citation") {
  controller.enqueue({
@@ -3268,8 +3278,8 @@ var OpenAIResponsesLanguageModel = class {
  sourceType: "document",
  id: (_r = (_q = (_p = self.config).generateId) == null ? void 0 : _q.call(_p)) != null ? _r : (0, import_provider_utils14.generateId)(),
  mediaType: "text/plain",
- title: value.annotation.quote,
- filename: value.annotation.file_id
+ title: (_t = (_s = value.annotation.quote) != null ? _s : value.annotation.filename) != null ? _t : "Document",
+ filename: (_u = value.annotation.filename) != null ? _u : value.annotation.file_id
  });
  }
  } else if (isErrorChunk(value)) {
@@ -3285,6 +3295,9 @@ var OpenAIResponsesLanguageModel = class {
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
  }
+ if (serviceTier !== void 0) {
+ providerMetadata.openai.serviceTier = serviceTier;
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
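The streaming path mirrors doGenerate: serviceTier is captured from the response.completed / response.incomplete chunk earlier in the transform and only attached to the finish event's provider metadata, so it is not available until the stream ends. A sketch, assuming AI SDK v5's streamText exposes providerMetadata as a promise:

import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

const result = streamText({
  model: openai.responses("gpt-4o"), // placeholder model id
  prompt: "Hello!",
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}

// Resolves once the finish event has been emitted.
const metadata = await result.providerMetadata;
console.log(metadata?.openai?.serviceTier);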
@@ -3322,7 +3335,8 @@ var responseFinishedChunkSchema = import_v416.z.object({
  type: import_v416.z.enum(["response.completed", "response.incomplete"]),
  response: import_v416.z.object({
  incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
- usage: usageSchema2
+ usage: usageSchema2,
+ service_tier: import_v416.z.string().nullish()
  })
  });
  var responseCreatedChunkSchema = import_v416.z.object({
@@ -3330,7 +3344,8 @@ var responseCreatedChunkSchema = import_v416.z.object({
  response: import_v416.z.object({
  id: import_v416.z.string(),
  created_at: import_v416.z.number(),
- model: import_v416.z.string()
+ model: import_v416.z.string(),
+ service_tier: import_v416.z.string().nullish()
  })
  });
  var responseOutputItemAddedSchema = import_v416.z.object({
@@ -3447,7 +3462,11 @@ var responseAnnotationAddedSchema = import_v416.z.object({
  import_v416.z.object({
  type: import_v416.z.literal("file_citation"),
  file_id: import_v416.z.string(),
- quote: import_v416.z.string()
+ filename: import_v416.z.string().nullish(),
+ index: import_v416.z.number().nullish(),
+ start_index: import_v416.z.number().nullish(),
+ end_index: import_v416.z.number().nullish(),
+ quote: import_v416.z.string().nullish()
  })
  ])
  });