@ai-sdk/openai 2.0.20 → 2.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -430,7 +430,7 @@ var webSearchPreview = createProviderDefinedToolFactory2({
   action: z4.discriminatedUnion("type", [
     z4.object({
       type: z4.literal("search"),
-      query: z4.string()
+      query: z4.string().nullish()
     }),
     z4.object({
       type: z4.literal("open_page"),
@@ -2174,7 +2174,7 @@ var webSearchCallItem = z13.object({
   action: z13.discriminatedUnion("type", [
     z13.object({
       type: z13.literal("search"),
-      query: z13.string()
+      query: z13.string().nullish()
     }),
     z13.object({
       type: z13.literal("open_page"),
@@ -2383,7 +2383,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
@@ -2429,10 +2429,12 @@ var OpenAIResponsesLanguageModel = class {
         }),
         z13.object({
           type: z13.literal("file_citation"),
-          start_index: z13.number(),
-          end_index: z13.number(),
           file_id: z13.string(),
-          quote: z13.string()
+          filename: z13.string().nullish(),
+          index: z13.number().nullish(),
+          start_index: z13.number().nullish(),
+          end_index: z13.number().nullish(),
+          quote: z13.string().nullish()
         })
       ])
     )
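The file_citation annotation schema in the doGenerate response parser is widened: filename and index are added, and start_index, end_index, and quote become nullish, so citations that carry a filename and index instead of a quote and character offsets no longer break parsing. (The same widening is applied to responseAnnotationAddedSchema in the streaming path further down.) A standalone sketch of only this fragment, with illustrative values:

import { z } from "zod";

// Standalone sketch of the widened file_citation fragment.
const fileCitation = z.object({
  type: z.literal("file_citation"),
  file_id: z.string(),
  filename: z.string().nullish(),
  index: z.number().nullish(),
  start_index: z.number().nullish(),
  end_index: z.number().nullish(),
  quote: z.string().nullish()
});

// A citation carrying only a filename and index now validates; under 2.0.20
// the missing start_index/end_index/quote made parsing fail.
// The file_id and filename values are illustrative only.
fileCitation.parse({
  type: "file_citation",
  file_id: "file-abc123",
  filename: "report.pdf",
  index: 0
});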
@@ -2550,8 +2552,8 @@ var OpenAIResponsesLanguageModel = class {
           sourceType: "document",
           id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId2(),
           mediaType: "text/plain",
-          title: annotation.quote,
-          filename: annotation.file_id
+          title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
+          filename: (_l = annotation.filename) != null ? _l : annotation.file_id
         });
       }
     }
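The new _j/_k/_l temporaries (declared in the widened var list at the top of doGenerate) are the bundler's lowering of nullish coalescing. In source terms, the document source emitted for a file citation now falls back from quote to filename to a "Document" placeholder for the title, and from filename to file_id for the filename; the same fallback appears again in the streaming path below. A de-sugared sketch with a sample annotation (values illustrative):

// Sample citation payload without a quote.
const annotation = { type: "file_citation", file_id: "file-abc123", filename: "report.pdf" };

const documentSource = {
  sourceType: "document",
  mediaType: "text/plain",
  title: annotation.quote ?? annotation.filename ?? "Document", // quote, then filename, then placeholder
  filename: annotation.filename ?? annotation.file_id           // filename, then file_id
};

console.log(documentSource.title);    // "report.pdf"
console.log(documentSource.filename); // "report.pdf"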
@@ -2642,15 +2644,15 @@ var OpenAIResponsesLanguageModel = class {
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
+        finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_l = (_k = response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0,
-        cachedInputTokens: (_n = (_m = response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
+        reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
+        cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
       },
       request: { body },
       response: {
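This hunk only renames the lowered temporaries (_j through _n become _m through _q) to make room for the ones the citation fallback above now uses; the finish-reason and usage mapping itself is unchanged. Read back into source form, with a sample response object for illustration:

// Sample shape of the Responses API usage block (values illustrative);
// input_tokens_details is omitted to show the optional chaining.
const response = {
  usage: {
    input_tokens: 12,
    output_tokens: 34,
    output_tokens_details: { reasoning_tokens: 5 }
  }
};

const usage = {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
  reasoningTokens: response.usage.output_tokens_details?.reasoning_tokens ?? undefined, // 5
  cachedInputTokens: response.usage.input_tokens_details?.cached_tokens ?? undefined    // undefined
};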
@@ -2702,7 +2704,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
           if (options.includeRawChunks) {
             controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
           }
@@ -2968,8 +2970,8 @@ var OpenAIResponsesLanguageModel = class {
                 sourceType: "document",
                 id: (_r = (_q = (_p = self.config).generateId) == null ? void 0 : _q.call(_p)) != null ? _r : generateId2(),
                 mediaType: "text/plain",
-                title: value.annotation.quote,
-                filename: value.annotation.file_id
+                title: (_t = (_s = value.annotation.quote) != null ? _s : value.annotation.filename) != null ? _t : "Document",
+                filename: (_u = value.annotation.filename) != null ? _u : value.annotation.file_id
               });
             }
           } else if (isErrorChunk(value)) {
@@ -3147,7 +3149,11 @@ var responseAnnotationAddedSchema = z13.object({
     z13.object({
       type: z13.literal("file_citation"),
       file_id: z13.string(),
-      quote: z13.string()
+      filename: z13.string().nullish(),
+      index: z13.number().nullish(),
+      start_index: z13.number().nullish(),
+      end_index: z13.number().nullish(),
+      quote: z13.string().nullish()
     })
   ])
 });
@@ -3520,8 +3526,12 @@ var OpenAITranscriptionModel = class {
       include: openAIOptions.include,
       language: openAIOptions.language,
       prompt: openAIOptions.prompt,
-      response_format: "verbose_json",
-      // always use verbose_json to get segments
+      // https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
+      // prefer verbose_json to get segments for models that support it
+      response_format: [
+        "gpt-4o-transcribe",
+        "gpt-4o-mini-transcribe"
+      ].includes(this.modelId) ? "json" : "verbose_json",
       temperature: openAIOptions.temperature,
       timestamp_granularities: openAIOptions.timestampGranularities
     };
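Per the API reference linked in the new comment, gpt-4o-transcribe and gpt-4o-mini-transcribe do not support verbose_json, so the transcription model now requests plain json for those two model ids and keeps verbose_json, which carries segment data, for the rest. A small sketch mirroring the ternary in the diff ("whisper-1" below is just an illustrative id outside the list):

// Models that only support the plain "json" response format.
const JSON_ONLY_MODELS = ["gpt-4o-transcribe", "gpt-4o-mini-transcribe"];

function pickResponseFormat(modelId) {
  return JSON_ONLY_MODELS.includes(modelId) ? "json" : "verbose_json";
}

console.log(pickResponseFormat("gpt-4o-transcribe")); // "json"
console.log(pickResponseFormat("whisper-1"));         // "verbose_json" (includes segments)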