@ai-sdk/openai 2.0.19 → 2.0.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -448,7 +448,7 @@ var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFacto
   action: import_v44.z.discriminatedUnion("type", [
     import_v44.z.object({
       type: import_v44.z.literal("search"),
-      query: import_v44.z.string()
+      query: import_v44.z.string().nullish()
     }),
     import_v44.z.object({
       type: import_v44.z.literal("open_page"),
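
Note: .nullish() is Zod's shorthand for .nullable().optional(), so the accepted input widens from string to string | null | undefined, and web search actions that arrive without a query no longer fail validation. A minimal standalone sketch of the effect (using a plain zod import rather than the bundled import_v44 alias); the same relaxation is applied to the parallel import_v416 schema in the Responses model further down:

    import { z } from "zod";

    // Before: a search action with a null query failed validation.
    const before = z.object({
      type: z.literal("search"),
      query: z.string()
    });

    // After: null or absent queries pass.
    const after = z.object({
      type: z.literal("search"),
      query: z.string().nullish()
    });

    console.log(before.safeParse({ type: "search", query: null }).success); // false
    console.log(after.safeParse({ type: "search", query: null }).success);  // true
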
@@ -1936,8 +1936,12 @@ var OpenAITranscriptionModel = class {
       include: openAIOptions.include,
       language: openAIOptions.language,
       prompt: openAIOptions.prompt,
-      response_format: "verbose_json",
-      // always use verbose_json to get segments
+      // https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
+      // prefer verbose_json to get segments for models that support it
+      response_format: [
+        "gpt-4o-transcribe",
+        "gpt-4o-mini-transcribe"
+      ].includes(this.modelId) ? "json" : "verbose_json",
       temperature: openAIOptions.temperature,
       timestamp_granularities: openAIOptions.timestampGranularities
     };
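
Note: per the linked API reference, the gpt-4o transcription models only accept "json", so the previously hard-coded "verbose_json" would fail for them; whisper-1 keeps "verbose_json" and its segment timestamps. A standalone sketch of the selection logic (the function name is illustrative, not from the package):

    // Transcription models that only accept "json" as response_format.
    const JSON_ONLY_TRANSCRIPTION_MODELS = [
      "gpt-4o-transcribe",
      "gpt-4o-mini-transcribe"
    ];

    function responseFormatFor(modelId: string): "json" | "verbose_json" {
      // Fall back to verbose_json so whisper-1 still returns segments.
      return JSON_ONLY_TRANSCRIPTION_MODELS.includes(modelId)
        ? "json"
        : "verbose_json";
    }

    responseFormatFor("gpt-4o-transcribe"); // "json"
    responseFormatFor("whisper-1");         // "verbose_json"
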
@@ -2470,7 +2474,7 @@ var webSearchCallItem = import_v416.z.object({
   action: import_v416.z.discriminatedUnion("type", [
     import_v416.z.object({
       type: import_v416.z.literal("search"),
-      query: import_v416.z.string()
+      query: import_v416.z.string().nullish()
     }),
     import_v416.z.object({
       type: import_v416.z.literal("open_page"),
@@ -2998,7 +3002,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
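
Note: the two extra temporaries (_q, _r) exist because the new logprobs guard in the next hunk compiles to two additional optional-chain checks. Every later _x name in transform shifts by two, which is all that the rename-only lines in the remaining hunks reflect; the compiled logic there is unchanged.
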
@@ -3209,12 +3213,12 @@ var OpenAIResponsesLanguageModel = class {
             id: value.item_id,
             delta: value.delta
           });
-          if (value.logprobs) {
+          if (((_d = (_c = options.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.logprobs) && value.logprobs) {
             logprobs.push(value.logprobs);
           }
         } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
           if (value.summary_index > 0) {
-            (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+            (_e = activeReasoning[value.item_id]) == null ? void 0 : _e.summaryParts.push(
               value.summary_index
             );
             controller.enqueue({
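
Note: stripped of the compiled optional-chaining helpers, the changed guard reads as below; streamed logprobs are now buffered only when the caller opted in via provider options, rather than whenever the API happened to send them:

    // Source-level equivalent of the new condition.
    if (options.providerOptions?.openai?.logprobs && value.logprobs) {
      logprobs.push(value.logprobs);
    }
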
@@ -3223,7 +3227,7 @@ var OpenAIResponsesLanguageModel = class {
             providerMetadata: {
               openai: {
                 itemId: value.item_id,
-                reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+                reasoningEncryptedContent: (_g = (_f = activeReasoning[value.item_id]) == null ? void 0 : _f.encryptedContent) != null ? _g : null
               }
             }
           });
@@ -3241,20 +3245,20 @@ var OpenAIResponsesLanguageModel = class {
           });
         } else if (isResponseFinishedChunk(value)) {
           finishReason = mapOpenAIResponseFinishReason({
-            finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
+            finishReason: (_h = value.response.incomplete_details) == null ? void 0 : _h.reason,
             hasToolCalls
           });
           usage.inputTokens = value.response.usage.input_tokens;
           usage.outputTokens = value.response.usage.output_tokens;
           usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-          usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
-          usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
+          usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
+          usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
         } else if (isResponseAnnotationAddedChunk(value)) {
           if (value.annotation.type === "url_citation") {
             controller.enqueue({
               type: "source",
               sourceType: "url",
-              id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils14.generateId)(),
+              id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : (0, import_provider_utils14.generateId)(),
               url: value.annotation.url,
               title: value.annotation.title
             });
@@ -3262,7 +3266,7 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({
               type: "source",
               sourceType: "document",
-              id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils14.generateId)(),
+              id: (_r = (_q = (_p = self.config).generateId) == null ? void 0 : _q.call(_p)) != null ? _r : (0, import_provider_utils14.generateId)(),
               mediaType: "text/plain",
               title: value.annotation.quote,
               filename: value.annotation.file_id