@ai-sdk/openai 2.0.45 → 2.0.47

package/dist/index.mjs CHANGED
@@ -453,13 +453,15 @@ var openaiChatLanguageModelOptions = lazyValidator2(
  structuredOutputs: z3.boolean().optional(),
  /**
  * Service tier for the request.
- * - 'auto': Default service tier
+ * - 'auto': Default service tier. The request will be processed with the service tier configured in the
+ * Project settings. Unless otherwise configured, the Project will use 'default'.
  * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
  * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+ * - 'default': The request will be processed with the standard pricing and performance for the selected model.
  *
  * @default 'auto'
  */
- serviceTier: z3.enum(["auto", "flex", "priority"]).optional(),
+ serviceTier: z3.enum(["auto", "flex", "priority", "default"]).optional(),
  /**
  * Whether to use strict JSON schema validation.
  *
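Usage note (not part of the diff): the chat options schema above now accepts serviceTier: 'default' alongside 'auto', 'flex', and 'priority'; the Responses provider options schema further down in this diff gains the same value. A minimal sketch of how this would typically be passed via AI SDK provider options follows — the model id, prompt, and call shape are illustrative assumptions, not taken from this package diff:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('gpt-4o'), // illustrative model id
  prompt: 'Summarize the release notes.',
  providerOptions: {
    openai: {
      // 'default' requests standard pricing and performance for the selected model;
      // 'auto' defers to the service tier configured in the OpenAI project settings.
      serviceTier: 'default',
    },
  },
});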
@@ -2393,11 +2395,29 @@ async function convertToOpenAIResponsesInput({
  case "error-text":
  contentValue = output.value;
  break;
- case "content":
  case "json":
  case "error-json":
  contentValue = JSON.stringify(output.value);
  break;
+ case "content":
+ contentValue = output.value.map((item) => {
+ switch (item.type) {
+ case "text": {
+ return { type: "input_text", text: item.text };
+ }
+ case "media": {
+ return item.mediaType.startsWith("image/") ? {
+ type: "input_image",
+ image_url: `data:${item.mediaType};base64,${item.data}`
+ } : {
+ type: "input_file",
+ filename: "data",
+ file_data: `data:${item.mediaType};base64,${item.data}`
+ };
+ }
+ }
+ });
+ break;
  }
  input.push({
  type: "function_call_output",
@@ -2964,7 +2984,7 @@ var openaiResponsesProviderOptionsSchema = lazyValidator9(
  reasoningEffort: z17.string().nullish(),
  reasoningSummary: z17.string().nullish(),
  safetyIdentifier: z17.string().nullish(),
- serviceTier: z17.enum(["auto", "flex", "priority"]).nullish(),
+ serviceTier: z17.enum(["auto", "flex", "priority", "default"]).nullish(),
  store: z17.boolean().nullish(),
  strictJsonSchema: z17.boolean().nullish(),
  textVerbosity: z17.enum(["low", "medium", "high"]).nullish(),
@@ -4476,7 +4496,7 @@ var OpenAITranscriptionModel = class {
  };
 
  // src/version.ts
- var VERSION = true ? "2.0.45" : "0.0.0-test";
+ var VERSION = true ? "2.0.47" : "0.0.0-test";
 
  // src/openai-provider.ts
  function createOpenAI(options = {}) {