@ai-sdk/openai 2.0.0-alpha.1 → 2.0.0-alpha.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -56,6 +56,13 @@ declare const openaiProviderOptions: z.ZodObject<{
      * @default true
      */
     structuredOutputs: z.ZodOptional<z.ZodBoolean>;
+    /**
+     * Service tier for the request. Set to 'flex' for 50% cheaper processing
+     * at the cost of increased latency. Only available for o3 and o4-mini models.
+     *
+     * @default 'auto'
+     */
+    serviceTier: z.ZodOptional<z.ZodEnum<["auto", "flex"]>>;
 }, "strip", z.ZodTypeAny, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
@@ -67,6 +74,7 @@ declare const openaiProviderOptions: z.ZodObject<{
     metadata?: Record<string, string> | undefined;
     prediction?: Record<string, any> | undefined;
     structuredOutputs?: boolean | undefined;
+    serviceTier?: "auto" | "flex" | undefined;
 }, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
@@ -78,6 +86,7 @@ declare const openaiProviderOptions: z.ZodObject<{
     metadata?: Record<string, string> | undefined;
     prediction?: Record<string, any> | undefined;
     structuredOutputs?: boolean | undefined;
+    serviceTier?: "auto" | "flex" | undefined;
 }>;
 type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
 
@@ -353,12 +362,14 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<["auto", "flex"]>>>;
 }, "strip", z.ZodTypeAny, {
     user?: string | null | undefined;
     parallelToolCalls?: boolean | null | undefined;
     reasoningEffort?: string | null | undefined;
     store?: boolean | null | undefined;
     metadata?: any;
+    serviceTier?: "auto" | "flex" | null | undefined;
     instructions?: string | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
@@ -369,6 +380,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     reasoningEffort?: string | null | undefined;
     store?: boolean | null | undefined;
     metadata?: any;
+    serviceTier?: "auto" | "flex" | null | undefined;
     instructions?: string | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
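
The new serviceTier option is exposed through the standard provider-options channel. A minimal usage sketch, not part of this diff, assuming the AI SDK's generateText and the providerOptions.openai namespace for these options:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch only: opt an o4-mini call into flex processing.
// 'flex' trades latency for cheaper processing; 'auto' is the default.
const result = await generateText({
  model: openai('o4-mini'),
  prompt: 'Summarize the release notes in three bullet points.',
  providerOptions: {
    openai: { serviceTier: 'flex' },
  },
});

console.log(result.text);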
@@ -144,7 +144,7 @@ function convertToOpenAIChatMessages({
             type: "file",
             file: {
               filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
-              file_data: `data:application/pdf;base64,${part.data}`
+              file_data: `data:application/pdf;base64,${(0, import_provider_utils.convertToBase64)(part.data)}`
             }
           };
         } else {
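
The change above routes PDF data through convertToBase64 from @ai-sdk/provider-utils, so binary payloads (e.g. a Uint8Array) are encoded rather than interpolated as-is. A small sketch of the equivalent behavior, assuming convertToBase64 accepts both base64 strings and Uint8Array values as the call site implies:

import { convertToBase64 } from '@ai-sdk/provider-utils';

// Hypothetical file part; previously a Uint8Array here would have been
// stringified incorrectly, now it is normalized to base64 first.
const part = {
  filename: 'report.pdf',
  data: new Uint8Array([0x25, 0x50, 0x44, 0x46]), // "%PDF"
};

const file_data = `data:application/pdf;base64,${convertToBase64(part.data)}`;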
@@ -290,7 +290,14 @@ var openaiProviderOptions = import_zod.z.object({
    *
    * @default true
    */
-  structuredOutputs: import_zod.z.boolean().optional()
+  structuredOutputs: import_zod.z.boolean().optional(),
+  /**
+   * Service tier for the request. Set to 'flex' for 50% cheaper processing
+   * at the cost of increased latency. Only available for o3 and o4-mini models.
+   *
+   * @default 'auto'
+   */
+  serviceTier: import_zod.z.enum(["auto", "flex"]).optional()
 });
 
 // src/openai-error.ts
@@ -461,6 +468,7 @@ var OpenAIChatLanguageModel = class {
       metadata: openaiOptions.metadata,
       prediction: openaiOptions.prediction,
       reasoning_effort: openaiOptions.reasoningEffort,
+      service_tier: openaiOptions.serviceTier,
       // messages:
       messages
     };
@@ -534,6 +542,14 @@ var OpenAIChatLanguageModel = class {
         });
       }
     }
+    if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "flex processing is only available for o3 and o4-mini models"
+      });
+      baseArgs.service_tier = void 0;
+    }
     const {
       tools: openaiTools,
       toolChoice: openaiToolChoice,
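
When 'flex' is requested for a model that does not support it, the call is not rejected: the provider drops service_tier from the request body and emits an unsupported-setting warning instead. A sketch of how that surfaces to callers, assuming the generateText result exposes provider warnings:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { warnings } = await generateText({
  model: openai('gpt-4o'), // no flex support
  prompt: 'Hello!',
  providerOptions: { openai: { serviceTier: 'flex' } },
});

// Expected per the guard above: an 'unsupported-setting' warning for serviceTier,
// while the request itself proceeds without a service_tier field.
console.log(warnings);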
@@ -905,6 +921,9 @@ var openaiChatChunkSchema = import_zod3.z.union([
 function isReasoningModel(modelId) {
   return modelId.startsWith("o");
 }
+function supportsFlexProcessing(modelId) {
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
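
Support is determined by a plain model-id prefix check, so dated snapshots that keep the prefix qualify as well. A TypeScript sketch mirroring the helper added above:

function supportsFlexProcessing(modelId: string): boolean {
  return modelId.startsWith('o3') || modelId.startsWith('o4-mini');
}

console.log(supportsFlexProcessing('o4-mini-2025-04-16')); // true
console.log(supportsFlexProcessing('o3'));                 // true
console.log(supportsFlexProcessing('gpt-4o'));             // false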
@@ -2098,6 +2117,7 @@ var OpenAIResponsesLanguageModel = class {
       store: openaiOptions == null ? void 0 : openaiOptions.store,
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+      service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
@@ -2131,6 +2151,14 @@ var OpenAIResponsesLanguageModel = class {
         });
       }
     }
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "flex processing is only available for o3 and o4-mini models"
+      });
+      delete baseArgs.service_tier;
+    }
     const {
       tools: openaiTools,
       toolChoice: openaiToolChoice,
@@ -2560,6 +2588,9 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
+function supportsFlexProcessing2(modelId) {
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
   metadata: import_zod12.z.any().nullish(),
   parallelToolCalls: import_zod12.z.boolean().nullish(),
@@ -2569,7 +2600,8 @@ var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
   reasoningEffort: import_zod12.z.string().nullish(),
   strictSchemas: import_zod12.z.boolean().nullish(),
   instructions: import_zod12.z.string().nullish(),
-  reasoningSummary: import_zod12.z.string().nullish()
+  reasoningSummary: import_zod12.z.string().nullish(),
+  serviceTier: import_zod12.z.enum(["auto", "flex"]).nullish()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
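
The Responses API models accept the same option, and their schema additionally allows null. A usage sketch, assuming the openai.responses(...) model factory, which is not shown in this diff:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('o3'),
  prompt: 'Outline a migration plan for the billing service.',
  providerOptions: {
    openai: { serviceTier: 'flex' }, // 'auto' (default) or null leave tier selection to OpenAI
  },
});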