@ai-sdk/openai 2.0.0-alpha.10 → 2.0.0-alpha.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # @ai-sdk/openai

+ ## 2.0.0-alpha.11
+
+ ### Patch Changes
+
+ - 8d12da5: feat(provider/openai): add serviceTier option for flex processing
+ - Updated dependencies [c1e6647]
+   - @ai-sdk/provider@2.0.0-alpha.11
+   - @ai-sdk/provider-utils@3.0.0-alpha.11
+
  ## 2.0.0-alpha.10

  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -164,12 +164,14 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<["auto", "flex"]>>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
  parallelToolCalls?: boolean | null | undefined;
  reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
+ serviceTier?: "auto" | "flex" | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
@@ -180,6 +182,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
+ serviceTier?: "auto" | "flex" | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
package/dist/index.d.ts CHANGED
@@ -164,12 +164,14 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<["auto", "flex"]>>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
  parallelToolCalls?: boolean | null | undefined;
  reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
+ serviceTier?: "auto" | "flex" | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
@@ -180,6 +182,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  reasoningEffort?: string | null | undefined;
  store?: boolean | null | undefined;
  metadata?: any;
+ serviceTier?: "auto" | "flex" | null | undefined;
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
package/dist/index.js CHANGED
@@ -282,7 +282,14 @@ var openaiProviderOptions = import_zod.z.object({
   *
   * @default true
   */
- structuredOutputs: import_zod.z.boolean().optional()
+ structuredOutputs: import_zod.z.boolean().optional(),
+ /**
+  * Service tier for the request. Set to 'flex' for 50% cheaper processing
+  * at the cost of increased latency. Only available for o3 and o4-mini models.
+  *
+  * @default 'auto'
+  */
+ serviceTier: import_zod.z.enum(["auto", "flex"]).optional()
  });

  // src/openai-error.ts
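For context, the new chat provider option above is read from `providerOptions.openai` at call time and mapped to the `service_tier` request field. A minimal usage sketch, assuming the `generateText` / `providerOptions` call shape of the current AI SDK alphas (the model ID and prompt here are illustrative, not taken from this diff):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Request flex processing (cheaper, higher latency) on a supported model.
const { text } = await generateText({
  model: openai.chat('o4-mini'),
  prompt: 'Summarize flex processing in one sentence.',
  providerOptions: {
    openai: { serviceTier: 'flex' },
  },
});

console.log(text);
```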
@@ -453,6 +460,7 @@ var OpenAIChatLanguageModel = class {
  metadata: openaiOptions.metadata,
  prediction: openaiOptions.prediction,
  reasoning_effort: openaiOptions.reasoningEffort,
+ service_tier: openaiOptions.serviceTier,
  // messages:
  messages
  };
@@ -526,6 +534,14 @@ var OpenAIChatLanguageModel = class {
  });
  }
  }
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+   warnings.push({
+     type: "unsupported-setting",
+     setting: "serviceTier",
+     details: "flex processing is only available for o3 and o4-mini models"
+   });
+   baseArgs.service_tier = void 0;
+ }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
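The block above degrades gracefully instead of failing the request: the unsupported setting is stripped from the payload and reported as a call warning. A sketch of how that surfaces, assuming warnings are exposed on the `generateText` result as in the current alphas (model ID and prompt are illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// 'gpt-4o' does not match the o3 / o4-mini check, so service_tier is dropped
// from the request and a warning is returned instead.
const result = await generateText({
  model: openai.chat('gpt-4o'),
  prompt: 'Hello!',
  providerOptions: { openai: { serviceTier: 'flex' } },
});

console.log(result.warnings);
// expected shape per the diff above:
// [{ type: 'unsupported-setting', setting: 'serviceTier',
//    details: 'flex processing is only available for o3 and o4-mini models' }]
```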
@@ -897,6 +913,9 @@ var openaiChatChunkSchema = import_zod3.z.union([
  function isReasoningModel(modelId) {
  return modelId.startsWith("o");
  }
+ function supportsFlexProcessing(modelId) {
+   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
  function getSystemMessageMode(modelId) {
  var _a, _b;
  if (!isReasoningModel(modelId)) {
@@ -2012,6 +2031,7 @@ var OpenAIResponsesLanguageModel = class {
  store: openaiOptions == null ? void 0 : openaiOptions.store,
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
  // model-specific settings:
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
@@ -2045,6 +2065,14 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+   warnings.push({
+     type: "unsupported-setting",
+     setting: "serviceTier",
+     details: "flex processing is only available for o3 and o4-mini models"
+   });
+   delete baseArgs.service_tier;
+ }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
@@ -2474,6 +2502,9 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
+ function supportsFlexProcessing2(modelId) {
+   return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
  var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
  metadata: import_zod12.z.any().nullish(),
  parallelToolCalls: import_zod12.z.boolean().nullish(),
@@ -2483,7 +2514,8 @@ var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
  reasoningEffort: import_zod12.z.string().nullish(),
  strictSchemas: import_zod12.z.boolean().nullish(),
  instructions: import_zod12.z.string().nullish(),
- reasoningSummary: import_zod12.z.string().nullish()
+ reasoningSummary: import_zod12.z.string().nullish(),
+ serviceTier: import_zod12.z.enum(["auto", "flex"]).nullish()
  });

  // src/openai-speech-model.ts
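The Responses API path accepts the same option through the provider options schema above (a nullish `'auto' | 'flex'` enum) and applies the same unsupported-model fallback. A minimal sketch, assuming the `openai.responses()` model factory and the same `providerOptions` call shape (model ID and prompt are illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Same option, Responses API model; on unsupported models the provider
// removes service_tier from the request and emits a call warning instead.
const { text } = await generateText({
  model: openai.responses('o3'),
  prompt: 'Explain flex processing in one sentence.',
  providerOptions: { openai: { serviceTier: 'flex' } },
});
```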