@ai-sdk/openai 2.0.0-beta.2 → 2.0.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -284,7 +284,13 @@ var openaiProviderOptions = z.object({
    *
    * @default 'auto'
    */
-  serviceTier: z.enum(["auto", "flex"]).optional()
+  serviceTier: z.enum(["auto", "flex"]).optional(),
+  /**
+   * Whether to use strict JSON schema validation.
+   *
+   * @default true
+   */
+  strictJsonSchema: z.boolean().optional()
 });
 
 // src/openai-error.ts
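
For context, a minimal usage sketch for the strictJsonSchema chat option declared above, assuming the AI SDK's providerOptions passthrough and generateObject; this is not part of the diff, and the model id, schema, and prompt are placeholders:

import { generateObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

// Sketch only: pass the new chat provider option through providerOptions.openai.
const { object } = await generateObject({
  model: openai("gpt-4o"),
  schema: z.object({ city: z.string(), country: z.string() }),
  providerOptions: {
    openai: {
      // corresponds to openaiProviderOptions.strictJsonSchema above
      strictJsonSchema: true,
    },
  },
  prompt: "Name a city and its country.",
});
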
@@ -383,7 +389,8 @@ var webSearchPreview = createProviderDefinedToolFactory2({
 function prepareTools({
   tools,
   toolChoice,
-  structuredOutputs
+  structuredOutputs,
+  strictJsonSchema
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
@@ -400,7 +407,7 @@ function prepareTools({
             name: tool.name,
             description: tool.description,
             parameters: tool.inputSchema,
-            strict: structuredOutputs ? true : void 0
+            strict: structuredOutputs ? strictJsonSchema : void 0
           }
         });
         break;
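
For illustration only, the function tool entry built above, with placeholder tool fields; strict is sent only when structuredOutputs is enabled, and it carries the resolved strictJsonSchema value (see the getArgs change later in this diff) instead of a hard-coded true:

// Hypothetical chat function tool payload, assuming structuredOutputs is
// enabled and strictJsonSchema was left unset (so it resolves to false).
const chatFunctionTool = {
  type: "function",
  function: {
    name: "getWeather",
    description: "Get the weather for a city.",
    parameters: { type: "object", properties: { city: { type: "string" } } },
    strict: false, // structuredOutputs ? strictJsonSchema : undefined
  },
};
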
@@ -492,7 +499,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d;
     const warnings = [];
     const openaiOptions = (_a = await parseProviderOptions({
       provider: "openai",
@@ -520,6 +527,7 @@ var OpenAIChatLanguageModel = class {
       }
     );
     warnings.push(...messageWarnings);
+    const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -535,18 +543,15 @@ var OpenAIChatLanguageModel = class {
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
-        // TODO convert into provider option
-        structuredOutputs && responseFormat.schema != null ? {
-          type: "json_schema",
-          json_schema: {
-            schema: responseFormat.schema,
-            strict: true,
-            name: (_c = responseFormat.name) != null ? _c : "response",
-            description: responseFormat.description
-          }
-        } : { type: "json_object" }
-      ) : void 0,
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+        type: "json_schema",
+        json_schema: {
+          schema: responseFormat.schema,
+          strict: strictJsonSchema,
+          name: (_d = responseFormat.name) != null ? _d : "response",
+          description: responseFormat.description
+        }
+      } : { type: "json_object" } : void 0,
       stop: stopSequences,
       seed,
       // openai specific settings:
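
As a sketch of the request field assembled above when responseFormat.type is "json" and a schema is present (the schema is a placeholder; strict reflects the strictJsonSchema resolution shown earlier in this diff):

// Hypothetical Chat Completions response_format value with strictJsonSchema left unset.
const response_format = {
  type: "json_schema",
  json_schema: {
    schema: { type: "object", properties: { answer: { type: "string" } } },
    strict: false, // true when providerOptions.openai.strictJsonSchema === true
    name: "response",
    description: undefined,
  },
};
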
@@ -645,7 +650,8 @@ var OpenAIChatLanguageModel = class {
     } = prepareTools({
       tools,
       toolChoice,
-      structuredOutputs
+      structuredOutputs,
+      strictJsonSchema
     });
     return {
       args: {
@@ -2155,7 +2161,7 @@ import {
 function prepareResponsesTools({
   tools,
   toolChoice,
-  strict
+  strictJsonSchema
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
@@ -2171,7 +2177,7 @@ function prepareResponsesTools({
           name: tool.name,
           description: tool.description,
           parameters: tool.inputSchema,
-          strict: strict ? true : void 0
+          strict: strictJsonSchema
         });
         break;
       case "provider-defined":
@@ -2279,7 +2285,7 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
-    const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+    const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
     const baseArgs = {
       model: this.modelId,
       input: messages,
@@ -2290,7 +2296,7 @@ var OpenAIResponsesLanguageModel = class {
       text: {
         format: responseFormat.schema != null ? {
           type: "json_schema",
-          strict: isStrict,
+          strict: strictJsonSchema,
           name: (_b = responseFormat.name) != null ? _b : "response",
           description: responseFormat.description,
           schema: responseFormat.schema
@@ -2353,7 +2359,7 @@ var OpenAIResponsesLanguageModel = class {
     } = prepareResponsesTools({
       tools,
       toolChoice,
-      strict: isStrict
+      strictJsonSchema
     });
     return {
       args: {
@@ -2958,7 +2964,7 @@ var openaiResponsesProviderOptionsSchema = z14.object({
   store: z14.boolean().nullish(),
   user: z14.string().nullish(),
   reasoningEffort: z14.string().nullish(),
-  strictSchemas: z14.boolean().nullish(),
+  strictJsonSchema: z14.boolean().nullish(),
   instructions: z14.string().nullish(),
   reasoningSummary: z14.string().nullish(),
   serviceTier: z14.enum(["auto", "flex"]).nullish()
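
Finally, a rough usage sketch for the renamed Responses API option above, assuming the openai.responses() model factory and the providerOptions passthrough; not part of the diff, and the model id and prompt are placeholders:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch only: opt back into strict schema validation on the Responses API.
const { text } = await generateText({
  model: openai.responses("gpt-4o"),
  providerOptions: {
    openai: {
      // formerly strictSchemas (default true); now strictJsonSchema (default false)
      strictJsonSchema: true,
    },
  },
  prompt: "Summarize the release in one sentence.",
});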