@ai-sdk/openai 2.0.0-beta.2 → 2.0.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -290,7 +290,13 @@ var openaiProviderOptions = z.object({
    *
    * @default 'auto'
    */
-  serviceTier: z.enum(["auto", "flex"]).optional()
+  serviceTier: z.enum(["auto", "flex"]).optional(),
+  /**
+   * Whether to use strict JSON schema validation.
+   *
+   * @default true
+   */
+  strictJsonSchema: z.boolean().optional()
 });

 // src/openai-error.ts
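
The hunk above adds strictJsonSchema to the chat model's provider options. A minimal usage sketch, assuming the AI SDK v5 beta call surface (generateObject from 'ai', the openai provider export, and a placeholder model id, prompt, and schema); only the option name itself comes from this diff:

import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Hypothetical example: pass the new option through providerOptions.openai.
// Note: in this release the runtime value falls back to false (see the
// `!= null ? _c : false` fallback further down), despite the `@default true` JSDoc.
const { object } = await generateObject({
  model: openai.chat('gpt-4o-mini'),
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: 'Suggest a title and tags for a post about strict JSON schemas.',
  providerOptions: {
    openai: { strictJsonSchema: true },
  },
});
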
@@ -389,7 +395,8 @@ var webSearchPreview = createProviderDefinedToolFactory2({
 function prepareTools({
   tools,
   toolChoice,
-  structuredOutputs
+  structuredOutputs,
+  strictJsonSchema
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
@@ -406,7 +413,7 @@ function prepareTools({
           name: tool.name,
           description: tool.description,
           parameters: tool.inputSchema,
-          strict: structuredOutputs ? true : void 0
+          strict: structuredOutputs ? strictJsonSchema : void 0
         }
       });
       break;
@@ -498,7 +505,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d;
    const warnings = [];
     const openaiOptions = (_a = await parseProviderOptions({
       provider: "openai",
@@ -526,6 +533,7 @@ var OpenAIChatLanguageModel = class {
       }
     );
     warnings.push(...messageWarnings);
+    const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -541,18 +549,15 @@ var OpenAIChatLanguageModel = class {
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
-        // TODO convert into provider option
-        structuredOutputs && responseFormat.schema != null ? {
-          type: "json_schema",
-          json_schema: {
-            schema: responseFormat.schema,
-            strict: true,
-            name: (_c = responseFormat.name) != null ? _c : "response",
-            description: responseFormat.description
-          }
-        } : { type: "json_object" }
-      ) : void 0,
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+        type: "json_schema",
+        json_schema: {
+          schema: responseFormat.schema,
+          strict: strictJsonSchema,
+          name: (_d = responseFormat.name) != null ? _d : "response",
+          description: responseFormat.description
+        }
+      } : { type: "json_object" } : void 0,
       stop: stopSequences,
       seed,
       // openai specific settings:
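
For reference, a rough sketch of the Chat Completions request body this branch produces when structured outputs and a JSON response format are in play (field names follow the code above and OpenAI's documented response_format shape; the model, message, and schema are placeholders):

// Illustrative payload fragment only, not emitted verbatim by the SDK.
const chatRequestBody = {
  model: 'gpt-4o-mini',
  messages: [{ role: 'user', content: 'Return a title as JSON.' }],
  response_format: {
    type: 'json_schema',
    json_schema: {
      name: 'response',   // responseFormat.name ?? 'response'
      strict: false,      // strictJsonSchema (provider option, falls back to false)
      schema: { type: 'object', properties: { title: { type: 'string' } }, required: ['title'] },
    },
  },
};
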
@@ -651,7 +656,8 @@ var OpenAIChatLanguageModel = class {
     } = prepareTools({
       tools,
       toolChoice,
-      structuredOutputs
+      structuredOutputs,
+      strictJsonSchema
     });
     return {
       args: {
@@ -2055,7 +2061,7 @@ import {
 function prepareResponsesTools({
   tools,
   toolChoice,
-  strict
+  strictJsonSchema
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
@@ -2071,7 +2077,7 @@ function prepareResponsesTools({
         name: tool.name,
         description: tool.description,
         parameters: tool.inputSchema,
-        strict: strict ? true : void 0
+        strict: strictJsonSchema
       });
       break;
     case "provider-defined":
@@ -2179,7 +2185,7 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
-    const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+    const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
     const baseArgs = {
       model: this.modelId,
       input: messages,
@@ -2190,7 +2196,7 @@ var OpenAIResponsesLanguageModel = class {
       text: {
         format: responseFormat.schema != null ? {
           type: "json_schema",
-          strict: isStrict,
+          strict: strictJsonSchema,
           name: (_b = responseFormat.name) != null ? _b : "response",
           description: responseFormat.description,
           schema: responseFormat.schema
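
The Responses model routes the same flag into its text.format block instead. A comparable sketch of that request fragment (field names taken from the code above; values are placeholders):

// Illustrative Responses API payload fragment only.
const responsesRequestBody = {
  model: 'gpt-4o-mini',
  input: [{ role: 'user', content: 'Return a title as JSON.' }],
  text: {
    format: {
      type: 'json_schema',
      name: 'response',   // responseFormat.name ?? 'response'
      strict: false,      // strictJsonSchema (now falls back to false, previously true)
      schema: { type: 'object', properties: { title: { type: 'string' } }, required: ['title'] },
    },
  },
};
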
@@ -2253,7 +2259,7 @@ var OpenAIResponsesLanguageModel = class {
     } = prepareResponsesTools({
       tools,
       toolChoice,
-      strict: isStrict
+      strictJsonSchema
     });
     return {
       args: {
@@ -2858,7 +2864,7 @@ var openaiResponsesProviderOptionsSchema = z13.object({
   store: z13.boolean().nullish(),
   user: z13.string().nullish(),
   reasoningEffort: z13.string().nullish(),
-  strictSchemas: z13.boolean().nullish(),
+  strictJsonSchema: z13.boolean().nullish(),
   instructions: z13.string().nullish(),
   reasoningSummary: z13.string().nullish(),
   serviceTier: z13.enum(["auto", "flex"]).nullish()
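
On the Responses provider options, strictJsonSchema replaces the old strictSchemas option and its fallback flips from true to false, so callers who relied on strict schemas by default now have to opt in. A hedged migration sketch, assuming the openai.responses() factory and generateObject from 'ai' (model id, prompt, and schema are placeholders):

import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Before (beta.2): providerOptions: { openai: { strictSchemas: true } }
// After  (beta.3): providerOptions: { openai: { strictJsonSchema: true } }
const { object } = await generateObject({
  model: openai.responses('gpt-4o-mini'),
  schema: z.object({ summary: z.string() }),
  prompt: 'Summarize this change in one sentence.',
  providerOptions: {
    openai: { strictJsonSchema: true },
  },
});
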