@ai-sdk/openai 2.0.0-beta.1 → 2.0.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -17,7 +17,7 @@ import {
  parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z5 } from "zod/v4";

  // src/convert-to-openai-chat-messages.ts
  import {
@@ -230,7 +230,7 @@ function mapOpenAIFinishReason(finishReason) {
  }

  // src/openai-chat-options.ts
- import { z } from "zod";
+ import { z } from "zod/v4";
  var openaiProviderOptions = z.object({
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
@@ -273,11 +273,11 @@ var openaiProviderOptions = z.object({
  /**
  * Metadata to associate with the request.
  */
- metadata: z.record(z.string()).optional(),
+ metadata: z.record(z.string().max(64), z.string().max(512)).optional(),
  /**
  * Parameters for prediction mode.
  */
- prediction: z.record(z.any()).optional(),
+ prediction: z.record(z.string(), z.any()).optional(),
  /**
  * Whether to use structured outputs.
  *
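
The `z.record` changes above follow from the move from `zod` to `zod/v4`: zod 4 drops the single-argument `z.record(valueSchema)` form, so every record now needs an explicit key schema. The `metadata` schema additionally encodes the 64-character key / 512-character value limits visible in the diff. A minimal sketch of the resulting shape (illustrative only, not the package's exported schema):

```ts
import { z } from "zod/v4";

// zod v4: z.record(keySchema, valueSchema) — the key schema is required.
const metadataSchema = z.record(z.string().max(64), z.string().max(512));

metadataSchema.parse({ requestId: "abc-123" });          // passes
// metadataSchema.parse({ ["k".repeat(65)]: "value" });  // would throw: key exceeds 64 chars
```
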
@@ -290,11 +290,17 @@ var openaiProviderOptions = z.object({
  *
  * @default 'auto'
  */
- serviceTier: z.enum(["auto", "flex"]).optional()
+ serviceTier: z.enum(["auto", "flex"]).optional(),
+ /**
+ * Whether to use strict JSON schema validation.
+ *
+ * @default true
+ */
+ strictJsonSchema: z.boolean().optional()
  });

  // src/openai-error.ts
- import { z as z2 } from "zod";
+ import { z as z2 } from "zod/v4";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
  var openaiErrorDataSchema = z2.object({
  error: z2.object({
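
The new `strictJsonSchema` chat option is threaded into both `response_format.json_schema.strict` and the `strict` flag on function tools (see the `prepareTools` and `response_format` hunks below). Note that the code resolves it to `false` when unset, even though the added JSDoc still says `@default true`. A hedged usage sketch, assuming the AI SDK's `generateObject` / `providerOptions` call shape (the model id is a placeholder):

```ts
import { generateObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod/v4";

// Opt in to strict JSON schema validation for structured output.
// When the option is omitted, the chat model now sends strict: false.
const { object } = await generateObject({
  model: openai("gpt-4o"),
  schema: z.object({ city: z.string(), population: z.number() }),
  prompt: "Name a large city and report its population.",
  providerOptions: {
    openai: { strictJsonSchema: true },
  },
});
```
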
@@ -319,7 +325,7 @@ import {

  // src/tool/file-search.ts
  import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z3 } from "zod/v4";
  var fileSearchArgsSchema = z3.object({
  /**
  * List of vector store IDs to search through. If not provided, searches all available vector stores.
@@ -344,7 +350,7 @@ var fileSearch = createProviderDefinedToolFactory({

  // src/tool/web-search-preview.ts
  import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z4 } from "zod/v4";
  var webSearchPreviewArgsSchema = z4.object({
  /**
  * Search context size to use for the web search.
@@ -389,7 +395,8 @@ var webSearchPreview = createProviderDefinedToolFactory2({
  function prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -406,7 +413,7 @@ function prepareTools({
  name: tool.name,
  description: tool.description,
  parameters: tool.inputSchema,
- strict: structuredOutputs ? true : void 0
+ strict: structuredOutputs ? strictJsonSchema : void 0
  }
  });
  break;
@@ -498,7 +505,7 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c;
+ var _a, _b, _c, _d;
  const warnings = [];
  const openaiOptions = (_a = await parseProviderOptions({
  provider: "openai",
@@ -526,6 +533,7 @@ var OpenAIChatLanguageModel = class {
  }
  );
  warnings.push(...messageWarnings);
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -541,18 +549,15 @@ var OpenAIChatLanguageModel = class {
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
- // TODO convert into provider option
- structuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_c = responseFormat.name) != null ? _c : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" }
- ) : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: strictJsonSchema,
+ name: (_d = responseFormat.name) != null ? _d : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
@@ -651,7 +656,8 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  });
  return {
  args: {
@@ -1084,7 +1090,7 @@ import {
  parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod";
+ import { z as z7 } from "zod/v4";

  // src/convert-to-openai-completion-prompt.ts
  import {
@@ -1165,7 +1171,7 @@ ${user}:`]
  }

  // src/openai-completion-options.ts
- import { z as z6 } from "zod";
+ import { z as z6 } from "zod/v4";
  var openaiCompletionProviderOptions = z6.object({
  /**
  Echo back the prompt in addition to the completion.
@@ -1490,10 +1496,10 @@ import {
  parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z9 } from "zod";
+ import { z as z9 } from "zod/v4";

  // src/openai-embedding-options.ts
- import { z as z8 } from "zod";
+ import { z as z8 } from "zod/v4";
  var openaiEmbeddingProviderOptions = z8.object({
  /**
  The number of dimensions the resulting output embeddings should have.
@@ -1581,7 +1587,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z10 } from "zod";
+ import { z as z10 } from "zod/v4";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1689,10 +1695,10 @@ import {
  parseProviderOptions as parseProviderOptions4,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z12 } from "zod";
+ import { z as z12 } from "zod/v4";

  // src/openai-transcription-options.ts
- import { z as z11 } from "zod";
+ import { z as z11 } from "zod/v4";
  var openAITranscriptionProviderOptions = z11.object({
  /**
  * Additional information to include in the transcription response.
@@ -1885,7 +1891,7 @@ import {
  parseProviderOptions as parseProviderOptions5,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z13 } from "zod";
+ import { z as z13 } from "zod/v4";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
@@ -2055,7 +2061,7 @@ import {
  function prepareResponsesTools({
  tools,
  toolChoice,
- strict
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -2071,7 +2077,7 @@ function prepareResponsesTools({
  name: tool.name,
  description: tool.description,
  parameters: tool.inputSchema,
- strict: strict ? true : void 0
+ strict: strictJsonSchema
  });
  break;
  case "provider-defined":
@@ -2179,7 +2185,7 @@ var OpenAIResponsesLanguageModel = class {
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
- const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -2190,7 +2196,7 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: responseFormat.schema != null ? {
  type: "json_schema",
- strict: isStrict,
+ strict: strictJsonSchema,
  name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
@@ -2253,7 +2259,7 @@ var OpenAIResponsesLanguageModel = class {
  } = prepareResponsesTools({
  tools,
  toolChoice,
- strict: isStrict
+ strictJsonSchema
  });
  return {
  args: {
@@ -2858,7 +2864,7 @@ var openaiResponsesProviderOptionsSchema = z13.object({
  store: z13.boolean().nullish(),
  user: z13.string().nullish(),
  reasoningEffort: z13.string().nullish(),
- strictSchemas: z13.boolean().nullish(),
+ strictJsonSchema: z13.boolean().nullish(),
  instructions: z13.string().nullish(),
  reasoningSummary: z13.string().nullish(),
  serviceTier: z13.enum(["auto", "flex"]).nullish()
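
In the Responses API model, the provider option is renamed from `strictSchemas` to `strictJsonSchema`, and its default when unset changes from `true` to `false` (see the `doGenerate` hunk above); the resolved value is now also passed directly as the `strict` flag on function tools. A hedged sketch of passing the renamed option, assuming the AI SDK's `openai.responses` factory and `providerOptions` passthrough (the model id is a placeholder):

```ts
import { generateObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod/v4";

// `strictSchemas` is now `strictJsonSchema`; leaving it out now sends
// strict: false for json_schema output and for function tools.
const { object } = await generateObject({
  model: openai.responses("gpt-4o"),
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: "Describe this release as a title plus a few tags.",
  providerOptions: {
    openai: { strictJsonSchema: true },
  },
});
```
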
@@ -2871,7 +2877,7 @@ import {
  parseProviderOptions as parseProviderOptions6,
  postJsonToApi as postJsonToApi6
  } from "@ai-sdk/provider-utils";
- import { z as z14 } from "zod";
+ import { z as z14 } from "zod/v4";
  var OpenAIProviderOptionsSchema = z14.object({
  instructions: z14.string().nullish(),
  speed: z14.number().min(0.25).max(4).default(1).nullish()