@ai-sdk/openai 1.3.0 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,6 @@
 import { LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
+import { z } from 'zod';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
 interface OpenAIChatSettings {
@@ -256,5 +257,31 @@ declare class OpenAIResponsesLanguageModel implements LanguageModelV1 {
     doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
 }
+declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+}, "strip", z.ZodTypeAny, {
+    user?: string | null | undefined;
+    store?: boolean | null | undefined;
+    metadata?: any;
+    reasoningEffort?: string | null | undefined;
+    parallelToolCalls?: boolean | null | undefined;
+    previousResponseId?: string | null | undefined;
+    strictSchemas?: boolean | null | undefined;
+}, {
+    user?: string | null | undefined;
+    store?: boolean | null | undefined;
+    metadata?: any;
+    reasoningEffort?: string | null | undefined;
+    parallelToolCalls?: boolean | null | undefined;
+    previousResponseId?: string | null | undefined;
+    strictSchemas?: boolean | null | undefined;
+}>;
+type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, modelMaxImagesPerCall };
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, modelMaxImagesPerCall };
@@ -1,5 +1,6 @@
 import { LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
+import { z } from 'zod';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
 interface OpenAIChatSettings {
@@ -256,5 +257,31 @@ declare class OpenAIResponsesLanguageModel implements LanguageModelV1 {
     doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
 }
+declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+}, "strip", z.ZodTypeAny, {
+    user?: string | null | undefined;
+    store?: boolean | null | undefined;
+    metadata?: any;
+    reasoningEffort?: string | null | undefined;
+    parallelToolCalls?: boolean | null | undefined;
+    previousResponseId?: string | null | undefined;
+    strictSchemas?: boolean | null | undefined;
+}, {
+    user?: string | null | undefined;
+    store?: boolean | null | undefined;
+    metadata?: any;
+    reasoningEffort?: string | null | undefined;
+    parallelToolCalls?: boolean | null | undefined;
+    previousResponseId?: string | null | undefined;
+    strictSchemas?: boolean | null | undefined;
+}>;
+type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, modelMaxImagesPerCall };
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, modelMaxImagesPerCall };
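Both declaration files now export OpenAIResponsesProviderOptions, the type inferred from the new Zod schema for Responses API provider options (metadata, parallelToolCalls, previousResponseId, store, user, reasoningEffort, strictSchemas). A minimal usage sketch follows; it assumes an AI SDK version whose generateText accepts providerOptions and that the type is reachable through the internal entry point shown in this diff, so treat the import path and the call shape as assumptions rather than documented API:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
// Assumed import path: the type is declared in the internal entry point above.
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai/internal';

// Typed options for a Responses API model; every field is optional and nullable.
const responsesOptions: OpenAIResponsesProviderOptions = {
  store: false,         // do not persist the response server-side
  user: 'user-1234',    // stable end-user identifier forwarded to OpenAI
  strictSchemas: false, // opt out of strict JSON schema mode
};

const { text } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Summarize the latest release notes in one sentence.',
  // The provider reads the options nested under the "openai" key and validates
  // them against openaiResponsesProviderOptionsSchema before building the request.
  providerOptions: { openai: responsesOptions },
});

console.log(text);

When strictSchemas is omitted, strict JSON schemas remain the default, as the isStrict fallback in the compiled output below shows.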
@@ -1603,7 +1603,6 @@ var openaiImageResponseSchema = import_zod5.z.object({
 });
 
 // src/responses/openai-responses-language-model.ts
-var import_provider9 = require("@ai-sdk/provider");
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
 var import_zod6 = require("zod");
 
@@ -1843,7 +1842,7 @@ var OpenAIResponsesLanguageModel = class {
     providerMetadata,
     responseFormat
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c;
     const warnings = [];
     const modelConfig = getResponsesModelConfig(this.modelId);
     const type = mode.type;
@@ -1882,19 +1881,12 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const parsedProviderOptions = providerMetadata != null ? (0, import_provider_utils8.safeValidateTypes)({
-      value: providerMetadata,
-      schema: providerOptionsSchema
-    }) : { success: true, value: void 0 };
-    if (!parsedProviderOptions.success) {
-      throw new import_provider9.InvalidArgumentError({
-        argument: "providerOptions",
-        message: "invalid provider options",
-        cause: parsedProviderOptions.error
-      });
-    }
-    const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
-    const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
+    const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
+      provider: "openai",
+      providerOptions: providerMetadata,
+      schema: openaiResponsesProviderOptionsSchema
+    });
+    const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
     const baseArgs = {
       model: this.modelId,
       input: messages,
@@ -1906,7 +1898,7 @@ var OpenAIResponsesLanguageModel = class {
         format: responseFormat.schema != null ? {
           type: "json_schema",
           strict: isStrict,
-          name: (_c = responseFormat.name) != null ? _c : "response",
+          name: (_b = responseFormat.name) != null ? _b : "response",
           description: responseFormat.description,
           schema: responseFormat.schema
         } : { type: "json_object" }
@@ -1968,7 +1960,7 @@ var OpenAIResponsesLanguageModel = class {
         format: mode.schema != null ? {
           type: "json_schema",
           strict: isStrict,
-          name: (_c = mode.name) != null ? _c : "response",
+          name: (_c = mode.name) != null ? _c : "response",
           description: mode.description,
           schema: mode.schema
         } : { type: "json_object" }
@@ -2361,17 +2353,6 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
-var providerOptionsSchema = import_zod6.z.object({
-  openai: import_zod6.z.object({
-    metadata: import_zod6.z.any().nullish(),
-    parallelToolCalls: import_zod6.z.boolean().nullish(),
-    previousResponseId: import_zod6.z.string().nullish(),
-    store: import_zod6.z.boolean().nullish(),
-    user: import_zod6.z.string().nullish(),
-    reasoningEffort: import_zod6.z.string().nullish(),
-    strictSchemas: import_zod6.z.boolean().nullish()
-  }).nullish()
-});
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2393,6 +2374,15 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
+var openaiResponsesProviderOptionsSchema = import_zod6.z.object({
+  metadata: import_zod6.z.any().nullish(),
+  parallelToolCalls: import_zod6.z.boolean().nullish(),
+  previousResponseId: import_zod6.z.string().nullish(),
+  store: import_zod6.z.boolean().nullish(),
+  user: import_zod6.z.string().nullish(),
+  reasoningEffort: import_zod6.z.string().nullish(),
+  strictSchemas: import_zod6.z.boolean().nullish()
+});
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   OpenAIChatLanguageModel,
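Taken together, the compiled changes replace the hand-rolled safeValidateTypes check (and the InvalidArgumentError import it required) with a single parseProviderOptions call that validates only the openai slice of the incoming provider metadata against the relocated openaiResponsesProviderOptionsSchema. The sketch below approximates that behaviour for illustration; it is not the actual implementation of parseProviderOptions from @ai-sdk/provider-utils, and the plain Error it throws stands in for whatever typed error the real helper reports:

import { z } from 'zod';

// Illustrative stand-in for parseProviderOptions (an approximation, not the
// library's code). It picks the options nested under a provider key, validates
// them against a Zod schema, and returns them typed, or undefined when that
// provider supplied no options.
function parseProviderOptionsSketch<SCHEMA extends z.ZodTypeAny>({
  provider,
  providerOptions,
  schema,
}: {
  provider: string;
  providerOptions: Record<string, unknown> | undefined;
  schema: SCHEMA;
}): z.infer<SCHEMA> | undefined {
  const raw = providerOptions?.[provider];
  if (raw == null) {
    return undefined;
  }
  const result = schema.safeParse(raw);
  if (!result.success) {
    // Hypothetical error handling; the real helper reports an AI SDK error type.
    throw new Error(`invalid ${provider} provider options: ${result.error.message}`);
  }
  return result.data;
}

// Mirrors the relocated schema and the call site in doGenerate/doStream.
const openaiResponsesProviderOptionsSchema = z.object({
  metadata: z.any().nullish(),
  parallelToolCalls: z.boolean().nullish(),
  previousResponseId: z.string().nullish(),
  store: z.boolean().nullish(),
  user: z.string().nullish(),
  reasoningEffort: z.string().nullish(),
  strictSchemas: z.boolean().nullish(),
});

const openaiOptions = parseProviderOptionsSketch({
  provider: 'openai',
  providerOptions: { openai: { strictSchemas: false } },
  schema: openaiResponsesProviderOptionsSchema,
});

const isStrict = openaiOptions?.strictSchemas ?? true; // strict JSON schemas unless opted out

Because the options object is optional end to end, openaiOptions can be undefined, which is why the call site in the diff falls back to strict JSON schemas (isStrict defaults to true).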