@ai-sdk/openai 1.3.1 → 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,6 @@
  import { LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
+ import { z } from 'zod';

  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
  interface OpenAIChatSettings {
@@ -256,5 +257,34 @@ declare class OpenAIResponsesLanguageModel implements LanguageModelV1 {
  doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
  }
+ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+   metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+   parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+   previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+   store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+   user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+   reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+   strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+   instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ }, "strip", z.ZodTypeAny, {
+   user?: string | null | undefined;
+   store?: boolean | null | undefined;
+   metadata?: any;
+   reasoningEffort?: string | null | undefined;
+   parallelToolCalls?: boolean | null | undefined;
+   previousResponseId?: string | null | undefined;
+   strictSchemas?: boolean | null | undefined;
+   instructions?: string | null | undefined;
+ }, {
+   user?: string | null | undefined;
+   store?: boolean | null | undefined;
+   metadata?: any;
+   reasoningEffort?: string | null | undefined;
+   parallelToolCalls?: boolean | null | undefined;
+   previousResponseId?: string | null | undefined;
+   strictSchemas?: boolean | null | undefined;
+   instructions?: string | null | undefined;
+ }>;
+ type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

- export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, modelMaxImagesPerCall };
+ export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, modelMaxImagesPerCall };
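For orientation, below is a sketch of how the newly exported OpenAIResponsesProviderOptions type and the added instructions option might be used from application code. It assumes the AI SDK 4.2-era generateText API with providerOptions, and that the type is also re-exported from the package's public entry point (the declarations above are from the internal build), so treat it as illustrative rather than authoritative.

// Illustrative usage sketch (not part of the diff); assumes ai >= 4.2 and a public
// re-export of OpenAIResponsesProviderOptions from '@ai-sdk/openai'.
import { generateText } from 'ai';
import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'What changed in @ai-sdk/openai between 1.3.1 and 1.3.3?',
  providerOptions: {
    openai: {
      instructions: 'Answer as concisely as possible.', // option added in this release
      strictSchemas: true,
      store: false,
    } satisfies OpenAIResponsesProviderOptions,
  },
});
console.log(text);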
@@ -1603,7 +1603,6 @@ var openaiImageResponseSchema = import_zod5.z.object({
  });

  // src/responses/openai-responses-language-model.ts
- var import_provider9 = require("@ai-sdk/provider");
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
  var import_zod6 = require("zod");

@@ -1843,7 +1842,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata,
  responseFormat
  }) {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
  const type = mode.type;
@@ -1882,19 +1881,12 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const parsedProviderOptions = providerMetadata != null ? (0, import_provider_utils8.safeValidateTypes)({
-   value: providerMetadata,
-   schema: providerOptionsSchema
- }) : { success: true, value: void 0 };
- if (!parsedProviderOptions.success) {
-   throw new import_provider9.InvalidArgumentError({
-     argument: "providerOptions",
-     message: "invalid provider options",
-     cause: parsedProviderOptions.error
-   });
- }
- const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
- const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
+ const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
+   provider: "openai",
+   providerOptions: providerMetadata,
+   schema: openaiResponsesProviderOptionsSchema
+ });
+ const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
  const baseArgs = {
  model: this.modelId,
  input: messages,
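The hunk above swaps the inlined safeValidateTypes / InvalidArgumentError handling for the shared parseProviderOptions helper from @ai-sdk/provider-utils. The TypeScript sketch below reconstructs what that delegated validation step is assumed to do, based purely on the removed inline code; the helper's actual implementation may differ in detail.

// Rough sketch of the validation now delegated to parseProviderOptions,
// reconstructed from the removed inline code above (not the real implementation).
import { safeValidateTypes } from '@ai-sdk/provider-utils';
import { InvalidArgumentError } from '@ai-sdk/provider';
import { z } from 'zod';

function parseProviderOptionsSketch<T>({
  provider,
  providerOptions,
  schema,
}: {
  provider: string;
  providerOptions: Record<string, unknown> | undefined;
  schema: z.ZodType<T>;
}): T | undefined {
  const value = providerOptions?.[provider];
  if (value == null) {
    return undefined; // no options for this provider; nothing to validate
  }
  const result = safeValidateTypes({ value, schema });
  if (!result.success) {
    // same error shape the removed code threw for malformed provider options
    throw new InvalidArgumentError({
      argument: 'providerOptions',
      message: `invalid ${provider} provider options`,
      cause: result.error,
    });
  }
  return result.value;
}

If the helper follows the removed code, callers should see unchanged behavior: absent options yield undefined, and malformed options still fail fast with an InvalidArgumentError.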
@@ -1906,7 +1898,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
    type: "json_schema",
    strict: isStrict,
-   name: (_c = responseFormat.name) != null ? _c : "response",
+   name: (_b = responseFormat.name) != null ? _b : "response",
    description: responseFormat.description,
    schema: responseFormat.schema
  } : { type: "json_object" }
@@ -1918,6 +1910,7 @@ var OpenAIResponsesLanguageModel = class {
  previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
  store: openaiOptions == null ? void 0 : openaiOptions.store,
  user: openaiOptions == null ? void 0 : openaiOptions.user,
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
  ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
  reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
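With this addition, providerOptions.openai.instructions is forwarded as the top-level instructions field of the outgoing Responses API request, where it acts as a system/developer-style instruction for the generated response. Roughly, the request body assembled by doGenerate now looks like the sketch below (illustrative values only; unrelated fields omitted).

// Illustrative only: approximate shape of the Responses API request body
// once instructions is set via providerOptions.openai.
const requestBody = {
  model: 'o3-mini',
  input: [/* prompt messages converted by the provider */],
  instructions: 'Answer as concisely as possible.', // newly forwarded field
  store: false,
  user: 'user-1234',
  // reasoning-model specific, only when reasoningEffort is set:
  reasoning: { effort: 'low' },
};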
@@ -1968,7 +1961,7 @@ var OpenAIResponsesLanguageModel = class {
  format: mode.schema != null ? {
    type: "json_schema",
    strict: isStrict,
-   name: (_d = mode.name) != null ? _d : "response",
+   name: (_c = mode.name) != null ? _c : "response",
    description: mode.description,
    schema: mode.schema
  } : { type: "json_object" }
@@ -2361,17 +2354,6 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
- var providerOptionsSchema = import_zod6.z.object({
-   openai: import_zod6.z.object({
-     metadata: import_zod6.z.any().nullish(),
-     parallelToolCalls: import_zod6.z.boolean().nullish(),
-     previousResponseId: import_zod6.z.string().nullish(),
-     store: import_zod6.z.boolean().nullish(),
-     user: import_zod6.z.string().nullish(),
-     reasoningEffort: import_zod6.z.string().nullish(),
-     strictSchemas: import_zod6.z.boolean().nullish()
-   }).nullish()
- });
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2393,6 +2375,16 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
+ var openaiResponsesProviderOptionsSchema = import_zod6.z.object({
+   metadata: import_zod6.z.any().nullish(),
+   parallelToolCalls: import_zod6.z.boolean().nullish(),
+   previousResponseId: import_zod6.z.string().nullish(),
+   store: import_zod6.z.boolean().nullish(),
+   user: import_zod6.z.string().nullish(),
+   reasoningEffort: import_zod6.z.string().nullish(),
+   strictSchemas: import_zod6.z.boolean().nullish(),
+   instructions: import_zod6.z.string().nullish()
+ });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  OpenAIChatLanguageModel,