@ai-sdk/openai 1.2.3 → 1.2.4

@@ -1605,12 +1605,16 @@ var openaiImageResponseSchema = z5.object({
  });

  // src/responses/openai-responses-language-model.ts
+ import {
+   InvalidArgumentError
+ } from "@ai-sdk/provider";
  import {
    combineHeaders as combineHeaders5,
    createEventSourceResponseHandler as createEventSourceResponseHandler3,
    createJsonResponseHandler as createJsonResponseHandler5,
    generateId as generateId2,
-   postJsonToApi as postJsonToApi5
+   postJsonToApi as postJsonToApi5,
+   safeValidateTypes
  } from "@ai-sdk/provider-utils";
  import { z as z6 } from "zod";

@@ -1838,7 +1842,7 @@ var OpenAIResponsesLanguageModel = class {
      providerMetadata,
      responseFormat
    }) {
-     var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+     var _a, _b, _c, _d;
      const warnings = [];
      const modelConfig = getResponsesModelConfig(this.modelId);
      const type = mode.type;
@@ -1877,7 +1881,19 @@ var OpenAIResponsesLanguageModel = class {
        systemMessageMode: modelConfig.systemMessageMode
      });
      warnings.push(...messageWarnings);
-     const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
+     const parsedProviderOptions = providerMetadata != null ? safeValidateTypes({
+       value: providerMetadata,
+       schema: providerOptionsSchema
+     }) : { success: true, value: void 0 };
+     if (!parsedProviderOptions.success) {
+       throw new InvalidArgumentError({
+         argument: "providerOptions",
+         message: "invalid provider options",
+         cause: parsedProviderOptions.error
+       });
+     }
+     const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
+     const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
      const baseArgs = {
        model: this.modelId,
        input: messages,
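The validation added above leans on the two helpers pulled in by the new imports: safeValidateTypes from @ai-sdk/provider-utils, which checks a value against a Zod schema and returns a success/failure result instead of throwing, and InvalidArgumentError from @ai-sdk/provider, which the model uses to surface a failed check. Below is a minimal standalone sketch of that pattern; the schema and the helper name parseProviderOptions are illustrative, and the package's real schema (providerOptionsSchema) appears near the end of this diff.

import { InvalidArgumentError } from "@ai-sdk/provider";
import { safeValidateTypes } from "@ai-sdk/provider-utils";
import { z } from "zod";

// Illustrative schema; the package's actual schema is providerOptionsSchema below.
const optionsSchema = z.object({
  openai: z.object({ strictSchemas: z.boolean().nullish() }).nullish()
});

function parseProviderOptions(providerMetadata) {
  // safeValidateTypes returns { success: true, value } or { success: false, error }.
  const result = providerMetadata != null
    ? safeValidateTypes({ value: providerMetadata, schema: optionsSchema })
    : { success: true, value: undefined };
  if (!result.success) {
    // Invalid options are reported as an InvalidArgumentError, as in the model code above.
    throw new InvalidArgumentError({
      argument: "providerOptions",
      message: "invalid provider options",
      cause: result.error
    });
  }
  return result.value?.openai;
}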
@@ -1888,7 +1904,7 @@ var OpenAIResponsesLanguageModel = class {
          text: {
            format: responseFormat.schema != null ? {
              type: "json_schema",
-             strict: isStrictJsonSchema,
+             strict: isStrict,
              name: (_c = responseFormat.name) != null ? _c : "response",
              description: responseFormat.description,
              schema: responseFormat.schema
@@ -1896,14 +1912,14 @@ var OpenAIResponsesLanguageModel = class {
          }
        },
        // provider options:
-       metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
-       parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
-       previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
-       store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
-       user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
+       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
+       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
+       previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
+       store: openaiOptions == null ? void 0 : openaiOptions.store,
+       user: openaiOptions == null ? void 0 : openaiOptions.user,
        // model-specific settings:
-       ...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
-         reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
+       ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+         reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
        },
        ...modelConfig.requiredAutoTruncation && {
          truncation: "auto"
@@ -1931,7 +1947,8 @@ var OpenAIResponsesLanguageModel = class {
        case "regular": {
          const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
            mode,
-           strict: true
+           strict: isStrict
+           // TODO support provider options on tools
          });
          return {
            args: {
@@ -1949,8 +1966,8 @@ var OpenAIResponsesLanguageModel = class {
              text: {
                format: mode.schema != null ? {
                  type: "json_schema",
-                 strict: isStrictJsonSchema,
-                 name: (_k = mode.name) != null ? _k : "response",
+                 strict: isStrict,
+                 name: (_d = mode.name) != null ? _d : "response",
                  description: mode.description,
                  schema: mode.schema
                } : { type: "json_object" }
@@ -1970,7 +1987,7 @@ var OpenAIResponsesLanguageModel = class {
                  name: mode.tool.name,
                  description: mode.tool.description,
                  parameters: mode.tool.parameters,
-                 strict: isStrictJsonSchema
+                 strict: isStrict
                }
              ]
            },
@@ -2343,6 +2360,17 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
    return chunk.type === "response.output_text.annotation.added";
  }
+ var providerOptionsSchema = z6.object({
+   openai: z6.object({
+     metadata: z6.any().nullish(),
+     parallelToolCalls: z6.boolean().nullish(),
+     previousResponseId: z6.string().nullish(),
+     store: z6.boolean().nullish(),
+     user: z6.string().nullish(),
+     reasoningEffort: z6.string().nullish(),
+     strictSchemas: z6.boolean().nullish()
+   }).nullish()
+ });
  function getResponsesModelConfig(modelId) {
    if (modelId.startsWith("o")) {
      if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
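For reference, the fields validated by the new providerOptionsSchema are the ones a caller passes under the openai key of the provider options. A hedged usage sketch, assuming an ai package release whose generateText forwards providerOptions to the model as providerMetadata (older releases used experimental_providerMetadata) and assuming the openai.responses() factory for the Responses API model implemented in this file:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai.responses("gpt-4o"),
  prompt: "Summarize the latest release notes.",
  providerOptions: {
    openai: {
      // Keys below correspond to the providerOptionsSchema fields added in this diff.
      parallelToolCalls: false,      // sent as parallel_tool_calls
      store: true,                   // sent as store
      user: "user-1234",             // sent as user
      metadata: { source: "docs" },  // sent as metadata
      strictSchemas: false           // replaces strictJsonSchema; defaults to true
      // reasoningEffort: "low"      // only applied for reasoning ("o*") models
    }
  }
});

With the schema in place, a mistyped or wrongly typed option now fails fast with an InvalidArgumentError instead of being passed through unvalidated as in 1.2.3.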