@ai-sdk/openai 1.2.2 → 1.2.4

This diff shows the published contents of these package versions as they appear in their public registry and is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -1611,12 +1611,16 @@ var openaiImageResponseSchema = z5.object({
 });
 
 // src/responses/openai-responses-language-model.ts
+import {
+  InvalidArgumentError
+} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders5,
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler5,
   generateId as generateId2,
-  postJsonToApi as postJsonToApi5
+  postJsonToApi as postJsonToApi5,
+  safeValidateTypes
 } from "@ai-sdk/provider-utils";
 import { z as z6 } from "zod";
 
@@ -1844,7 +1848,7 @@ var OpenAIResponsesLanguageModel = class {
     providerMetadata,
     responseFormat
   }) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+    var _a, _b, _c, _d;
     const warnings = [];
     const modelConfig = getResponsesModelConfig(this.modelId);
     const type = mode.type;
@@ -1883,7 +1887,19 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
+    const parsedProviderOptions = providerMetadata != null ? safeValidateTypes({
+      value: providerMetadata,
+      schema: providerOptionsSchema
+    }) : { success: true, value: void 0 };
+    if (!parsedProviderOptions.success) {
+      throw new InvalidArgumentError({
+        argument: "providerOptions",
+        message: "invalid provider options",
+        cause: parsedProviderOptions.error
+      });
+    }
+    const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
+    const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
     const baseArgs = {
       model: this.modelId,
       input: messages,
@@ -1894,7 +1910,7 @@ var OpenAIResponsesLanguageModel = class {
         text: {
           format: responseFormat.schema != null ? {
             type: "json_schema",
-            strict: isStrictJsonSchema,
+            strict: isStrict,
             name: (_c = responseFormat.name) != null ? _c : "response",
             description: responseFormat.description,
             schema: responseFormat.schema
@@ -1902,14 +1918,14 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
-      metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
-      parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
-      previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
-      store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
-      user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
+      metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
+      parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
+      previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
+      store: openaiOptions == null ? void 0 : openaiOptions.store,
+      user: openaiOptions == null ? void 0 : openaiOptions.user,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
-        reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
+      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+        reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
@@ -1937,7 +1953,8 @@ var OpenAIResponsesLanguageModel = class {
       case "regular": {
         const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
           mode,
-          strict: true
+          strict: isStrict
+          // TODO support provider options on tools
        });
        return {
          args: {
@@ -1955,8 +1972,8 @@ var OpenAIResponsesLanguageModel = class {
             text: {
               format: mode.schema != null ? {
                 type: "json_schema",
-                strict: isStrictJsonSchema,
-                name: (_k = mode.name) != null ? _k : "response",
+                strict: isStrict,
+                name: (_d = mode.name) != null ? _d : "response",
                 description: mode.description,
                 schema: mode.schema
               } : { type: "json_object" }
@@ -1976,7 +1993,7 @@ var OpenAIResponsesLanguageModel = class {
                 name: mode.tool.name,
                 description: mode.tool.description,
                 parameters: mode.tool.parameters,
-                strict: isStrictJsonSchema
+                strict: isStrict
               }
             ]
           },
@@ -2349,6 +2366,17 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+var providerOptionsSchema = z6.object({
+  openai: z6.object({
+    metadata: z6.any().nullish(),
+    parallelToolCalls: z6.boolean().nullish(),
+    previousResponseId: z6.string().nullish(),
+    store: z6.boolean().nullish(),
+    user: z6.string().nullish(),
+    reasoningEffort: z6.string().nullish(),
+    strictSchemas: z6.boolean().nullish()
+  }).nullish()
+});
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {