@ai-sdk/openai 1.2.3 → 1.2.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -70,6 +70,8 @@ interface OpenAIChatSettings {
  Enable this if the model that you are using does not support streaming.

  Defaults to `false`.
+
+ @deprecated Use `simulateStreamingMiddleware` instead.
  */
  simulateStreaming?: boolean;
  /**
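
The hunk above deprecates the `simulateStreaming` chat setting in favor of `simulateStreamingMiddleware`. As a rough migration sketch (assuming the `wrapLanguageModel` and `simulateStreamingMiddleware` exports from the `ai` package, which are not part of this diff; the model id is a placeholder):

// before (now deprecated): openai.chat("gpt-4o", { simulateStreaming: true })
import { wrapLanguageModel, simulateStreamingMiddleware } from "ai";
import { openai } from "@ai-sdk/openai";

const model = wrapLanguageModel({
  model: openai.chat("gpt-4o"),               // hypothetical model id
  middleware: simulateStreamingMiddleware(),  // simulates a stream from a non-streaming call
});
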
@@ -1602,6 +1602,7 @@ var openaiImageResponseSchema = import_zod5.z.object({
  });

  // src/responses/openai-responses-language-model.ts
+ var import_provider9 = require("@ai-sdk/provider");
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
  var import_zod6 = require("zod");

@@ -1645,8 +1646,8 @@ function convertToOpenAIResponsesMessages({
  case "user": {
  messages.push({
  role: "user",
- content: content.map((part) => {
- var _a, _b, _c;
+ content: content.map((part, index) => {
+ var _a, _b, _c, _d;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1660,9 +1661,25 @@ function convertToOpenAIResponsesMessages({
  };
  }
  case "file": {
- throw new import_provider7.UnsupportedFunctionalityError({
- functionality: "Image content parts in user messages"
- });
+ if (part.data instanceof URL) {
+ throw new import_provider7.UnsupportedFunctionalityError({
+ functionality: "File URLs in user messages"
+ });
+ }
+ switch (part.mimeType) {
+ case "application/pdf": {
+ return {
+ type: "input_file",
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ };
+ }
+ default: {
+ throw new import_provider7.UnsupportedFunctionalityError({
+ functionality: "Only PDF files are supported in user messages"
+ });
+ }
+ }
  }
  }
  })
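
This hunk teaches the Responses message converter to accept PDF file parts: base64 data is forwarded as an `input_file` with a generated filename, while file URLs and non-PDF types still raise `UnsupportedFunctionalityError`. A minimal usage sketch, assuming the standard AI SDK file-part shape (`type: "file"`, `data`, `mimeType`, optional `filename`); model id, file path, and prompt are placeholders:

import { readFileSync } from "node:fs";
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4o"),
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Summarize this document." },
        {
          type: "file",
          data: readFileSync("./report.pdf"), // forwarded as a data: URL, per the hunk above
          mimeType: "application/pdf",
          filename: "report.pdf",             // optional; falls back to `part-${index}.pdf`
        },
      ],
    },
  ],
});
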
@@ -1825,7 +1842,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata,
  responseFormat
  }) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+ var _a, _b, _c, _d;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
  const type = mode.type;
@@ -1864,7 +1881,19 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
+ const parsedProviderOptions = providerMetadata != null ? (0, import_provider_utils8.safeValidateTypes)({
+ value: providerMetadata,
+ schema: providerOptionsSchema
+ }) : { success: true, value: void 0 };
+ if (!parsedProviderOptions.success) {
+ throw new import_provider9.InvalidArgumentError({
+ argument: "providerOptions",
+ message: "invalid provider options",
+ cause: parsedProviderOptions.error
+ });
+ }
+ const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
+ const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -1875,7 +1904,7 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: responseFormat.schema != null ? {
  type: "json_schema",
- strict: isStrictJsonSchema,
+ strict: isStrict,
  name: (_c = responseFormat.name) != null ? _c : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
@@ -1883,14 +1912,14 @@ var OpenAIResponsesLanguageModel = class {
  }
  },
  // provider options:
- metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
- parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
- previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
- store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
- user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
+ store: openaiOptions == null ? void 0 : openaiOptions.store,
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
  // model-specific settings:
- ...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
- reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
+ ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
@@ -1918,7 +1947,8 @@ var OpenAIResponsesLanguageModel = class {
  case "regular": {
  const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
  mode,
- strict: true
+ strict: isStrict
+ // TODO support provider options on tools
  });
  return {
  args: {
@@ -1936,8 +1966,8 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: mode.schema != null ? {
  type: "json_schema",
- strict: isStrictJsonSchema,
- name: (_k = mode.name) != null ? _k : "response",
+ strict: isStrict,
+ name: (_d = mode.name) != null ? _d : "response",
  description: mode.description,
  schema: mode.schema
  } : { type: "json_object" }
@@ -1957,7 +1987,7 @@ var OpenAIResponsesLanguageModel = class {
  name: mode.tool.name,
  description: mode.tool.description,
  parameters: mode.tool.parameters,
- strict: isStrictJsonSchema
+ strict: isStrict
  }
  ]
  },
@@ -2330,6 +2360,17 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ var providerOptionsSchema = import_zod6.z.object({
+ openai: import_zod6.z.object({
+ metadata: import_zod6.z.any().nullish(),
+ parallelToolCalls: import_zod6.z.boolean().nullish(),
+ previousResponseId: import_zod6.z.string().nullish(),
+ store: import_zod6.z.boolean().nullish(),
+ user: import_zod6.z.string().nullish(),
+ reasoningEffort: import_zod6.z.string().nullish(),
+ strictSchemas: import_zod6.z.boolean().nullish()
+ }).nullish()
+ });
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
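
The new `providerOptionsSchema` above defines the OpenAI-specific options that the Responses model now validates up front, replacing the ad-hoc reads from `providerMetadata` and renaming `strictJsonSchema` to `strictSchemas`. A hedged sketch of supplying these options from a call site, assuming the `providerOptions` plumbing in the `ai` package (`experimental_providerMetadata` on older versions); the model id and values are placeholders:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("o3-mini"),
  prompt: "Summarize the change log.",
  providerOptions: {
    openai: {
      store: false,
      user: "user-123",
      parallelToolCalls: false,
      reasoningEffort: "low", // only forwarded for reasoning models
      strictSchemas: false,   // formerly strictJsonSchema
    },
  },
});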