@ai-sdk/openai 1.2.3 → 1.2.5

This diff shows the changes between publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
@@ -1605,12 +1605,16 @@ var openaiImageResponseSchema = z5.object({
 });
 
 // src/responses/openai-responses-language-model.ts
+import {
+  InvalidArgumentError
+} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders5,
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler5,
   generateId as generateId2,
-  postJsonToApi as postJsonToApi5
+  postJsonToApi as postJsonToApi5,
+  safeValidateTypes
 } from "@ai-sdk/provider-utils";
 import { z as z6 } from "zod";
 
@@ -1656,8 +1660,8 @@ function convertToOpenAIResponsesMessages({
       case "user": {
         messages.push({
           role: "user",
-          content: content.map((part) => {
-            var _a, _b, _c;
+          content: content.map((part, index) => {
+            var _a, _b, _c, _d;
             switch (part.type) {
               case "text": {
                 return { type: "input_text", text: part.text };
@@ -1671,9 +1675,25 @@ function convertToOpenAIResponsesMessages({
                 };
               }
               case "file": {
-                throw new UnsupportedFunctionalityError6({
-                  functionality: "Image content parts in user messages"
-                });
+                if (part.data instanceof URL) {
+                  throw new UnsupportedFunctionalityError6({
+                    functionality: "File URLs in user messages"
+                  });
+                }
+                switch (part.mimeType) {
+                  case "application/pdf": {
+                    return {
+                      type: "input_file",
+                      filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
+                      file_data: `data:application/pdf;base64,${part.data}`
+                    };
+                  }
+                  default: {
+                    throw new UnsupportedFunctionalityError6({
+                      functionality: "Only PDF files are supported in user messages"
+                    });
+                  }
+                }
               }
             }
           })
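
The added "file" branch above converts PDF file parts in user messages into the Responses API's input_file format (a base64 data URL plus a filename that falls back to part-${index}.pdf). A minimal sketch of how such a part could be sent from application code, assuming the ai package's generateText and the openai.responses model factory; the model id, file path, and prompt are illustrative and not taken from this diff:

import { readFileSync } from "node:fs";
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch only: model id and file path are assumptions for illustration.
const result = await generateText({
  model: openai.responses("gpt-4o"),
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Summarize this document." },
        {
          type: "file",
          // Binary data is converted to a base64 string by the SDK core
          // before the provider builds the data URL shown in the diff above.
          data: readFileSync("./report.pdf"),
          mimeType: "application/pdf",
          filename: "report.pdf" // optional; otherwise part-${index}.pdf is used
        }
      ]
    }
  ]
});

console.log(result.text);
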
@@ -1838,7 +1858,7 @@ var OpenAIResponsesLanguageModel = class {
     providerMetadata,
     responseFormat
   }) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+    var _a, _b, _c, _d;
     const warnings = [];
     const modelConfig = getResponsesModelConfig(this.modelId);
     const type = mode.type;
@@ -1877,7 +1897,19 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
+    const parsedProviderOptions = providerMetadata != null ? safeValidateTypes({
+      value: providerMetadata,
+      schema: providerOptionsSchema
+    }) : { success: true, value: void 0 };
+    if (!parsedProviderOptions.success) {
+      throw new InvalidArgumentError({
+        argument: "providerOptions",
+        message: "invalid provider options",
+        cause: parsedProviderOptions.error
+      });
+    }
+    const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
+    const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
     const baseArgs = {
       model: this.modelId,
       input: messages,
@@ -1888,7 +1920,7 @@ var OpenAIResponsesLanguageModel = class {
         text: {
           format: responseFormat.schema != null ? {
             type: "json_schema",
-            strict: isStrictJsonSchema,
+            strict: isStrict,
             name: (_c = responseFormat.name) != null ? _c : "response",
             description: responseFormat.description,
             schema: responseFormat.schema
@@ -1896,14 +1928,14 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
-      metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
-      parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
-      previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
-      store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
-      user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
+      metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
+      parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
+      previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
+      store: openaiOptions == null ? void 0 : openaiOptions.store,
+      user: openaiOptions == null ? void 0 : openaiOptions.user,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
-        reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
+      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+        reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
@@ -1931,7 +1963,8 @@ var OpenAIResponsesLanguageModel = class {
       case "regular": {
         const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
           mode,
-          strict: true
+          strict: isStrict
+          // TODO support provider options on tools
         });
         return {
           args: {
@@ -1949,8 +1982,8 @@ var OpenAIResponsesLanguageModel = class {
             text: {
               format: mode.schema != null ? {
                 type: "json_schema",
-                strict: isStrictJsonSchema,
-                name: (_k = mode.name) != null ? _k : "response",
+                strict: isStrict,
+                name: (_d = mode.name) != null ? _d : "response",
                 description: mode.description,
                 schema: mode.schema
               } : { type: "json_object" }
@@ -1970,7 +2003,7 @@ var OpenAIResponsesLanguageModel = class {
                 name: mode.tool.name,
                 description: mode.tool.description,
                 parameters: mode.tool.parameters,
-                strict: isStrictJsonSchema
+                strict: isStrict
               }
             ]
           },
@@ -2343,6 +2376,17 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+var providerOptionsSchema = z6.object({
+  openai: z6.object({
+    metadata: z6.any().nullish(),
+    parallelToolCalls: z6.boolean().nullish(),
+    previousResponseId: z6.string().nullish(),
+    store: z6.boolean().nullish(),
+    user: z6.string().nullish(),
+    reasoningEffort: z6.string().nullish(),
+    strictSchemas: z6.boolean().nullish()
+  }).nullish()
+});
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {