@ai-sdk/openai 1.2.3 → 1.2.5

package/dist/index.mjs CHANGED
@@ -1611,12 +1611,16 @@ var openaiImageResponseSchema = z5.object({
 });
 
 // src/responses/openai-responses-language-model.ts
+import {
+  InvalidArgumentError
+} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders5,
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler5,
   generateId as generateId2,
-  postJsonToApi as postJsonToApi5
+  postJsonToApi as postJsonToApi5,
+  safeValidateTypes
 } from "@ai-sdk/provider-utils";
 import { z as z6 } from "zod";
 
@@ -1662,8 +1666,8 @@ function convertToOpenAIResponsesMessages({
       case "user": {
         messages.push({
           role: "user",
-          content: content.map((part) => {
-            var _a, _b, _c;
+          content: content.map((part, index) => {
+            var _a, _b, _c, _d;
             switch (part.type) {
               case "text": {
                 return { type: "input_text", text: part.text };
@@ -1677,9 +1681,25 @@ function convertToOpenAIResponsesMessages({
                 };
               }
               case "file": {
-                throw new UnsupportedFunctionalityError6({
-                  functionality: "Image content parts in user messages"
-                });
+                if (part.data instanceof URL) {
+                  throw new UnsupportedFunctionalityError6({
+                    functionality: "File URLs in user messages"
+                  });
+                }
+                switch (part.mimeType) {
+                  case "application/pdf": {
+                    return {
+                      type: "input_file",
+                      filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
+                      file_data: `data:application/pdf;base64,${part.data}`
+                    };
+                  }
+                  default: {
+                    throw new UnsupportedFunctionalityError6({
+                      functionality: "Only PDF files are supported in user messages"
+                    });
+                  }
+                }
               }
             }
           })
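The hunk above replaces the blanket rejection of file parts with PDF support: base64 data is forwarded to the Responses API as an "input_file" part carrying a data URL, while file URLs and non-PDF MIME types still raise UnsupportedFunctionalityError. A minimal usage sketch, assuming the AI SDK's generateText file-part call shape; the model id, file name, and path are placeholders, not taken from this diff:

```js
import fs from "node:fs";
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Hypothetical example: a user message carrying a PDF as a base64 file part.
// The provider converts it into { type: "input_file", filename, file_data: "data:application/pdf;base64,..." }.
const { text } = await generateText({
  model: openai.responses("gpt-4o"),
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Summarize this document in three bullet points." },
        {
          type: "file",
          mimeType: "application/pdf",
          filename: "report.pdf", // optional; falls back to `part-${index}.pdf`
          data: fs.readFileSync("./report.pdf").toString("base64")
        }
      ]
    }
  ]
});
```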
@@ -1844,7 +1864,7 @@ var OpenAIResponsesLanguageModel = class {
     providerMetadata,
     responseFormat
   }) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+    var _a, _b, _c, _d;
     const warnings = [];
     const modelConfig = getResponsesModelConfig(this.modelId);
     const type = mode.type;
@@ -1883,7 +1903,19 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const isStrictJsonSchema = (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.strictJsonSchema) != null ? _b : true;
+    const parsedProviderOptions = providerMetadata != null ? safeValidateTypes({
+      value: providerMetadata,
+      schema: providerOptionsSchema
+    }) : { success: true, value: void 0 };
+    if (!parsedProviderOptions.success) {
+      throw new InvalidArgumentError({
+        argument: "providerOptions",
+        message: "invalid provider options",
+        cause: parsedProviderOptions.error
+      });
+    }
+    const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
+    const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
     const baseArgs = {
       model: this.modelId,
       input: messages,
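Provider metadata is now validated up front: instead of reading strictJsonSchema off providerMetadata.openai with optional chains, the model runs the whole object through safeValidateTypes against providerOptionsSchema (added near the end of this diff) and throws InvalidArgumentError for malformed input; the flag is also renamed to strictSchemas and still defaults to true. A hedged sketch of passing the option from user code, assuming the AI SDK's providerOptions plumbing (older releases used experimental_providerMetadata):

```js
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Hypothetical example: opt out of strict JSON schema mode for one call.
// A wrongly typed value (e.g. strictSchemas: "no") is now rejected with
// InvalidArgumentError for "providerOptions" before any request is sent.
await generateText({
  model: openai.responses("gpt-4o"),
  prompt: "List three uses for a paperclip as JSON.",
  providerOptions: {
    openai: { strictSchemas: false }
  }
});
```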
@@ -1894,7 +1926,7 @@ var OpenAIResponsesLanguageModel = class {
       text: {
         format: responseFormat.schema != null ? {
           type: "json_schema",
-          strict: isStrictJsonSchema,
+          strict: isStrict,
           name: (_c = responseFormat.name) != null ? _c : "response",
           description: responseFormat.description,
           schema: responseFormat.schema
@@ -1902,14 +1934,14 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
-      metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
-      parallel_tool_calls: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.parallelToolCalls,
-      previous_response_id: (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.previousResponseId,
-      store: (_g = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _g.store,
-      user: (_h = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _h.user,
+      metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
+      parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
+      previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
+      store: openaiOptions == null ? void 0 : openaiOptions.store,
+      user: openaiOptions == null ? void 0 : openaiOptions.user,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && ((_i = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _i.reasoningEffort) != null && {
-        reasoning: { effort: (_j = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _j.reasoningEffort }
+      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+        reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
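With the options parsed once, the request body fields (metadata, parallel_tool_calls, previous_response_id, store, user, and reasoning.effort for reasoning models) are read from the validated openaiOptions object instead of repeated optional chains; behaviour is unchanged when no options are given. For illustration, a hedged sketch combining several of these options, using the same assumed call shape as above (the response id is a placeholder):

```js
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Hypothetical example: reasoning effort plus response chaining and storage.
// For a reasoning model, reasoningEffort maps to the Responses API
// `reasoning: { effort }` field; the other keys map to snake_case body fields.
await generateText({
  model: openai.responses("o3-mini"),
  prompt: "Prove that the square root of 2 is irrational.",
  providerOptions: {
    openai: {
      reasoningEffort: "low",
      store: true,
      previousResponseId: "resp_123" // placeholder id
    }
  }
});
```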
@@ -1937,7 +1969,8 @@ var OpenAIResponsesLanguageModel = class {
       case "regular": {
         const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
           mode,
-          strict: true
+          strict: isStrict
+          // TODO support provider options on tools
         });
         return {
           args: {
@@ -1955,8 +1988,8 @@ var OpenAIResponsesLanguageModel = class {
           text: {
             format: mode.schema != null ? {
               type: "json_schema",
-              strict: isStrictJsonSchema,
-              name: (_k = mode.name) != null ? _k : "response",
+              strict: isStrict,
+              name: (_d = mode.name) != null ? _d : "response",
               description: mode.description,
               schema: mode.schema
             } : { type: "json_object" }
@@ -1976,7 +2009,7 @@ var OpenAIResponsesLanguageModel = class {
               name: mode.tool.name,
               description: mode.tool.description,
               parameters: mode.tool.parameters,
-              strict: isStrictJsonSchema
+              strict: isStrict
             }
           ]
         },
@@ -2349,6 +2382,17 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+var providerOptionsSchema = z6.object({
+  openai: z6.object({
+    metadata: z6.any().nullish(),
+    parallelToolCalls: z6.boolean().nullish(),
+    previousResponseId: z6.string().nullish(),
+    store: z6.boolean().nullish(),
+    user: z6.string().nullish(),
+    reasoningEffort: z6.string().nullish(),
+    strictSchemas: z6.boolean().nullish()
+  }).nullish()
+});
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
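providerOptionsSchema is the zod schema that the safeValidateTypes call earlier in this diff checks provider metadata against; every field is nullish, so omitting options stays valid and only wrongly typed values are rejected. A small standalone sketch of that behaviour, re-creating a slice of the schema with plain zod (the return shape of safeValidateTypes is taken from its use in this diff):

```js
import { safeValidateTypes } from "@ai-sdk/provider-utils";
import { z } from "zod";

// Slice of the schema from this diff, for illustration only.
const providerOptionsSchema = z.object({
  openai: z
    .object({
      strictSchemas: z.boolean().nullish(),
      reasoningEffort: z.string().nullish()
    })
    .nullish()
});

// Accepted: all fields are optional.
const ok = safeValidateTypes({
  value: { openai: { strictSchemas: false } },
  schema: providerOptionsSchema
});
// ok.success === true, ok.value is the parsed options object

// Rejected: a wrong type yields { success: false, error }, which the language
// model wraps in InvalidArgumentError with argument "providerOptions".
const bad = safeValidateTypes({
  value: { openai: { strictSchemas: "no" } },
  schema: providerOptionsSchema
});
// bad.success === false
```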