@ai-sdk/openai 1.3.0 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1606,16 +1606,13 @@ var openaiImageResponseSchema = z5.object({
  });

  // src/responses/openai-responses-language-model.ts
- import {
-   InvalidArgumentError
- } from "@ai-sdk/provider";
  import {
    combineHeaders as combineHeaders5,
    createEventSourceResponseHandler as createEventSourceResponseHandler3,
    createJsonResponseHandler as createJsonResponseHandler5,
    generateId as generateId2,
-   postJsonToApi as postJsonToApi5,
-   safeValidateTypes
+   parseProviderOptions,
+   postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
  import { z as z6 } from "zod";

@@ -1859,7 +1856,7 @@ var OpenAIResponsesLanguageModel = class {
    providerMetadata,
    responseFormat
  }) {
-   var _a, _b, _c, _d;
+   var _a, _b, _c;
    const warnings = [];
    const modelConfig = getResponsesModelConfig(this.modelId);
    const type = mode.type;
@@ -1898,19 +1895,12 @@ var OpenAIResponsesLanguageModel = class {
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
-   const parsedProviderOptions = providerMetadata != null ? safeValidateTypes({
-     value: providerMetadata,
-     schema: providerOptionsSchema
-   }) : { success: true, value: void 0 };
-   if (!parsedProviderOptions.success) {
-     throw new InvalidArgumentError({
-       argument: "providerOptions",
-       message: "invalid provider options",
-       cause: parsedProviderOptions.error
-     });
-   }
-   const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
-   const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
+   const openaiOptions = parseProviderOptions({
+     provider: "openai",
+     providerOptions: providerMetadata,
+     schema: openaiResponsesProviderOptionsSchema
+   });
+   const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
    const baseArgs = {
      model: this.modelId,
      input: messages,
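The manual safeValidateTypes call plus InvalidArgumentError throw is replaced here by a single parseProviderOptions call. Based only on the call shape visible in this hunk (a provider key, the providerOptions bag, and a Zod schema, returning the validated slice or undefined), the helper can be approximated as below. This is a minimal sketch, not the actual @ai-sdk/provider-utils implementation; the function name and the plain Error thrown on invalid input are stand-ins.

```ts
import { z } from "zod";

// Minimal sketch of a parseProviderOptions-style helper, inferred from the
// call site above; the real @ai-sdk/provider-utils implementation may differ.
function parseProviderOptionsSketch<SCHEMA extends z.ZodTypeAny>({
  provider,
  providerOptions,
  schema,
}: {
  provider: string;
  providerOptions: Record<string, unknown> | undefined;
  schema: SCHEMA;
}): z.infer<SCHEMA> | undefined {
  // No options supplied for this provider: nothing to validate.
  const options = providerOptions?.[provider];
  if (options == null) {
    return undefined;
  }
  // Validate only the provider's own slice against the flat schema.
  const result = schema.safeParse(options);
  if (!result.success) {
    // Stand-in error; the library's actual error type is not shown in this diff.
    throw new Error(`invalid "${provider}" provider options: ${result.error.message}`);
  }
  return result.data;
}
```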
@@ -1922,7 +1912,7 @@ var OpenAIResponsesLanguageModel = class {
        format: responseFormat.schema != null ? {
          type: "json_schema",
          strict: isStrict,
-           name: (_c = responseFormat.name) != null ? _c : "response",
+           name: (_b = responseFormat.name) != null ? _b : "response",
          description: responseFormat.description,
          schema: responseFormat.schema
        } : { type: "json_object" }
@@ -1984,7 +1974,7 @@ var OpenAIResponsesLanguageModel = class {
        format: mode.schema != null ? {
          type: "json_schema",
          strict: isStrict,
-           name: (_d = mode.name) != null ? _d : "response",
+           name: (_c = mode.name) != null ? _c : "response",
          description: mode.description,
          schema: mode.schema
        } : { type: "json_object" }
@@ -2377,17 +2367,6 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
    return chunk.type === "response.output_text.annotation.added";
  }
- var providerOptionsSchema = z6.object({
-   openai: z6.object({
-     metadata: z6.any().nullish(),
-     parallelToolCalls: z6.boolean().nullish(),
-     previousResponseId: z6.string().nullish(),
-     store: z6.boolean().nullish(),
-     user: z6.string().nullish(),
-     reasoningEffort: z6.string().nullish(),
-     strictSchemas: z6.boolean().nullish()
-   }).nullish()
- });
  function getResponsesModelConfig(modelId) {
    if (modelId.startsWith("o")) {
      if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
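The removed schema wrapped the OpenAI options in an `openai` key because the whole provider-metadata bag was validated at once; since parseProviderOptions selects the provider's slice before validating, the wrapper becomes redundant. Roughly, with made-up option values for illustration:

```ts
// 1.3.0: the entire provider options bag was validated, with "openai" nested inside.
const validatedBefore = { openai: { strictSchemas: false, store: true } };

// 1.3.2: only providerOptions["openai"] is validated, so the schema is flat.
const validatedAfter = { strictSchemas: false, store: true };
```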
@@ -2409,6 +2388,15 @@ function getResponsesModelConfig(modelId) {
      requiredAutoTruncation: false
    };
  }
+ var openaiResponsesProviderOptionsSchema = z6.object({
+   metadata: z6.any().nullish(),
+   parallelToolCalls: z6.boolean().nullish(),
+   previousResponseId: z6.string().nullish(),
+   store: z6.boolean().nullish(),
+   user: z6.string().nullish(),
+   reasoningEffort: z6.string().nullish(),
+   strictSchemas: z6.boolean().nullish()
+ });
  export {
    OpenAIChatLanguageModel,
    OpenAICompletionLanguageModel,
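The fields in the new flat schema are the OpenAI-specific options a caller can pass through to the Responses API model. A hedged usage sketch follows; the generateText call and the openai.responses model helper come from the AI SDK's public API rather than from this diff, and the option values are illustrative only.

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Illustrative only: OpenAI-specific options go under the "openai" key and are
// validated against openaiResponsesProviderOptionsSchema via parseProviderOptions.
const { text } = await generateText({
  model: openai.responses("gpt-4o-mini"),
  prompt: "Summarize the release notes.",
  providerOptions: {
    openai: {
      strictSchemas: false, // opt out of strict JSON schema mode
      store: true,          // ask OpenAI to store the response
      user: "user-1234",    // end-user identifier forwarded to OpenAI
    },
  },
});

console.log(text);
```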