@ai-sdk/openai 1.3.1 → 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1606,16 +1606,13 @@ var openaiImageResponseSchema = z5.object({
 });
 
 // src/responses/openai-responses-language-model.ts
-import {
-  InvalidArgumentError
-} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders5,
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler5,
   generateId as generateId2,
-  postJsonToApi as postJsonToApi5,
-  safeValidateTypes
+  parseProviderOptions,
+  postJsonToApi as postJsonToApi5
 } from "@ai-sdk/provider-utils";
 import { z as z6 } from "zod";
 
@@ -1859,7 +1856,7 @@ var OpenAIResponsesLanguageModel = class {
     providerMetadata,
     responseFormat
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c;
     const warnings = [];
     const modelConfig = getResponsesModelConfig(this.modelId);
     const type = mode.type;
@@ -1898,19 +1895,12 @@
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const parsedProviderOptions = providerMetadata != null ? safeValidateTypes({
-      value: providerMetadata,
-      schema: providerOptionsSchema
-    }) : { success: true, value: void 0 };
-    if (!parsedProviderOptions.success) {
-      throw new InvalidArgumentError({
-        argument: "providerOptions",
-        message: "invalid provider options",
-        cause: parsedProviderOptions.error
-      });
-    }
-    const openaiOptions = (_a = parsedProviderOptions.value) == null ? void 0 : _a.openai;
-    const isStrict = (_b = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _b : true;
+    const openaiOptions = parseProviderOptions({
+      provider: "openai",
+      providerOptions: providerMetadata,
+      schema: openaiResponsesProviderOptionsSchema
+    });
+    const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
     const baseArgs = {
       model: this.modelId,
       input: messages,
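Functionally, the inline safeValidateTypes / InvalidArgumentError handling is replaced by the parseProviderOptions helper from @ai-sdk/provider-utils. The sketch below approximates what that helper is expected to do, reconstructed from the lines removed in this hunk; the function name parseProviderOptionsSketch is made up for illustration and the real library implementation may differ.

```ts
import { InvalidArgumentError } from "@ai-sdk/provider";
import { safeValidateTypes } from "@ai-sdk/provider-utils";
import { z } from "zod";

// Sketch only: approximates the removed inline logic as a reusable helper.
// The exact error message and signature are assumptions.
function parseProviderOptionsSketch<SCHEMA extends z.ZodTypeAny>({
  provider,
  providerOptions,
  schema,
}: {
  provider: string;
  providerOptions: Record<string, unknown> | undefined;
  schema: SCHEMA;
}): z.infer<SCHEMA> | undefined {
  // Nothing to validate when no options were supplied for this provider.
  if (providerOptions?.[provider] == null) {
    return undefined;
  }
  const result = safeValidateTypes({
    value: providerOptions[provider],
    schema,
  });
  if (!result.success) {
    throw new InvalidArgumentError({
      argument: "providerOptions",
      message: "invalid provider options",
      cause: result.error,
    });
  }
  return result.value;
}
```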
@@ -1922,7 +1912,7 @@
           format: responseFormat.schema != null ? {
             type: "json_schema",
             strict: isStrict,
-            name: (_c = responseFormat.name) != null ? _c : "response",
+            name: (_b = responseFormat.name) != null ? _b : "response",
             description: responseFormat.description,
             schema: responseFormat.schema
           } : { type: "json_object" }
@@ -1934,6 +1924,7 @@
       previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
       store: openaiOptions == null ? void 0 : openaiOptions.store,
       user: openaiOptions == null ? void 0 : openaiOptions.user,
+      instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
       ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
         reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
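The new instructions field forwards the Responses API's instructions parameter from the openai provider options into the request body. A usage sketch follows, assuming the companion ai package's generateText and the openai.responses model factory, neither of which appears in this diff; model id, prompt, and option values are placeholders.

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch: passing the newly supported `instructions` option through
// provider options when calling a Responses API model.
const { text } = await generateText({
  model: openai.responses("gpt-4o"),
  prompt: "Summarize the release notes in one sentence.",
  providerOptions: {
    openai: {
      instructions: "Answer concisely and in plain English.",
      store: false,
    },
  },
});

console.log(text);
```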
@@ -1984,7 +1975,7 @@
           format: mode.schema != null ? {
             type: "json_schema",
             strict: isStrict,
-            name: (_d = mode.name) != null ? _d : "response",
+            name: (_c = mode.name) != null ? _c : "response",
             description: mode.description,
             schema: mode.schema
           } : { type: "json_object" }
@@ -2377,17 +2368,6 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
-var providerOptionsSchema = z6.object({
-  openai: z6.object({
-    metadata: z6.any().nullish(),
-    parallelToolCalls: z6.boolean().nullish(),
-    previousResponseId: z6.string().nullish(),
-    store: z6.boolean().nullish(),
-    user: z6.string().nullish(),
-    reasoningEffort: z6.string().nullish(),
-    strictSchemas: z6.boolean().nullish()
-  }).nullish()
-});
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2409,6 +2389,16 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
+var openaiResponsesProviderOptionsSchema = z6.object({
+  metadata: z6.any().nullish(),
+  parallelToolCalls: z6.boolean().nullish(),
+  previousResponseId: z6.string().nullish(),
+  store: z6.boolean().nullish(),
+  user: z6.string().nullish(),
+  reasoningEffort: z6.string().nullish(),
+  strictSchemas: z6.boolean().nullish(),
+  instructions: z6.string().nullish()
+});
 export {
   OpenAIChatLanguageModel,
   OpenAICompletionLanguageModel,
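The provider options schema moves out of the nested providerOptionsSchema wrapper (removed above) into a flat openaiResponsesProviderOptionsSchema, which parseProviderOptions looks up under the "openai" key in the call added earlier, and it gains the instructions field. Restated with plain zod below (z6 is just the bundle's local alias for zod), together with an illustrative options object that validates against it.

```ts
import { z } from "zod";

// The relocated schema, restated with plain zod. Every field is optional
// via .nullish(), matching the bundled definition above.
const openaiResponsesProviderOptions = z.object({
  metadata: z.any().nullish(),
  parallelToolCalls: z.boolean().nullish(),
  previousResponseId: z.string().nullish(),
  store: z.boolean().nullish(),
  user: z.string().nullish(),
  reasoningEffort: z.string().nullish(),
  strictSchemas: z.boolean().nullish(),
  instructions: z.string().nullish(),
});

// Illustrative values only: an object that passes validation.
const parsed = openaiResponsesProviderOptions.parse({
  strictSchemas: false,
  instructions: "Respond in formal English.",
});
console.log(parsed);
```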