@ai-sdk/openai 2.0.10 → 2.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only; it reflects the changes between package versions as they appear in their respective public registries.
@@ -2,7 +2,7 @@ import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2Ca
2
2
  import { FetchFunction } from '@ai-sdk/provider-utils';
3
3
  import { z } from 'zod/v4';
4
4
 
5
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
5
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
6
6
  declare const openaiProviderOptions: z.ZodObject<{
7
7
  logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
8
8
  logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
@@ -30,6 +30,8 @@ declare const openaiProviderOptions: z.ZodObject<{
30
30
  medium: "medium";
31
31
  high: "high";
32
32
  }>>;
33
+ promptCacheKey: z.ZodOptional<z.ZodString>;
34
+ safetyIdentifier: z.ZodOptional<z.ZodString>;
33
35
  }, z.core.$strip>;
34
36
  type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
35
37
 
@@ -97,6 +99,15 @@ type OpenAIConfig = {
97
99
  headers: () => Record<string, string | undefined>;
98
100
  fetch?: FetchFunction;
99
101
  generateId?: () => string;
102
+ /**
103
+ * File ID prefixes used to identify file IDs in Responses API.
104
+ * When undefined, all file data is treated as base64 content.
105
+ *
106
+ * Examples:
107
+ * - OpenAI: ['file-'] for IDs like 'file-abc123'
108
+ * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
109
+ */
110
+ fileIdPrefixes?: readonly string[];
100
111
  };
101
112
 
102
113
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
@@ -191,8 +202,7 @@ declare class OpenAISpeechModel implements SpeechModelV2 {
191
202
  doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
192
203
  }
193
204
 
194
- declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview", "gpt-5", "gpt-5-2025-08-07", "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07", "gpt-5-chat-latest"];
195
- type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
205
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
196
206
 
197
207
  declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
198
208
  readonly specificationVersion = "v2";
@@ -229,6 +239,8 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
229
239
  medium: "medium";
230
240
  high: "high";
231
241
  }>>>;
242
+ promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
243
+ safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
232
244
  }, z.core.$strip>;
233
245
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
234
246
 
@@ -2,7 +2,7 @@ import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2Ca
2
2
  import { FetchFunction } from '@ai-sdk/provider-utils';
3
3
  import { z } from 'zod/v4';
4
4
 
5
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
5
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
6
6
  declare const openaiProviderOptions: z.ZodObject<{
7
7
  logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
8
8
  logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
@@ -30,6 +30,8 @@ declare const openaiProviderOptions: z.ZodObject<{
30
30
  medium: "medium";
31
31
  high: "high";
32
32
  }>>;
33
+ promptCacheKey: z.ZodOptional<z.ZodString>;
34
+ safetyIdentifier: z.ZodOptional<z.ZodString>;
33
35
  }, z.core.$strip>;
34
36
  type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
35
37
 
@@ -97,6 +99,15 @@ type OpenAIConfig = {
97
99
  headers: () => Record<string, string | undefined>;
98
100
  fetch?: FetchFunction;
99
101
  generateId?: () => string;
102
+ /**
103
+ * File ID prefixes used to identify file IDs in Responses API.
104
+ * When undefined, all file data is treated as base64 content.
105
+ *
106
+ * Examples:
107
+ * - OpenAI: ['file-'] for IDs like 'file-abc123'
108
+ * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
109
+ */
110
+ fileIdPrefixes?: readonly string[];
100
111
  };
101
112
 
102
113
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
@@ -191,8 +202,7 @@ declare class OpenAISpeechModel implements SpeechModelV2 {
191
202
  doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
192
203
  }
193
204
 
194
- declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview", "gpt-5", "gpt-5-2025-08-07", "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07", "gpt-5-chat-latest"];
195
- type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
205
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
196
206
 
197
207
  declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
198
208
  readonly specificationVersion = "v2";
@@ -229,6 +239,8 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
229
239
  medium: "medium";
230
240
  high: "high";
231
241
  }>>>;
242
+ promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
243
+ safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
232
244
  }, z.core.$strip>;
233
245
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
234
246
 
@@ -342,7 +342,20 @@ var openaiProviderOptions = import_v42.z.object({
342
342
  * Controls the verbosity of the model's responses.
343
343
  * Lower values will result in more concise responses, while higher values will result in more verbose responses.
344
344
  */
345
- textVerbosity: import_v42.z.enum(["low", "medium", "high"]).optional()
345
+ textVerbosity: import_v42.z.enum(["low", "medium", "high"]).optional(),
346
+ /**
347
+ * A cache key for prompt caching. Allows manual control over prompt caching behavior.
348
+ * Useful for improving cache hit rates and working around automatic caching issues.
349
+ */
350
+ promptCacheKey: import_v42.z.string().optional(),
351
+ /**
352
+ * A stable identifier used to help detect users of your application
353
+ * that may be violating OpenAI's usage policies. The IDs should be a
354
+ * string that uniquely identifies each user. We recommend hashing their
355
+ * username or email address, in order to avoid sending us any identifying
356
+ * information.
357
+ */
358
+ safetyIdentifier: import_v42.z.string().optional()
346
359
  });
347
360
 
348
361
  // src/chat/openai-chat-prepare-tools.ts
@@ -613,6 +626,8 @@ var OpenAIChatLanguageModel = class {
613
626
  prediction: openaiOptions.prediction,
614
627
  reasoning_effort: openaiOptions.reasoningEffort,
615
628
  service_tier: openaiOptions.serviceTier,
629
+ prompt_cache_key: openaiOptions.promptCacheKey,
630
+ safety_identifier: openaiOptions.safetyIdentifier,
616
631
  // messages:
617
632
  messages
618
633
  };
@@ -1128,13 +1143,13 @@ var openaiChatChunkSchema = import_v45.z.union([
1128
1143
  openaiErrorDataSchema
1129
1144
  ]);
1130
1145
  function isReasoningModel(modelId) {
1131
- return modelId.startsWith("o") || modelId.startsWith("gpt-5");
1146
+ return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
1132
1147
  }
1133
1148
  function supportsFlexProcessing(modelId) {
1134
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
1149
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
1135
1150
  }
1136
1151
  function supportsPriorityProcessing(modelId) {
1137
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1152
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1138
1153
  }
1139
1154
  function getSystemMessageMode(modelId) {
1140
1155
  var _a, _b;
@@ -1905,6 +1920,8 @@ var OpenAITranscriptionModel = class {
1905
1920
  include: openAIOptions.include,
1906
1921
  language: openAIOptions.language,
1907
1922
  prompt: openAIOptions.prompt,
1923
+ response_format: "verbose_json",
1924
+ // always use verbose_json to get segments
1908
1925
  temperature: openAIOptions.temperature,
1909
1926
  timestamp_granularities: openAIOptions.timestampGranularities
1910
1927
  };
@@ -1920,7 +1937,7 @@ var OpenAITranscriptionModel = class {
1920
1937
  };
1921
1938
  }
1922
1939
  async doGenerate(options) {
1923
- var _a, _b, _c, _d, _e, _f;
1940
+ var _a, _b, _c, _d, _e, _f, _g, _h;
1924
1941
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1925
1942
  const { formData, warnings } = await this.getArgs(options);
1926
1943
  const {
@@ -1944,13 +1961,17 @@ var OpenAITranscriptionModel = class {
1944
1961
  const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
1945
1962
  return {
1946
1963
  text: response.text,
1947
- segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
1964
+ segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
1965
+ text: segment.text,
1966
+ startSecond: segment.start,
1967
+ endSecond: segment.end
1968
+ }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
1948
1969
  text: word.word,
1949
1970
  startSecond: word.start,
1950
1971
  endSecond: word.end
1951
- }))) != null ? _e : [],
1972
+ }))) != null ? _g : [],
1952
1973
  language,
1953
- durationInSeconds: (_f = response.duration) != null ? _f : void 0,
1974
+ durationInSeconds: (_h = response.duration) != null ? _h : void 0,
1954
1975
  warnings,
1955
1976
  response: {
1956
1977
  timestamp: currentDate,
@@ -1971,6 +1992,20 @@ var openaiTranscriptionResponseSchema = import_v412.z.object({
1971
1992
  start: import_v412.z.number(),
1972
1993
  end: import_v412.z.number()
1973
1994
  })
1995
+ ).nullish(),
1996
+ segments: import_v412.z.array(
1997
+ import_v412.z.object({
1998
+ id: import_v412.z.number(),
1999
+ seek: import_v412.z.number(),
2000
+ start: import_v412.z.number(),
2001
+ end: import_v412.z.number(),
2002
+ text: import_v412.z.string(),
2003
+ tokens: import_v412.z.array(import_v412.z.number()),
2004
+ temperature: import_v412.z.number(),
2005
+ avg_logprob: import_v412.z.number(),
2006
+ compression_ratio: import_v412.z.number(),
2007
+ no_speech_prob: import_v412.z.number()
2008
+ })
1974
2009
  ).nullish()
1975
2010
  });
1976
2011
 
@@ -2091,9 +2126,14 @@ var import_provider6 = require("@ai-sdk/provider");
2091
2126
  var import_provider_utils11 = require("@ai-sdk/provider-utils");
2092
2127
  var import_v414 = require("zod/v4");
2093
2128
  var import_provider_utils12 = require("@ai-sdk/provider-utils");
2129
+ function isFileId(data, prefixes) {
2130
+ if (!prefixes) return false;
2131
+ return prefixes.some((prefix) => data.startsWith(prefix));
2132
+ }
2094
2133
  async function convertToOpenAIResponsesMessages({
2095
2134
  prompt,
2096
- systemMessageMode
2135
+ systemMessageMode,
2136
+ fileIdPrefixes
2097
2137
  }) {
2098
2138
  var _a, _b, _c, _d, _e, _f;
2099
2139
  const messages = [];
@@ -2140,7 +2180,7 @@ async function convertToOpenAIResponsesMessages({
2140
2180
  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
2141
2181
  return {
2142
2182
  type: "input_image",
2143
- ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
2183
+ ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2144
2184
  image_url: `data:${mediaType};base64,${(0, import_provider_utils12.convertToBase64)(part.data)}`
2145
2185
  },
2146
2186
  detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
@@ -2153,7 +2193,7 @@ async function convertToOpenAIResponsesMessages({
2153
2193
  }
2154
2194
  return {
2155
2195
  type: "input_file",
2156
- ...typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
2196
+ ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2157
2197
  filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
2158
2198
  file_data: `data:application/pdf;base64,${(0, import_provider_utils12.convertToBase64)(part.data)}`
2159
2199
  }
@@ -2459,7 +2499,8 @@ var OpenAIResponsesLanguageModel = class {
2459
2499
  }
2460
2500
  const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
2461
2501
  prompt,
2462
- systemMessageMode: modelConfig.systemMessageMode
2502
+ systemMessageMode: modelConfig.systemMessageMode,
2503
+ fileIdPrefixes: this.config.fileIdPrefixes
2463
2504
  });
2464
2505
  warnings.push(...messageWarnings);
2465
2506
  const openaiOptions = await (0, import_provider_utils14.parseProviderOptions)({
@@ -2499,6 +2540,8 @@ var OpenAIResponsesLanguageModel = class {
2499
2540
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2500
2541
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
2501
2542
  include: openaiOptions == null ? void 0 : openaiOptions.include,
2543
+ prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
2544
+ safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
2502
2545
  // model-specific settings:
2503
2546
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
2504
2547
  reasoning: {
@@ -2648,7 +2691,18 @@ var OpenAIResponsesLanguageModel = class {
2648
2691
  import_v416.z.object({
2649
2692
  type: import_v416.z.literal("file_search_call"),
2650
2693
  id: import_v416.z.string(),
2651
- status: import_v416.z.string().optional()
2694
+ status: import_v416.z.string().optional(),
2695
+ queries: import_v416.z.array(import_v416.z.string()).nullish(),
2696
+ results: import_v416.z.array(
2697
+ import_v416.z.object({
2698
+ attributes: import_v416.z.object({
2699
+ file_id: import_v416.z.string(),
2700
+ filename: import_v416.z.string(),
2701
+ score: import_v416.z.number(),
2702
+ text: import_v416.z.string()
2703
+ })
2704
+ })
2705
+ ).nullish()
2652
2706
  }),
2653
2707
  import_v416.z.object({
2654
2708
  type: import_v416.z.literal("reasoning"),
@@ -2790,7 +2844,9 @@ var OpenAIResponsesLanguageModel = class {
2790
2844
  toolName: "file_search",
2791
2845
  result: {
2792
2846
  type: "file_search_tool_result",
2793
- status: part.status || "completed"
2847
+ status: part.status || "completed",
2848
+ ...part.queries && { queries: part.queries },
2849
+ ...part.results && { results: part.results }
2794
2850
  },
2795
2851
  providerExecuted: true
2796
2852
  });
@@ -2905,6 +2961,16 @@ var OpenAIResponsesLanguageModel = class {
2905
2961
  id: value.item.id,
2906
2962
  toolName: "computer_use"
2907
2963
  });
2964
+ } else if (value.item.type === "file_search_call") {
2965
+ ongoingToolCalls[value.output_index] = {
2966
+ toolName: "file_search",
2967
+ toolCallId: value.item.id
2968
+ };
2969
+ controller.enqueue({
2970
+ type: "tool-input-start",
2971
+ id: value.item.id,
2972
+ toolName: "file_search"
2973
+ });
2908
2974
  } else if (value.item.type === "message") {
2909
2975
  controller.enqueue({
2910
2976
  type: "text-start",
@@ -2998,6 +3064,32 @@ var OpenAIResponsesLanguageModel = class {
2998
3064
  },
2999
3065
  providerExecuted: true
3000
3066
  });
3067
+ } else if (value.item.type === "file_search_call") {
3068
+ ongoingToolCalls[value.output_index] = void 0;
3069
+ hasToolCalls = true;
3070
+ controller.enqueue({
3071
+ type: "tool-input-end",
3072
+ id: value.item.id
3073
+ });
3074
+ controller.enqueue({
3075
+ type: "tool-call",
3076
+ toolCallId: value.item.id,
3077
+ toolName: "file_search",
3078
+ input: "",
3079
+ providerExecuted: true
3080
+ });
3081
+ controller.enqueue({
3082
+ type: "tool-result",
3083
+ toolCallId: value.item.id,
3084
+ toolName: "file_search",
3085
+ result: {
3086
+ type: "file_search_tool_result",
3087
+ status: value.item.status || "completed",
3088
+ ...value.item.queries && { queries: value.item.queries },
3089
+ ...value.item.results && { results: value.item.results }
3090
+ },
3091
+ providerExecuted: true
3092
+ });
3001
3093
  } else if (value.item.type === "message") {
3002
3094
  controller.enqueue({
3003
3095
  type: "text-end",
@@ -3176,7 +3268,18 @@ var responseOutputItemAddedSchema = import_v416.z.object({
3176
3268
  import_v416.z.object({
3177
3269
  type: import_v416.z.literal("file_search_call"),
3178
3270
  id: import_v416.z.string(),
3179
- status: import_v416.z.string()
3271
+ status: import_v416.z.string(),
3272
+ queries: import_v416.z.array(import_v416.z.string()).nullish(),
3273
+ results: import_v416.z.array(
3274
+ import_v416.z.object({
3275
+ attributes: import_v416.z.object({
3276
+ file_id: import_v416.z.string(),
3277
+ filename: import_v416.z.string(),
3278
+ score: import_v416.z.number(),
3279
+ text: import_v416.z.string()
3280
+ })
3281
+ })
3282
+ ).optional()
3180
3283
  })
3181
3284
  ])
3182
3285
  });
@@ -3214,7 +3317,18 @@ var responseOutputItemDoneSchema = import_v416.z.object({
3214
3317
  import_v416.z.object({
3215
3318
  type: import_v416.z.literal("file_search_call"),
3216
3319
  id: import_v416.z.string(),
3217
- status: import_v416.z.literal("completed")
3320
+ status: import_v416.z.literal("completed"),
3321
+ queries: import_v416.z.array(import_v416.z.string()).nullish(),
3322
+ results: import_v416.z.array(
3323
+ import_v416.z.object({
3324
+ attributes: import_v416.z.object({
3325
+ file_id: import_v416.z.string(),
3326
+ filename: import_v416.z.string(),
3327
+ score: import_v416.z.number(),
3328
+ text: import_v416.z.string()
3329
+ })
3330
+ })
3331
+ ).nullish()
3218
3332
  })
3219
3333
  ])
3220
3334
  });
@@ -3294,6 +3408,13 @@ function isErrorChunk(chunk) {
3294
3408
  return chunk.type === "error";
3295
3409
  }
3296
3410
  function getResponsesModelConfig(modelId) {
3411
+ if (modelId.startsWith("gpt-5-chat")) {
3412
+ return {
3413
+ isReasoningModel: false,
3414
+ systemMessageMode: "system",
3415
+ requiredAutoTruncation: false
3416
+ };
3417
+ }
3297
3418
  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
3298
3419
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
3299
3420
  return {
@@ -3315,10 +3436,10 @@ function getResponsesModelConfig(modelId) {
3315
3436
  };
3316
3437
  }
3317
3438
  function supportsFlexProcessing2(modelId) {
3318
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
3439
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
3319
3440
  }
3320
3441
  function supportsPriorityProcessing2(modelId) {
3321
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3442
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3322
3443
  }
3323
3444
  var openaiResponsesProviderOptionsSchema = import_v416.z.object({
3324
3445
  metadata: import_v416.z.any().nullish(),
@@ -3332,7 +3453,9 @@ var openaiResponsesProviderOptionsSchema = import_v416.z.object({
3332
3453
  reasoningSummary: import_v416.z.string().nullish(),
3333
3454
  serviceTier: import_v416.z.enum(["auto", "flex", "priority"]).nullish(),
3334
3455
  include: import_v416.z.array(import_v416.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
3335
- textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish()
3456
+ textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
3457
+ promptCacheKey: import_v416.z.string().nullish(),
3458
+ safetyIdentifier: import_v416.z.string().nullish()
3336
3459
  });
3337
3460
  // Annotate the CommonJS export names for ESM import in node:
3338
3461
  0 && (module.exports = {