@ai-sdk/openai 3.0.47 → 3.0.49

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
1
1
  # @ai-sdk/openai
2
2
 
3
+ ## 3.0.49
4
+
5
+ ### Patch Changes
6
+
7
+ - bc01093: fix(openai): support file-url parts in tool output content
8
+
9
+ ## 3.0.48
10
+
11
+ ### Patch Changes
12
+
13
+ - 9c548de: Add `gpt-5.4-mini`, `gpt-5.4-mini-2026-03-17`, `gpt-5.4-nano`, and `gpt-5.4-nano-2026-03-17` models.
14
+ - bcb04df: fix(openai): preserve raw finish reason for failed responses stream events
15
+
16
+ Handle `response.failed` chunks in Responses API streaming so `finishReason.raw` is preserved from `incomplete_details.reason` (e.g. `max_output_tokens`), and map failed-without-reason cases to unified `error` instead of `other`.
17
+
3
18
  ## 3.0.47
4
19
 
5
20
  ### Patch Changes
@@ -374,13 +389,13 @@
374
389
  Before
375
390
 
376
391
  ```ts
377
- model.textEmbeddingModel('my-model-id');
392
+ model.textEmbeddingModel("my-model-id");
378
393
  ```
379
394
 
380
395
  After
381
396
 
382
397
  ```ts
383
- model.embeddingModel('my-model-id');
398
+ model.embeddingModel("my-model-id");
384
399
  ```
385
400
 
386
401
  - 60f4775: fix: remove code for unsupported o1-mini and o1-preview models
@@ -390,15 +405,15 @@
390
405
  - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
391
406
 
392
407
  ```ts
393
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
394
- import { generateText } from 'ai';
408
+ import { openai, type OpenAIChatLanguageModelOptions } from "@ai-sdk/openai";
409
+ import { generateText } from "ai";
395
410
 
396
411
  await generateText({
397
- model: openai.chat('gpt-4o'),
398
- prompt: 'Invent a new holiday and describe its traditions.',
412
+ model: openai.chat("gpt-4o"),
413
+ prompt: "Invent a new holiday and describe its traditions.",
399
414
  providerOptions: {
400
415
  openai: {
401
- user: 'user-123',
416
+ user: "user-123",
402
417
  } satisfies OpenAIChatLanguageModelOptions,
403
418
  },
404
419
  });
@@ -799,13 +814,13 @@
799
814
  Before
800
815
 
801
816
  ```ts
802
- model.textEmbeddingModel('my-model-id');
817
+ model.textEmbeddingModel("my-model-id");
803
818
  ```
804
819
 
805
820
  After
806
821
 
807
822
  ```ts
808
- model.embeddingModel('my-model-id');
823
+ model.embeddingModel("my-model-id");
809
824
  ```
810
825
 
811
826
  - Updated dependencies [8d9e8ad]
@@ -1275,15 +1290,15 @@
1275
1290
  - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
1276
1291
 
1277
1292
  ```ts
1278
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
1279
- import { generateText } from 'ai';
1293
+ import { openai, type OpenAIChatLanguageModelOptions } from "@ai-sdk/openai";
1294
+ import { generateText } from "ai";
1280
1295
 
1281
1296
  await generateText({
1282
- model: openai.chat('gpt-4o'),
1283
- prompt: 'Invent a new holiday and describe its traditions.',
1297
+ model: openai.chat("gpt-4o"),
1298
+ prompt: "Invent a new holiday and describe its traditions.",
1284
1299
  providerOptions: {
1285
1300
  openai: {
1286
- user: 'user-123',
1301
+ user: "user-123",
1287
1302
  } satisfies OpenAIChatLanguageModelOptions,
1288
1303
  },
1289
1304
  });
@@ -1579,7 +1594,7 @@
1579
1594
 
1580
1595
  ```js
1581
1596
  await generateImage({
1582
- model: luma.image('photon-flash-1', {
1597
+ model: luma.image("photon-flash-1", {
1583
1598
  maxImagesPerCall: 5,
1584
1599
  pollIntervalMillis: 500,
1585
1600
  }),
@@ -1592,7 +1607,7 @@
1592
1607
 
1593
1608
  ```js
1594
1609
  await generateImage({
1595
- model: luma.image('photon-flash-1'),
1610
+ model: luma.image("photon-flash-1"),
1596
1611
  prompt,
1597
1612
  n: 10,
1598
1613
  maxImagesPerCall: 5,
@@ -1654,10 +1669,10 @@
1654
1669
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
1655
1670
 
1656
1671
  ```js
1657
- const prompt = 'Santa Claus driving a Cadillac';
1672
+ const prompt = "Santa Claus driving a Cadillac";
1658
1673
 
1659
1674
  const { providerMetadata } = await experimental_generateImage({
1660
- model: openai.image('dall-e-3'),
1675
+ model: openai.image("dall-e-3"),
1661
1676
  prompt,
1662
1677
  });
1663
1678
 
@@ -1956,7 +1971,7 @@
1956
1971
 
1957
1972
  ```js
1958
1973
  await generateImage({
1959
- model: luma.image('photon-flash-1', {
1974
+ model: luma.image("photon-flash-1", {
1960
1975
  maxImagesPerCall: 5,
1961
1976
  pollIntervalMillis: 500,
1962
1977
  }),
@@ -1969,7 +1984,7 @@
1969
1984
 
1970
1985
  ```js
1971
1986
  await generateImage({
1972
- model: luma.image('photon-flash-1'),
1987
+ model: luma.image("photon-flash-1"),
1973
1988
  prompt,
1974
1989
  n: 10,
1975
1990
  maxImagesPerCall: 5,
@@ -2014,10 +2029,10 @@
2014
2029
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
2015
2030
 
2016
2031
  ```js
2017
- const prompt = 'Santa Claus driving a Cadillac';
2032
+ const prompt = "Santa Claus driving a Cadillac";
2018
2033
 
2019
2034
  const { providerMetadata } = await experimental_generateImage({
2020
- model: openai.image('dall-e-3'),
2035
+ model: openai.image("dall-e-3"),
2021
2036
  prompt,
2022
2037
  });
2023
2038
 
package/dist/index.d.mts CHANGED
@@ -3,7 +3,7 @@ import { JSONValue, ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3,
3
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
4
4
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
5
5
 
6
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
7
7
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
8
8
  logitBias?: Record<number, number> | undefined;
9
9
  logprobs?: number | boolean | undefined;
@@ -206,6 +206,28 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
206
206
  } | null | undefined;
207
207
  service_tier?: string | null | undefined;
208
208
  };
209
+ } | {
210
+ type: "response.failed";
211
+ response: {
212
+ error?: {
213
+ message: string;
214
+ code?: string | null | undefined;
215
+ } | null | undefined;
216
+ incomplete_details?: {
217
+ reason: string;
218
+ } | null | undefined;
219
+ usage?: {
220
+ input_tokens: number;
221
+ output_tokens: number;
222
+ input_tokens_details?: {
223
+ cached_tokens?: number | null | undefined;
224
+ } | null | undefined;
225
+ output_tokens_details?: {
226
+ reasoning_tokens?: number | null | undefined;
227
+ } | null | undefined;
228
+ } | null | undefined;
229
+ service_tier?: string | null | undefined;
230
+ };
209
231
  } | {
210
232
  type: "response.created";
211
233
  response: {
@@ -984,7 +1006,7 @@ declare const openaiTools: {
984
1006
  }>;
985
1007
  };
986
1008
 
987
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
1009
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
988
1010
  declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
989
1011
  conversation?: string | null | undefined;
990
1012
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
package/dist/index.d.ts CHANGED
@@ -3,7 +3,7 @@ import { JSONValue, ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3,
3
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
4
4
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
5
5
 
6
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
7
7
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
8
8
  logitBias?: Record<number, number> | undefined;
9
9
  logprobs?: number | boolean | undefined;
@@ -206,6 +206,28 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
206
206
  } | null | undefined;
207
207
  service_tier?: string | null | undefined;
208
208
  };
209
+ } | {
210
+ type: "response.failed";
211
+ response: {
212
+ error?: {
213
+ message: string;
214
+ code?: string | null | undefined;
215
+ } | null | undefined;
216
+ incomplete_details?: {
217
+ reason: string;
218
+ } | null | undefined;
219
+ usage?: {
220
+ input_tokens: number;
221
+ output_tokens: number;
222
+ input_tokens_details?: {
223
+ cached_tokens?: number | null | undefined;
224
+ } | null | undefined;
225
+ output_tokens_details?: {
226
+ reasoning_tokens?: number | null | undefined;
227
+ } | null | undefined;
228
+ } | null | undefined;
229
+ service_tier?: string | null | undefined;
230
+ };
209
231
  } | {
210
232
  type: "response.created";
211
233
  response: {
@@ -984,7 +1006,7 @@ declare const openaiTools: {
984
1006
  }>;
985
1007
  };
986
1008
 
987
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
1009
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
988
1010
  declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
989
1011
  conversation?: string | null | undefined;
990
1012
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
package/dist/index.js CHANGED
@@ -55,7 +55,7 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
55
55
  // src/openai-language-model-capabilities.ts
56
56
  function getOpenAILanguageModelCapabilities(modelId) {
57
57
  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
58
- const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
58
+ const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") && !modelId.startsWith("gpt-5.4-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
59
59
  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
60
60
  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2") || modelId.startsWith("gpt-5.3") || modelId.startsWith("gpt-5.4");
61
61
  const systemMessageMode = isReasoningModel ? "developer" : "system";
@@ -3134,6 +3134,11 @@ async function convertToOpenAIResponsesInput({
3134
3134
  filename: (_a2 = item.filename) != null ? _a2 : "data",
3135
3135
  file_data: `data:${item.mediaType};base64,${item.data}`
3136
3136
  };
3137
+ case "file-url":
3138
+ return {
3139
+ type: "input_file",
3140
+ file_url: item.url
3141
+ };
3137
3142
  default:
3138
3143
  warnings.push({
3139
3144
  type: "other",
@@ -3192,6 +3197,12 @@ async function convertToOpenAIResponsesInput({
3192
3197
  file_data: `data:${item.mediaType};base64,${item.data}`
3193
3198
  };
3194
3199
  }
3200
+ case "file-url": {
3201
+ return {
3202
+ type: "input_file",
3203
+ file_url: item.url
3204
+ };
3205
+ }
3195
3206
  default: {
3196
3207
  warnings.push({
3197
3208
  type: "other",
@@ -3299,6 +3310,23 @@ var openaiResponsesChunkSchema = (0, import_provider_utils26.lazySchema)(
3299
3310
  service_tier: import_v421.z.string().nullish()
3300
3311
  })
3301
3312
  }),
3313
+ import_v421.z.object({
3314
+ type: import_v421.z.literal("response.failed"),
3315
+ response: import_v421.z.object({
3316
+ error: import_v421.z.object({
3317
+ code: import_v421.z.string().nullish(),
3318
+ message: import_v421.z.string()
3319
+ }).nullish(),
3320
+ incomplete_details: import_v421.z.object({ reason: import_v421.z.string() }).nullish(),
3321
+ usage: import_v421.z.object({
3322
+ input_tokens: import_v421.z.number(),
3323
+ input_tokens_details: import_v421.z.object({ cached_tokens: import_v421.z.number().nullish() }).nullish(),
3324
+ output_tokens: import_v421.z.number(),
3325
+ output_tokens_details: import_v421.z.object({ reasoning_tokens: import_v421.z.number().nullish() }).nullish()
3326
+ }).nullish(),
3327
+ service_tier: import_v421.z.string().nullish()
3328
+ })
3329
+ }),
3302
3330
  import_v421.z.object({
3303
3331
  type: import_v421.z.literal("response.created"),
3304
3332
  response: import_v421.z.object({
@@ -4116,6 +4144,10 @@ var openaiResponsesReasoningModelIds = [
4116
4144
  "gpt-5.3-codex",
4117
4145
  "gpt-5.4",
4118
4146
  "gpt-5.4-2026-03-05",
4147
+ "gpt-5.4-mini",
4148
+ "gpt-5.4-mini-2026-03-17",
4149
+ "gpt-5.4-nano",
4150
+ "gpt-5.4-nano-2026-03-17",
4119
4151
  "gpt-5.4-pro",
4120
4152
  "gpt-5.4-pro-2026-03-05"
4121
4153
  ];
@@ -5369,7 +5401,7 @@ var OpenAIResponsesLanguageModel = class {
5369
5401
  controller.enqueue({ type: "stream-start", warnings });
5370
5402
  },
5371
5403
  transform(chunk, controller) {
5372
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J;
5404
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J, _K, _L;
5373
5405
  if (options.includeRawChunks) {
5374
5406
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
5375
5407
  }
@@ -6094,13 +6126,23 @@ var OpenAIResponsesLanguageModel = class {
6094
6126
  if (typeof value.response.service_tier === "string") {
6095
6127
  serviceTier = value.response.service_tier;
6096
6128
  }
6129
+ } else if (isResponseFailedChunk(value)) {
6130
+ const incompleteReason = (_y = value.response.incomplete_details) == null ? void 0 : _y.reason;
6131
+ finishReason = {
6132
+ unified: incompleteReason ? mapOpenAIResponseFinishReason({
6133
+ finishReason: incompleteReason,
6134
+ hasFunctionCall
6135
+ }) : "error",
6136
+ raw: incompleteReason != null ? incompleteReason : "error"
6137
+ };
6138
+ usage = (_z = value.response.usage) != null ? _z : void 0;
6097
6139
  } else if (isResponseAnnotationAddedChunk(value)) {
6098
6140
  ongoingAnnotations.push(value.annotation);
6099
6141
  if (value.annotation.type === "url_citation") {
6100
6142
  controller.enqueue({
6101
6143
  type: "source",
6102
6144
  sourceType: "url",
6103
- id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : (0, import_provider_utils29.generateId)(),
6145
+ id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : (0, import_provider_utils29.generateId)(),
6104
6146
  url: value.annotation.url,
6105
6147
  title: value.annotation.title
6106
6148
  });
@@ -6108,7 +6150,7 @@ var OpenAIResponsesLanguageModel = class {
6108
6150
  controller.enqueue({
6109
6151
  type: "source",
6110
6152
  sourceType: "document",
6111
- id: (_D = (_C = (_B = self.config).generateId) == null ? void 0 : _C.call(_B)) != null ? _D : (0, import_provider_utils29.generateId)(),
6153
+ id: (_F = (_E = (_D = self.config).generateId) == null ? void 0 : _E.call(_D)) != null ? _F : (0, import_provider_utils29.generateId)(),
6112
6154
  mediaType: "text/plain",
6113
6155
  title: value.annotation.filename,
6114
6156
  filename: value.annotation.filename,
@@ -6124,7 +6166,7 @@ var OpenAIResponsesLanguageModel = class {
6124
6166
  controller.enqueue({
6125
6167
  type: "source",
6126
6168
  sourceType: "document",
6127
- id: (_G = (_F = (_E = self.config).generateId) == null ? void 0 : _F.call(_E)) != null ? _G : (0, import_provider_utils29.generateId)(),
6169
+ id: (_I = (_H = (_G = self.config).generateId) == null ? void 0 : _H.call(_G)) != null ? _I : (0, import_provider_utils29.generateId)(),
6128
6170
  mediaType: "text/plain",
6129
6171
  title: value.annotation.filename,
6130
6172
  filename: value.annotation.filename,
@@ -6140,7 +6182,7 @@ var OpenAIResponsesLanguageModel = class {
6140
6182
  controller.enqueue({
6141
6183
  type: "source",
6142
6184
  sourceType: "document",
6143
- id: (_J = (_I = (_H = self.config).generateId) == null ? void 0 : _I.call(_H)) != null ? _J : (0, import_provider_utils29.generateId)(),
6185
+ id: (_L = (_K = (_J = self.config).generateId) == null ? void 0 : _K.call(_J)) != null ? _L : (0, import_provider_utils29.generateId)(),
6144
6186
  mediaType: "application/octet-stream",
6145
6187
  title: value.annotation.file_id,
6146
6188
  filename: value.annotation.file_id,
@@ -6188,6 +6230,9 @@ function isResponseOutputItemDoneChunk(chunk) {
6188
6230
  function isResponseFinishedChunk(chunk) {
6189
6231
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
6190
6232
  }
6233
+ function isResponseFailedChunk(chunk) {
6234
+ return chunk.type === "response.failed";
6235
+ }
6191
6236
  function isResponseCreatedChunk(chunk) {
6192
6237
  return chunk.type === "response.created";
6193
6238
  }
@@ -6602,7 +6647,7 @@ var OpenAITranscriptionModel = class {
6602
6647
  };
6603
6648
 
6604
6649
  // src/version.ts
6605
- var VERSION = true ? "3.0.47" : "0.0.0-test";
6650
+ var VERSION = true ? "3.0.49" : "0.0.0-test";
6606
6651
 
6607
6652
  // src/openai-provider.ts
6608
6653
  function createOpenAI(options = {}) {