@ai-sdk/openai 3.0.47 → 3.0.48

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
1
1
  # @ai-sdk/openai
2
2
 
3
+ ## 3.0.48
4
+
5
+ ### Patch Changes
6
+
7
+ - 9c548de: Add `gpt-5.4-mini`, `gpt-5.4-mini-2026-03-17`, `gpt-5.4-nano`, and `gpt-5.4-nano-2026-03-17` models.
8
+ - bcb04df: fix(openai): preserve raw finish reason for failed responses stream events
9
+
10
+ Handle `response.failed` chunks in Responses API streaming so `finishReason.raw` is preserved from `incomplete_details.reason` (e.g. `max_output_tokens`), and map failed-without-reason cases to unified `error` instead of `other`.
11
+
3
12
  ## 3.0.47
4
13
 
5
14
  ### Patch Changes
@@ -374,13 +383,13 @@
374
383
  Before
375
384
 
376
385
  ```ts
377
- model.textEmbeddingModel('my-model-id');
386
+ model.textEmbeddingModel("my-model-id");
378
387
  ```
379
388
 
380
389
  After
381
390
 
382
391
  ```ts
383
- model.embeddingModel('my-model-id');
392
+ model.embeddingModel("my-model-id");
384
393
  ```
385
394
 
386
395
  - 60f4775: fix: remove code for unsupported o1-mini and o1-preview models
@@ -390,15 +399,15 @@
390
399
  - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
391
400
 
392
401
  ```ts
393
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
394
- import { generateText } from 'ai';
402
+ import { openai, type OpenAIChatLanguageModelOptions } from "@ai-sdk/openai";
403
+ import { generateText } from "ai";
395
404
 
396
405
  await generateText({
397
- model: openai.chat('gpt-4o'),
398
- prompt: 'Invent a new holiday and describe its traditions.',
406
+ model: openai.chat("gpt-4o"),
407
+ prompt: "Invent a new holiday and describe its traditions.",
399
408
  providerOptions: {
400
409
  openai: {
401
- user: 'user-123',
410
+ user: "user-123",
402
411
  } satisfies OpenAIChatLanguageModelOptions,
403
412
  },
404
413
  });
@@ -799,13 +808,13 @@
799
808
  Before
800
809
 
801
810
  ```ts
802
- model.textEmbeddingModel('my-model-id');
811
+ model.textEmbeddingModel("my-model-id");
803
812
  ```
804
813
 
805
814
  After
806
815
 
807
816
  ```ts
808
- model.embeddingModel('my-model-id');
817
+ model.embeddingModel("my-model-id");
809
818
  ```
810
819
 
811
820
  - Updated dependencies [8d9e8ad]
@@ -1275,15 +1284,15 @@
1275
1284
  - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
1276
1285
 
1277
1286
  ```ts
1278
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
1279
- import { generateText } from 'ai';
1287
+ import { openai, type OpenAIChatLanguageModelOptions } from "@ai-sdk/openai";
1288
+ import { generateText } from "ai";
1280
1289
 
1281
1290
  await generateText({
1282
- model: openai.chat('gpt-4o'),
1283
- prompt: 'Invent a new holiday and describe its traditions.',
1291
+ model: openai.chat("gpt-4o"),
1292
+ prompt: "Invent a new holiday and describe its traditions.",
1284
1293
  providerOptions: {
1285
1294
  openai: {
1286
- user: 'user-123',
1295
+ user: "user-123",
1287
1296
  } satisfies OpenAIChatLanguageModelOptions,
1288
1297
  },
1289
1298
  });
@@ -1579,7 +1588,7 @@
1579
1588
 
1580
1589
  ```js
1581
1590
  await generateImage({
1582
- model: luma.image('photon-flash-1', {
1591
+ model: luma.image("photon-flash-1", {
1583
1592
  maxImagesPerCall: 5,
1584
1593
  pollIntervalMillis: 500,
1585
1594
  }),
@@ -1592,7 +1601,7 @@
1592
1601
 
1593
1602
  ```js
1594
1603
  await generateImage({
1595
- model: luma.image('photon-flash-1'),
1604
+ model: luma.image("photon-flash-1"),
1596
1605
  prompt,
1597
1606
  n: 10,
1598
1607
  maxImagesPerCall: 5,
@@ -1654,10 +1663,10 @@
1654
1663
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
1655
1664
 
1656
1665
  ```js
1657
- const prompt = 'Santa Claus driving a Cadillac';
1666
+ const prompt = "Santa Claus driving a Cadillac";
1658
1667
 
1659
1668
  const { providerMetadata } = await experimental_generateImage({
1660
- model: openai.image('dall-e-3'),
1669
+ model: openai.image("dall-e-3"),
1661
1670
  prompt,
1662
1671
  });
1663
1672
 
@@ -1956,7 +1965,7 @@
1956
1965
 
1957
1966
  ```js
1958
1967
  await generateImage({
1959
- model: luma.image('photon-flash-1', {
1968
+ model: luma.image("photon-flash-1", {
1960
1969
  maxImagesPerCall: 5,
1961
1970
  pollIntervalMillis: 500,
1962
1971
  }),
@@ -1969,7 +1978,7 @@
1969
1978
 
1970
1979
  ```js
1971
1980
  await generateImage({
1972
- model: luma.image('photon-flash-1'),
1981
+ model: luma.image("photon-flash-1"),
1973
1982
  prompt,
1974
1983
  n: 10,
1975
1984
  maxImagesPerCall: 5,
@@ -2014,10 +2023,10 @@
2014
2023
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
2015
2024
 
2016
2025
  ```js
2017
- const prompt = 'Santa Claus driving a Cadillac';
2026
+ const prompt = "Santa Claus driving a Cadillac";
2018
2027
 
2019
2028
  const { providerMetadata } = await experimental_generateImage({
2020
- model: openai.image('dall-e-3'),
2029
+ model: openai.image("dall-e-3"),
2021
2030
  prompt,
2022
2031
  });
2023
2032
 
package/dist/index.d.mts CHANGED
@@ -3,7 +3,7 @@ import { JSONValue, ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3,
3
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
4
4
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
5
5
 
6
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
7
7
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
8
8
  logitBias?: Record<number, number> | undefined;
9
9
  logprobs?: number | boolean | undefined;
@@ -206,6 +206,28 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
206
206
  } | null | undefined;
207
207
  service_tier?: string | null | undefined;
208
208
  };
209
+ } | {
210
+ type: "response.failed";
211
+ response: {
212
+ error?: {
213
+ message: string;
214
+ code?: string | null | undefined;
215
+ } | null | undefined;
216
+ incomplete_details?: {
217
+ reason: string;
218
+ } | null | undefined;
219
+ usage?: {
220
+ input_tokens: number;
221
+ output_tokens: number;
222
+ input_tokens_details?: {
223
+ cached_tokens?: number | null | undefined;
224
+ } | null | undefined;
225
+ output_tokens_details?: {
226
+ reasoning_tokens?: number | null | undefined;
227
+ } | null | undefined;
228
+ } | null | undefined;
229
+ service_tier?: string | null | undefined;
230
+ };
209
231
  } | {
210
232
  type: "response.created";
211
233
  response: {
@@ -984,7 +1006,7 @@ declare const openaiTools: {
984
1006
  }>;
985
1007
  };
986
1008
 
987
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
1009
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
988
1010
  declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
989
1011
  conversation?: string | null | undefined;
990
1012
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
package/dist/index.d.ts CHANGED
@@ -3,7 +3,7 @@ import { JSONValue, ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3,
3
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
4
4
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
5
5
 
6
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
7
7
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
8
8
  logitBias?: Record<number, number> | undefined;
9
9
  logprobs?: number | boolean | undefined;
@@ -206,6 +206,28 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
206
206
  } | null | undefined;
207
207
  service_tier?: string | null | undefined;
208
208
  };
209
+ } | {
210
+ type: "response.failed";
211
+ response: {
212
+ error?: {
213
+ message: string;
214
+ code?: string | null | undefined;
215
+ } | null | undefined;
216
+ incomplete_details?: {
217
+ reason: string;
218
+ } | null | undefined;
219
+ usage?: {
220
+ input_tokens: number;
221
+ output_tokens: number;
222
+ input_tokens_details?: {
223
+ cached_tokens?: number | null | undefined;
224
+ } | null | undefined;
225
+ output_tokens_details?: {
226
+ reasoning_tokens?: number | null | undefined;
227
+ } | null | undefined;
228
+ } | null | undefined;
229
+ service_tier?: string | null | undefined;
230
+ };
209
231
  } | {
210
232
  type: "response.created";
211
233
  response: {
@@ -984,7 +1006,7 @@ declare const openaiTools: {
984
1006
  }>;
985
1007
  };
986
1008
 
987
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
1009
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
988
1010
  declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
989
1011
  conversation?: string | null | undefined;
990
1012
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
package/dist/index.js CHANGED
@@ -55,7 +55,7 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
55
55
  // src/openai-language-model-capabilities.ts
56
56
  function getOpenAILanguageModelCapabilities(modelId) {
57
57
  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
58
- const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
58
+ const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") && !modelId.startsWith("gpt-5.4-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
59
59
  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
60
60
  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2") || modelId.startsWith("gpt-5.3") || modelId.startsWith("gpt-5.4");
61
61
  const systemMessageMode = isReasoningModel ? "developer" : "system";
@@ -3299,6 +3299,23 @@ var openaiResponsesChunkSchema = (0, import_provider_utils26.lazySchema)(
3299
3299
  service_tier: import_v421.z.string().nullish()
3300
3300
  })
3301
3301
  }),
3302
+ import_v421.z.object({
3303
+ type: import_v421.z.literal("response.failed"),
3304
+ response: import_v421.z.object({
3305
+ error: import_v421.z.object({
3306
+ code: import_v421.z.string().nullish(),
3307
+ message: import_v421.z.string()
3308
+ }).nullish(),
3309
+ incomplete_details: import_v421.z.object({ reason: import_v421.z.string() }).nullish(),
3310
+ usage: import_v421.z.object({
3311
+ input_tokens: import_v421.z.number(),
3312
+ input_tokens_details: import_v421.z.object({ cached_tokens: import_v421.z.number().nullish() }).nullish(),
3313
+ output_tokens: import_v421.z.number(),
3314
+ output_tokens_details: import_v421.z.object({ reasoning_tokens: import_v421.z.number().nullish() }).nullish()
3315
+ }).nullish(),
3316
+ service_tier: import_v421.z.string().nullish()
3317
+ })
3318
+ }),
3302
3319
  import_v421.z.object({
3303
3320
  type: import_v421.z.literal("response.created"),
3304
3321
  response: import_v421.z.object({
@@ -4116,6 +4133,10 @@ var openaiResponsesReasoningModelIds = [
4116
4133
  "gpt-5.3-codex",
4117
4134
  "gpt-5.4",
4118
4135
  "gpt-5.4-2026-03-05",
4136
+ "gpt-5.4-mini",
4137
+ "gpt-5.4-mini-2026-03-17",
4138
+ "gpt-5.4-nano",
4139
+ "gpt-5.4-nano-2026-03-17",
4119
4140
  "gpt-5.4-pro",
4120
4141
  "gpt-5.4-pro-2026-03-05"
4121
4142
  ];
@@ -5369,7 +5390,7 @@ var OpenAIResponsesLanguageModel = class {
5369
5390
  controller.enqueue({ type: "stream-start", warnings });
5370
5391
  },
5371
5392
  transform(chunk, controller) {
5372
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J;
5393
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J, _K, _L;
5373
5394
  if (options.includeRawChunks) {
5374
5395
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
5375
5396
  }
@@ -6094,13 +6115,23 @@ var OpenAIResponsesLanguageModel = class {
6094
6115
  if (typeof value.response.service_tier === "string") {
6095
6116
  serviceTier = value.response.service_tier;
6096
6117
  }
6118
+ } else if (isResponseFailedChunk(value)) {
6119
+ const incompleteReason = (_y = value.response.incomplete_details) == null ? void 0 : _y.reason;
6120
+ finishReason = {
6121
+ unified: incompleteReason ? mapOpenAIResponseFinishReason({
6122
+ finishReason: incompleteReason,
6123
+ hasFunctionCall
6124
+ }) : "error",
6125
+ raw: incompleteReason != null ? incompleteReason : "error"
6126
+ };
6127
+ usage = (_z = value.response.usage) != null ? _z : void 0;
6097
6128
  } else if (isResponseAnnotationAddedChunk(value)) {
6098
6129
  ongoingAnnotations.push(value.annotation);
6099
6130
  if (value.annotation.type === "url_citation") {
6100
6131
  controller.enqueue({
6101
6132
  type: "source",
6102
6133
  sourceType: "url",
6103
- id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : (0, import_provider_utils29.generateId)(),
6134
+ id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : (0, import_provider_utils29.generateId)(),
6104
6135
  url: value.annotation.url,
6105
6136
  title: value.annotation.title
6106
6137
  });
@@ -6108,7 +6139,7 @@ var OpenAIResponsesLanguageModel = class {
6108
6139
  controller.enqueue({
6109
6140
  type: "source",
6110
6141
  sourceType: "document",
6111
- id: (_D = (_C = (_B = self.config).generateId) == null ? void 0 : _C.call(_B)) != null ? _D : (0, import_provider_utils29.generateId)(),
6142
+ id: (_F = (_E = (_D = self.config).generateId) == null ? void 0 : _E.call(_D)) != null ? _F : (0, import_provider_utils29.generateId)(),
6112
6143
  mediaType: "text/plain",
6113
6144
  title: value.annotation.filename,
6114
6145
  filename: value.annotation.filename,
@@ -6124,7 +6155,7 @@ var OpenAIResponsesLanguageModel = class {
6124
6155
  controller.enqueue({
6125
6156
  type: "source",
6126
6157
  sourceType: "document",
6127
- id: (_G = (_F = (_E = self.config).generateId) == null ? void 0 : _F.call(_E)) != null ? _G : (0, import_provider_utils29.generateId)(),
6158
+ id: (_I = (_H = (_G = self.config).generateId) == null ? void 0 : _H.call(_G)) != null ? _I : (0, import_provider_utils29.generateId)(),
6128
6159
  mediaType: "text/plain",
6129
6160
  title: value.annotation.filename,
6130
6161
  filename: value.annotation.filename,
@@ -6140,7 +6171,7 @@ var OpenAIResponsesLanguageModel = class {
6140
6171
  controller.enqueue({
6141
6172
  type: "source",
6142
6173
  sourceType: "document",
6143
- id: (_J = (_I = (_H = self.config).generateId) == null ? void 0 : _I.call(_H)) != null ? _J : (0, import_provider_utils29.generateId)(),
6174
+ id: (_L = (_K = (_J = self.config).generateId) == null ? void 0 : _K.call(_J)) != null ? _L : (0, import_provider_utils29.generateId)(),
6144
6175
  mediaType: "application/octet-stream",
6145
6176
  title: value.annotation.file_id,
6146
6177
  filename: value.annotation.file_id,
@@ -6188,6 +6219,9 @@ function isResponseOutputItemDoneChunk(chunk) {
6188
6219
  function isResponseFinishedChunk(chunk) {
6189
6220
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
6190
6221
  }
6222
+ function isResponseFailedChunk(chunk) {
6223
+ return chunk.type === "response.failed";
6224
+ }
6191
6225
  function isResponseCreatedChunk(chunk) {
6192
6226
  return chunk.type === "response.created";
6193
6227
  }
@@ -6602,7 +6636,7 @@ var OpenAITranscriptionModel = class {
6602
6636
  };
6603
6637
 
6604
6638
  // src/version.ts
6605
- var VERSION = true ? "3.0.47" : "0.0.0-test";
6639
+ var VERSION = true ? "3.0.48" : "0.0.0-test";
6606
6640
 
6607
6641
  // src/openai-provider.ts
6608
6642
  function createOpenAI(options = {}) {