@ai-sdk/openai 4.0.0-beta.4 → 4.0.0-beta.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/CHANGELOG.md +399 -22
  2. package/README.md +2 -0
  3. package/dist/index.d.ts +166 -49
  4. package/dist/index.js +2454 -1627
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +176 -53
  7. package/dist/internal/index.js +2220 -1648
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/03-openai.mdx +292 -22
  10. package/package.json +13 -14
  11. package/src/chat/convert-openai-chat-usage.ts +2 -2
  12. package/src/chat/convert-to-openai-chat-messages.ts +99 -71
  13. package/src/chat/map-openai-finish-reason.ts +2 -2
  14. package/src/chat/openai-chat-api.ts +6 -2
  15. package/src/chat/openai-chat-language-model.ts +68 -164
  16. package/src/chat/openai-chat-options.ts +10 -1
  17. package/src/chat/openai-chat-prepare-tools.ts +7 -7
  18. package/src/completion/convert-openai-completion-usage.ts +2 -2
  19. package/src/completion/convert-to-openai-completion-prompt.ts +2 -3
  20. package/src/completion/map-openai-finish-reason.ts +2 -2
  21. package/src/completion/openai-completion-api.ts +5 -2
  22. package/src/completion/openai-completion-language-model.ts +46 -30
  23. package/src/completion/openai-completion-options.ts +5 -1
  24. package/src/embedding/openai-embedding-model.ts +25 -8
  25. package/src/embedding/openai-embedding-options.ts +5 -1
  26. package/src/files/openai-files-api.ts +17 -0
  27. package/src/files/openai-files-options.ts +22 -0
  28. package/src/files/openai-files.ts +100 -0
  29. package/src/image/openai-image-model.ts +31 -15
  30. package/src/image/openai-image-options.ts +3 -0
  31. package/src/index.ts +2 -0
  32. package/src/openai-config.ts +7 -7
  33. package/src/openai-language-model-capabilities.ts +3 -2
  34. package/src/openai-provider.ts +63 -30
  35. package/src/openai-tools.ts +12 -1
  36. package/src/responses/convert-openai-responses-usage.ts +2 -2
  37. package/src/responses/convert-to-openai-responses-input.ts +244 -77
  38. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  39. package/src/responses/openai-responses-api.ts +141 -3
  40. package/src/responses/openai-responses-language-model.ts +274 -61
  41. package/src/responses/openai-responses-options.ts +29 -3
  42. package/src/responses/openai-responses-prepare-tools.ts +48 -15
  43. package/src/responses/openai-responses-provider-metadata.ts +12 -2
  44. package/src/skills/openai-skills-api.ts +31 -0
  45. package/src/skills/openai-skills.ts +83 -0
  46. package/src/speech/openai-speech-model.ts +28 -12
  47. package/src/speech/openai-speech-options.ts +5 -1
  48. package/src/tool/apply-patch.ts +33 -32
  49. package/src/tool/code-interpreter.ts +40 -41
  50. package/src/tool/custom.ts +2 -8
  51. package/src/tool/file-search.ts +3 -3
  52. package/src/tool/image-generation.ts +2 -2
  53. package/src/tool/local-shell.ts +2 -2
  54. package/src/tool/mcp.ts +3 -3
  55. package/src/tool/shell.ts +9 -4
  56. package/src/tool/tool-search.ts +98 -0
  57. package/src/tool/web-search-preview.ts +2 -2
  58. package/src/tool/web-search.ts +2 -2
  59. package/src/transcription/openai-transcription-model.ts +30 -14
  60. package/src/transcription/openai-transcription-options.ts +5 -1
  61. package/dist/index.d.mts +0 -1107
  62. package/dist/index.mjs +0 -6508
  63. package/dist/index.mjs.map +0 -1
  64. package/dist/internal/index.d.mts +0 -1137
  65. package/dist/internal/index.mjs +0 -6321
  66. package/dist/internal/index.mjs.map +0 -1
@@ -1,18 +1,18 @@
1
1
  import {
2
2
  APICallError,
3
- JSONValue,
4
- LanguageModelV3,
5
- LanguageModelV3Prompt,
6
- LanguageModelV3CallOptions,
7
- LanguageModelV3Content,
8
- LanguageModelV3FinishReason,
9
- LanguageModelV3GenerateResult,
10
- LanguageModelV3ProviderTool,
11
- LanguageModelV3StreamPart,
12
- LanguageModelV3StreamResult,
13
- LanguageModelV3ToolApprovalRequest,
14
- SharedV3ProviderMetadata,
15
- SharedV3Warning,
3
+ type JSONValue,
4
+ type LanguageModelV4,
5
+ type LanguageModelV4Prompt,
6
+ type LanguageModelV4CallOptions,
7
+ type LanguageModelV4Content,
8
+ type LanguageModelV4FinishReason,
9
+ type LanguageModelV4GenerateResult,
10
+ type LanguageModelV4ProviderTool,
11
+ type LanguageModelV4StreamPart,
12
+ type LanguageModelV4StreamResult,
13
+ type LanguageModelV4ToolApprovalRequest,
14
+ type SharedV4ProviderMetadata,
15
+ type SharedV4Warning,
16
16
  } from '@ai-sdk/provider';
17
17
  import {
18
18
  combineHeaders,
@@ -20,49 +20,58 @@ import {
20
20
  createJsonResponseHandler,
21
21
  createToolNameMapping,
22
22
  generateId,
23
- InferSchema,
23
+ isCustomReasoning,
24
24
  parseProviderOptions,
25
- ParseResult,
26
25
  postJsonToApi,
26
+ serializeModelOptions,
27
+ WORKFLOW_DESERIALIZE,
28
+ WORKFLOW_SERIALIZE,
29
+ type InferSchema,
30
+ type ParseResult,
27
31
  } from '@ai-sdk/provider-utils';
28
- import { OpenAIConfig } from '../openai-config';
32
+ import type { OpenAIConfig } from '../openai-config';
29
33
  import { openaiFailedResponseHandler } from '../openai-error';
30
34
  import { getOpenAILanguageModelCapabilities } from '../openai-language-model-capabilities';
31
- import { applyPatchInputSchema } from '../tool/apply-patch';
32
- import {
35
+ import type { applyPatchInputSchema } from '../tool/apply-patch';
36
+ import type {
33
37
  codeInterpreterInputSchema,
34
38
  codeInterpreterOutputSchema,
35
39
  } from '../tool/code-interpreter';
36
- import { fileSearchOutputSchema } from '../tool/file-search';
37
- import { imageGenerationOutputSchema } from '../tool/image-generation';
38
- import { localShellInputSchema } from '../tool/local-shell';
39
- import { mcpOutputSchema } from '../tool/mcp';
40
- import { shellInputSchema, shellOutputSchema } from '../tool/shell';
41
- import { webSearchOutputSchema } from '../tool/web-search';
40
+ import type { fileSearchOutputSchema } from '../tool/file-search';
41
+ import type { imageGenerationOutputSchema } from '../tool/image-generation';
42
+ import type { localShellInputSchema } from '../tool/local-shell';
43
+ import type { mcpOutputSchema } from '../tool/mcp';
44
+ import type { shellInputSchema, shellOutputSchema } from '../tool/shell';
45
+ import type {
46
+ toolSearchInputSchema,
47
+ toolSearchOutputSchema,
48
+ } from '../tool/tool-search';
49
+ import type { webSearchOutputSchema } from '../tool/web-search';
42
50
  import {
43
51
  convertOpenAIResponsesUsage,
44
- OpenAIResponsesUsage,
52
+ type OpenAIResponsesUsage,
45
53
  } from './convert-openai-responses-usage';
46
54
  import { convertToOpenAIResponsesInput } from './convert-to-openai-responses-input';
47
55
  import { mapOpenAIResponseFinishReason } from './map-openai-responses-finish-reason';
48
56
  import {
49
- OpenAIResponsesChunk,
50
57
  openaiResponsesChunkSchema,
51
- OpenAIResponsesIncludeOptions,
52
- OpenAIResponsesIncludeValue,
53
- OpenAIResponsesLogprobs,
54
58
  openaiResponsesResponseSchema,
55
- OpenAIResponsesWebSearchAction,
56
- OpenAIResponsesApplyPatchOperationDiffDeltaChunk,
57
- OpenAIResponsesApplyPatchOperationDiffDoneChunk,
59
+ type OpenAIResponsesChunk,
60
+ type OpenAIResponsesIncludeOptions,
61
+ type OpenAIResponsesIncludeValue,
62
+ type OpenAIResponsesLogprobs,
63
+ type OpenAIResponsesWebSearchAction,
64
+ type OpenAIResponsesApplyPatchOperationDiffDeltaChunk,
65
+ type OpenAIResponsesApplyPatchOperationDiffDoneChunk,
58
66
  } from './openai-responses-api';
59
67
  import {
60
- OpenAIResponsesModelId,
61
68
  openaiLanguageModelResponsesOptionsSchema,
62
69
  TOP_LOGPROBS_MAX,
70
+ type OpenAIResponsesModelId,
63
71
  } from './openai-responses-options';
64
72
  import { prepareResponsesTools } from './openai-responses-prepare-tools';
65
- import {
73
+ import type {
74
+ ResponsesCompactionProviderMetadata,
66
75
  ResponsesProviderMetadata,
67
76
  ResponsesReasoningProviderMetadata,
68
77
  ResponsesSourceDocumentProviderMetadata,
@@ -77,7 +86,7 @@ import {
77
86
  * so that tool results reference the correct tool call.
78
87
  */
79
88
  function extractApprovalRequestIdToToolCallIdMapping(
80
- prompt: LanguageModelV3Prompt,
89
+ prompt: LanguageModelV4Prompt,
81
90
  ): Record<string, string> {
82
91
  const mapping: Record<string, string> = {};
83
92
  for (const message of prompt) {
@@ -94,13 +103,27 @@ function extractApprovalRequestIdToToolCallIdMapping(
94
103
  return mapping;
95
104
  }
96
105
 
97
- export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
98
- readonly specificationVersion = 'v3';
106
+ export class OpenAIResponsesLanguageModel implements LanguageModelV4 {
107
+ readonly specificationVersion = 'v4';
99
108
 
100
109
  readonly modelId: OpenAIResponsesModelId;
101
110
 
102
111
  private readonly config: OpenAIConfig;
103
112
 
113
+ static [WORKFLOW_SERIALIZE](model: OpenAIResponsesLanguageModel) {
114
+ return serializeModelOptions({
115
+ modelId: model.modelId,
116
+ config: model.config,
117
+ });
118
+ }
119
+
120
+ static [WORKFLOW_DESERIALIZE](options: {
121
+ modelId: OpenAIResponsesModelId;
122
+ config: OpenAIConfig;
123
+ }) {
124
+ return new OpenAIResponsesLanguageModel(options.modelId, options.config);
125
+ }
126
+
104
127
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig) {
105
128
  this.modelId = modelId;
106
129
  this.config = config;
@@ -125,12 +148,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
125
148
  frequencyPenalty,
126
149
  seed,
127
150
  prompt,
151
+ reasoning,
128
152
  providerOptions,
129
153
  tools,
130
154
  toolChoice,
131
155
  responseFormat,
132
- }: LanguageModelV3CallOptions) {
133
- const warnings: SharedV3Warning[] = [];
156
+ }: LanguageModelV4CallOptions) {
157
+ const warnings: SharedV4Warning[] = [];
134
158
  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
135
159
 
136
160
  if (topK != null) {
@@ -170,6 +194,10 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
170
194
  });
171
195
  }
172
196
 
197
+ const resolvedReasoningEffort =
198
+ openaiOptions?.reasoningEffort ??
199
+ (isCustomReasoning(reasoning) ? reasoning : undefined);
200
+
173
201
  const isReasoningModel =
174
202
  openaiOptions?.forceReasoning ?? modelCapabilities.isReasoningModel;
175
203
 
@@ -193,11 +221,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
193
221
  'openai.web_search_preview': 'web_search_preview',
194
222
  'openai.mcp': 'mcp',
195
223
  'openai.apply_patch': 'apply_patch',
224
+ 'openai.tool_search': 'tool_search',
196
225
  },
197
- resolveProviderToolName: tool =>
198
- tool.id === 'openai.custom'
199
- ? (tool.args as { name?: string }).name
200
- : undefined,
201
226
  });
202
227
 
203
228
  const customProviderToolNames = new Set<string>();
@@ -273,7 +298,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
273
298
  tool.type === 'provider' &&
274
299
  (tool.id === 'openai.web_search' ||
275
300
  tool.id === 'openai.web_search_preview'),
276
- ) as LanguageModelV3ProviderTool | undefined
301
+ ) as LanguageModelV4ProviderTool | undefined
277
302
  )?.name;
278
303
 
279
304
  if (webSearchToolName) {
@@ -336,13 +361,21 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
336
361
  top_logprobs: topLogprobs,
337
362
  truncation: openaiOptions?.truncation,
338
363
 
364
+ // context management (server-side compaction):
365
+ ...(openaiOptions?.contextManagement && {
366
+ context_management: openaiOptions.contextManagement.map(cm => ({
367
+ type: cm.type,
368
+ compact_threshold: cm.compactThreshold,
369
+ })),
370
+ }),
371
+
339
372
  // model-specific settings:
340
373
  ...(isReasoningModel &&
341
- (openaiOptions?.reasoningEffort != null ||
374
+ (resolvedReasoningEffort != null ||
342
375
  openaiOptions?.reasoningSummary != null) && {
343
376
  reasoning: {
344
- ...(openaiOptions?.reasoningEffort != null && {
345
- effort: openaiOptions.reasoningEffort,
377
+ ...(resolvedReasoningEffort != null && {
378
+ effort: resolvedReasoningEffort,
346
379
  }),
347
380
  ...(openaiOptions?.reasoningSummary != null && {
348
381
  summary: openaiOptions.reasoningSummary,
@@ -358,7 +391,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
358
391
  // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
359
392
  if (
360
393
  !(
361
- openaiOptions?.reasoningEffort === 'none' &&
394
+ resolvedReasoningEffort === 'none' &&
362
395
  modelCapabilities.supportsNonReasoningParameters
363
396
  )
364
397
  ) {
@@ -454,8 +487,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
454
487
  }
455
488
 
456
489
  async doGenerate(
457
- options: LanguageModelV3CallOptions,
458
- ): Promise<LanguageModelV3GenerateResult> {
490
+ options: LanguageModelV4CallOptions,
491
+ ): Promise<LanguageModelV4GenerateResult> {
459
492
  const {
460
493
  args: body,
461
494
  warnings,
@@ -478,7 +511,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
478
511
  rawValue: rawResponse,
479
512
  } = await postJsonToApi({
480
513
  url,
481
- headers: combineHeaders(this.config.headers(), options.headers),
514
+ headers: combineHeaders(this.config.headers?.(), options.headers),
482
515
  body,
483
516
  failedResponseHandler: openaiFailedResponseHandler,
484
517
  successfulResponseHandler: createJsonResponseHandler(
@@ -500,11 +533,12 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
500
533
  });
501
534
  }
502
535
 
503
- const content: Array<LanguageModelV3Content> = [];
536
+ const content: Array<LanguageModelV4Content> = [];
504
537
  const logprobs: Array<OpenAIResponsesLogprobs> = [];
505
538
 
506
539
  // flag that checks if there have been client-side tool calls (not executed by openai)
507
540
  let hasFunctionCall = false;
541
+ const hostedToolSearchCallIds: string[] = [];
508
542
 
509
543
  // map response content to content array (defined when there is no error)
510
544
  for (const part of response.output!) {
@@ -551,6 +585,54 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
551
585
  break;
552
586
  }
553
587
 
588
+ case 'tool_search_call': {
589
+ const toolCallId = part.call_id ?? part.id;
590
+ const isHosted = part.execution === 'server';
591
+
592
+ if (isHosted) {
593
+ hostedToolSearchCallIds.push(toolCallId);
594
+ }
595
+
596
+ content.push({
597
+ type: 'tool-call',
598
+ toolCallId,
599
+ toolName: toolNameMapping.toCustomToolName('tool_search'),
600
+ input: JSON.stringify({
601
+ arguments: part.arguments,
602
+ call_id: part.call_id,
603
+ } satisfies InferSchema<typeof toolSearchInputSchema>),
604
+ ...(isHosted ? { providerExecuted: true } : {}),
605
+ providerMetadata: {
606
+ [providerOptionsName]: {
607
+ itemId: part.id,
608
+ },
609
+ },
610
+ });
611
+
612
+ break;
613
+ }
614
+
615
+ case 'tool_search_output': {
616
+ const toolCallId =
617
+ part.call_id ?? hostedToolSearchCallIds.shift() ?? part.id;
618
+
619
+ content.push({
620
+ type: 'tool-result',
621
+ toolCallId,
622
+ toolName: toolNameMapping.toCustomToolName('tool_search'),
623
+ result: {
624
+ tools: part.tools,
625
+ } satisfies InferSchema<typeof toolSearchOutputSchema>,
626
+ providerMetadata: {
627
+ [providerOptionsName]: {
628
+ itemId: part.id,
629
+ },
630
+ },
631
+ });
632
+
633
+ break;
634
+ }
635
+
554
636
  case 'local_shell_call': {
555
637
  content.push({
556
638
  type: 'tool-call',
@@ -621,7 +703,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
621
703
  logprobs.push(contentPart.logprobs);
622
704
  }
623
705
 
624
- const providerMetadata: SharedV3ProviderMetadata[string] = {
706
+ const providerMetadata: SharedV4ProviderMetadata[string] = {
625
707
  itemId: part.id,
626
708
  ...(part.phase != null && { phase: part.phase }),
627
709
  ...(contentPart.annotations.length > 0 && {
@@ -833,7 +915,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
833
915
  type: 'tool-approval-request',
834
916
  approvalId: approvalRequestId,
835
917
  toolCallId: dummyToolCallId,
836
- } satisfies LanguageModelV3ToolApprovalRequest);
918
+ } satisfies LanguageModelV4ToolApprovalRequest);
837
919
  break;
838
920
  }
839
921
 
@@ -927,10 +1009,25 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
927
1009
 
928
1010
  break;
929
1011
  }
1012
+
1013
+ case 'compaction': {
1014
+ content.push({
1015
+ type: 'custom',
1016
+ kind: 'openai.compaction',
1017
+ providerMetadata: {
1018
+ [providerOptionsName]: {
1019
+ type: 'compaction',
1020
+ itemId: part.id,
1021
+ encryptedContent: part.encrypted_content,
1022
+ } satisfies ResponsesCompactionProviderMetadata,
1023
+ },
1024
+ });
1025
+ break;
1026
+ }
930
1027
  }
931
1028
  }
932
1029
 
933
- const providerMetadata: SharedV3ProviderMetadata = {
1030
+ const providerMetadata: SharedV4ProviderMetadata = {
934
1031
  [providerOptionsName]: {
935
1032
  responseId: response.id,
936
1033
  ...(logprobs.length > 0 ? { logprobs } : {}),
@@ -966,8 +1063,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
966
1063
  }
967
1064
 
968
1065
  async doStream(
969
- options: LanguageModelV3CallOptions,
970
- ): Promise<LanguageModelV3StreamResult> {
1066
+ options: LanguageModelV4CallOptions,
1067
+ ): Promise<LanguageModelV4StreamResult> {
971
1068
  const {
972
1069
  args: body,
973
1070
  warnings,
@@ -983,7 +1080,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
983
1080
  path: '/responses',
984
1081
  modelId: this.modelId,
985
1082
  }),
986
- headers: combineHeaders(this.config.headers(), options.headers),
1083
+ headers: combineHeaders(this.config.headers?.(), options.headers),
987
1084
  body: {
988
1085
  ...body,
989
1086
  stream: true,
@@ -1006,7 +1103,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1006
1103
  string
1007
1104
  >();
1008
1105
 
1009
- let finishReason: LanguageModelV3FinishReason = {
1106
+ let finishReason: LanguageModelV4FinishReason = {
1010
1107
  unified: 'other',
1011
1108
  raw: undefined,
1012
1109
  };
@@ -1026,6 +1123,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1026
1123
  hasDiff: boolean;
1027
1124
  endEmitted: boolean;
1028
1125
  };
1126
+ toolSearchExecution?: 'server' | 'client';
1029
1127
  }
1030
1128
  | undefined
1031
1129
  > = {};
@@ -1054,12 +1152,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1054
1152
  > = {};
1055
1153
 
1056
1154
  let serviceTier: string | undefined;
1155
+ const hostedToolSearchCallIds: string[] = [];
1057
1156
 
1058
1157
  return {
1059
1158
  stream: response.pipeThrough(
1060
1159
  new TransformStream<
1061
1160
  ParseResult<OpenAIResponsesChunk>,
1062
- LanguageModelV3StreamPart
1161
+ LanguageModelV4StreamPart
1063
1162
  >({
1064
1163
  start(controller) {
1065
1164
  controller.enqueue({ type: 'stream-start', warnings });
@@ -1188,6 +1287,28 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1188
1287
  input: '{}',
1189
1288
  providerExecuted: true,
1190
1289
  });
1290
+ } else if (value.item.type === 'tool_search_call') {
1291
+ const toolCallId = value.item.id;
1292
+ const toolName =
1293
+ toolNameMapping.toCustomToolName('tool_search');
1294
+ const isHosted = value.item.execution === 'server';
1295
+
1296
+ ongoingToolCalls[value.output_index] = {
1297
+ toolName,
1298
+ toolCallId,
1299
+ toolSearchExecution: value.item.execution ?? 'server',
1300
+ };
1301
+
1302
+ if (isHosted) {
1303
+ controller.enqueue({
1304
+ type: 'tool-input-start',
1305
+ id: toolCallId,
1306
+ toolName,
1307
+ providerExecuted: true,
1308
+ });
1309
+ }
1310
+ } else if (value.item.type === 'tool_search_output') {
1311
+ // handled on output_item.done so we can pair it with the call
1191
1312
  } else if (
1192
1313
  value.item.type === 'mcp_call' ||
1193
1314
  value.item.type === 'mcp_list_tools' ||
@@ -1418,6 +1539,67 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1418
1539
  result: value.item.result,
1419
1540
  } satisfies InferSchema<typeof imageGenerationOutputSchema>,
1420
1541
  });
1542
+ } else if (value.item.type === 'tool_search_call') {
1543
+ const toolCall = ongoingToolCalls[value.output_index];
1544
+ const isHosted = value.item.execution === 'server';
1545
+
1546
+ if (toolCall != null) {
1547
+ const toolCallId = isHosted
1548
+ ? toolCall.toolCallId
1549
+ : (value.item.call_id ?? value.item.id);
1550
+
1551
+ if (isHosted) {
1552
+ hostedToolSearchCallIds.push(toolCallId);
1553
+ } else {
1554
+ controller.enqueue({
1555
+ type: 'tool-input-start',
1556
+ id: toolCallId,
1557
+ toolName: toolCall.toolName,
1558
+ });
1559
+ }
1560
+
1561
+ controller.enqueue({
1562
+ type: 'tool-input-end',
1563
+ id: toolCallId,
1564
+ });
1565
+
1566
+ controller.enqueue({
1567
+ type: 'tool-call',
1568
+ toolCallId,
1569
+ toolName: toolCall.toolName,
1570
+ input: JSON.stringify({
1571
+ arguments: value.item.arguments,
1572
+ call_id: isHosted ? null : toolCallId,
1573
+ } satisfies InferSchema<typeof toolSearchInputSchema>),
1574
+ ...(isHosted ? { providerExecuted: true } : {}),
1575
+ providerMetadata: {
1576
+ [providerOptionsName]: {
1577
+ itemId: value.item.id,
1578
+ },
1579
+ },
1580
+ });
1581
+ }
1582
+
1583
+ ongoingToolCalls[value.output_index] = undefined;
1584
+ } else if (value.item.type === 'tool_search_output') {
1585
+ const toolCallId =
1586
+ value.item.call_id ??
1587
+ hostedToolSearchCallIds.shift() ??
1588
+ value.item.id;
1589
+
1590
+ controller.enqueue({
1591
+ type: 'tool-result',
1592
+ toolCallId,
1593
+ toolName: toolNameMapping.toCustomToolName('tool_search'),
1594
+ result: {
1595
+ tools: value.item.tools,
1596
+ } satisfies InferSchema<typeof toolSearchOutputSchema>,
1597
+ providerMetadata: {
1598
+ [providerOptionsName]: {
1599
+ itemId: value.item.id,
1600
+ },
1601
+ },
1602
+ });
1421
1603
  } else if (value.item.type === 'mcp_call') {
1422
1604
  ongoingToolCalls[value.output_index] = undefined;
1423
1605
 
@@ -1647,6 +1829,18 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1647
1829
  }
1648
1830
 
1649
1831
  delete activeReasoning[value.item.id];
1832
+ } else if (value.item.type === 'compaction') {
1833
+ controller.enqueue({
1834
+ type: 'custom',
1835
+ kind: 'openai.compaction',
1836
+ providerMetadata: {
1837
+ [providerOptionsName]: {
1838
+ type: 'compaction',
1839
+ itemId: value.item.id,
1840
+ encryptedContent: value.item.encrypted_content,
1841
+ } satisfies ResponsesCompactionProviderMetadata,
1842
+ },
1843
+ });
1650
1844
  }
1651
1845
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
1652
1846
  const toolCall = ongoingToolCalls[value.output_index];
@@ -1867,6 +2061,19 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1867
2061
  if (typeof value.response.service_tier === 'string') {
1868
2062
  serviceTier = value.response.service_tier;
1869
2063
  }
2064
+ } else if (isResponseFailedChunk(value)) {
2065
+ const incompleteReason =
2066
+ value.response.incomplete_details?.reason;
2067
+ finishReason = {
2068
+ unified: incompleteReason
2069
+ ? mapOpenAIResponseFinishReason({
2070
+ finishReason: incompleteReason,
2071
+ hasFunctionCall,
2072
+ })
2073
+ : 'error',
2074
+ raw: incompleteReason ?? 'error',
2075
+ };
2076
+ usage = value.response.usage ?? undefined;
1870
2077
  } else if (isResponseAnnotationAddedChunk(value)) {
1871
2078
  ongoingAnnotations.push(value.annotation);
1872
2079
  if (value.annotation.type === 'url_citation') {
@@ -1941,7 +2148,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1941
2148
  },
1942
2149
 
1943
2150
  flush(controller) {
1944
- const providerMetadata: SharedV3ProviderMetadata = {
2151
+ const providerMetadata: SharedV4ProviderMetadata = {
1945
2152
  [providerOptionsName]: {
1946
2153
  responseId: responseId,
1947
2154
  ...(logprobs.length > 0 ? { logprobs } : {}),
@@ -1986,6 +2193,12 @@ function isResponseFinishedChunk(
1986
2193
  );
1987
2194
  }
1988
2195
 
2196
+ function isResponseFailedChunk(
2197
+ chunk: OpenAIResponsesChunk,
2198
+ ): chunk is OpenAIResponsesChunk & { type: 'response.failed' } {
2199
+ return chunk.type === 'response.failed';
2200
+ }
2201
+
1989
2202
  function isResponseCreatedChunk(
1990
2203
  chunk: OpenAIResponsesChunk,
1991
2204
  ): chunk is OpenAIResponsesChunk & { type: 'response.created' } {
@@ -1,4 +1,8 @@
1
- import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
1
+ import {
2
+ lazySchema,
3
+ zodSchema,
4
+ type InferSchema,
5
+ } from '@ai-sdk/provider-utils';
2
6
  import { z } from 'zod/v4';
3
7
 
4
8
  /**
@@ -37,11 +41,16 @@ export const openaiResponsesReasoningModelIds = [
37
41
  'gpt-5.2-chat-latest',
38
42
  'gpt-5.2-pro',
39
43
  'gpt-5.2-codex',
44
+ 'gpt-5.3-chat-latest',
45
+ 'gpt-5.3-codex',
40
46
  'gpt-5.4',
41
47
  'gpt-5.4-2026-03-05',
48
+ 'gpt-5.4-mini',
49
+ 'gpt-5.4-mini-2026-03-17',
50
+ 'gpt-5.4-nano',
51
+ 'gpt-5.4-nano-2026-03-17',
42
52
  'gpt-5.4-pro',
43
53
  'gpt-5.4-pro-2026-03-05',
44
- 'gpt-5.3-codex',
45
54
  ] as const;
46
55
 
47
56
  export const openaiResponsesModelIds = [
@@ -98,11 +107,16 @@ export type OpenAIResponsesModelId =
98
107
  | 'gpt-5.2-pro'
99
108
  | 'gpt-5.2-pro-2025-12-11'
100
109
  | 'gpt-5.2-codex'
110
+ | 'gpt-5.3-chat-latest'
111
+ | 'gpt-5.3-codex'
101
112
  | 'gpt-5.4'
102
113
  | 'gpt-5.4-2026-03-05'
114
+ | 'gpt-5.4-mini'
115
+ | 'gpt-5.4-mini-2026-03-17'
116
+ | 'gpt-5.4-nano'
117
+ | 'gpt-5.4-nano-2026-03-17'
103
118
  | 'gpt-5.4-pro'
104
119
  | 'gpt-5.4-pro-2026-03-05'
105
- | 'gpt-5.3-codex'
106
120
  | 'gpt-5-2025-08-07'
107
121
  | 'gpt-5-chat-latest'
108
122
  | 'gpt-5-codex'
@@ -298,6 +312,18 @@ export const openaiLanguageModelResponsesOptionsSchema = lazySchema(() =>
298
312
  * and defaults `systemMessageMode` to `developer` unless overridden.
299
313
  */
300
314
  forceReasoning: z.boolean().optional(),
315
+
316
+ /**
317
+ * Enable server-side context management (compaction).
318
+ */
319
+ contextManagement: z
320
+ .array(
321
+ z.object({
322
+ type: z.literal('compaction'),
323
+ compactThreshold: z.number(),
324
+ }),
325
+ )
326
+ .nullish(),
301
327
  }),
302
328
  ),
303
329
  );