@ai-sdk/openai 4.0.0-beta.0 → 4.0.0-beta.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. package/CHANGELOG.md +143 -0
  2. package/dist/index.d.mts +69 -22
  3. package/dist/index.d.ts +69 -22
  4. package/dist/index.js +1172 -872
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +1126 -821
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +56 -28
  9. package/dist/internal/index.d.ts +56 -28
  10. package/dist/internal/index.js +1201 -911
  11. package/dist/internal/index.js.map +1 -1
  12. package/dist/internal/index.mjs +1183 -888
  13. package/dist/internal/index.mjs.map +1 -1
  14. package/docs/03-openai.mdx +142 -3
  15. package/package.json +3 -3
  16. package/src/chat/convert-openai-chat-usage.ts +2 -2
  17. package/src/chat/convert-to-openai-chat-messages.ts +5 -5
  18. package/src/chat/map-openai-finish-reason.ts +2 -2
  19. package/src/chat/openai-chat-language-model.ts +22 -22
  20. package/src/chat/openai-chat-options.ts +5 -0
  21. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  22. package/src/completion/convert-openai-completion-usage.ts +2 -2
  23. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  24. package/src/completion/map-openai-finish-reason.ts +2 -2
  25. package/src/completion/openai-completion-language-model.ts +20 -20
  26. package/src/embedding/openai-embedding-model.ts +5 -5
  27. package/src/image/openai-image-model.ts +9 -9
  28. package/src/openai-language-model-capabilities.ts +5 -2
  29. package/src/openai-provider.ts +21 -21
  30. package/src/openai-tools.ts +12 -1
  31. package/src/responses/convert-openai-responses-usage.ts +2 -2
  32. package/src/responses/convert-to-openai-responses-input.ts +116 -12
  33. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  34. package/src/responses/openai-responses-api.ts +87 -1
  35. package/src/responses/openai-responses-language-model.ts +168 -33
  36. package/src/responses/openai-responses-options.ts +10 -0
  37. package/src/responses/openai-responses-prepare-tools.ts +34 -9
  38. package/src/speech/openai-speech-model.ts +7 -7
  39. package/src/tool/custom.ts +0 -6
  40. package/src/tool/tool-search.ts +98 -0
  41. package/src/transcription/openai-transcription-model.ts +8 -8
package/CHANGELOG.md CHANGED
@@ -1,5 +1,148 @@
1
1
  # @ai-sdk/openai
2
2
 
3
+ ## 4.0.0-beta.10
4
+
5
+ ### Major Changes
6
+
7
+ - 61753c3: ### `@ai-sdk/openai`: remove redundant `name` argument from `openai.tools.customTool()`
8
+
9
+ `openai.tools.customTool()` no longer accepts a `name` field. The tool name is now derived from the SDK tool key (the object key in the `tools` object).
10
+
11
+ Migration: remove the `name` property from `customTool()` calls. The object key is now used as the tool name sent to the OpenAI API.
12
+
13
+ Before:
14
+
15
+ ```ts
16
+ tools: {
17
+ write_sql: openai.tools.customTool({
18
+ name: 'write_sql',
19
+ description: '...',
20
+ }),
21
+ }
22
+ ```
23
+
24
+ After:
25
+
26
+ ```ts
27
+ tools: {
28
+ write_sql: openai.tools.customTool({
29
+ description: '...',
30
+ }),
31
+ }
32
+ ```
33
+
34
+ ### `@ai-sdk/provider-utils`: `createToolNameMapping()` no longer accepts the `resolveProviderToolName` parameter
35
+
36
+ Before: the tool name can be set dynamically.
37
+
38
+ ```ts
39
+ const toolNameMapping = createToolNameMapping({
40
+ tools,
41
+ providerToolNames: {
42
+ 'openai.code_interpreter': 'code_interpreter',
43
+ 'openai.file_search': 'file_search',
44
+ 'openai.image_generation': 'image_generation',
45
+ 'openai.local_shell': 'local_shell',
46
+ 'openai.shell': 'shell',
47
+ 'openai.web_search': 'web_search',
48
+ 'openai.web_search_preview': 'web_search_preview',
49
+ 'openai.mcp': 'mcp',
50
+ 'openai.apply_patch': 'apply_patch',
51
+ },
52
+ resolveProviderToolName: tool =>
53
+ tool.id === 'openai.custom'
54
+ ? (tool.args as { name?: string }).name
55
+ : undefined,
56
+ });
57
+ ```
58
+
59
+ After: the tool name is static, based on the `tools` object keys.
60
+
61
+ ```ts
62
+ const toolNameMapping = createToolNameMapping({
63
+ tools,
64
+ providerToolNames: {
65
+ 'openai.code_interpreter': 'code_interpreter',
66
+ 'openai.file_search': 'file_search',
67
+ 'openai.image_generation': 'image_generation',
68
+ 'openai.local_shell': 'local_shell',
69
+ 'openai.shell': 'shell',
70
+ 'openai.web_search': 'web_search',
71
+ 'openai.web_search_preview': 'web_search_preview',
72
+ 'openai.mcp': 'mcp',
73
+ 'openai.apply_patch': 'apply_patch',
74
+ }
75
+ });
76
+ ```
77
+
78
+ ### Patch Changes
79
+
80
+ - Updated dependencies [61753c3]
81
+ - @ai-sdk/provider-utils@5.0.0-beta.4
82
+
83
+ ## 4.0.0-beta.9
84
+
85
+ ### Patch Changes
86
+
87
+ - 156cdf0: feat(openai): add new tool search tool
88
+
89
+ ## 4.0.0-beta.8
90
+
91
+ ### Patch Changes
92
+
93
+ - Updated dependencies [f7d4f01]
94
+ - @ai-sdk/provider-utils@5.0.0-beta.3
95
+ - @ai-sdk/provider@4.0.0-beta.2
96
+
97
+ ## 4.0.0-beta.7
98
+
99
+ ### Patch Changes
100
+
101
+ - Updated dependencies [5c2a5a2]
102
+ - @ai-sdk/provider@4.0.0-beta.1
103
+ - @ai-sdk/provider-utils@5.0.0-beta.2
104
+
105
+ ## 4.0.0-beta.6
106
+
107
+ ### Patch Changes
108
+
109
+ - 83f9d04: feat(openai): upgrade v3 specs to v4
110
+
111
+ ## 4.0.0-beta.5
112
+
113
+ ### Patch Changes
114
+
115
+ - ac18f89: feat(provider/openai): add `gpt-5.3-chat-latest`
116
+
117
+ ## 4.0.0-beta.4
118
+
119
+ ### Patch Changes
120
+
121
+ - a71d345: fix(provider/openai): drop reasoning parts without encrypted content when store: false
122
+
123
+ ## 4.0.0-beta.3
124
+
125
+ ### Patch Changes
126
+
127
+ - 45b3d76: fix(security): prevent streaming tool calls from finalizing on parsable partial JSON
128
+
129
+ Streaming tool call arguments were finalized using `isParsableJson()` as a heuristic for completion. If partial accumulated JSON happened to be valid JSON before all chunks arrived, the tool call would be executed with incomplete arguments. Tool call finalization now only occurs in `flush()` after the stream is fully consumed.
130
+
131
+ - f7295cb: revert incorrect fix https://github.com/vercel/ai/pull/13172
132
+
133
+ ## 4.0.0-beta.2
134
+
135
+ ### Patch Changes
136
+
137
+ - Updated dependencies [531251e]
138
+ - @ai-sdk/provider-utils@5.0.0-beta.1
139
+
140
+ ## 4.0.0-beta.1
141
+
142
+ ### Patch Changes
143
+
144
+ - 7afaece: feat(provider/openai): add GPT-5.4 model support
145
+
3
146
  ## 4.0.0-beta.0
4
147
 
5
148
  ### Major Changes
package/dist/index.d.mts CHANGED
@@ -1,9 +1,9 @@
1
1
  import * as _ai_sdk_provider from '@ai-sdk/provider';
2
- import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
2
+ import { JSONValue, ProviderV4, LanguageModelV4, EmbeddingModelV4, ImageModelV4, TranscriptionModelV4, SpeechModelV4 } from '@ai-sdk/provider';
3
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
4
4
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
5
5
 
6
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | (string & {});
6
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
7
7
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
8
8
  logitBias?: Record<number, number> | undefined;
9
9
  logprobs?: number | boolean | undefined;
@@ -315,6 +315,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
315
315
  exit_code: number;
316
316
  };
317
317
  }[];
318
+ } | {
319
+ type: "tool_search_call";
320
+ id: string;
321
+ execution: "server" | "client";
322
+ call_id: string | null;
323
+ status: "completed" | "in_progress" | "incomplete";
324
+ arguments: unknown;
325
+ } | {
326
+ type: "tool_search_output";
327
+ id: string;
328
+ execution: "server" | "client";
329
+ call_id: string | null;
330
+ status: "completed" | "in_progress" | "incomplete";
331
+ tools: Record<string, JSONValue | undefined>[];
318
332
  };
319
333
  } | {
320
334
  type: "response.output_item.done";
@@ -484,6 +498,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
484
498
  exit_code: number;
485
499
  };
486
500
  }[];
501
+ } | {
502
+ type: "tool_search_call";
503
+ id: string;
504
+ execution: "server" | "client";
505
+ call_id: string | null;
506
+ status: "completed" | "in_progress" | "incomplete";
507
+ arguments: unknown;
508
+ } | {
509
+ type: "tool_search_output";
510
+ id: string;
511
+ execution: "server" | "client";
512
+ call_id: string | null;
513
+ status: "completed" | "in_progress" | "incomplete";
514
+ tools: Record<string, JSONValue | undefined>[];
487
515
  };
488
516
  } | {
489
517
  type: "response.function_call_arguments.delta";
@@ -575,10 +603,6 @@ type OpenAIResponsesLogprobs = NonNullable<(OpenAIResponsesChunk & {
575
603
  })['logprobs']> | null;
576
604
 
577
605
  declare const customToolFactory: _ai_sdk_provider_utils.ProviderToolFactory<string, {
578
- /**
579
- * The name of the custom tool, used to identify it in the API.
580
- */
581
- name: string;
582
606
  /**
583
607
  * An optional description of what the tool does.
584
608
  */
@@ -647,7 +671,6 @@ declare const openaiTools: {
647
671
  * Lark syntax). The model returns a `custom_tool_call` output item whose
648
672
  * `input` field is a string matching the specified grammar.
649
673
  *
650
- * @param name - The name of the custom tool.
651
674
  * @param description - An optional description of the tool.
652
675
  * @param format - The output format constraint (grammar type, syntax, and definition).
653
676
  */
@@ -918,9 +941,33 @@ declare const openaiTools: {
918
941
  output?: string | null;
919
942
  error?: _ai_sdk_provider.JSONValue;
920
943
  }>;
944
+ /**
945
+ * Tool search allows the model to dynamically search for and load deferred
946
+ * tools into the model's context as needed. This helps reduce overall token
947
+ * usage, cost, and latency by only loading tools when the model needs them.
948
+ *
949
+ * To use tool search, mark functions or namespaces with `defer_loading: true`
950
+ * in the tools array. The model will use tool search to load these tools
951
+ * when it determines they are needed.
952
+ */
953
+ toolSearch: (args?: Parameters<_ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
954
+ arguments?: unknown;
955
+ call_id?: string | null;
956
+ }, {
957
+ tools: Array<_ai_sdk_provider.JSONObject>;
958
+ }, {
959
+ execution?: "server" | "client";
960
+ description?: string;
961
+ parameters?: Record<string, unknown>;
962
+ }>>[0]) => _ai_sdk_provider_utils.Tool<{
963
+ arguments?: unknown;
964
+ call_id?: string | null;
965
+ }, {
966
+ tools: Array<_ai_sdk_provider.JSONObject>;
967
+ }>;
921
968
  };
922
969
 
923
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
970
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
924
971
  declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
925
972
  conversation?: string | null | undefined;
926
973
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
@@ -963,56 +1010,56 @@ declare const openAITranscriptionModelOptions: _ai_sdk_provider_utils.LazySchema
963
1010
  }>;
964
1011
  type OpenAITranscriptionModelOptions = InferSchema<typeof openAITranscriptionModelOptions>;
965
1012
 
966
- interface OpenAIProvider extends ProviderV3 {
967
- (modelId: OpenAIResponsesModelId): LanguageModelV3;
1013
+ interface OpenAIProvider extends ProviderV4 {
1014
+ (modelId: OpenAIResponsesModelId): LanguageModelV4;
968
1015
  /**
969
1016
  * Creates an OpenAI model for text generation.
970
1017
  */
971
- languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
1018
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV4;
972
1019
  /**
973
1020
  * Creates an OpenAI chat model for text generation.
974
1021
  */
975
- chat(modelId: OpenAIChatModelId): LanguageModelV3;
1022
+ chat(modelId: OpenAIChatModelId): LanguageModelV4;
976
1023
  /**
977
1024
  * Creates an OpenAI responses API model for text generation.
978
1025
  */
979
- responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
1026
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV4;
980
1027
  /**
981
1028
  * Creates an OpenAI completion model for text generation.
982
1029
  */
983
- completion(modelId: OpenAICompletionModelId): LanguageModelV3;
1030
+ completion(modelId: OpenAICompletionModelId): LanguageModelV4;
984
1031
  /**
985
1032
  * Creates a model for text embeddings.
986
1033
  */
987
- embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1034
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
988
1035
  /**
989
1036
  * Creates a model for text embeddings.
990
1037
  */
991
- embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1038
+ embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
992
1039
  /**
993
1040
  * @deprecated Use `embedding` instead.
994
1041
  */
995
- textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1042
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
996
1043
  /**
997
1044
  * @deprecated Use `embeddingModel` instead.
998
1045
  */
999
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1046
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
1000
1047
  /**
1001
1048
  * Creates a model for image generation.
1002
1049
  */
1003
- image(modelId: OpenAIImageModelId): ImageModelV3;
1050
+ image(modelId: OpenAIImageModelId): ImageModelV4;
1004
1051
  /**
1005
1052
  * Creates a model for image generation.
1006
1053
  */
1007
- imageModel(modelId: OpenAIImageModelId): ImageModelV3;
1054
+ imageModel(modelId: OpenAIImageModelId): ImageModelV4;
1008
1055
  /**
1009
1056
  * Creates a model for transcription.
1010
1057
  */
1011
- transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
1058
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV4;
1012
1059
  /**
1013
1060
  * Creates a model for speech generation.
1014
1061
  */
1015
- speech(modelId: OpenAISpeechModelId): SpeechModelV3;
1062
+ speech(modelId: OpenAISpeechModelId): SpeechModelV4;
1016
1063
  /**
1017
1064
  * OpenAI-specific tools.
1018
1065
  */
package/dist/index.d.ts CHANGED
@@ -1,9 +1,9 @@
1
1
  import * as _ai_sdk_provider from '@ai-sdk/provider';
2
- import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
2
+ import { JSONValue, ProviderV4, LanguageModelV4, EmbeddingModelV4, ImageModelV4, TranscriptionModelV4, SpeechModelV4 } from '@ai-sdk/provider';
3
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
4
4
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
5
5
 
6
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | (string & {});
6
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
7
7
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
8
8
  logitBias?: Record<number, number> | undefined;
9
9
  logprobs?: number | boolean | undefined;
@@ -315,6 +315,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
315
315
  exit_code: number;
316
316
  };
317
317
  }[];
318
+ } | {
319
+ type: "tool_search_call";
320
+ id: string;
321
+ execution: "server" | "client";
322
+ call_id: string | null;
323
+ status: "completed" | "in_progress" | "incomplete";
324
+ arguments: unknown;
325
+ } | {
326
+ type: "tool_search_output";
327
+ id: string;
328
+ execution: "server" | "client";
329
+ call_id: string | null;
330
+ status: "completed" | "in_progress" | "incomplete";
331
+ tools: Record<string, JSONValue | undefined>[];
318
332
  };
319
333
  } | {
320
334
  type: "response.output_item.done";
@@ -484,6 +498,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
484
498
  exit_code: number;
485
499
  };
486
500
  }[];
501
+ } | {
502
+ type: "tool_search_call";
503
+ id: string;
504
+ execution: "server" | "client";
505
+ call_id: string | null;
506
+ status: "completed" | "in_progress" | "incomplete";
507
+ arguments: unknown;
508
+ } | {
509
+ type: "tool_search_output";
510
+ id: string;
511
+ execution: "server" | "client";
512
+ call_id: string | null;
513
+ status: "completed" | "in_progress" | "incomplete";
514
+ tools: Record<string, JSONValue | undefined>[];
487
515
  };
488
516
  } | {
489
517
  type: "response.function_call_arguments.delta";
@@ -575,10 +603,6 @@ type OpenAIResponsesLogprobs = NonNullable<(OpenAIResponsesChunk & {
575
603
  })['logprobs']> | null;
576
604
 
577
605
  declare const customToolFactory: _ai_sdk_provider_utils.ProviderToolFactory<string, {
578
- /**
579
- * The name of the custom tool, used to identify it in the API.
580
- */
581
- name: string;
582
606
  /**
583
607
  * An optional description of what the tool does.
584
608
  */
@@ -647,7 +671,6 @@ declare const openaiTools: {
647
671
  * Lark syntax). The model returns a `custom_tool_call` output item whose
648
672
  * `input` field is a string matching the specified grammar.
649
673
  *
650
- * @param name - The name of the custom tool.
651
674
  * @param description - An optional description of the tool.
652
675
  * @param format - The output format constraint (grammar type, syntax, and definition).
653
676
  */
@@ -918,9 +941,33 @@ declare const openaiTools: {
918
941
  output?: string | null;
919
942
  error?: _ai_sdk_provider.JSONValue;
920
943
  }>;
944
+ /**
945
+ * Tool search allows the model to dynamically search for and load deferred
946
+ * tools into the model's context as needed. This helps reduce overall token
947
+ * usage, cost, and latency by only loading tools when the model needs them.
948
+ *
949
+ * To use tool search, mark functions or namespaces with `defer_loading: true`
950
+ * in the tools array. The model will use tool search to load these tools
951
+ * when it determines they are needed.
952
+ */
953
+ toolSearch: (args?: Parameters<_ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
954
+ arguments?: unknown;
955
+ call_id?: string | null;
956
+ }, {
957
+ tools: Array<_ai_sdk_provider.JSONObject>;
958
+ }, {
959
+ execution?: "server" | "client";
960
+ description?: string;
961
+ parameters?: Record<string, unknown>;
962
+ }>>[0]) => _ai_sdk_provider_utils.Tool<{
963
+ arguments?: unknown;
964
+ call_id?: string | null;
965
+ }, {
966
+ tools: Array<_ai_sdk_provider.JSONObject>;
967
+ }>;
921
968
  };
922
969
 
923
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
970
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
924
971
  declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
925
972
  conversation?: string | null | undefined;
926
973
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
@@ -963,56 +1010,56 @@ declare const openAITranscriptionModelOptions: _ai_sdk_provider_utils.LazySchema
963
1010
  }>;
964
1011
  type OpenAITranscriptionModelOptions = InferSchema<typeof openAITranscriptionModelOptions>;
965
1012
 
966
- interface OpenAIProvider extends ProviderV3 {
967
- (modelId: OpenAIResponsesModelId): LanguageModelV3;
1013
+ interface OpenAIProvider extends ProviderV4 {
1014
+ (modelId: OpenAIResponsesModelId): LanguageModelV4;
968
1015
  /**
969
1016
  * Creates an OpenAI model for text generation.
970
1017
  */
971
- languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
1018
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV4;
972
1019
  /**
973
1020
  * Creates an OpenAI chat model for text generation.
974
1021
  */
975
- chat(modelId: OpenAIChatModelId): LanguageModelV3;
1022
+ chat(modelId: OpenAIChatModelId): LanguageModelV4;
976
1023
  /**
977
1024
  * Creates an OpenAI responses API model for text generation.
978
1025
  */
979
- responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
1026
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV4;
980
1027
  /**
981
1028
  * Creates an OpenAI completion model for text generation.
982
1029
  */
983
- completion(modelId: OpenAICompletionModelId): LanguageModelV3;
1030
+ completion(modelId: OpenAICompletionModelId): LanguageModelV4;
984
1031
  /**
985
1032
  * Creates a model for text embeddings.
986
1033
  */
987
- embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1034
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
988
1035
  /**
989
1036
  * Creates a model for text embeddings.
990
1037
  */
991
- embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1038
+ embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
992
1039
  /**
993
1040
  * @deprecated Use `embedding` instead.
994
1041
  */
995
- textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1042
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
996
1043
  /**
997
1044
  * @deprecated Use `embeddingModel` instead.
998
1045
  */
999
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1046
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
1000
1047
  /**
1001
1048
  * Creates a model for image generation.
1002
1049
  */
1003
- image(modelId: OpenAIImageModelId): ImageModelV3;
1050
+ image(modelId: OpenAIImageModelId): ImageModelV4;
1004
1051
  /**
1005
1052
  * Creates a model for image generation.
1006
1053
  */
1007
- imageModel(modelId: OpenAIImageModelId): ImageModelV3;
1054
+ imageModel(modelId: OpenAIImageModelId): ImageModelV4;
1008
1055
  /**
1009
1056
  * Creates a model for transcription.
1010
1057
  */
1011
- transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
1058
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV4;
1012
1059
  /**
1013
1060
  * Creates a model for speech generation.
1014
1061
  */
1015
- speech(modelId: OpenAISpeechModelId): SpeechModelV3;
1062
+ speech(modelId: OpenAISpeechModelId): SpeechModelV4;
1016
1063
  /**
1017
1064
  * OpenAI-specific tools.
1018
1065
  */