@ai-sdk/openai 4.0.0-beta.4 → 4.0.0-beta.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/CHANGELOG.md +387 -22
  2. package/README.md +2 -0
  3. package/dist/index.d.ts +162 -45
  4. package/dist/index.js +2341 -1572
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +174 -51
  7. package/dist/internal/index.js +2110 -1593
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/03-openai.mdx +274 -9
  10. package/package.json +13 -14
  11. package/src/chat/convert-openai-chat-usage.ts +2 -2
  12. package/src/chat/convert-to-openai-chat-messages.ts +33 -18
  13. package/src/chat/map-openai-finish-reason.ts +2 -2
  14. package/src/chat/openai-chat-language-model.ts +62 -158
  15. package/src/chat/openai-chat-options.ts +5 -0
  16. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  17. package/src/completion/convert-openai-completion-usage.ts +2 -2
  18. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  19. package/src/completion/map-openai-finish-reason.ts +2 -2
  20. package/src/completion/openai-completion-language-model.ts +40 -23
  21. package/src/embedding/openai-embedding-model.ts +23 -6
  22. package/src/files/openai-files-api.ts +17 -0
  23. package/src/files/openai-files-options.ts +18 -0
  24. package/src/files/openai-files.ts +102 -0
  25. package/src/image/openai-image-model.ts +28 -11
  26. package/src/image/openai-image-options.ts +3 -0
  27. package/src/index.ts +2 -0
  28. package/src/openai-config.ts +6 -6
  29. package/src/openai-language-model-capabilities.ts +3 -2
  30. package/src/openai-provider.ts +54 -21
  31. package/src/openai-tools.ts +12 -1
  32. package/src/responses/convert-openai-responses-usage.ts +2 -2
  33. package/src/responses/convert-to-openai-responses-input.ts +194 -39
  34. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  35. package/src/responses/openai-responses-api.ts +136 -2
  36. package/src/responses/openai-responses-language-model.ts +252 -39
  37. package/src/responses/openai-responses-options.ts +24 -2
  38. package/src/responses/openai-responses-prepare-tools.ts +47 -14
  39. package/src/responses/openai-responses-provider-metadata.ts +10 -0
  40. package/src/skills/openai-skills-api.ts +31 -0
  41. package/src/skills/openai-skills.ts +87 -0
  42. package/src/speech/openai-speech-model.ts +25 -8
  43. package/src/tool/apply-patch.ts +33 -32
  44. package/src/tool/code-interpreter.ts +40 -41
  45. package/src/tool/custom.ts +2 -8
  46. package/src/tool/file-search.ts +2 -2
  47. package/src/tool/image-generation.ts +2 -2
  48. package/src/tool/local-shell.ts +2 -2
  49. package/src/tool/mcp.ts +2 -2
  50. package/src/tool/shell.ts +9 -4
  51. package/src/tool/tool-search.ts +98 -0
  52. package/src/tool/web-search-preview.ts +2 -2
  53. package/src/tool/web-search.ts +2 -2
  54. package/src/transcription/openai-transcription-model.ts +26 -9
  55. package/dist/index.d.mts +0 -1107
  56. package/dist/index.mjs +0 -6508
  57. package/dist/index.mjs.map +0 -1
  58. package/dist/internal/index.d.mts +0 -1137
  59. package/dist/internal/index.mjs +0 -6321
  60. package/dist/internal/index.mjs.map +0 -1
package/dist/index.d.ts CHANGED
@@ -1,9 +1,9 @@
1
1
  import * as _ai_sdk_provider from '@ai-sdk/provider';
2
- import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
2
+ import { JSONValue, ProviderV4, LanguageModelV4, EmbeddingModelV4, ImageModelV4, TranscriptionModelV4, SpeechModelV4, FilesV4, SkillsV4 } from '@ai-sdk/provider';
3
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
4
4
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
5
5
 
6
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
7
7
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
8
8
  logitBias?: Record<number, number> | undefined;
9
9
  logprobs?: number | boolean | undefined;
@@ -42,9 +42,9 @@ declare const openaiEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
42
42
  }>;
43
43
  type OpenAIEmbeddingModelOptions = InferSchema<typeof openaiEmbeddingModelOptions>;
44
44
 
45
- type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | 'chatgpt-image-latest' | (string & {});
45
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | 'gpt-image-2' | 'chatgpt-image-latest' | (string & {});
46
46
 
47
- declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
47
+ declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderExecutedToolFactory<{}, {
48
48
  /**
49
49
  * An object describing the specific action taken in this web search call.
50
50
  * Includes details on how the model used the web (search, open_page, find_in_page).
@@ -141,7 +141,7 @@ declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryWi
141
141
  */
142
142
  timezone?: string;
143
143
  };
144
- }>;
144
+ }, {}>;
145
145
 
146
146
  /**
147
147
  * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
@@ -206,6 +206,28 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
206
206
  } | null | undefined;
207
207
  service_tier?: string | null | undefined;
208
208
  };
209
+ } | {
210
+ type: "response.failed";
211
+ response: {
212
+ error?: {
213
+ message: string;
214
+ code?: string | null | undefined;
215
+ } | null | undefined;
216
+ incomplete_details?: {
217
+ reason: string;
218
+ } | null | undefined;
219
+ usage?: {
220
+ input_tokens: number;
221
+ output_tokens: number;
222
+ input_tokens_details?: {
223
+ cached_tokens?: number | null | undefined;
224
+ } | null | undefined;
225
+ output_tokens_details?: {
226
+ reasoning_tokens?: number | null | undefined;
227
+ } | null | undefined;
228
+ } | null | undefined;
229
+ service_tier?: string | null | undefined;
230
+ };
209
231
  } | {
210
232
  type: "response.created";
211
233
  response: {
@@ -300,6 +322,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
300
322
  action: {
301
323
  commands: string[];
302
324
  };
325
+ } | {
326
+ type: "compaction";
327
+ id: string;
328
+ encrypted_content?: string | null | undefined;
303
329
  } | {
304
330
  type: "shell_call_output";
305
331
  id: string;
@@ -315,6 +341,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
315
341
  exit_code: number;
316
342
  };
317
343
  }[];
344
+ } | {
345
+ type: "tool_search_call";
346
+ id: string;
347
+ execution: "server" | "client";
348
+ call_id: string | null;
349
+ status: "completed" | "in_progress" | "incomplete";
350
+ arguments: unknown;
351
+ } | {
352
+ type: "tool_search_output";
353
+ id: string;
354
+ execution: "server" | "client";
355
+ call_id: string | null;
356
+ status: "completed" | "in_progress" | "incomplete";
357
+ tools: Record<string, JSONValue | undefined>[];
318
358
  };
319
359
  } | {
320
360
  type: "response.output_item.done";
@@ -469,6 +509,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
469
509
  action: {
470
510
  commands: string[];
471
511
  };
512
+ } | {
513
+ type: "compaction";
514
+ id: string;
515
+ encrypted_content: string;
472
516
  } | {
473
517
  type: "shell_call_output";
474
518
  id: string;
@@ -484,6 +528,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
484
528
  exit_code: number;
485
529
  };
486
530
  }[];
531
+ } | {
532
+ type: "tool_search_call";
533
+ id: string;
534
+ execution: "server" | "client";
535
+ call_id: string | null;
536
+ status: "completed" | "in_progress" | "incomplete";
537
+ arguments: unknown;
538
+ } | {
539
+ type: "tool_search_output";
540
+ id: string;
541
+ execution: "server" | "client";
542
+ call_id: string | null;
543
+ status: "completed" | "in_progress" | "incomplete";
544
+ tools: Record<string, JSONValue | undefined>[];
487
545
  };
488
546
  } | {
489
547
  type: "response.function_call_arguments.delta";
@@ -574,11 +632,7 @@ type OpenAIResponsesLogprobs = NonNullable<(OpenAIResponsesChunk & {
574
632
  type: 'response.output_text.delta';
575
633
  })['logprobs']> | null;
576
634
 
577
- declare const customToolFactory: _ai_sdk_provider_utils.ProviderToolFactory<string, {
578
- /**
579
- * The name of the custom tool, used to identify it in the API.
580
- */
581
- name: string;
635
+ declare const customToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<string, {
582
636
  /**
583
637
  * An optional description of what the tool does.
584
638
  */
@@ -594,7 +648,7 @@ declare const customToolFactory: _ai_sdk_provider_utils.ProviderToolFactory<stri
594
648
  } | {
595
649
  type: "text";
596
650
  };
597
- }>;
651
+ }, {}>;
598
652
 
599
653
  /**
600
654
  * Type definitions for the apply_patch operations.
@@ -635,23 +689,22 @@ declare const openaiTools: {
635
689
  * enabling iterative, multi-step code editing workflows.
636
690
  *
637
691
  */
638
- applyPatch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
692
+ applyPatch: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
639
693
  callId: string;
640
694
  operation: ApplyPatchOperation;
641
695
  }, {
642
696
  status: "completed" | "failed";
643
697
  output?: string;
644
- }, {}>;
698
+ }, {}, {}>;
645
699
  /**
646
700
  * Custom tools let callers constrain model output to a grammar (regex or
647
701
  * Lark syntax). The model returns a `custom_tool_call` output item whose
648
702
  * `input` field is a string matching the specified grammar.
649
703
  *
650
- * @param name - The name of the custom tool.
651
704
  * @param description - An optional description of the tool.
652
705
  * @param format - The output format constraint (grammar type, syntax, and definition).
653
706
  */
654
- customTool: (args: Parameters<typeof customToolFactory>[0]) => _ai_sdk_provider_utils.Tool<string, unknown>;
707
+ customTool: (args: Parameters<typeof customToolFactory>[0]) => _ai_sdk_provider_utils.Tool<string, unknown, {}>;
655
708
  /**
656
709
  * The Code Interpreter tool allows models to write and run Python code in a
657
710
  * sandboxed environment to solve complex problems in domains like data analysis,
@@ -674,7 +727,7 @@ declare const openaiTools: {
674
727
  type: "image";
675
728
  url: string;
676
729
  }> | null;
677
- }>;
730
+ }, {}>;
678
731
  /**
679
732
  * File search is a tool available in the Responses API. It enables models to
680
733
  * retrieve information in a knowledge base of previously uploaded files through
@@ -685,7 +738,7 @@ declare const openaiTools: {
685
738
  * @param ranking - The ranking options to use for the file search.
686
739
  * @param filters - The filters to use for the file search.
687
740
  */
688
- fileSearch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
741
+ fileSearch: _ai_sdk_provider_utils.ProviderExecutedToolFactory<{}, {
689
742
  queries: string[];
690
743
  results: null | {
691
744
  attributes: Record<string, unknown>;
@@ -702,7 +755,7 @@ declare const openaiTools: {
702
755
  scoreThreshold?: number;
703
756
  };
704
757
  filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
705
- }>;
758
+ }, {}>;
706
759
  /**
707
760
  * The image generation tool allows you to generate images using a text prompt,
708
761
  * and optionally image inputs. It leverages the GPT Image model,
@@ -735,14 +788,14 @@ declare const openaiTools: {
735
788
  size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024";
736
789
  }) => _ai_sdk_provider_utils.Tool<{}, {
737
790
  result: string;
738
- }>;
791
+ }, {}>;
739
792
  /**
740
793
  * Local shell is a tool that allows agents to run shell commands locally
741
794
  * on a machine you or the user provides.
742
795
  *
743
796
  * Supported models: `gpt-5-codex`
744
797
  */
745
- localShell: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
798
+ localShell: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
746
799
  action: {
747
800
  type: "exec";
748
801
  command: string[];
@@ -753,7 +806,7 @@ declare const openaiTools: {
753
806
  };
754
807
  }, {
755
808
  output: string;
756
- }, {}>;
809
+ }, {}, {}>;
757
810
  /**
758
811
  * The shell tool allows the model to interact with your local computer through
759
812
  * a controlled command-line interface. The model proposes shell commands; your
@@ -765,7 +818,7 @@ declare const openaiTools: {
765
818
  * execution or add strict allow-/deny-lists before forwarding a command to
766
819
  * the system shell.
767
820
  */
768
- shell: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
821
+ shell: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
769
822
  action: {
770
823
  commands: string[];
771
824
  timeoutMs?: number;
@@ -800,7 +853,7 @@ declare const openaiTools: {
800
853
  };
801
854
  skills?: Array<{
802
855
  type: "skillReference";
803
- skillId: string;
856
+ providerReference: _ai_sdk_provider.SharedV4ProviderReference;
804
857
  version?: string;
805
858
  } | {
806
859
  type: "inline";
@@ -817,13 +870,27 @@ declare const openaiTools: {
817
870
  containerId: string;
818
871
  } | {
819
872
  type?: "local";
820
- skills?: Array<{
873
+ skills
874
+ /**
875
+ * Web search allows models to access up-to-date information from the internet
876
+ * and provide answers with sourced citations.
877
+ *
878
+ * @param searchContextSize - The search context size to use for the web search.
879
+ * @param userLocation - The user location to use for the web search.
880
+ */
881
+ ? /**
882
+ * Web search allows models to access up-to-date information from the internet
883
+ * and provide answers with sourced citations.
884
+ *
885
+ * @param searchContextSize - The search context size to use for the web search.
886
+ * @param userLocation - The user location to use for the web search.
887
+ */: Array<{
821
888
  name: string;
822
889
  description: string;
823
890
  path: string;
824
891
  }>;
825
892
  };
826
- }>;
893
+ }, {}>;
827
894
  /**
828
895
  * Web search allows models to access up-to-date information from the internet
829
896
  * and provide answers with sourced citations.
@@ -831,7 +898,7 @@ declare const openaiTools: {
831
898
  * @param searchContextSize - The search context size to use for the web search.
832
899
  * @param userLocation - The user location to use for the web search.
833
900
  */
834
- webSearchPreview: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
901
+ webSearchPreview: _ai_sdk_provider_utils.ProviderExecutedToolFactory<{}, {
835
902
  action?: {
836
903
  type: "search";
837
904
  query?: string;
@@ -852,7 +919,7 @@ declare const openaiTools: {
852
919
  region?: string;
853
920
  timezone?: string;
854
921
  };
855
- }>;
922
+ }, {}>;
856
923
  /**
857
924
  * Web search allows models to access up-to-date information from the internet
858
925
  * and provide answers with sourced citations.
@@ -880,7 +947,7 @@ declare const openaiTools: {
880
947
  type: "api";
881
948
  name: string;
882
949
  }>;
883
- }>;
950
+ }, {}>;
884
951
  /**
885
952
  * MCP (Model Context Protocol) allows models to call tools exposed by
886
953
  * remote MCP servers or service connectors.
@@ -917,10 +984,34 @@ declare const openaiTools: {
917
984
  arguments: string;
918
985
  output?: string | null;
919
986
  error?: _ai_sdk_provider.JSONValue;
920
- }>;
987
+ }, {}>;
988
+ /**
989
+ * Tool search allows the model to dynamically search for and load deferred
990
+ * tools into the model's context as needed. This helps reduce overall token
991
+ * usage, cost, and latency by only loading tools when the model needs them.
992
+ *
993
+ * To use tool search, mark functions or namespaces with `defer_loading: true`
994
+ * in the tools array. The model will use tool search to load these tools
995
+ * when it determines they are needed.
996
+ */
997
+ toolSearch: (args?: Parameters<_ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
998
+ arguments?: unknown;
999
+ call_id?: string | null;
1000
+ }, {
1001
+ tools: Array<_ai_sdk_provider.JSONObject>;
1002
+ }, {
1003
+ execution?: "server" | "client";
1004
+ description?: string;
1005
+ parameters?: Record<string, unknown>;
1006
+ }, {}>>[0]) => _ai_sdk_provider_utils.Tool<{
1007
+ arguments?: unknown;
1008
+ call_id?: string | null;
1009
+ }, {
1010
+ tools: Array<_ai_sdk_provider.JSONObject>;
1011
+ }, {}>;
921
1012
  };
922
1013
 
923
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5.3-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
1014
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
924
1015
  declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
925
1016
  conversation?: string | null | undefined;
926
1017
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
@@ -943,6 +1034,10 @@ declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.
943
1034
  user?: string | null | undefined;
944
1035
  systemMessageMode?: "remove" | "system" | "developer" | undefined;
945
1036
  forceReasoning?: boolean | undefined;
1037
+ contextManagement?: {
1038
+ type: "compaction";
1039
+ compactThreshold: number;
1040
+ }[] | null | undefined;
946
1041
  }>;
947
1042
  type OpenAILanguageModelResponsesOptions = InferSchema<typeof openaiLanguageModelResponsesOptionsSchema>;
948
1043
 
@@ -963,56 +1058,64 @@ declare const openAITranscriptionModelOptions: _ai_sdk_provider_utils.LazySchema
963
1058
  }>;
964
1059
  type OpenAITranscriptionModelOptions = InferSchema<typeof openAITranscriptionModelOptions>;
965
1060
 
966
- interface OpenAIProvider extends ProviderV3 {
967
- (modelId: OpenAIResponsesModelId): LanguageModelV3;
1061
+ interface OpenAIProvider extends ProviderV4 {
1062
+ (modelId: OpenAIResponsesModelId): LanguageModelV4;
968
1063
  /**
969
1064
  * Creates an OpenAI model for text generation.
970
1065
  */
971
- languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
1066
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV4;
972
1067
  /**
973
1068
  * Creates an OpenAI chat model for text generation.
974
1069
  */
975
- chat(modelId: OpenAIChatModelId): LanguageModelV3;
1070
+ chat(modelId: OpenAIChatModelId): LanguageModelV4;
976
1071
  /**
977
1072
  * Creates an OpenAI responses API model for text generation.
978
1073
  */
979
- responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
1074
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV4;
980
1075
  /**
981
1076
  * Creates an OpenAI completion model for text generation.
982
1077
  */
983
- completion(modelId: OpenAICompletionModelId): LanguageModelV3;
1078
+ completion(modelId: OpenAICompletionModelId): LanguageModelV4;
984
1079
  /**
985
1080
  * Creates a model for text embeddings.
986
1081
  */
987
- embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1082
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
988
1083
  /**
989
1084
  * Creates a model for text embeddings.
990
1085
  */
991
- embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1086
+ embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
992
1087
  /**
993
1088
  * @deprecated Use `embedding` instead.
994
1089
  */
995
- textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1090
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
996
1091
  /**
997
1092
  * @deprecated Use `embeddingModel` instead.
998
1093
  */
999
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
1094
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV4;
1000
1095
  /**
1001
1096
  * Creates a model for image generation.
1002
1097
  */
1003
- image(modelId: OpenAIImageModelId): ImageModelV3;
1098
+ image(modelId: OpenAIImageModelId): ImageModelV4;
1004
1099
  /**
1005
1100
  * Creates a model for image generation.
1006
1101
  */
1007
- imageModel(modelId: OpenAIImageModelId): ImageModelV3;
1102
+ imageModel(modelId: OpenAIImageModelId): ImageModelV4;
1008
1103
  /**
1009
1104
  * Creates a model for transcription.
1010
1105
  */
1011
- transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
1106
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV4;
1012
1107
  /**
1013
1108
  * Creates a model for speech generation.
1014
1109
  */
1015
- speech(modelId: OpenAISpeechModelId): SpeechModelV3;
1110
+ speech(modelId: OpenAISpeechModelId): SpeechModelV4;
1111
+ /**
1112
+ * Returns a FilesV4 interface for uploading files to OpenAI.
1113
+ */
1114
+ files(): FilesV4;
1115
+ /**
1116
+ * Returns a SkillsV4 interface for uploading skills to OpenAI.
1117
+ */
1118
+ skills(): SkillsV4;
1016
1119
  /**
1017
1120
  * OpenAI-specific tools.
1018
1121
  */
@@ -1058,6 +1161,12 @@ declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
1058
1161
  */
1059
1162
  declare const openai: OpenAIProvider;
1060
1163
 
1164
+ declare const openaiFilesOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
1165
+ purpose?: string | undefined;
1166
+ expiresAfter?: number | undefined;
1167
+ }>;
1168
+ type OpenAIFilesOptions = InferSchema<typeof openaiFilesOptionsSchema>;
1169
+
1061
1170
  type OpenaiResponsesChunk = InferSchema<typeof openaiResponsesChunkSchema>;
1062
1171
  type ResponsesOutputTextAnnotationProviderMetadata = Extract<OpenaiResponsesChunk, {
1063
1172
  type: 'response.output_text.annotation.added';
@@ -1077,6 +1186,14 @@ type OpenaiResponsesReasoningProviderMetadata = {
1077
1186
  type OpenaiResponsesProviderMetadata = {
1078
1187
  openai: ResponsesProviderMetadata;
1079
1188
  };
1189
+ type ResponsesCompactionProviderMetadata = {
1190
+ type: 'compaction';
1191
+ itemId: string;
1192
+ encryptedContent?: string;
1193
+ };
1194
+ type OpenaiResponsesCompactionProviderMetadata = {
1195
+ openai: ResponsesCompactionProviderMetadata;
1196
+ };
1080
1197
  type ResponsesTextProviderMetadata = {
1081
1198
  itemId: string;
1082
1199
  phase?: 'commentary' | 'final_answer' | null;
@@ -1104,4 +1221,4 @@ type OpenaiResponsesSourceDocumentProviderMetadata = {
1104
1221
 
1105
1222
  declare const VERSION: string;
1106
1223
 
1107
- export { type OpenAILanguageModelChatOptions as OpenAIChatLanguageModelOptions, type OpenAIEmbeddingModelOptions, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, type OpenAILanguageModelResponsesOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAILanguageModelResponsesOptions as OpenAIResponsesProviderOptions, type OpenAISpeechModelOptions, type OpenAITranscriptionModelOptions, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, VERSION, createOpenAI, openai };
1224
+ export { type OpenAILanguageModelChatOptions as OpenAIChatLanguageModelOptions, type OpenAIEmbeddingModelOptions, type OpenAIFilesOptions, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, type OpenAILanguageModelResponsesOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAILanguageModelResponsesOptions as OpenAIResponsesProviderOptions, type OpenAISpeechModelOptions, type OpenAITranscriptionModelOptions, type OpenaiResponsesCompactionProviderMetadata, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, VERSION, createOpenAI, openai };