openai 0.36.0 → 0.37.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +22 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/all_models.rb +1 -0
  5. data/lib/openai/models/beta/assistant_create_params.rb +4 -3
  6. data/lib/openai/models/beta/assistant_update_params.rb +4 -3
  7. data/lib/openai/models/beta/threads/run_create_params.rb +4 -3
  8. data/lib/openai/models/chat/completion_create_params.rb +4 -3
  9. data/lib/openai/models/container_create_params.rb +22 -1
  10. data/lib/openai/models/container_create_response.rb +32 -1
  11. data/lib/openai/models/container_list_response.rb +32 -1
  12. data/lib/openai/models/container_retrieve_response.rb +32 -1
  13. data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
  14. data/lib/openai/models/conversations/item_create_params.rb +2 -2
  15. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +4 -3
  16. data/lib/openai/models/evals/run_cancel_response.rb +8 -6
  17. data/lib/openai/models/evals/run_create_params.rb +8 -6
  18. data/lib/openai/models/evals/run_create_response.rb +8 -6
  19. data/lib/openai/models/evals/run_list_response.rb +8 -6
  20. data/lib/openai/models/evals/run_retrieve_response.rb +8 -6
  21. data/lib/openai/models/graders/score_model_grader.rb +4 -3
  22. data/lib/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rb +43 -0
  23. data/lib/openai/models/realtime/output_audio_buffer_clear_event.rb +4 -4
  24. data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +13 -5
  25. data/lib/openai/models/realtime/realtime_client_event.rb +1 -1
  26. data/lib/openai/models/realtime/realtime_server_event.rb +16 -9
  27. data/lib/openai/models/realtime/realtime_session.rb +13 -5
  28. data/lib/openai/models/realtime/realtime_session_create_request.rb +14 -9
  29. data/lib/openai/models/realtime/realtime_session_create_response.rb +27 -14
  30. data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +13 -5
  31. data/lib/openai/models/realtime/realtime_truncation.rb +14 -9
  32. data/lib/openai/models/reasoning.rb +4 -3
  33. data/lib/openai/models/reasoning_effort.rb +5 -3
  34. data/lib/openai/models/responses/compacted_response.rb +56 -0
  35. data/lib/openai/models/responses/input_token_count_params.rb +4 -4
  36. data/lib/openai/models/responses/response.rb +6 -6
  37. data/lib/openai/models/responses/response_apply_patch_tool_call.rb +23 -23
  38. data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +9 -9
  39. data/lib/openai/models/responses/response_compact_params.rb +344 -0
  40. data/lib/openai/models/responses/response_compaction_item.rb +43 -0
  41. data/lib/openai/models/responses/response_compaction_item_param.rb +36 -0
  42. data/lib/openai/models/responses/response_create_params.rb +4 -4
  43. data/lib/openai/models/responses/response_function_shell_call_output_content.rb +10 -10
  44. data/lib/openai/models/responses/response_function_shell_tool_call.rb +5 -5
  45. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +2 -2
  46. data/lib/openai/models/responses/response_input_item.rb +22 -19
  47. data/lib/openai/models/responses/response_output_item.rb +4 -1
  48. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  49. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  50. data/lib/openai/models/responses/tool.rb +4 -2
  51. data/lib/openai/models/responses/tool_choice_shell.rb +1 -1
  52. data/lib/openai/models/responses_model.rb +1 -0
  53. data/lib/openai/models/video_create_params.rb +11 -6
  54. data/lib/openai/resources/containers.rb +3 -1
  55. data/lib/openai/resources/conversations/items.rb +1 -1
  56. data/lib/openai/resources/conversations.rb +1 -1
  57. data/lib/openai/resources/responses/input_tokens.rb +1 -1
  58. data/lib/openai/resources/responses.rb +33 -2
  59. data/lib/openai/resources/videos.rb +6 -3
  60. data/lib/openai/resources/webhooks.rb +0 -3
  61. data/lib/openai/version.rb +1 -1
  62. data/lib/openai.rb +6 -0
  63. data/manifest.yaml +1 -0
  64. data/rbi/openai/models/all_models.rbi +5 -0
  65. data/rbi/openai/models/beta/assistant_create_params.rbi +8 -6
  66. data/rbi/openai/models/beta/assistant_update_params.rbi +8 -6
  67. data/rbi/openai/models/beta/threads/run_create_params.rbi +8 -6
  68. data/rbi/openai/models/chat/completion_create_params.rbi +8 -6
  69. data/rbi/openai/models/container_create_params.rbi +51 -0
  70. data/rbi/openai/models/container_create_response.rbi +81 -3
  71. data/rbi/openai/models/container_list_response.rbi +80 -3
  72. data/rbi/openai/models/container_retrieve_response.rbi +83 -3
  73. data/rbi/openai/models/conversations/conversation_create_params.rbi +3 -0
  74. data/rbi/openai/models/conversations/item_create_params.rbi +3 -0
  75. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -6
  76. data/rbi/openai/models/evals/run_cancel_response.rbi +16 -12
  77. data/rbi/openai/models/evals/run_create_params.rbi +16 -12
  78. data/rbi/openai/models/evals/run_create_response.rbi +16 -12
  79. data/rbi/openai/models/evals/run_list_response.rbi +16 -12
  80. data/rbi/openai/models/evals/run_retrieve_response.rbi +16 -12
  81. data/rbi/openai/models/graders/score_model_grader.rbi +8 -6
  82. data/rbi/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbi +56 -0
  83. data/rbi/openai/models/realtime/output_audio_buffer_clear_event.rbi +4 -4
  84. data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +24 -8
  85. data/rbi/openai/models/realtime/realtime_server_event.rbi +6 -5
  86. data/rbi/openai/models/realtime/realtime_session.rbi +24 -8
  87. data/rbi/openai/models/realtime/realtime_session_create_request.rbi +28 -18
  88. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +52 -26
  89. data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +24 -8
  90. data/rbi/openai/models/realtime/realtime_truncation.rbi +14 -9
  91. data/rbi/openai/models/reasoning.rbi +8 -6
  92. data/rbi/openai/models/reasoning_effort.rbi +5 -3
  93. data/rbi/openai/models/responses/compacted_response.rbi +105 -0
  94. data/rbi/openai/models/responses/response.rbi +1 -0
  95. data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +53 -67
  96. data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +9 -9
  97. data/rbi/openai/models/responses/response_compact_params.rbi +593 -0
  98. data/rbi/openai/models/responses/response_compaction_item.rbi +67 -0
  99. data/rbi/openai/models/responses/response_compaction_item_param.rbi +54 -0
  100. data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +9 -9
  101. data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +6 -6
  102. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +1 -1
  103. data/rbi/openai/models/responses/response_input_item.rbi +21 -23
  104. data/rbi/openai/models/responses/response_output_item.rbi +1 -0
  105. data/rbi/openai/models/responses/response_output_item_added_event.rbi +1 -0
  106. data/rbi/openai/models/responses/response_output_item_done_event.rbi +1 -0
  107. data/rbi/openai/models/responses/tool.rbi +6 -3
  108. data/rbi/openai/models/responses/tool_choice_shell.rbi +1 -1
  109. data/rbi/openai/models/responses_model.rbi +5 -0
  110. data/rbi/openai/models/video_create_params.rbi +10 -6
  111. data/rbi/openai/resources/beta/assistants.rbi +8 -6
  112. data/rbi/openai/resources/beta/threads/runs.rbi +8 -6
  113. data/rbi/openai/resources/chat/completions.rbi +8 -6
  114. data/rbi/openai/resources/containers.rbi +3 -0
  115. data/rbi/openai/resources/conversations/items.rbi +1 -0
  116. data/rbi/openai/resources/conversations.rbi +1 -0
  117. data/rbi/openai/resources/realtime/calls.rbi +14 -9
  118. data/rbi/openai/resources/responses.rbi +42 -0
  119. data/rbi/openai/resources/videos.rbi +5 -3
  120. data/sig/openai/models/all_models.rbs +2 -0
  121. data/sig/openai/models/container_create_params.rbs +23 -1
  122. data/sig/openai/models/container_create_response.rbs +32 -3
  123. data/sig/openai/models/container_list_response.rbs +32 -3
  124. data/sig/openai/models/container_retrieve_response.rbs +32 -3
  125. data/sig/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbs +32 -0
  126. data/sig/openai/models/realtime/realtime_server_event.rbs +1 -0
  127. data/sig/openai/models/reasoning_effort.rbs +2 -1
  128. data/sig/openai/models/responses/compacted_response.rbs +42 -0
  129. data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +18 -22
  130. data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +7 -7
  131. data/sig/openai/models/responses/response_compact_params.rbs +226 -0
  132. data/sig/openai/models/responses/response_compaction_item.rbs +39 -0
  133. data/sig/openai/models/responses/response_compaction_item_param.rbs +28 -0
  134. data/sig/openai/models/responses/response_input_item.rbs +5 -6
  135. data/sig/openai/models/responses/response_output_item.rbs +1 -0
  136. data/sig/openai/models/responses_model.rbs +2 -0
  137. data/sig/openai/resources/containers.rbs +1 -0
  138. data/sig/openai/resources/responses.rbs +8 -0
  139. metadata +31 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 17baebbeaa4b29e9cd335b7cf1c0bef7a1e63832804c6f332e6cb508a4afd4d8
4
- data.tar.gz: b9009868811efc0f57c9b33bbe51c031436e0ccebb2f944ad5d60259095a4099
3
+ metadata.gz: 136d7cc343b404ef69a7d7d32adcc6942d66b3e87385ed6ad02d91cf6014dad6
4
+ data.tar.gz: b7192ae587a9921a96e547b52c64dbd312b8b8d66ee6b6de9f572b18a0809625
5
5
  SHA512:
6
- metadata.gz: 8b5ca35d9d0758f61af7f3b7966017ad6df9b3e0a582d2cbc3bc455ac3d651bed29554710a0c05e22a5ad481e0469cb8ab0448cbe1d356c52f07ee2bd0cfff65
7
- data.tar.gz: 474f40178d640e14ddb001786670e0e063c45b7205895115a72a28075597ed798b52ab3cd1952296e847822f056433bb907d7022dae5c3780451c872709485f9
6
+ metadata.gz: 900ac61caada3d3a9e099ecd37807f331eaa2785b05183bad4b512deb5d2698b4926142cc5d63bcbb870f21bf5659a70601e4389309dbbcbe92e06d8513b3f96
7
+ data.tar.gz: 148cff14a45e1e1473e946221cad39b44c0c4162d993ee5333367cf64b850f2beafc11016596cc274e1ece7a2526ba5aed1d446a06451294a71c18b69eb280c8
data/CHANGELOG.md CHANGED
@@ -1,5 +1,27 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.37.0 (2025-12-04)
4
+
5
+ Full Changelog: [v0.36.1...v0.37.0](https://github.com/openai/openai-ruby/compare/v0.36.1...v0.37.0)
6
+
7
+ ### Features
8
+
9
+ * **api:** gpt-5.1-codex-max and responses/compact ([17f7eda](https://github.com/openai/openai-ruby/commit/17f7eda484df9835373c09fee5c3ada5536c18af))
10
+
11
+
12
+ ### Chores
13
+
14
+ * add "base64" as a dependency for newer ruby versions ([#868](https://github.com/openai/openai-ruby/issues/868)) ([b7be495](https://github.com/openai/openai-ruby/commit/b7be4955d5ea7affdf55ce11086afc1bc904f471))
15
+ * explicitly require "base64" gem ([4be5941](https://github.com/openai/openai-ruby/commit/4be5941881cd50e5c7596c62e3b7b4fda2af5196))
16
+
17
+ ## 0.36.1 (2025-11-17)
18
+
19
+ Full Changelog: [v0.36.0...v0.36.1](https://github.com/openai/openai-ruby/compare/v0.36.0...v0.36.1)
20
+
21
+ ### Bug Fixes
22
+
23
+ * **api:** align types of input items / output items for typescript ([b593643](https://github.com/openai/openai-ruby/commit/b5936439c6804161bc1a9a7900bdc33f09be1485))
24
+
3
25
  ## 0.36.0 (2025-11-13)
4
26
 
5
27
  Full Changelog: [v0.35.2...v0.36.0](https://github.com/openai/openai-ruby/compare/v0.35.2...v0.36.0)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
15
15
  <!-- x-release-please-start-version -->
16
16
 
17
17
  ```ruby
18
- gem "openai", "~> 0.36.0"
18
+ gem "openai", "~> 0.37.0"
19
19
  ```
20
20
 
21
21
  <!-- x-release-please-end -->
@@ -27,6 +27,7 @@ module OpenAI
27
27
  GPT_5_CODEX = :"gpt-5-codex"
28
28
  GPT_5_PRO = :"gpt-5-pro"
29
29
  GPT_5_PRO_2025_10_06 = :"gpt-5-pro-2025-10-06"
30
+ GPT_5_1_CODEX_MAX = :"gpt-5.1-codex-max"
30
31
 
31
32
  # @!method self.values
32
33
  # @return [Array<Symbol>]
@@ -51,9 +51,9 @@ module OpenAI
51
51
  # @!attribute reasoning_effort
52
52
  # Constrains effort on reasoning for
53
53
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
54
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
55
- # reasoning effort can result in faster responses and fewer tokens used on
56
- # reasoning in a response.
54
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
55
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
56
+ # on reasoning in a response.
57
57
  #
58
58
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
59
59
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -61,6 +61,7 @@ module OpenAI
61
61
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
62
62
  # support `none`.
63
63
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
64
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
64
65
  #
65
66
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
66
67
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -51,9 +51,9 @@ module OpenAI
51
51
  # @!attribute reasoning_effort
52
52
  # Constrains effort on reasoning for
53
53
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
54
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
55
- # reasoning effort can result in faster responses and fewer tokens used on
56
- # reasoning in a response.
54
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
55
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
56
+ # on reasoning in a response.
57
57
  #
58
58
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
59
59
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -61,6 +61,7 @@ module OpenAI
61
61
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
62
62
  # support `none`.
63
63
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
64
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
64
65
  #
65
66
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
66
67
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -109,9 +109,9 @@ module OpenAI
109
109
  # @!attribute reasoning_effort
110
110
  # Constrains effort on reasoning for
111
111
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
112
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
113
- # reasoning effort can result in faster responses and fewer tokens used on
114
- # reasoning in a response.
112
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
113
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
114
+ # on reasoning in a response.
115
115
  #
116
116
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
117
117
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -119,6 +119,7 @@ module OpenAI
119
119
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
120
120
  # support `none`.
121
121
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
122
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
122
123
  #
123
124
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
124
125
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -204,9 +204,9 @@ module OpenAI
204
204
  # @!attribute reasoning_effort
205
205
  # Constrains effort on reasoning for
206
206
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
207
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
208
- # reasoning effort can result in faster responses and fewer tokens used on
209
- # reasoning in a response.
207
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
208
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
209
+ # on reasoning in a response.
210
210
  #
211
211
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
212
212
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -214,6 +214,7 @@ module OpenAI
214
214
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
215
215
  # support `none`.
216
216
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
217
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
217
218
  #
218
219
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
219
220
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -25,13 +25,21 @@ module OpenAI
25
25
  # @return [Array<String>, nil]
26
26
  optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
27
27
 
28
- # @!method initialize(name:, expires_after: nil, file_ids: nil, request_options: {})
28
+ # @!attribute memory_limit
29
+ # Optional memory limit for the container. Defaults to "1g".
30
+ #
31
+ # @return [Symbol, OpenAI::Models::ContainerCreateParams::MemoryLimit, nil]
32
+ optional :memory_limit, enum: -> { OpenAI::ContainerCreateParams::MemoryLimit }
33
+
34
+ # @!method initialize(name:, expires_after: nil, file_ids: nil, memory_limit: nil, request_options: {})
29
35
  # @param name [String] Name of the container to create.
30
36
  #
31
37
  # @param expires_after [OpenAI::Models::ContainerCreateParams::ExpiresAfter] Container expiration time in seconds relative to the 'anchor' time.
32
38
  #
33
39
  # @param file_ids [Array<String>] IDs of files to copy to the container.
34
40
  #
41
+ # @param memory_limit [Symbol, OpenAI::Models::ContainerCreateParams::MemoryLimit] Optional memory limit for the container. Defaults to "1g".
42
+ #
35
43
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
36
44
 
37
45
  class ExpiresAfter < OpenAI::Internal::Type::BaseModel
@@ -70,6 +78,19 @@ module OpenAI
70
78
  # @return [Array<Symbol>]
71
79
  end
72
80
  end
81
+
82
+ # Optional memory limit for the container. Defaults to "1g".
83
+ module MemoryLimit
84
+ extend OpenAI::Internal::Type::Enum
85
+
86
+ MEMORY_LIMIT_1G = :"1g"
87
+ MEMORY_LIMIT_4G = :"4g"
88
+ MEMORY_LIMIT_16G = :"16g"
89
+ MEMORY_LIMIT_64G = :"64g"
90
+
91
+ # @!method self.values
92
+ # @return [Array<Symbol>]
93
+ end
73
94
  end
74
95
  end
75
96
  end
@@ -42,7 +42,19 @@ module OpenAI
42
42
  # @return [OpenAI::Models::ContainerCreateResponse::ExpiresAfter, nil]
43
43
  optional :expires_after, -> { OpenAI::Models::ContainerCreateResponse::ExpiresAfter }
44
44
 
45
- # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil)
45
+ # @!attribute last_active_at
46
+ # Unix timestamp (in seconds) when the container was last active.
47
+ #
48
+ # @return [Integer, nil]
49
+ optional :last_active_at, Integer
50
+
51
+ # @!attribute memory_limit
52
+ # The memory limit configured for the container.
53
+ #
54
+ # @return [Symbol, OpenAI::Models::ContainerCreateResponse::MemoryLimit, nil]
55
+ optional :memory_limit, enum: -> { OpenAI::Models::ContainerCreateResponse::MemoryLimit }
56
+
57
+ # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil, last_active_at: nil, memory_limit: nil)
46
58
  # Some parameter documentations has been truncated, see
47
59
  # {OpenAI::Models::ContainerCreateResponse} for more details.
48
60
  #
@@ -57,6 +69,10 @@ module OpenAI
57
69
  # @param status [String] Status of the container (e.g., active, deleted).
58
70
  #
59
71
  # @param expires_after [OpenAI::Models::ContainerCreateResponse::ExpiresAfter] The container will expire after this time period.
72
+ #
73
+ # @param last_active_at [Integer] Unix timestamp (in seconds) when the container was last active.
74
+ #
75
+ # @param memory_limit [Symbol, OpenAI::Models::ContainerCreateResponse::MemoryLimit] The memory limit configured for the container.
60
76
 
61
77
  # @see OpenAI::Models::ContainerCreateResponse#expires_after
62
78
  class ExpiresAfter < OpenAI::Internal::Type::BaseModel
@@ -93,6 +109,21 @@ module OpenAI
93
109
  # @return [Array<Symbol>]
94
110
  end
95
111
  end
112
+
113
+ # The memory limit configured for the container.
114
+ #
115
+ # @see OpenAI::Models::ContainerCreateResponse#memory_limit
116
+ module MemoryLimit
117
+ extend OpenAI::Internal::Type::Enum
118
+
119
+ MEMORY_LIMIT_1G = :"1g"
120
+ MEMORY_LIMIT_4G = :"4g"
121
+ MEMORY_LIMIT_16G = :"16g"
122
+ MEMORY_LIMIT_64G = :"64g"
123
+
124
+ # @!method self.values
125
+ # @return [Array<Symbol>]
126
+ end
96
127
  end
97
128
  end
98
129
  end
@@ -42,7 +42,19 @@ module OpenAI
42
42
  # @return [OpenAI::Models::ContainerListResponse::ExpiresAfter, nil]
43
43
  optional :expires_after, -> { OpenAI::Models::ContainerListResponse::ExpiresAfter }
44
44
 
45
- # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil)
45
+ # @!attribute last_active_at
46
+ # Unix timestamp (in seconds) when the container was last active.
47
+ #
48
+ # @return [Integer, nil]
49
+ optional :last_active_at, Integer
50
+
51
+ # @!attribute memory_limit
52
+ # The memory limit configured for the container.
53
+ #
54
+ # @return [Symbol, OpenAI::Models::ContainerListResponse::MemoryLimit, nil]
55
+ optional :memory_limit, enum: -> { OpenAI::Models::ContainerListResponse::MemoryLimit }
56
+
57
+ # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil, last_active_at: nil, memory_limit: nil)
46
58
  # Some parameter documentations has been truncated, see
47
59
  # {OpenAI::Models::ContainerListResponse} for more details.
48
60
  #
@@ -57,6 +69,10 @@ module OpenAI
57
69
  # @param status [String] Status of the container (e.g., active, deleted).
58
70
  #
59
71
  # @param expires_after [OpenAI::Models::ContainerListResponse::ExpiresAfter] The container will expire after this time period.
72
+ #
73
+ # @param last_active_at [Integer] Unix timestamp (in seconds) when the container was last active.
74
+ #
75
+ # @param memory_limit [Symbol, OpenAI::Models::ContainerListResponse::MemoryLimit] The memory limit configured for the container.
60
76
 
61
77
  # @see OpenAI::Models::ContainerListResponse#expires_after
62
78
  class ExpiresAfter < OpenAI::Internal::Type::BaseModel
@@ -93,6 +109,21 @@ module OpenAI
93
109
  # @return [Array<Symbol>]
94
110
  end
95
111
  end
112
+
113
+ # The memory limit configured for the container.
114
+ #
115
+ # @see OpenAI::Models::ContainerListResponse#memory_limit
116
+ module MemoryLimit
117
+ extend OpenAI::Internal::Type::Enum
118
+
119
+ MEMORY_LIMIT_1G = :"1g"
120
+ MEMORY_LIMIT_4G = :"4g"
121
+ MEMORY_LIMIT_16G = :"16g"
122
+ MEMORY_LIMIT_64G = :"64g"
123
+
124
+ # @!method self.values
125
+ # @return [Array<Symbol>]
126
+ end
96
127
  end
97
128
  end
98
129
  end
@@ -42,7 +42,19 @@ module OpenAI
42
42
  # @return [OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter, nil]
43
43
  optional :expires_after, -> { OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter }
44
44
 
45
- # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil)
45
+ # @!attribute last_active_at
46
+ # Unix timestamp (in seconds) when the container was last active.
47
+ #
48
+ # @return [Integer, nil]
49
+ optional :last_active_at, Integer
50
+
51
+ # @!attribute memory_limit
52
+ # The memory limit configured for the container.
53
+ #
54
+ # @return [Symbol, OpenAI::Models::ContainerRetrieveResponse::MemoryLimit, nil]
55
+ optional :memory_limit, enum: -> { OpenAI::Models::ContainerRetrieveResponse::MemoryLimit }
56
+
57
+ # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil, last_active_at: nil, memory_limit: nil)
46
58
  # Some parameter documentations has been truncated, see
47
59
  # {OpenAI::Models::ContainerRetrieveResponse} for more details.
48
60
  #
@@ -57,6 +69,10 @@ module OpenAI
57
69
  # @param status [String] Status of the container (e.g., active, deleted).
58
70
  #
59
71
  # @param expires_after [OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter] The container will expire after this time period.
72
+ #
73
+ # @param last_active_at [Integer] Unix timestamp (in seconds) when the container was last active.
74
+ #
75
+ # @param memory_limit [Symbol, OpenAI::Models::ContainerRetrieveResponse::MemoryLimit] The memory limit configured for the container.
60
76
 
61
77
  # @see OpenAI::Models::ContainerRetrieveResponse#expires_after
62
78
  class ExpiresAfter < OpenAI::Internal::Type::BaseModel
@@ -93,6 +109,21 @@ module OpenAI
93
109
  # @return [Array<Symbol>]
94
110
  end
95
111
  end
112
+
113
+ # The memory limit configured for the container.
114
+ #
115
+ # @see OpenAI::Models::ContainerRetrieveResponse#memory_limit
116
+ module MemoryLimit
117
+ extend OpenAI::Internal::Type::Enum
118
+
119
+ MEMORY_LIMIT_1G = :"1g"
120
+ MEMORY_LIMIT_4G = :"4g"
121
+ MEMORY_LIMIT_16G = :"16g"
122
+ MEMORY_LIMIT_64G = :"64g"
123
+
124
+ # @!method self.values
125
+ # @return [Array<Symbol>]
126
+ end
96
127
  end
97
128
  end
98
129
  end
@@ -12,7 +12,7 @@ module OpenAI
12
12
  # Initial items to include in the conversation context. You may add up to 20 items
13
13
  # at a time.
14
14
  #
15
- # @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
15
+ # @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
16
16
  optional :items,
17
17
  -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] },
18
18
  nil?: true
@@ -32,7 +32,7 @@ module OpenAI
32
32
  # Some parameter documentations has been truncated, see
33
33
  # {OpenAI::Models::Conversations::ConversationCreateParams} for more details.
34
34
  #
35
- # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context. You may add up to 20 items
35
+ # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context. You may add up to 20 items
36
36
  #
37
37
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
38
38
  #
@@ -11,7 +11,7 @@ module OpenAI
11
11
  # @!attribute items
12
12
  # The items to add to the conversation. You may add up to 20 items at a time.
13
13
  #
14
- # @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>]
14
+ # @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>]
15
15
  required :items, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] }
16
16
 
17
17
  # @!attribute include
@@ -26,7 +26,7 @@ module OpenAI
26
26
  # Some parameter documentations has been truncated, see
27
27
  # {OpenAI::Models::Conversations::ItemCreateParams} for more details.
28
28
  #
29
- # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] The items to add to the conversation. You may add up to 20 items at a time.
29
+ # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] The items to add to the conversation. You may add up to 20 items at a time.
30
30
  #
31
31
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
32
32
  #
@@ -462,9 +462,9 @@ module OpenAI
462
462
  # @!attribute reasoning_effort
463
463
  # Constrains effort on reasoning for
464
464
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
465
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
466
- # reasoning effort can result in faster responses and fewer tokens used on
467
- # reasoning in a response.
465
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
466
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
467
+ # on reasoning in a response.
468
468
  #
469
469
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
470
470
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -472,6 +472,7 @@ module OpenAI
472
472
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
473
473
  # support `none`.
474
474
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
475
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
475
476
  #
476
477
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
477
478
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -316,9 +316,9 @@ module OpenAI
316
316
  # @!attribute reasoning_effort
317
317
  # Constrains effort on reasoning for
318
318
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
319
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
320
- # reasoning effort can result in faster responses and fewer tokens used on
321
- # reasoning in a response.
319
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
320
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
321
+ # on reasoning in a response.
322
322
  #
323
323
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
324
324
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -326,6 +326,7 @@ module OpenAI
326
326
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
327
  # support `none`.
328
328
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
329
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
329
330
  #
330
331
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
331
332
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -664,9 +665,9 @@ module OpenAI
664
665
  # @!attribute reasoning_effort
665
666
  # Constrains effort on reasoning for
666
667
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
667
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
668
- # reasoning effort can result in faster responses and fewer tokens used on
669
- # reasoning in a response.
668
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
669
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
670
+ # on reasoning in a response.
670
671
  #
671
672
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
672
673
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -674,6 +675,7 @@ module OpenAI
674
675
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
675
676
  # support `none`.
676
677
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
678
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
677
679
  #
678
680
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
679
681
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -228,9 +228,9 @@ module OpenAI
228
228
  # @!attribute reasoning_effort
229
229
  # Constrains effort on reasoning for
230
230
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
231
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
232
- # reasoning effort can result in faster responses and fewer tokens used on
233
- # reasoning in a response.
231
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
232
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
233
+ # on reasoning in a response.
234
234
  #
235
235
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
236
236
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -238,6 +238,7 @@ module OpenAI
238
238
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
239
239
  # support `none`.
240
240
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
241
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
241
242
  #
242
243
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
243
244
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -592,9 +593,9 @@ module OpenAI
592
593
  # @!attribute reasoning_effort
593
594
  # Constrains effort on reasoning for
594
595
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
595
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
596
- # reasoning effort can result in faster responses and fewer tokens used on
597
- # reasoning in a response.
596
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
597
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
598
+ # on reasoning in a response.
598
599
  #
599
600
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
600
601
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -602,6 +603,7 @@ module OpenAI
602
603
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
603
604
  # support `none`.
604
605
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
606
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
605
607
  #
606
608
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
607
609
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -316,9 +316,9 @@ module OpenAI
316
316
  # @!attribute reasoning_effort
317
317
  # Constrains effort on reasoning for
318
318
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
319
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
320
- # reasoning effort can result in faster responses and fewer tokens used on
321
- # reasoning in a response.
319
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
320
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
321
+ # on reasoning in a response.
322
322
  #
323
323
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
324
324
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -326,6 +326,7 @@ module OpenAI
326
326
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
327
  # support `none`.
328
328
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
329
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
329
330
  #
330
331
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
331
332
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -664,9 +665,9 @@ module OpenAI
664
665
  # @!attribute reasoning_effort
665
666
  # Constrains effort on reasoning for
666
667
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
667
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
668
- # reasoning effort can result in faster responses and fewer tokens used on
669
- # reasoning in a response.
668
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
669
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
670
+ # on reasoning in a response.
670
671
  #
671
672
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
672
673
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -674,6 +675,7 @@ module OpenAI
674
675
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
675
676
  # support `none`.
676
677
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
678
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
677
679
  #
678
680
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
679
681
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -316,9 +316,9 @@ module OpenAI
316
316
  # @!attribute reasoning_effort
317
317
  # Constrains effort on reasoning for
318
318
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
319
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
320
- # reasoning effort can result in faster responses and fewer tokens used on
321
- # reasoning in a response.
319
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
320
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
321
+ # on reasoning in a response.
322
322
  #
323
323
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
324
324
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -326,6 +326,7 @@ module OpenAI
326
326
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
327
  # support `none`.
328
328
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
329
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
329
330
  #
330
331
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
331
332
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -664,9 +665,9 @@ module OpenAI
664
665
  # @!attribute reasoning_effort
665
666
  # Constrains effort on reasoning for
666
667
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
667
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
668
- # reasoning effort can result in faster responses and fewer tokens used on
669
- # reasoning in a response.
668
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
669
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
670
+ # on reasoning in a response.
670
671
  #
671
672
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
672
673
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -674,6 +675,7 @@ module OpenAI
674
675
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
675
676
  # support `none`.
676
677
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
678
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
677
679
  #
678
680
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
679
681
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true