openai 0.36.0 → 0.37.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +22 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/all_models.rb +1 -0
  5. data/lib/openai/models/beta/assistant_create_params.rb +4 -3
  6. data/lib/openai/models/beta/assistant_update_params.rb +4 -3
  7. data/lib/openai/models/beta/threads/run_create_params.rb +4 -3
  8. data/lib/openai/models/chat/completion_create_params.rb +4 -3
  9. data/lib/openai/models/container_create_params.rb +22 -1
  10. data/lib/openai/models/container_create_response.rb +32 -1
  11. data/lib/openai/models/container_list_response.rb +32 -1
  12. data/lib/openai/models/container_retrieve_response.rb +32 -1
  13. data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
  14. data/lib/openai/models/conversations/item_create_params.rb +2 -2
  15. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +4 -3
  16. data/lib/openai/models/evals/run_cancel_response.rb +8 -6
  17. data/lib/openai/models/evals/run_create_params.rb +8 -6
  18. data/lib/openai/models/evals/run_create_response.rb +8 -6
  19. data/lib/openai/models/evals/run_list_response.rb +8 -6
  20. data/lib/openai/models/evals/run_retrieve_response.rb +8 -6
  21. data/lib/openai/models/graders/score_model_grader.rb +4 -3
  22. data/lib/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rb +43 -0
  23. data/lib/openai/models/realtime/output_audio_buffer_clear_event.rb +4 -4
  24. data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +13 -5
  25. data/lib/openai/models/realtime/realtime_client_event.rb +1 -1
  26. data/lib/openai/models/realtime/realtime_server_event.rb +16 -9
  27. data/lib/openai/models/realtime/realtime_session.rb +13 -5
  28. data/lib/openai/models/realtime/realtime_session_create_request.rb +14 -9
  29. data/lib/openai/models/realtime/realtime_session_create_response.rb +27 -14
  30. data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +13 -5
  31. data/lib/openai/models/realtime/realtime_truncation.rb +14 -9
  32. data/lib/openai/models/reasoning.rb +4 -3
  33. data/lib/openai/models/reasoning_effort.rb +5 -3
  34. data/lib/openai/models/responses/compacted_response.rb +56 -0
  35. data/lib/openai/models/responses/input_token_count_params.rb +4 -4
  36. data/lib/openai/models/responses/response.rb +6 -6
  37. data/lib/openai/models/responses/response_apply_patch_tool_call.rb +23 -23
  38. data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +9 -9
  39. data/lib/openai/models/responses/response_compact_params.rb +344 -0
  40. data/lib/openai/models/responses/response_compaction_item.rb +43 -0
  41. data/lib/openai/models/responses/response_compaction_item_param.rb +36 -0
  42. data/lib/openai/models/responses/response_create_params.rb +4 -4
  43. data/lib/openai/models/responses/response_function_shell_call_output_content.rb +10 -10
  44. data/lib/openai/models/responses/response_function_shell_tool_call.rb +5 -5
  45. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +2 -2
  46. data/lib/openai/models/responses/response_input_item.rb +22 -19
  47. data/lib/openai/models/responses/response_output_item.rb +4 -1
  48. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  49. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  50. data/lib/openai/models/responses/tool.rb +4 -2
  51. data/lib/openai/models/responses/tool_choice_shell.rb +1 -1
  52. data/lib/openai/models/responses_model.rb +1 -0
  53. data/lib/openai/models/video_create_params.rb +11 -6
  54. data/lib/openai/resources/containers.rb +3 -1
  55. data/lib/openai/resources/conversations/items.rb +1 -1
  56. data/lib/openai/resources/conversations.rb +1 -1
  57. data/lib/openai/resources/responses/input_tokens.rb +1 -1
  58. data/lib/openai/resources/responses.rb +33 -2
  59. data/lib/openai/resources/videos.rb +6 -3
  60. data/lib/openai/resources/webhooks.rb +0 -3
  61. data/lib/openai/version.rb +1 -1
  62. data/lib/openai.rb +6 -0
  63. data/manifest.yaml +1 -0
  64. data/rbi/openai/models/all_models.rbi +5 -0
  65. data/rbi/openai/models/beta/assistant_create_params.rbi +8 -6
  66. data/rbi/openai/models/beta/assistant_update_params.rbi +8 -6
  67. data/rbi/openai/models/beta/threads/run_create_params.rbi +8 -6
  68. data/rbi/openai/models/chat/completion_create_params.rbi +8 -6
  69. data/rbi/openai/models/container_create_params.rbi +51 -0
  70. data/rbi/openai/models/container_create_response.rbi +81 -3
  71. data/rbi/openai/models/container_list_response.rbi +80 -3
  72. data/rbi/openai/models/container_retrieve_response.rbi +83 -3
  73. data/rbi/openai/models/conversations/conversation_create_params.rbi +3 -0
  74. data/rbi/openai/models/conversations/item_create_params.rbi +3 -0
  75. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -6
  76. data/rbi/openai/models/evals/run_cancel_response.rbi +16 -12
  77. data/rbi/openai/models/evals/run_create_params.rbi +16 -12
  78. data/rbi/openai/models/evals/run_create_response.rbi +16 -12
  79. data/rbi/openai/models/evals/run_list_response.rbi +16 -12
  80. data/rbi/openai/models/evals/run_retrieve_response.rbi +16 -12
  81. data/rbi/openai/models/graders/score_model_grader.rbi +8 -6
  82. data/rbi/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbi +56 -0
  83. data/rbi/openai/models/realtime/output_audio_buffer_clear_event.rbi +4 -4
  84. data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +24 -8
  85. data/rbi/openai/models/realtime/realtime_server_event.rbi +6 -5
  86. data/rbi/openai/models/realtime/realtime_session.rbi +24 -8
  87. data/rbi/openai/models/realtime/realtime_session_create_request.rbi +28 -18
  88. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +52 -26
  89. data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +24 -8
  90. data/rbi/openai/models/realtime/realtime_truncation.rbi +14 -9
  91. data/rbi/openai/models/reasoning.rbi +8 -6
  92. data/rbi/openai/models/reasoning_effort.rbi +5 -3
  93. data/rbi/openai/models/responses/compacted_response.rbi +105 -0
  94. data/rbi/openai/models/responses/response.rbi +1 -0
  95. data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +53 -67
  96. data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +9 -9
  97. data/rbi/openai/models/responses/response_compact_params.rbi +593 -0
  98. data/rbi/openai/models/responses/response_compaction_item.rbi +67 -0
  99. data/rbi/openai/models/responses/response_compaction_item_param.rbi +54 -0
  100. data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +9 -9
  101. data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +6 -6
  102. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +1 -1
  103. data/rbi/openai/models/responses/response_input_item.rbi +21 -23
  104. data/rbi/openai/models/responses/response_output_item.rbi +1 -0
  105. data/rbi/openai/models/responses/response_output_item_added_event.rbi +1 -0
  106. data/rbi/openai/models/responses/response_output_item_done_event.rbi +1 -0
  107. data/rbi/openai/models/responses/tool.rbi +6 -3
  108. data/rbi/openai/models/responses/tool_choice_shell.rbi +1 -1
  109. data/rbi/openai/models/responses_model.rbi +5 -0
  110. data/rbi/openai/models/video_create_params.rbi +10 -6
  111. data/rbi/openai/resources/beta/assistants.rbi +8 -6
  112. data/rbi/openai/resources/beta/threads/runs.rbi +8 -6
  113. data/rbi/openai/resources/chat/completions.rbi +8 -6
  114. data/rbi/openai/resources/containers.rbi +3 -0
  115. data/rbi/openai/resources/conversations/items.rbi +1 -0
  116. data/rbi/openai/resources/conversations.rbi +1 -0
  117. data/rbi/openai/resources/realtime/calls.rbi +14 -9
  118. data/rbi/openai/resources/responses.rbi +42 -0
  119. data/rbi/openai/resources/videos.rbi +5 -3
  120. data/sig/openai/models/all_models.rbs +2 -0
  121. data/sig/openai/models/container_create_params.rbs +23 -1
  122. data/sig/openai/models/container_create_response.rbs +32 -3
  123. data/sig/openai/models/container_list_response.rbs +32 -3
  124. data/sig/openai/models/container_retrieve_response.rbs +32 -3
  125. data/sig/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbs +32 -0
  126. data/sig/openai/models/realtime/realtime_server_event.rbs +1 -0
  127. data/sig/openai/models/reasoning_effort.rbs +2 -1
  128. data/sig/openai/models/responses/compacted_response.rbs +42 -0
  129. data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +18 -22
  130. data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +7 -7
  131. data/sig/openai/models/responses/response_compact_params.rbs +226 -0
  132. data/sig/openai/models/responses/response_compaction_item.rbs +39 -0
  133. data/sig/openai/models/responses/response_compaction_item_param.rbs +28 -0
  134. data/sig/openai/models/responses/response_input_item.rbs +5 -6
  135. data/sig/openai/models/responses/response_output_item.rbs +1 -0
  136. data/sig/openai/models/responses_model.rbs +2 -0
  137. data/sig/openai/resources/containers.rbs +1 -0
  138. data/sig/openai/resources/responses.rbs +8 -0
  139. metadata +31 -2
@@ -34,7 +34,7 @@ module OpenAI
34
34
  #
35
35
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently suppo
36
36
  #
37
- # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
37
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
38
38
  #
39
39
  # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
40
40
  #
@@ -135,7 +135,7 @@ module OpenAI
135
135
  #
136
136
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently suppo
137
137
  #
138
- # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
138
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
139
139
  #
140
140
  # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
141
141
  #
@@ -459,6 +459,37 @@ module OpenAI
459
459
  )
460
460
  end
461
461
 
462
+ # Some parameter documentation has been truncated, see
463
+ # {OpenAI::Models::Responses::ResponseCompactParams} for more details.
464
+ #
465
+ # Compact conversation
466
+ #
467
+ # @overload compact(input: nil, instructions: nil, model: nil, previous_response_id: nil, request_options: {})
468
+ #
469
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Text, image, or file inputs to the model, used to generate a response
470
+ #
471
+ # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
472
+ #
473
+ # @param model [Symbol, String, OpenAI::Models::Responses::ResponseCompactParams::Model, nil] Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wi
474
+ #
475
+ # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to create multi-tu
476
+ #
477
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
478
+ #
479
+ # @return [OpenAI::Models::Responses::CompactedResponse]
480
+ #
481
+ # @see OpenAI::Models::Responses::ResponseCompactParams
482
+ def compact(params = {})
483
+ parsed, options = OpenAI::Responses::ResponseCompactParams.dump_request(params)
484
+ @client.request(
485
+ method: :post,
486
+ path: "responses/compact",
487
+ body: parsed,
488
+ model: OpenAI::Responses::CompactedResponse,
489
+ options: options
490
+ )
491
+ end
492
+
462
493
  # @api private
463
494
  #
464
495
  # @param client [OpenAI::Client]
@@ -3,6 +3,9 @@
3
3
  module OpenAI
4
4
  module Resources
5
5
  class Videos
6
+ # Some parameter documentation has been truncated, see
7
+ # {OpenAI::Models::VideoCreateParams} for more details.
8
+ #
6
9
  # Create a video
7
10
  #
8
11
  # @overload create(prompt:, input_reference: nil, model: nil, seconds: nil, size: nil, request_options: {})
@@ -11,11 +14,11 @@ module OpenAI
11
14
  #
12
15
  # @param input_reference [Pathname, StringIO, IO, String, OpenAI::FilePart] Optional image reference that guides generation.
13
16
  #
14
- # @param model [Symbol, OpenAI::Models::VideoModel] The video generation model to use. Defaults to `sora-2`.
17
+ # @param model [Symbol, OpenAI::Models::VideoModel] The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
15
18
  #
16
- # @param seconds [Symbol, OpenAI::Models::VideoSeconds] Clip duration in seconds. Defaults to 4 seconds.
19
+ # @param seconds [Symbol, OpenAI::Models::VideoSeconds] Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
17
20
  #
18
- # @param size [Symbol, OpenAI::Models::VideoSize] Output resolution formatted as width x height. Defaults to 720x1280.
21
+ # @param size [Symbol, OpenAI::Models::VideoSize] Output resolution formatted as width x height (allowed values: 720x1280, 1280x72
19
22
  #
20
23
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
21
24
  #
@@ -1,8 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "openssl"
4
- require "base64"
5
-
6
3
  module OpenAI
7
4
  module Resources
8
5
  class Webhooks
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.36.0"
4
+ VERSION = "0.37.0"
5
5
  end
data/lib/openai.rb CHANGED
@@ -2,6 +2,7 @@
2
2
 
3
3
  # Standard libraries.
4
4
  require "English"
5
+ require "base64"
5
6
  require "cgi"
6
7
  require "date"
7
8
  require "erb"
@@ -441,6 +442,7 @@ require_relative "openai/models/realtime/input_audio_buffer_cleared_event"
441
442
  require_relative "openai/models/realtime/input_audio_buffer_clear_event"
442
443
  require_relative "openai/models/realtime/input_audio_buffer_commit_event"
443
444
  require_relative "openai/models/realtime/input_audio_buffer_committed_event"
445
+ require_relative "openai/models/realtime/input_audio_buffer_dtmf_event_received_event"
444
446
  require_relative "openai/models/realtime/input_audio_buffer_speech_started_event"
445
447
  require_relative "openai/models/realtime/input_audio_buffer_speech_stopped_event"
446
448
  require_relative "openai/models/realtime/input_audio_buffer_timeout_triggered"
@@ -529,6 +531,7 @@ require_relative "openai/models/response_format_text"
529
531
  require_relative "openai/models/response_format_text_grammar"
530
532
  require_relative "openai/models/response_format_text_python"
531
533
  require_relative "openai/models/responses/apply_patch_tool"
534
+ require_relative "openai/models/responses/compacted_response"
532
535
  require_relative "openai/models/responses/computer_tool"
533
536
  require_relative "openai/models/responses/custom_tool"
534
537
  require_relative "openai/models/responses/easy_input_message"
@@ -552,6 +555,9 @@ require_relative "openai/models/responses/response_code_interpreter_call_complet
552
555
  require_relative "openai/models/responses/response_code_interpreter_call_in_progress_event"
553
556
  require_relative "openai/models/responses/response_code_interpreter_call_interpreting_event"
554
557
  require_relative "openai/models/responses/response_code_interpreter_tool_call"
558
+ require_relative "openai/models/responses/response_compaction_item"
559
+ require_relative "openai/models/responses/response_compaction_item_param"
560
+ require_relative "openai/models/responses/response_compact_params"
555
561
  require_relative "openai/models/responses/response_completed_event"
556
562
  require_relative "openai/models/responses/response_computer_tool_call"
557
563
  require_relative "openai/models/responses/response_computer_tool_call_output_item"
data/manifest.yaml CHANGED
@@ -1,5 +1,6 @@
1
1
  dependencies:
2
2
  - English
3
+ - base64
3
4
  - cgi
4
5
  - date
5
6
  - erb
@@ -80,6 +80,11 @@ module OpenAI
80
80
  :"gpt-5-pro-2025-10-06",
81
81
  OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
82
82
  )
83
+ GPT_5_1_CODEX_MAX =
84
+ T.let(
85
+ :"gpt-5.1-codex-max",
86
+ OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
87
+ )
83
88
 
84
89
  sig do
85
90
  override.returns(
@@ -47,9 +47,9 @@ module OpenAI
47
47
 
48
48
  # Constrains effort on reasoning for
49
49
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
50
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
51
- # reasoning effort can result in faster responses and fewer tokens used on
52
- # reasoning in a response.
50
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
51
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
52
+ # on reasoning in a response.
53
53
  #
54
54
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
55
55
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -57,6 +57,7 @@ module OpenAI
57
57
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
58
58
  # support `none`.
59
59
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
60
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
60
61
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
61
62
  attr_accessor :reasoning_effort
62
63
 
@@ -216,9 +217,9 @@ module OpenAI
216
217
  name: nil,
217
218
  # Constrains effort on reasoning for
218
219
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
219
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
220
- # reasoning effort can result in faster responses and fewer tokens used on
221
- # reasoning in a response.
220
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
221
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
222
+ # on reasoning in a response.
222
223
  #
223
224
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
224
225
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -226,6 +227,7 @@ module OpenAI
226
227
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
227
228
  # support `none`.
228
229
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
230
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
229
231
  reasoning_effort: nil,
230
232
  # Specifies the format that the model must output. Compatible with
231
233
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -67,9 +67,9 @@ module OpenAI
67
67
 
68
68
  # Constrains effort on reasoning for
69
69
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
70
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
71
- # reasoning effort can result in faster responses and fewer tokens used on
72
- # reasoning in a response.
70
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
71
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
72
+ # on reasoning in a response.
73
73
  #
74
74
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
75
75
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -77,6 +77,7 @@ module OpenAI
77
77
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
78
78
  # support `none`.
79
79
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
80
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
80
81
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
81
82
  attr_accessor :reasoning_effort
82
83
 
@@ -240,9 +241,9 @@ module OpenAI
240
241
  name: nil,
241
242
  # Constrains effort on reasoning for
242
243
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
243
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
244
- # reasoning effort can result in faster responses and fewer tokens used on
245
- # reasoning in a response.
244
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
245
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
246
+ # on reasoning in a response.
246
247
  #
247
248
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
248
249
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -250,6 +251,7 @@ module OpenAI
250
251
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
251
252
  # support `none`.
252
253
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
254
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
253
255
  reasoning_effort: nil,
254
256
  # Specifies the format that the model must output. Compatible with
255
257
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -113,9 +113,9 @@ module OpenAI
113
113
 
114
114
  # Constrains effort on reasoning for
115
115
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
116
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
117
- # reasoning effort can result in faster responses and fewer tokens used on
118
- # reasoning in a response.
116
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
117
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
118
+ # on reasoning in a response.
119
119
  #
120
120
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
121
121
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -123,6 +123,7 @@ module OpenAI
123
123
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
124
124
  # support `none`.
125
125
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
126
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
126
127
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
127
128
  attr_accessor :reasoning_effort
128
129
 
@@ -338,9 +339,9 @@ module OpenAI
338
339
  parallel_tool_calls: nil,
339
340
  # Constrains effort on reasoning for
340
341
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
341
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
342
- # reasoning effort can result in faster responses and fewer tokens used on
343
- # reasoning in a response.
342
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
343
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
344
+ # on reasoning in a response.
344
345
  #
345
346
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
346
347
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -348,6 +349,7 @@ module OpenAI
348
349
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
349
350
  # support `none`.
350
351
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
352
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
351
353
  reasoning_effort: nil,
352
354
  # Specifies the format that the model must output. Compatible with
353
355
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -240,9 +240,9 @@ module OpenAI
240
240
 
241
241
  # Constrains effort on reasoning for
242
242
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
243
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
244
- # reasoning effort can result in faster responses and fewer tokens used on
245
- # reasoning in a response.
243
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
244
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
245
+ # on reasoning in a response.
246
246
  #
247
247
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
248
248
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -250,6 +250,7 @@ module OpenAI
250
250
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
251
251
  # support `none`.
252
252
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
253
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
253
254
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
254
255
  attr_accessor :reasoning_effort
255
256
 
@@ -693,9 +694,9 @@ module OpenAI
693
694
  prompt_cache_retention: nil,
694
695
  # Constrains effort on reasoning for
695
696
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
696
- # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
697
- # reasoning effort can result in faster responses and fewer tokens used on
698
- # reasoning in a response.
697
+ # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
698
+ # Reducing reasoning effort can result in faster responses and fewer tokens used
699
+ # on reasoning in a response.
699
700
  #
700
701
  # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
701
702
  # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -703,6 +704,7 @@ module OpenAI
703
704
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
704
705
  # support `none`.
705
706
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
707
+ # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
706
708
  reasoning_effort: nil,
707
709
  # An object specifying the format that the model must output.
708
710
  #
@@ -33,11 +33,25 @@ module OpenAI
33
33
  sig { params(file_ids: T::Array[String]).void }
34
34
  attr_writer :file_ids
35
35
 
36
+ # Optional memory limit for the container. Defaults to "1g".
37
+ sig do
38
+ returns(T.nilable(OpenAI::ContainerCreateParams::MemoryLimit::OrSymbol))
39
+ end
40
+ attr_reader :memory_limit
41
+
42
+ sig do
43
+ params(
44
+ memory_limit: OpenAI::ContainerCreateParams::MemoryLimit::OrSymbol
45
+ ).void
46
+ end
47
+ attr_writer :memory_limit
48
+
36
49
  sig do
37
50
  params(
38
51
  name: String,
39
52
  expires_after: OpenAI::ContainerCreateParams::ExpiresAfter::OrHash,
40
53
  file_ids: T::Array[String],
54
+ memory_limit: OpenAI::ContainerCreateParams::MemoryLimit::OrSymbol,
41
55
  request_options: OpenAI::RequestOptions::OrHash
42
56
  ).returns(T.attached_class)
43
57
  end
@@ -48,6 +62,8 @@ module OpenAI
48
62
  expires_after: nil,
49
63
  # IDs of files to copy to the container.
50
64
  file_ids: nil,
65
+ # Optional memory limit for the container. Defaults to "1g".
66
+ memory_limit: nil,
51
67
  request_options: {}
52
68
  )
53
69
  end
@@ -58,6 +74,7 @@ module OpenAI
58
74
  name: String,
59
75
  expires_after: OpenAI::ContainerCreateParams::ExpiresAfter,
60
76
  file_ids: T::Array[String],
77
+ memory_limit: OpenAI::ContainerCreateParams::MemoryLimit::OrSymbol,
61
78
  request_options: OpenAI::RequestOptions
62
79
  }
63
80
  )
@@ -140,6 +157,40 @@ module OpenAI
140
157
  end
141
158
  end
142
159
  end
160
+
161
+ # Optional memory limit for the container. Defaults to "1g".
162
+ module MemoryLimit
163
+ extend OpenAI::Internal::Type::Enum
164
+
165
+ TaggedSymbol =
166
+ T.type_alias do
167
+ T.all(Symbol, OpenAI::ContainerCreateParams::MemoryLimit)
168
+ end
169
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
170
+
171
+ MEMORY_LIMIT_1G =
172
+ T.let(:"1g", OpenAI::ContainerCreateParams::MemoryLimit::TaggedSymbol)
173
+ MEMORY_LIMIT_4G =
174
+ T.let(:"4g", OpenAI::ContainerCreateParams::MemoryLimit::TaggedSymbol)
175
+ MEMORY_LIMIT_16G =
176
+ T.let(
177
+ :"16g",
178
+ OpenAI::ContainerCreateParams::MemoryLimit::TaggedSymbol
179
+ )
180
+ MEMORY_LIMIT_64G =
181
+ T.let(
182
+ :"64g",
183
+ OpenAI::ContainerCreateParams::MemoryLimit::TaggedSymbol
184
+ )
185
+
186
+ sig do
187
+ override.returns(
188
+ T::Array[OpenAI::ContainerCreateParams::MemoryLimit::TaggedSymbol]
189
+ )
190
+ end
191
+ def self.values
192
+ end
193
+ end
143
194
  end
144
195
  end
145
196
  end
@@ -49,6 +49,31 @@ module OpenAI
49
49
  end
50
50
  attr_writer :expires_after
51
51
 
52
+ # Unix timestamp (in seconds) when the container was last active.
53
+ sig { returns(T.nilable(Integer)) }
54
+ attr_reader :last_active_at
55
+
56
+ sig { params(last_active_at: Integer).void }
57
+ attr_writer :last_active_at
58
+
59
+ # The memory limit configured for the container.
60
+ sig do
61
+ returns(
62
+ T.nilable(
63
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::TaggedSymbol
64
+ )
65
+ )
66
+ end
67
+ attr_reader :memory_limit
68
+
69
+ sig do
70
+ params(
71
+ memory_limit:
72
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::OrSymbol
73
+ ).void
74
+ end
75
+ attr_writer :memory_limit
76
+
52
77
  sig do
53
78
  params(
54
79
  id: String,
@@ -57,7 +82,10 @@ module OpenAI
57
82
  object: String,
58
83
  status: String,
59
84
  expires_after:
60
- OpenAI::Models::ContainerCreateResponse::ExpiresAfter::OrHash
85
+ OpenAI::Models::ContainerCreateResponse::ExpiresAfter::OrHash,
86
+ last_active_at: Integer,
87
+ memory_limit:
88
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::OrSymbol
61
89
  ).returns(T.attached_class)
62
90
  end
63
91
  def self.new(
@@ -74,7 +102,11 @@ module OpenAI
74
102
  # The container will expire after this time period. The anchor is the reference
75
103
  # point for the expiration. The minutes is the number of minutes after the anchor
76
104
  # before the container expires.
77
- expires_after: nil
105
+ expires_after: nil,
106
+ # Unix timestamp (in seconds) when the container was last active.
107
+ last_active_at: nil,
108
+ # The memory limit configured for the container.
109
+ memory_limit: nil
78
110
  )
79
111
  end
80
112
 
@@ -86,7 +118,11 @@ module OpenAI
86
118
  name: String,
87
119
  object: String,
88
120
  status: String,
89
- expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter
121
+ expires_after:
122
+ OpenAI::Models::ContainerCreateResponse::ExpiresAfter,
123
+ last_active_at: Integer,
124
+ memory_limit:
125
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::TaggedSymbol
90
126
  }
91
127
  )
92
128
  end
@@ -187,6 +223,48 @@ module OpenAI
187
223
  end
188
224
  end
189
225
  end
226
+
227
+ # The memory limit configured for the container.
228
+ module MemoryLimit
229
+ extend OpenAI::Internal::Type::Enum
230
+
231
+ TaggedSymbol =
232
+ T.type_alias do
233
+ T.all(Symbol, OpenAI::Models::ContainerCreateResponse::MemoryLimit)
234
+ end
235
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
236
+
237
+ MEMORY_LIMIT_1G =
238
+ T.let(
239
+ :"1g",
240
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::TaggedSymbol
241
+ )
242
+ MEMORY_LIMIT_4G =
243
+ T.let(
244
+ :"4g",
245
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::TaggedSymbol
246
+ )
247
+ MEMORY_LIMIT_16G =
248
+ T.let(
249
+ :"16g",
250
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::TaggedSymbol
251
+ )
252
+ MEMORY_LIMIT_64G =
253
+ T.let(
254
+ :"64g",
255
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::TaggedSymbol
256
+ )
257
+
258
+ sig do
259
+ override.returns(
260
+ T::Array[
261
+ OpenAI::Models::ContainerCreateResponse::MemoryLimit::TaggedSymbol
262
+ ]
263
+ )
264
+ end
265
+ def self.values
266
+ end
267
+ end
190
268
  end
191
269
  end
192
270
  end
@@ -47,6 +47,31 @@ module OpenAI
47
47
  end
48
48
  attr_writer :expires_after
49
49
 
50
+ # Unix timestamp (in seconds) when the container was last active.
51
+ sig { returns(T.nilable(Integer)) }
52
+ attr_reader :last_active_at
53
+
54
+ sig { params(last_active_at: Integer).void }
55
+ attr_writer :last_active_at
56
+
57
+ # The memory limit configured for the container.
58
+ sig do
59
+ returns(
60
+ T.nilable(
61
+ OpenAI::Models::ContainerListResponse::MemoryLimit::TaggedSymbol
62
+ )
63
+ )
64
+ end
65
+ attr_reader :memory_limit
66
+
67
+ sig do
68
+ params(
69
+ memory_limit:
70
+ OpenAI::Models::ContainerListResponse::MemoryLimit::OrSymbol
71
+ ).void
72
+ end
73
+ attr_writer :memory_limit
74
+
50
75
  sig do
51
76
  params(
52
77
  id: String,
@@ -55,7 +80,10 @@ module OpenAI
55
80
  object: String,
56
81
  status: String,
57
82
  expires_after:
58
- OpenAI::Models::ContainerListResponse::ExpiresAfter::OrHash
83
+ OpenAI::Models::ContainerListResponse::ExpiresAfter::OrHash,
84
+ last_active_at: Integer,
85
+ memory_limit:
86
+ OpenAI::Models::ContainerListResponse::MemoryLimit::OrSymbol
59
87
  ).returns(T.attached_class)
60
88
  end
61
89
  def self.new(
@@ -72,7 +100,11 @@ module OpenAI
72
100
  # The container will expire after this time period. The anchor is the reference
73
101
  # point for the expiration. The minutes is the number of minutes after the anchor
74
102
  # before the container expires.
75
- expires_after: nil
103
+ expires_after: nil,
104
+ # Unix timestamp (in seconds) when the container was last active.
105
+ last_active_at: nil,
106
+ # The memory limit configured for the container.
107
+ memory_limit: nil
76
108
  )
77
109
  end
78
110
 
@@ -84,7 +116,10 @@ module OpenAI
84
116
  name: String,
85
117
  object: String,
86
118
  status: String,
87
- expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter
119
+ expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter,
120
+ last_active_at: Integer,
121
+ memory_limit:
122
+ OpenAI::Models::ContainerListResponse::MemoryLimit::TaggedSymbol
88
123
  }
89
124
  )
90
125
  end
@@ -185,6 +220,48 @@ module OpenAI
185
220
  end
186
221
  end
187
222
  end
223
+
224
+ # The memory limit configured for the container.
225
+ module MemoryLimit
226
+ extend OpenAI::Internal::Type::Enum
227
+
228
+ TaggedSymbol =
229
+ T.type_alias do
230
+ T.all(Symbol, OpenAI::Models::ContainerListResponse::MemoryLimit)
231
+ end
232
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
233
+
234
+ MEMORY_LIMIT_1G =
235
+ T.let(
236
+ :"1g",
237
+ OpenAI::Models::ContainerListResponse::MemoryLimit::TaggedSymbol
238
+ )
239
+ MEMORY_LIMIT_4G =
240
+ T.let(
241
+ :"4g",
242
+ OpenAI::Models::ContainerListResponse::MemoryLimit::TaggedSymbol
243
+ )
244
+ MEMORY_LIMIT_16G =
245
+ T.let(
246
+ :"16g",
247
+ OpenAI::Models::ContainerListResponse::MemoryLimit::TaggedSymbol
248
+ )
249
+ MEMORY_LIMIT_64G =
250
+ T.let(
251
+ :"64g",
252
+ OpenAI::Models::ContainerListResponse::MemoryLimit::TaggedSymbol
253
+ )
254
+
255
+ sig do
256
+ override.returns(
257
+ T::Array[
258
+ OpenAI::Models::ContainerListResponse::MemoryLimit::TaggedSymbol
259
+ ]
260
+ )
261
+ end
262
+ def self.values
263
+ end
264
+ end
188
265
  end
189
266
  end
190
267
  end