openai 0.36.0 → 0.37.0
This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +22 -0
- data/README.md +1 -1
- data/lib/openai/models/all_models.rb +1 -0
- data/lib/openai/models/beta/assistant_create_params.rb +4 -3
- data/lib/openai/models/beta/assistant_update_params.rb +4 -3
- data/lib/openai/models/beta/threads/run_create_params.rb +4 -3
- data/lib/openai/models/chat/completion_create_params.rb +4 -3
- data/lib/openai/models/container_create_params.rb +22 -1
- data/lib/openai/models/container_create_response.rb +32 -1
- data/lib/openai/models/container_list_response.rb +32 -1
- data/lib/openai/models/container_retrieve_response.rb +32 -1
- data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
- data/lib/openai/models/conversations/item_create_params.rb +2 -2
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +4 -3
- data/lib/openai/models/evals/run_cancel_response.rb +8 -6
- data/lib/openai/models/evals/run_create_params.rb +8 -6
- data/lib/openai/models/evals/run_create_response.rb +8 -6
- data/lib/openai/models/evals/run_list_response.rb +8 -6
- data/lib/openai/models/evals/run_retrieve_response.rb +8 -6
- data/lib/openai/models/graders/score_model_grader.rb +4 -3
- data/lib/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rb +43 -0
- data/lib/openai/models/realtime/output_audio_buffer_clear_event.rb +4 -4
- data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +13 -5
- data/lib/openai/models/realtime/realtime_client_event.rb +1 -1
- data/lib/openai/models/realtime/realtime_server_event.rb +16 -9
- data/lib/openai/models/realtime/realtime_session.rb +13 -5
- data/lib/openai/models/realtime/realtime_session_create_request.rb +14 -9
- data/lib/openai/models/realtime/realtime_session_create_response.rb +27 -14
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +13 -5
- data/lib/openai/models/realtime/realtime_truncation.rb +14 -9
- data/lib/openai/models/reasoning.rb +4 -3
- data/lib/openai/models/reasoning_effort.rb +5 -3
- data/lib/openai/models/responses/compacted_response.rb +56 -0
- data/lib/openai/models/responses/input_token_count_params.rb +4 -4
- data/lib/openai/models/responses/response.rb +6 -6
- data/lib/openai/models/responses/response_apply_patch_tool_call.rb +23 -23
- data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +9 -9
- data/lib/openai/models/responses/response_compact_params.rb +344 -0
- data/lib/openai/models/responses/response_compaction_item.rb +43 -0
- data/lib/openai/models/responses/response_compaction_item_param.rb +36 -0
- data/lib/openai/models/responses/response_create_params.rb +4 -4
- data/lib/openai/models/responses/response_function_shell_call_output_content.rb +10 -10
- data/lib/openai/models/responses/response_function_shell_tool_call.rb +5 -5
- data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +2 -2
- data/lib/openai/models/responses/response_input_item.rb +22 -19
- data/lib/openai/models/responses/response_output_item.rb +4 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/tool.rb +4 -2
- data/lib/openai/models/responses/tool_choice_shell.rb +1 -1
- data/lib/openai/models/responses_model.rb +1 -0
- data/lib/openai/models/video_create_params.rb +11 -6
- data/lib/openai/resources/containers.rb +3 -1
- data/lib/openai/resources/conversations/items.rb +1 -1
- data/lib/openai/resources/conversations.rb +1 -1
- data/lib/openai/resources/responses/input_tokens.rb +1 -1
- data/lib/openai/resources/responses.rb +33 -2
- data/lib/openai/resources/videos.rb +6 -3
- data/lib/openai/resources/webhooks.rb +0 -3
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/manifest.yaml +1 -0
- data/rbi/openai/models/all_models.rbi +5 -0
- data/rbi/openai/models/beta/assistant_create_params.rbi +8 -6
- data/rbi/openai/models/beta/assistant_update_params.rbi +8 -6
- data/rbi/openai/models/beta/threads/run_create_params.rbi +8 -6
- data/rbi/openai/models/chat/completion_create_params.rbi +8 -6
- data/rbi/openai/models/container_create_params.rbi +51 -0
- data/rbi/openai/models/container_create_response.rbi +81 -3
- data/rbi/openai/models/container_list_response.rbi +80 -3
- data/rbi/openai/models/container_retrieve_response.rbi +83 -3
- data/rbi/openai/models/conversations/conversation_create_params.rbi +3 -0
- data/rbi/openai/models/conversations/item_create_params.rbi +3 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -6
- data/rbi/openai/models/evals/run_cancel_response.rbi +16 -12
- data/rbi/openai/models/evals/run_create_params.rbi +16 -12
- data/rbi/openai/models/evals/run_create_response.rbi +16 -12
- data/rbi/openai/models/evals/run_list_response.rbi +16 -12
- data/rbi/openai/models/evals/run_retrieve_response.rbi +16 -12
- data/rbi/openai/models/graders/score_model_grader.rbi +8 -6
- data/rbi/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbi +56 -0
- data/rbi/openai/models/realtime/output_audio_buffer_clear_event.rbi +4 -4
- data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_server_event.rbi +6 -5
- data/rbi/openai/models/realtime/realtime_session.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +28 -18
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +52 -26
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_truncation.rbi +14 -9
- data/rbi/openai/models/reasoning.rbi +8 -6
- data/rbi/openai/models/reasoning_effort.rbi +5 -3
- data/rbi/openai/models/responses/compacted_response.rbi +105 -0
- data/rbi/openai/models/responses/response.rbi +1 -0
- data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +53 -67
- data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +9 -9
- data/rbi/openai/models/responses/response_compact_params.rbi +593 -0
- data/rbi/openai/models/responses/response_compaction_item.rbi +67 -0
- data/rbi/openai/models/responses/response_compaction_item_param.rbi +54 -0
- data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +9 -9
- data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +6 -6
- data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +1 -1
- data/rbi/openai/models/responses/response_input_item.rbi +21 -23
- data/rbi/openai/models/responses/response_output_item.rbi +1 -0
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +1 -0
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +1 -0
- data/rbi/openai/models/responses/tool.rbi +6 -3
- data/rbi/openai/models/responses/tool_choice_shell.rbi +1 -1
- data/rbi/openai/models/responses_model.rbi +5 -0
- data/rbi/openai/models/video_create_params.rbi +10 -6
- data/rbi/openai/resources/beta/assistants.rbi +8 -6
- data/rbi/openai/resources/beta/threads/runs.rbi +8 -6
- data/rbi/openai/resources/chat/completions.rbi +8 -6
- data/rbi/openai/resources/containers.rbi +3 -0
- data/rbi/openai/resources/conversations/items.rbi +1 -0
- data/rbi/openai/resources/conversations.rbi +1 -0
- data/rbi/openai/resources/realtime/calls.rbi +14 -9
- data/rbi/openai/resources/responses.rbi +42 -0
- data/rbi/openai/resources/videos.rbi +5 -3
- data/sig/openai/models/all_models.rbs +2 -0
- data/sig/openai/models/container_create_params.rbs +23 -1
- data/sig/openai/models/container_create_response.rbs +32 -3
- data/sig/openai/models/container_list_response.rbs +32 -3
- data/sig/openai/models/container_retrieve_response.rbs +32 -3
- data/sig/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbs +32 -0
- data/sig/openai/models/realtime/realtime_server_event.rbs +1 -0
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/responses/compacted_response.rbs +42 -0
- data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +18 -22
- data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +7 -7
- data/sig/openai/models/responses/response_compact_params.rbs +226 -0
- data/sig/openai/models/responses/response_compaction_item.rbs +39 -0
- data/sig/openai/models/responses/response_compaction_item_param.rbs +28 -0
- data/sig/openai/models/responses/response_input_item.rbs +5 -6
- data/sig/openai/models/responses/response_output_item.rbs +1 -0
- data/sig/openai/models/responses_model.rbs +2 -0
- data/sig/openai/resources/containers.rbs +1 -0
- data/sig/openai/resources/responses.rbs +8 -0
- metadata +31 -2

data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi

@@ -12,7 +12,7 @@ module OpenAI
           )
         end
 
-        # The exit or timeout outcome associated with this
+        # The exit or timeout outcome associated with this shell call.
        sig do
          returns(
            T.any(
@@ -23,15 +23,15 @@ module OpenAI
        end
        attr_accessor :outcome
 
-        # Captured stderr output for
+        # Captured stderr output for the shell call.
        sig { returns(String) }
        attr_accessor :stderr
 
-        # Captured stdout output for
+        # Captured stdout output for the shell call.
        sig { returns(String) }
        attr_accessor :stdout
 
-        # Captured stdout and stderr for a portion of a
+        # Captured stdout and stderr for a portion of a shell tool call output.
        sig do
          params(
            outcome:
@@ -44,11 +44,11 @@ module OpenAI
          ).returns(T.attached_class)
        end
        def self.new(
-          # The exit or timeout outcome associated with this
+          # The exit or timeout outcome associated with this shell call.
          outcome:,
-          # Captured stderr output for
+          # Captured stderr output for the shell call.
          stderr:,
-          # Captured stdout output for
+          # Captured stdout output for the shell call.
          stdout:
        )
        end
@@ -69,7 +69,7 @@ module OpenAI
        def to_hash
        end
 
-        # The exit or timeout outcome associated with this
+        # The exit or timeout outcome associated with this shell call.
        module Outcome
          extend OpenAI::Internal::Type::Union
 
@@ -94,7 +94,7 @@ module OpenAI
          sig { returns(Symbol) }
          attr_accessor :type
 
-          # Indicates that the
+          # Indicates that the shell call exceeded its configured time limit.
          sig { params(type: Symbol).returns(T.attached_class) }
          def self.new(
            # The outcome type. Always `timeout`.

data/rbi/openai/models/responses/response_function_shell_tool_call.rbi

@@ -12,8 +12,8 @@ module OpenAI
          )
        end
 
-        # The unique ID of the
-        #
+        # The unique ID of the shell tool call. Populated when this item is returned via
+        # API.
        sig { returns(String) }
        attr_accessor :id
 
@@ -31,7 +31,7 @@ module OpenAI
        end
        attr_writer :action
 
-        # The unique ID of the
+        # The unique ID of the shell tool call generated by the model.
        sig { returns(String) }
        attr_accessor :call_id
 
@@ -69,12 +69,12 @@ module OpenAI
          ).returns(T.attached_class)
        end
        def self.new(
-          # The unique ID of the
-          #
+          # The unique ID of the shell tool call. Populated when this item is returned via
+          # API.
          id:,
          # The shell commands and limits that describe how to run the tool call.
          action:,
-          # The unique ID of the
+          # The unique ID of the shell tool call generated by the model.
          call_id:,
          # The status of the shell call. One of `in_progress`, `completed`, or
          # `incomplete`.

data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi

@@ -188,7 +188,7 @@ module OpenAI
          sig { returns(Symbol) }
          attr_accessor :type
 
-          # Indicates that the
+          # Indicates that the shell call exceeded its configured time limit.
          sig { params(type: Symbol).returns(T.attached_class) }
          def self.new(
            # The outcome type. Always `timeout`.

data/rbi/openai/models/responses/response_input_item.rbi

@@ -24,6 +24,7 @@ module OpenAI
            OpenAI::Responses::ResponseFunctionToolCall,
            OpenAI::Responses::ResponseInputItem::FunctionCallOutput,
            OpenAI::Responses::ResponseReasoningItem,
+            OpenAI::Responses::ResponseCompactionItemParam,
            OpenAI::Responses::ResponseInputItem::ImageGenerationCall,
            OpenAI::Responses::ResponseCodeInterpreterToolCall,
            OpenAI::Responses::ResponseInputItem::LocalShellCall,
@@ -1102,16 +1103,16 @@ module OpenAI
          end
          attr_writer :action
 
-          # The unique ID of the
+          # The unique ID of the shell tool call generated by the model.
          sig { returns(String) }
          attr_accessor :call_id
 
-          # The type of the item. Always `
+          # The type of the item. Always `shell_call`.
          sig { returns(Symbol) }
          attr_accessor :type
 
-          # The unique ID of the
-          #
+          # The unique ID of the shell tool call. Populated when this item is returned via
+          # API.
          sig { returns(T.nilable(String)) }
          attr_accessor :id
 
@@ -1143,15 +1144,15 @@ module OpenAI
          def self.new(
            # The shell commands and limits that describe how to run the tool call.
            action:,
-            # The unique ID of the
+            # The unique ID of the shell tool call generated by the model.
            call_id:,
-            # The unique ID of the
-            #
+            # The unique ID of the shell tool call. Populated when this item is returned via
+            # API.
            id: nil,
            # The status of the shell call. One of `in_progress`, `completed`, or
            # `incomplete`.
            status: nil,
-            # The type of the item. Always `
+            # The type of the item. Always `shell_call`.
            type: :shell_call
          )
          end
@@ -1278,7 +1279,7 @@ module OpenAI
          )
        end
 
-          # The unique ID of the
+          # The unique ID of the shell tool call generated by the model.
          sig { returns(String) }
          attr_accessor :call_id
 
@@ -1293,12 +1294,12 @@ module OpenAI
          end
          attr_accessor :output
 
-          # The type of the item. Always `
+          # The type of the item. Always `shell_call_output`.
          sig { returns(Symbol) }
          attr_accessor :type
 
-          # The unique ID of the
-          #
+          # The unique ID of the shell tool call output. Populated when this item is
+          # returned via API.
          sig { returns(T.nilable(String)) }
          attr_accessor :id
 
@@ -1307,7 +1308,7 @@ module OpenAI
          sig { returns(T.nilable(Integer)) }
          attr_accessor :max_output_length
 
-          # The streamed output items emitted by a
+          # The streamed output items emitted by a shell tool call.
          sig do
            params(
              call_id: String,
@@ -1321,18 +1322,18 @@ module OpenAI
            ).returns(T.attached_class)
          end
          def self.new(
-            # The unique ID of the
+            # The unique ID of the shell tool call generated by the model.
            call_id:,
            # Captured chunks of stdout and stderr output, along with their associated
            # outcomes.
            output:,
-            # The unique ID of the
-            #
+            # The unique ID of the shell tool call output. Populated when this item is
+            # returned via API.
            id: nil,
            # The maximum number of UTF-8 characters captured for this shell call's combined
            # output.
            max_output_length: nil,
-            # The type of the item. Always `
+            # The type of the item. Always `shell_call_output`.
            type: :shell_call_output
          )
          end
@@ -1667,10 +1668,7 @@ module OpenAI
          # Optional human-readable log text from the apply patch tool (e.g., patch results
          # or errors).
          sig { returns(T.nilable(String)) }
-
-
-          sig { params(output: String).void }
-          attr_writer :output
+          attr_accessor :output
 
          # The streamed output emitted by an apply patch tool call.
          sig do
@@ -1679,7 +1677,7 @@ module OpenAI
              status:
                OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status::OrSymbol,
              id: T.nilable(String),
-              output: String,
+              output: T.nilable(String),
              type: Symbol
            ).returns(T.attached_class)
          end
@@ -1707,7 +1705,7 @@ module OpenAI
              OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::Status::OrSymbol,
              type: Symbol,
              id: T.nilable(String),
-              output: String
+              output: T.nilable(String)
            }
          )
        end

data/rbi/openai/models/responses/response_output_item.rbi

@@ -16,6 +16,7 @@ module OpenAI
            OpenAI::Responses::ResponseFunctionWebSearch,
            OpenAI::Responses::ResponseComputerToolCall,
            OpenAI::Responses::ResponseReasoningItem,
+            OpenAI::Responses::ResponseCompactionItem,
            OpenAI::Responses::ResponseOutputItem::ImageGenerationCall,
            OpenAI::Responses::ResponseCodeInterpreterToolCall,
            OpenAI::Responses::ResponseOutputItem::LocalShellCall,

data/rbi/openai/models/responses/response_output_item_added_event.rbi

@@ -39,6 +39,7 @@ module OpenAI
            OpenAI::Responses::ResponseFunctionWebSearch::OrHash,
            OpenAI::Responses::ResponseComputerToolCall::OrHash,
            OpenAI::Responses::ResponseReasoningItem::OrHash,
+            OpenAI::Responses::ResponseCompactionItem::OrHash,
            OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash,
            OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
            OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash,

data/rbi/openai/models/responses/response_output_item_done_event.rbi

@@ -39,6 +39,7 @@ module OpenAI
            OpenAI::Responses::ResponseFunctionWebSearch::OrHash,
            OpenAI::Responses::ResponseComputerToolCall::OrHash,
            OpenAI::Responses::ResponseReasoningItem::OrHash,
+            OpenAI::Responses::ResponseCompactionItem::OrHash,
            OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash,
            OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
            OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash,

data/rbi/openai/models/responses/tool.rbi

@@ -632,7 +632,8 @@ module OpenAI
        end
 
        # The code interpreter container. Can be a container ID or an object that
-        # specifies uploaded file IDs to make available to your code
+        # specifies uploaded file IDs to make available to your code, along with an
+        # optional `memory_limit` setting.
        sig do
          returns(
            T.any(
@@ -660,7 +661,8 @@ module OpenAI
        end
        def self.new(
          # The code interpreter container. Can be a container ID or an object that
-          # specifies uploaded file IDs to make available to your code
+          # specifies uploaded file IDs to make available to your code, along with an
+          # optional `memory_limit` setting.
          container:,
          # The type of the code interpreter tool. Always `code_interpreter`.
          type: :code_interpreter
@@ -683,7 +685,8 @@ module OpenAI
        end
 
        # The code interpreter container. Can be a container ID or an object that
-        # specifies uploaded file IDs to make available to your code
+        # specifies uploaded file IDs to make available to your code, along with an
+        # optional `memory_limit` setting.
        module Container
          extend OpenAI::Internal::Type::Union
 

data/rbi/openai/models/responses/tool_choice_shell.rbi

@@ -13,7 +13,7 @@ module OpenAI
        sig { returns(Symbol) }
        attr_accessor :type
 
-        # Forces the model to call the
+        # Forces the model to call the shell tool when a tool call is required.
        sig { params(type: Symbol).returns(T.attached_class) }
        def self.new(
          # The tool to call. Always `shell`.

data/rbi/openai/models/responses_model.rbi

@@ -88,6 +88,11 @@ module OpenAI
          :"gpt-5-pro-2025-10-06",
          OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
        )
+      GPT_5_1_CODEX_MAX =
+        T.let(
+          :"gpt-5.1-codex-max",
+          OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
+        )
 
      sig do
        override.returns(
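
With `gpt-5.1-codex-max` added to the responses-only enum, the model can be referenced either as a raw symbol or via the new constant. A minimal sketch (assumes the model is enabled for your account; the prompt is illustrative):

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Responses-only model: use the Responses API, not chat completions.
    response = client.responses.create(
      model: OpenAI::ResponsesModel::ResponsesOnlyModel::GPT_5_1_CODEX_MAX, # or :"gpt-5.1-codex-max"
      input: "Summarize the failing test output."
    )
    puts response.output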

data/rbi/openai/models/video_create_params.rbi

@@ -22,21 +22,23 @@ module OpenAI
      sig { params(input_reference: OpenAI::Internal::FileInput).void }
      attr_writer :input_reference
 
-      # The video generation model to use
+      # The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
+      # to `sora-2`.
      sig { returns(T.nilable(OpenAI::VideoModel::OrSymbol)) }
      attr_reader :model
 
      sig { params(model: OpenAI::VideoModel::OrSymbol).void }
      attr_writer :model
 
-      # Clip duration in seconds. Defaults to 4 seconds.
+      # Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
      sig { returns(T.nilable(OpenAI::VideoSeconds::OrSymbol)) }
      attr_reader :seconds
 
      sig { params(seconds: OpenAI::VideoSeconds::OrSymbol).void }
      attr_writer :seconds
 
-      # Output resolution formatted as width x height
+      # Output resolution formatted as width x height (allowed values: 720x1280,
+      # 1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.
      sig { returns(T.nilable(OpenAI::VideoSize::OrSymbol)) }
      attr_reader :size
 
@@ -58,11 +60,13 @@ module OpenAI
        prompt:,
        # Optional image reference that guides generation.
        input_reference: nil,
-        # The video generation model to use
+        # The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
+        # to `sora-2`.
        model: nil,
-        # Clip duration in seconds. Defaults to 4 seconds.
+        # Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
        seconds: nil,
-        # Output resolution formatted as width x height
+        # Output resolution formatted as width x height (allowed values: 720x1280,
+        # 1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.
        size: nil,
        request_options: {}
      )

data/rbi/openai/resources/beta/assistants.rbi

@@ -62,9 +62,9 @@ module OpenAI
        name: nil,
        # Constrains effort on reasoning for
        # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `none`, `minimal`, `low`, `medium`, and `
-        # reasoning effort can result in faster responses and fewer tokens used
-        # reasoning in a response.
+        # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+        # Reducing reasoning effort can result in faster responses and fewer tokens used
+        # on reasoning in a response.
        #
        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -72,6 +72,7 @@ module OpenAI
        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
        #   support `none`.
        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+        # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
        reasoning_effort: nil,
        # Specifies the format that the model must output. Compatible with
        # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -196,9 +197,9 @@ module OpenAI
        name: nil,
        # Constrains effort on reasoning for
        # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `none`, `minimal`, `low`, `medium`, and `
-        # reasoning effort can result in faster responses and fewer tokens used
-        # reasoning in a response.
+        # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+        # Reducing reasoning effort can result in faster responses and fewer tokens used
+        # on reasoning in a response.
        #
        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -206,6 +207,7 @@ module OpenAI
        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
        #   support `none`.
        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+        # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
        reasoning_effort: nil,
        # Specifies the format that the model must output. Compatible with
        # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

data/rbi/openai/resources/beta/threads/runs.rbi

@@ -125,9 +125,9 @@ module OpenAI
        parallel_tool_calls: nil,
        # Body param: Constrains effort on reasoning for
        # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `none`, `minimal`, `low`, `medium`, and `
-        # reasoning effort can result in faster responses and fewer tokens used
-        # reasoning in a response.
+        # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+        # Reducing reasoning effort can result in faster responses and fewer tokens used
+        # on reasoning in a response.
        #
        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -135,6 +135,7 @@ module OpenAI
        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
        #   support `none`.
        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+        # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
        reasoning_effort: nil,
        # Body param: Specifies the format that the model must output. Compatible with
        # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -311,9 +312,9 @@ module OpenAI
        parallel_tool_calls: nil,
        # Body param: Constrains effort on reasoning for
        # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `none`, `minimal`, `low`, `medium`, and `
-        # reasoning effort can result in faster responses and fewer tokens used
-        # reasoning in a response.
+        # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+        # Reducing reasoning effort can result in faster responses and fewer tokens used
+        # on reasoning in a response.
        #
        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -321,6 +322,7 @@ module OpenAI
        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
        #   support `none`.
        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+        # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
        reasoning_effort: nil,
        # Body param: Specifies the format that the model must output. Compatible with
        # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

data/rbi/openai/resources/chat/completions.rbi

@@ -228,9 +228,9 @@ module OpenAI
        prompt_cache_retention: nil,
        # Constrains effort on reasoning for
        # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `none`, `minimal`, `low`, `medium`, and `
-        # reasoning effort can result in faster responses and fewer tokens used
-        # reasoning in a response.
+        # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+        # Reducing reasoning effort can result in faster responses and fewer tokens used
+        # on reasoning in a response.
        #
        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -238,6 +238,7 @@ module OpenAI
        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
        #   support `none`.
        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+        # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
        reasoning_effort: nil,
        # An object specifying the format that the model must output.
        #
@@ -561,9 +562,9 @@ module OpenAI
        prompt_cache_retention: nil,
        # Constrains effort on reasoning for
        # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `none`, `minimal`, `low`, `medium`, and `
-        # reasoning effort can result in faster responses and fewer tokens used
-        # reasoning in a response.
+        # supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+        # Reducing reasoning effort can result in faster responses and fewer tokens used
+        # on reasoning in a response.
        #
        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -571,6 +572,7 @@ module OpenAI
        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
        #   support `none`.
        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+        # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
        reasoning_effort: nil,
        # An object specifying the format that the model must output.
        #
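
The new `xhigh` level plugs into the existing `reasoning_effort` keyword on all three resources above, but the added doc line restricts it to `gpt-5.1-codex-max`, which is a responses-only model. A hedged sketch therefore goes through the Responses API, where the same effort enum is passed as `reasoning.effort`:

    response = client.responses.create(
      model: :"gpt-5.1-codex-max",
      reasoning: {effort: :xhigh}, # `xhigh` is currently gpt-5.1-codex-max only
      input: "Work through the proof step by step."
    )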

data/rbi/openai/resources/containers.rbi

@@ -12,6 +12,7 @@ module OpenAI
          name: String,
          expires_after: OpenAI::ContainerCreateParams::ExpiresAfter::OrHash,
          file_ids: T::Array[String],
+          memory_limit: OpenAI::ContainerCreateParams::MemoryLimit::OrSymbol,
          request_options: OpenAI::RequestOptions::OrHash
        ).returns(OpenAI::Models::ContainerCreateResponse)
      end
@@ -22,6 +23,8 @@ module OpenAI
        expires_after: nil,
        # IDs of files to copy to the container.
        file_ids: nil,
+        # Optional memory limit for the container. Defaults to "1g".
+        memory_limit: nil,
        request_options: {}
      )
      end
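
The containers resource gains the matching `memory_limit` keyword. A sketch; the parameter is typed as an enum (`MemoryLimit::OrSymbol`) and this diff only names the "1g" default, so the `"4g"` value below is an assumption:

    container = client.containers.create(
      name: "analysis-sandbox",
      file_ids: ["file-abc123"],  # placeholder file ID
      memory_limit: :"4g"         # assumed enum value; defaults to "1g"
    )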

data/rbi/openai/resources/conversations/items.rbi

@@ -21,6 +21,7 @@ module OpenAI
            OpenAI::Responses::ResponseFunctionToolCall::OrHash,
            OpenAI::Responses::ResponseInputItem::FunctionCallOutput::OrHash,
            OpenAI::Responses::ResponseReasoningItem::OrHash,
+            OpenAI::Responses::ResponseCompactionItemParam::OrHash,
            OpenAI::Responses::ResponseInputItem::ImageGenerationCall::OrHash,
            OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
            OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash,

data/rbi/openai/resources/conversations.rbi

@@ -23,6 +23,7 @@ module OpenAI
            OpenAI::Responses::ResponseFunctionToolCall::OrHash,
            OpenAI::Responses::ResponseInputItem::FunctionCallOutput::OrHash,
            OpenAI::Responses::ResponseReasoningItem::OrHash,
+            OpenAI::Responses::ResponseCompactionItemParam::OrHash,
            OpenAI::Responses::ResponseInputItem::ImageGenerationCall::OrHash,
            OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
            OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash,

data/rbi/openai/resources/realtime/calls.rbi

@@ -109,15 +109,20 @@ module OpenAI
        # limit, the conversation be truncated, meaning messages (starting from the
        # oldest) will not be included in the model's context. A 32k context model with
        # 4,096 max output tokens can only include 28,224 tokens in the context before
-        # truncation occurs.
-        #
-        #
-        #
-        #
-        #
-        #
-        #
-        #
+        # truncation occurs.
+        #
+        # Clients can configure truncation behavior to truncate with a lower max token
+        # limit, which is an effective way to control token usage and cost.
+        #
+        # Truncation will reduce the number of cached tokens on the next turn (busting the
+        # cache), since messages are dropped from the beginning of the context. However,
+        # clients can also configure truncation to retain messages up to a fraction of the
+        # maximum context size, which will reduce the need for future truncations and thus
+        # improve the cache rate.
+        #
+        # Truncation can be disabled entirely, which means the server will never truncate
+        # but would instead return an error if the conversation exceeds the model's input
+        # token limit.
        truncation: nil,
        # The type of session to create. Always `realtime` for the Realtime API.
        type: :realtime,
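
The expanded truncation docs describe three behaviors: default truncation at the model's input-token limit, retention-ratio truncation that keeps a fraction of the context (better for prompt-cache hit rates), and disabling truncation so the server errors instead. A sketch of a retention-ratio config, assuming the `accept` signature this file documents and the gem's `RealtimeTruncationRetentionRatio` shape; the call ID and the 0.5 ratio are arbitrary illustrations:

    client.realtime.calls.accept(
      "rtc_call_123",  # placeholder call ID
      type: :realtime,
      model: "gpt-realtime",
      truncation: {type: :retention_ratio, retention_ratio: 0.5}
    )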

data/rbi/openai/resources/responses.rbi

@@ -942,6 +942,48 @@ module OpenAI
        )
      end
 
+      # Compact conversation
+      sig do
+        params(
+          input:
+            T.nilable(
+              OpenAI::Responses::ResponseCompactParams::Input::Variants
+            ),
+          instructions: T.nilable(String),
+          model:
+            T.nilable(
+              T.any(
+                OpenAI::Responses::ResponseCompactParams::Model::OrSymbol,
+                String
+              )
+            ),
+          previous_response_id: T.nilable(String),
+          request_options: OpenAI::RequestOptions::OrHash
+        ).returns(OpenAI::Responses::CompactedResponse)
+      end
+      def compact(
+        # Text, image, or file inputs to the model, used to generate a response
+        input: nil,
+        # A system (or developer) message inserted into the model's context. When used
+        # along with `previous_response_id`, the instructions from a previous response
+        # will not be carried over to the next response. This makes it simple to swap out
+        # system (or developer) messages in new responses.
+        instructions: nil,
+        # Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
+        # wide range of models with different capabilities, performance characteristics,
+        # and price points. Refer to the
+        # [model guide](https://platform.openai.com/docs/models) to browse and compare
+        # available models.
+        model: nil,
+        # The unique ID of the previous response to the model. Use this to create
+        # multi-turn conversations. Learn more about
+        # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+        # Cannot be used in conjunction with `conversation`.
+        previous_response_id: nil,
+        request_options: {}
+      )
+      end
+
      # @api private
      sig { params(client: OpenAI::Client).returns(T.attached_class) }
      def self.new(client:)
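
This is the headline addition: `responses.compact` takes either inline `input` or a `previous_response_id` and returns an `OpenAI::Responses::CompactedResponse`, which can carry the compaction items that the input/output unions above were extended with. A sketch against the signature introduced here; the response ID is a placeholder:

    compacted = client.responses.compact(
      previous_response_id: "resp_abc123",
      model: "gpt-5.1-codex-max",
      instructions: "You are a pair-programming agent."
    )
    # Compaction items from the result can be carried forward as input on later turns.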

data/rbi/openai/resources/videos.rbi

@@ -19,11 +19,13 @@ module OpenAI
        prompt:,
        # Optional image reference that guides generation.
        input_reference: nil,
-        # The video generation model to use
+        # The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
+        # to `sora-2`.
        model: nil,
-        # Clip duration in seconds. Defaults to 4 seconds.
+        # Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
        seconds: nil,
-        # Output resolution formatted as width x height
+        # Output resolution formatted as width x height (allowed values: 720x1280,
+        # 1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.
        size: nil,
        request_options: {}
      )
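
With the allowed values now spelled out, a `videos.create` call can target them directly. A sketch; the enum spellings follow the gem's symbol convention and the prompt is illustrative:

    video = client.videos.create(
      prompt: "A timelapse of a harbor from dusk to night",
      model: :"sora-2",   # or :"sora-2-pro"
      seconds: :"8",      # 4, 8, or 12; defaults to 4
      size: :"1280x720"   # 720x1280, 1280x720, 1024x1792, or 1792x1024
    )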

data/sig/openai/models/all_models.rbs

@@ -22,6 +22,7 @@ module OpenAI
      | :"gpt-5-codex"
      | :"gpt-5-pro"
      | :"gpt-5-pro-2025-10-06"
+      | :"gpt-5.1-codex-max"
 
    module ResponsesOnlyModel
      extend OpenAI::Internal::Type::Enum
@@ -39,6 +40,7 @@ module OpenAI
      GPT_5_CODEX: :"gpt-5-codex"
      GPT_5_PRO: :"gpt-5-pro"
      GPT_5_PRO_2025_10_06: :"gpt-5-pro-2025-10-06"
+      GPT_5_1_CODEX_MAX: :"gpt-5.1-codex-max"
 
      def self?.values: -> ::Array[OpenAI::Models::AllModels::responses_only_model]
    end