openai 0.36.1 → 0.37.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +14 -0
- data/README.md +1 -1
- data/lib/openai/models/all_models.rb +1 -0
- data/lib/openai/models/beta/assistant_create_params.rb +4 -3
- data/lib/openai/models/beta/assistant_update_params.rb +4 -3
- data/lib/openai/models/beta/threads/run_create_params.rb +4 -3
- data/lib/openai/models/chat/completion_create_params.rb +4 -3
- data/lib/openai/models/container_create_params.rb +22 -1
- data/lib/openai/models/container_create_response.rb +32 -1
- data/lib/openai/models/container_list_response.rb +32 -1
- data/lib/openai/models/container_retrieve_response.rb +32 -1
- data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
- data/lib/openai/models/conversations/item_create_params.rb +2 -2
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +4 -3
- data/lib/openai/models/evals/run_cancel_response.rb +8 -6
- data/lib/openai/models/evals/run_create_params.rb +8 -6
- data/lib/openai/models/evals/run_create_response.rb +8 -6
- data/lib/openai/models/evals/run_list_response.rb +8 -6
- data/lib/openai/models/evals/run_retrieve_response.rb +8 -6
- data/lib/openai/models/graders/score_model_grader.rb +4 -3
- data/lib/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rb +43 -0
- data/lib/openai/models/realtime/output_audio_buffer_clear_event.rb +4 -4
- data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +13 -5
- data/lib/openai/models/realtime/realtime_client_event.rb +1 -1
- data/lib/openai/models/realtime/realtime_server_event.rb +16 -9
- data/lib/openai/models/realtime/realtime_session.rb +13 -5
- data/lib/openai/models/realtime/realtime_session_create_request.rb +14 -9
- data/lib/openai/models/realtime/realtime_session_create_response.rb +27 -14
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +13 -5
- data/lib/openai/models/realtime/realtime_truncation.rb +14 -9
- data/lib/openai/models/reasoning.rb +4 -3
- data/lib/openai/models/reasoning_effort.rb +5 -3
- data/lib/openai/models/responses/compacted_response.rb +56 -0
- data/lib/openai/models/responses/input_token_count_params.rb +4 -4
- data/lib/openai/models/responses/response.rb +6 -6
- data/lib/openai/models/responses/response_compact_params.rb +344 -0
- data/lib/openai/models/responses/response_compaction_item.rb +43 -0
- data/lib/openai/models/responses/response_compaction_item_param.rb +36 -0
- data/lib/openai/models/responses/response_create_params.rb +4 -4
- data/lib/openai/models/responses/response_function_shell_call_output_content.rb +10 -10
- data/lib/openai/models/responses/response_function_shell_tool_call.rb +5 -5
- data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +2 -2
- data/lib/openai/models/responses/response_input_item.rb +20 -17
- data/lib/openai/models/responses/response_output_item.rb +4 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/tool.rb +4 -2
- data/lib/openai/models/responses/tool_choice_shell.rb +1 -1
- data/lib/openai/models/responses_model.rb +1 -0
- data/lib/openai/models/video_create_params.rb +11 -6
- data/lib/openai/resources/containers.rb +3 -1
- data/lib/openai/resources/conversations/items.rb +1 -1
- data/lib/openai/resources/conversations.rb +1 -1
- data/lib/openai/resources/responses/input_tokens.rb +1 -1
- data/lib/openai/resources/responses.rb +33 -2
- data/lib/openai/resources/videos.rb +6 -3
- data/lib/openai/resources/webhooks.rb +0 -3
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/manifest.yaml +1 -0
- data/rbi/openai/models/all_models.rbi +5 -0
- data/rbi/openai/models/beta/assistant_create_params.rbi +8 -6
- data/rbi/openai/models/beta/assistant_update_params.rbi +8 -6
- data/rbi/openai/models/beta/threads/run_create_params.rbi +8 -6
- data/rbi/openai/models/chat/completion_create_params.rbi +8 -6
- data/rbi/openai/models/container_create_params.rbi +51 -0
- data/rbi/openai/models/container_create_response.rbi +81 -3
- data/rbi/openai/models/container_list_response.rbi +80 -3
- data/rbi/openai/models/container_retrieve_response.rbi +83 -3
- data/rbi/openai/models/conversations/conversation_create_params.rbi +3 -0
- data/rbi/openai/models/conversations/item_create_params.rbi +3 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -6
- data/rbi/openai/models/evals/run_cancel_response.rbi +16 -12
- data/rbi/openai/models/evals/run_create_params.rbi +16 -12
- data/rbi/openai/models/evals/run_create_response.rbi +16 -12
- data/rbi/openai/models/evals/run_list_response.rbi +16 -12
- data/rbi/openai/models/evals/run_retrieve_response.rbi +16 -12
- data/rbi/openai/models/graders/score_model_grader.rbi +8 -6
- data/rbi/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbi +56 -0
- data/rbi/openai/models/realtime/output_audio_buffer_clear_event.rbi +4 -4
- data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_server_event.rbi +6 -5
- data/rbi/openai/models/realtime/realtime_session.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +28 -18
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +52 -26
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_truncation.rbi +14 -9
- data/rbi/openai/models/reasoning.rbi +8 -6
- data/rbi/openai/models/reasoning_effort.rbi +5 -3
- data/rbi/openai/models/responses/compacted_response.rbi +105 -0
- data/rbi/openai/models/responses/response.rbi +1 -0
- data/rbi/openai/models/responses/response_compact_params.rbi +593 -0
- data/rbi/openai/models/responses/response_compaction_item.rbi +67 -0
- data/rbi/openai/models/responses/response_compaction_item_param.rbi +54 -0
- data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +9 -9
- data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +6 -6
- data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +1 -1
- data/rbi/openai/models/responses/response_input_item.rbi +18 -17
- data/rbi/openai/models/responses/response_output_item.rbi +1 -0
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +1 -0
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +1 -0
- data/rbi/openai/models/responses/tool.rbi +6 -3
- data/rbi/openai/models/responses/tool_choice_shell.rbi +1 -1
- data/rbi/openai/models/responses_model.rbi +5 -0
- data/rbi/openai/models/video_create_params.rbi +10 -6
- data/rbi/openai/resources/beta/assistants.rbi +8 -6
- data/rbi/openai/resources/beta/threads/runs.rbi +8 -6
- data/rbi/openai/resources/chat/completions.rbi +8 -6
- data/rbi/openai/resources/containers.rbi +3 -0
- data/rbi/openai/resources/conversations/items.rbi +1 -0
- data/rbi/openai/resources/conversations.rbi +1 -0
- data/rbi/openai/resources/realtime/calls.rbi +14 -9
- data/rbi/openai/resources/responses.rbi +42 -0
- data/rbi/openai/resources/videos.rbi +5 -3
- data/sig/openai/models/all_models.rbs +2 -0
- data/sig/openai/models/container_create_params.rbs +23 -1
- data/sig/openai/models/container_create_response.rbs +32 -3
- data/sig/openai/models/container_list_response.rbs +32 -3
- data/sig/openai/models/container_retrieve_response.rbs +32 -3
- data/sig/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbs +32 -0
- data/sig/openai/models/realtime/realtime_server_event.rbs +1 -0
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/responses/compacted_response.rbs +42 -0
- data/sig/openai/models/responses/response_compact_params.rbs +226 -0
- data/sig/openai/models/responses/response_compaction_item.rbs +39 -0
- data/sig/openai/models/responses/response_compaction_item_param.rbs +28 -0
- data/sig/openai/models/responses/response_input_item.rbs +1 -0
- data/sig/openai/models/responses/response_output_item.rbs +1 -0
- data/sig/openai/models/responses_model.rbs +2 -0
- data/sig/openai/resources/containers.rbs +1 -0
- data/sig/openai/resources/responses.rbs +8 -0
- metadata +31 -2

data/sig/openai/models/container_create_params.rbs

@@ -4,7 +4,8 @@ module OpenAI
       {
         name: String,
         expires_after: OpenAI::ContainerCreateParams::ExpiresAfter,
-        file_ids: ::Array[String]
+        file_ids: ::Array[String],
+        memory_limit: OpenAI::Models::ContainerCreateParams::memory_limit
       }
       & OpenAI::Internal::Type::request_parameters

@@ -24,10 +25,17 @@ module OpenAI

       def file_ids=: (::Array[String]) -> ::Array[String]

+      attr_reader memory_limit: OpenAI::Models::ContainerCreateParams::memory_limit?
+
+      def memory_limit=: (
+        OpenAI::Models::ContainerCreateParams::memory_limit
+      ) -> OpenAI::Models::ContainerCreateParams::memory_limit
+
       def initialize: (
         name: String,
         ?expires_after: OpenAI::ContainerCreateParams::ExpiresAfter,
         ?file_ids: ::Array[String],
+        ?memory_limit: OpenAI::Models::ContainerCreateParams::memory_limit,
         ?request_options: OpenAI::request_opts
       ) -> void

@@ -35,6 +43,7 @@ module OpenAI
         name: String,
         expires_after: OpenAI::ContainerCreateParams::ExpiresAfter,
         file_ids: ::Array[String],
+        memory_limit: OpenAI::Models::ContainerCreateParams::memory_limit,
         request_options: OpenAI::RequestOptions
       }

@@ -69,6 +78,19 @@ module OpenAI
           def self?.values: -> ::Array[OpenAI::Models::ContainerCreateParams::ExpiresAfter::anchor]
         end
       end
+
+      type memory_limit = :"1g" | :"4g" | :"16g" | :"64g"
+
+      module MemoryLimit
+        extend OpenAI::Internal::Type::Enum
+
+        MEMORY_LIMIT_1G: :"1g"
+        MEMORY_LIMIT_4G: :"4g"
+        MEMORY_LIMIT_16G: :"16g"
+        MEMORY_LIMIT_64G: :"64g"
+
+        def self?.values: -> ::Array[OpenAI::Models::ContainerCreateParams::memory_limit]
+      end
     end
   end
 end
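The new memory_limit request option is an enum of :"1g", :"4g", :"16g", and :"64g". A minimal usage sketch, assuming an OpenAI::Client instance and that the containers resource forwards the new keyword as the updated signature indicates:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Ask for a container with a larger memory tier; :"4g" is one of the
    # values declared by ContainerCreateParams::MemoryLimit.
    container = client.containers.create(
      name: "data-analysis",
      memory_limit: :"4g"
    )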
data/sig/openai/models/container_create_response.rbs

@@ -7,7 +7,9 @@ module OpenAI
         name: String,
         object: String,
         status: String,
-        expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter
+        expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter,
+        last_active_at: Integer,
+        memory_limit: OpenAI::Models::ContainerCreateResponse::memory_limit
       }

     class ContainerCreateResponse < OpenAI::Internal::Type::BaseModel
@@ -27,13 +29,25 @@ module OpenAI
         OpenAI::Models::ContainerCreateResponse::ExpiresAfter
       ) -> OpenAI::Models::ContainerCreateResponse::ExpiresAfter

+      attr_reader last_active_at: Integer?
+
+      def last_active_at=: (Integer) -> Integer
+
+      attr_reader memory_limit: OpenAI::Models::ContainerCreateResponse::memory_limit?
+
+      def memory_limit=: (
+        OpenAI::Models::ContainerCreateResponse::memory_limit
+      ) -> OpenAI::Models::ContainerCreateResponse::memory_limit
+
       def initialize: (
         id: String,
         created_at: Integer,
         name: String,
         object: String,
         status: String,
-        ?expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter
+        ?expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter,
+        ?last_active_at: Integer,
+        ?memory_limit: OpenAI::Models::ContainerCreateResponse::memory_limit
       ) -> void

       def to_hash: -> {
@@ -42,7 +56,9 @@ module OpenAI
         name: String,
         object: String,
         status: String,
-        expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter
+        expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter,
+        last_active_at: Integer,
+        memory_limit: OpenAI::Models::ContainerCreateResponse::memory_limit
       }

       type expires_after =
@@ -82,6 +98,19 @@ module OpenAI
           def self?.values: -> ::Array[OpenAI::Models::ContainerCreateResponse::ExpiresAfter::anchor]
         end
       end
+
+      type memory_limit = :"1g" | :"4g" | :"16g" | :"64g"
+
+      module MemoryLimit
+        extend OpenAI::Internal::Type::Enum
+
+        MEMORY_LIMIT_1G: :"1g"
+        MEMORY_LIMIT_4G: :"4g"
+        MEMORY_LIMIT_16G: :"16g"
+        MEMORY_LIMIT_64G: :"64g"
+
+        def self?.values: -> ::Array[OpenAI::Models::ContainerCreateResponse::memory_limit]
+      end
     end
   end
 end
data/sig/openai/models/container_list_response.rbs

@@ -7,7 +7,9 @@ module OpenAI
         name: String,
         object: String,
         status: String,
-        expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter
+        expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter,
+        last_active_at: Integer,
+        memory_limit: OpenAI::Models::ContainerListResponse::memory_limit
       }

     class ContainerListResponse < OpenAI::Internal::Type::BaseModel
@@ -27,13 +29,25 @@ module OpenAI
         OpenAI::Models::ContainerListResponse::ExpiresAfter
       ) -> OpenAI::Models::ContainerListResponse::ExpiresAfter

+      attr_reader last_active_at: Integer?
+
+      def last_active_at=: (Integer) -> Integer
+
+      attr_reader memory_limit: OpenAI::Models::ContainerListResponse::memory_limit?
+
+      def memory_limit=: (
+        OpenAI::Models::ContainerListResponse::memory_limit
+      ) -> OpenAI::Models::ContainerListResponse::memory_limit
+
       def initialize: (
         id: String,
         created_at: Integer,
         name: String,
         object: String,
         status: String,
-        ?expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter
+        ?expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter,
+        ?last_active_at: Integer,
+        ?memory_limit: OpenAI::Models::ContainerListResponse::memory_limit
       ) -> void

       def to_hash: -> {
@@ -42,7 +56,9 @@ module OpenAI
         name: String,
         object: String,
         status: String,
-        expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter
+        expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter,
+        last_active_at: Integer,
+        memory_limit: OpenAI::Models::ContainerListResponse::memory_limit
       }

       type expires_after =
@@ -82,6 +98,19 @@ module OpenAI
           def self?.values: -> ::Array[OpenAI::Models::ContainerListResponse::ExpiresAfter::anchor]
         end
       end
+
+      type memory_limit = :"1g" | :"4g" | :"16g" | :"64g"
+
+      module MemoryLimit
+        extend OpenAI::Internal::Type::Enum
+
+        MEMORY_LIMIT_1G: :"1g"
+        MEMORY_LIMIT_4G: :"4g"
+        MEMORY_LIMIT_16G: :"16g"
+        MEMORY_LIMIT_64G: :"64g"
+
+        def self?.values: -> ::Array[OpenAI::Models::ContainerListResponse::memory_limit]
+      end
     end
   end
 end
data/sig/openai/models/container_retrieve_response.rbs

@@ -7,7 +7,9 @@ module OpenAI
         name: String,
         object: String,
         status: String,
-        expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter
+        expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter,
+        last_active_at: Integer,
+        memory_limit: OpenAI::Models::ContainerRetrieveResponse::memory_limit
       }

     class ContainerRetrieveResponse < OpenAI::Internal::Type::BaseModel
@@ -27,13 +29,25 @@ module OpenAI
         OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter
       ) -> OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter

+      attr_reader last_active_at: Integer?
+
+      def last_active_at=: (Integer) -> Integer
+
+      attr_reader memory_limit: OpenAI::Models::ContainerRetrieveResponse::memory_limit?
+
+      def memory_limit=: (
+        OpenAI::Models::ContainerRetrieveResponse::memory_limit
+      ) -> OpenAI::Models::ContainerRetrieveResponse::memory_limit
+
       def initialize: (
         id: String,
         created_at: Integer,
         name: String,
         object: String,
         status: String,
-        ?expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter
+        ?expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter,
+        ?last_active_at: Integer,
+        ?memory_limit: OpenAI::Models::ContainerRetrieveResponse::memory_limit
       ) -> void

       def to_hash: -> {
@@ -42,7 +56,9 @@ module OpenAI
         name: String,
         object: String,
         status: String,
-        expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter
+        expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter,
+        last_active_at: Integer,
+        memory_limit: OpenAI::Models::ContainerRetrieveResponse::memory_limit
       }

       type expires_after =
@@ -82,6 +98,19 @@ module OpenAI
           def self?.values: -> ::Array[OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::anchor]
         end
       end
+
+      type memory_limit = :"1g" | :"4g" | :"16g" | :"64g"
+
+      module MemoryLimit
+        extend OpenAI::Internal::Type::Enum
+
+        MEMORY_LIMIT_1G: :"1g"
+        MEMORY_LIMIT_4G: :"4g"
+        MEMORY_LIMIT_16G: :"16g"
+        MEMORY_LIMIT_64G: :"64g"
+
+        def self?.values: -> ::Array[OpenAI::Models::ContainerRetrieveResponse::memory_limit]
+      end
     end
   end
 end
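All three container response models (create, list, retrieve) gain the same optional last_active_at and memory_limit readers. A rough sketch of reading them back, using a placeholder container ID:

    container = client.containers.retrieve("cntr_123")

    # Both readers are optional, so they may be nil for containers created
    # before these fields existed.
    puts container.memory_limit    # e.g. :"4g"
    puts container.last_active_at  # Unix timestamp of the last activity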
data/sig/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbs

@@ -0,0 +1,32 @@
+module OpenAI
+  module Models
+    module Realtime
+      type input_audio_buffer_dtmf_event_received_event =
+        {
+          event: String,
+          received_at: Integer,
+          type: :"input_audio_buffer.dtmf_event_received"
+        }
+
+      class InputAudioBufferDtmfEventReceivedEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor event: String
+
+        attr_accessor received_at: Integer
+
+        attr_accessor type: :"input_audio_buffer.dtmf_event_received"
+
+        def initialize: (
+          event: String,
+          received_at: Integer,
+          ?type: :"input_audio_buffer.dtmf_event_received"
+        ) -> void
+
+        def to_hash: -> {
+          event: String,
+          received_at: Integer,
+          type: :"input_audio_buffer.dtmf_event_received"
+        }
+      end
+    end
+  end
+end
data/sig/openai/models/realtime/realtime_server_event.rbs

@@ -13,6 +13,7 @@ module OpenAI
         | OpenAI::Realtime::RealtimeErrorEvent
         | OpenAI::Realtime::InputAudioBufferClearedEvent
         | OpenAI::Realtime::InputAudioBufferCommittedEvent
+        | OpenAI::Realtime::InputAudioBufferDtmfEventReceivedEvent
         | OpenAI::Realtime::InputAudioBufferSpeechStartedEvent
         | OpenAI::Realtime::InputAudioBufferSpeechStoppedEvent
         | OpenAI::Realtime::RateLimitsUpdatedEvent
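The DTMF event carries an event string, a received_at timestamp, and the fixed type :"input_audio_buffer.dtmf_event_received", and it now appears in the server-event union. A hypothetical handler, assuming server_event is an already-deserialized realtime server event:

    case server_event
    when OpenAI::Realtime::InputAudioBufferDtmfEventReceivedEvent
      # The DTMF digit/event payload and the time it was received.
      puts "DTMF #{server_event.event} at #{server_event.received_at}"
    end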
data/sig/openai/models/reasoning_effort.rbs

@@ -1,6 +1,6 @@
 module OpenAI
   module Models
-    type reasoning_effort = :none | :minimal | :low | :medium | :high
+    type reasoning_effort = :none | :minimal | :low | :medium | :high | :xhigh

     module ReasoningEffort
       extend OpenAI::Internal::Type::Enum
@@ -10,6 +10,7 @@ module OpenAI
       LOW: :low
       MEDIUM: :medium
       HIGH: :high
+      XHIGH: :xhigh

       def self?.values: -> ::Array[OpenAI::Models::reasoning_effort]
     end
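:xhigh joins the reasoning-effort values alongside the new XHIGH constant. A sketch of requesting it on a chat completion, assuming a model that accepts the new effort level:

    completion = client.chat.completions.create(
      model: :"gpt-5.1",
      reasoning_effort: :xhigh, # previously capped at :high
      messages: [{role: "user", content: "Work through this proof carefully."}]
    )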
data/sig/openai/models/responses/compacted_response.rbs

@@ -0,0 +1,42 @@
+module OpenAI
+  module Models
+    module Responses
+      type compacted_response =
+        {
+          id: String,
+          created_at: Integer,
+          object: :"response.compaction",
+          output: ::Array[OpenAI::Models::Responses::response_output_item],
+          usage: OpenAI::Responses::ResponseUsage
+        }
+
+      class CompactedResponse < OpenAI::Internal::Type::BaseModel
+        attr_accessor id: String
+
+        attr_accessor created_at: Integer
+
+        attr_accessor object: :"response.compaction"
+
+        attr_accessor output: ::Array[OpenAI::Models::Responses::response_output_item]
+
+        attr_accessor usage: OpenAI::Responses::ResponseUsage
+
+        def initialize: (
+          id: String,
+          created_at: Integer,
+          output: ::Array[OpenAI::Models::Responses::response_output_item],
+          usage: OpenAI::Responses::ResponseUsage,
+          ?object: :"response.compaction"
+        ) -> void
+
+        def to_hash: -> {
+          id: String,
+          created_at: Integer,
+          object: :"response.compaction",
+          output: ::Array[OpenAI::Models::Responses::response_output_item],
+          usage: OpenAI::Responses::ResponseUsage
+        }
+      end
+    end
+  end
+end
data/sig/openai/models/responses/response_compact_params.rbs

@@ -0,0 +1,226 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_compact_params =
+        {
+          input: OpenAI::Models::Responses::ResponseCompactParams::input?,
+          instructions: String?,
+          model: OpenAI::Models::Responses::ResponseCompactParams::model?,
+          previous_response_id: String?
+        }
+        & OpenAI::Internal::Type::request_parameters
+
+      class ResponseCompactParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters
+
+        attr_accessor input: OpenAI::Models::Responses::ResponseCompactParams::input?
+
+        attr_accessor instructions: String?
+
+        attr_accessor model: OpenAI::Models::Responses::ResponseCompactParams::model?
+
+        attr_accessor previous_response_id: String?
+
+        def initialize: (
+          ?input: OpenAI::Models::Responses::ResponseCompactParams::input?,
+          ?instructions: String?,
+          ?model: OpenAI::Models::Responses::ResponseCompactParams::model?,
+          ?previous_response_id: String?,
+          ?request_options: OpenAI::request_opts
+        ) -> void
+
+        def to_hash: -> {
+          input: OpenAI::Models::Responses::ResponseCompactParams::input?,
+          instructions: String?,
+          model: OpenAI::Models::Responses::ResponseCompactParams::model?,
+          previous_response_id: String?,
+          request_options: OpenAI::RequestOptions
+        }
+
+        type input =
+          String | ::Array[OpenAI::Models::Responses::response_input_item]
+
+        module Input
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCompactParams::input]
+
+          ResponseInputItemArray: OpenAI::Internal::Type::Converter
+        end
+
+        type model =
+          :"gpt-5.1"
+          | :"gpt-5.1-2025-11-13"
+          | :"gpt-5.1-codex"
+          | :"gpt-5.1-mini"
+          | :"gpt-5.1-chat-latest"
+          | :"gpt-5"
+          | :"gpt-5-mini"
+          | :"gpt-5-nano"
+          | :"gpt-5-2025-08-07"
+          | :"gpt-5-mini-2025-08-07"
+          | :"gpt-5-nano-2025-08-07"
+          | :"gpt-5-chat-latest"
+          | :"gpt-4.1"
+          | :"gpt-4.1-mini"
+          | :"gpt-4.1-nano"
+          | :"gpt-4.1-2025-04-14"
+          | :"gpt-4.1-mini-2025-04-14"
+          | :"gpt-4.1-nano-2025-04-14"
+          | :"o4-mini"
+          | :"o4-mini-2025-04-16"
+          | :o3
+          | :"o3-2025-04-16"
+          | :"o3-mini"
+          | :"o3-mini-2025-01-31"
+          | :o1
+          | :"o1-2024-12-17"
+          | :"o1-preview"
+          | :"o1-preview-2024-09-12"
+          | :"o1-mini"
+          | :"o1-mini-2024-09-12"
+          | :"gpt-4o"
+          | :"gpt-4o-2024-11-20"
+          | :"gpt-4o-2024-08-06"
+          | :"gpt-4o-2024-05-13"
+          | :"gpt-4o-audio-preview"
+          | :"gpt-4o-audio-preview-2024-10-01"
+          | :"gpt-4o-audio-preview-2024-12-17"
+          | :"gpt-4o-audio-preview-2025-06-03"
+          | :"gpt-4o-mini-audio-preview"
+          | :"gpt-4o-mini-audio-preview-2024-12-17"
+          | :"gpt-4o-search-preview"
+          | :"gpt-4o-mini-search-preview"
+          | :"gpt-4o-search-preview-2025-03-11"
+          | :"gpt-4o-mini-search-preview-2025-03-11"
+          | :"chatgpt-4o-latest"
+          | :"codex-mini-latest"
+          | :"gpt-4o-mini"
+          | :"gpt-4o-mini-2024-07-18"
+          | :"gpt-4-turbo"
+          | :"gpt-4-turbo-2024-04-09"
+          | :"gpt-4-0125-preview"
+          | :"gpt-4-turbo-preview"
+          | :"gpt-4-1106-preview"
+          | :"gpt-4-vision-preview"
+          | :"gpt-4"
+          | :"gpt-4-0314"
+          | :"gpt-4-0613"
+          | :"gpt-4-32k"
+          | :"gpt-4-32k-0314"
+          | :"gpt-4-32k-0613"
+          | :"gpt-3.5-turbo"
+          | :"gpt-3.5-turbo-16k"
+          | :"gpt-3.5-turbo-0301"
+          | :"gpt-3.5-turbo-0613"
+          | :"gpt-3.5-turbo-1106"
+          | :"gpt-3.5-turbo-0125"
+          | :"gpt-3.5-turbo-16k-0613"
+          | :"o1-pro"
+          | :"o1-pro-2025-03-19"
+          | :"o3-pro"
+          | :"o3-pro-2025-06-10"
+          | :"o3-deep-research"
+          | :"o3-deep-research-2025-06-26"
+          | :"o4-mini-deep-research"
+          | :"o4-mini-deep-research-2025-06-26"
+          | :"computer-use-preview"
+          | :"computer-use-preview-2025-03-11"
+          | :"gpt-5-codex"
+          | :"gpt-5-pro"
+          | :"gpt-5-pro-2025-10-06"
+          | :"gpt-5.1-codex-max"
+          | String
+
+        module Model
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCompactParams::model]
+
+          GPT_5_1: :"gpt-5.1"
+          GPT_5_1_2025_11_13: :"gpt-5.1-2025-11-13"
+          GPT_5_1_CODEX: :"gpt-5.1-codex"
+          GPT_5_1_MINI: :"gpt-5.1-mini"
+          GPT_5_1_CHAT_LATEST: :"gpt-5.1-chat-latest"
+          GPT_5: :"gpt-5"
+          GPT_5_MINI: :"gpt-5-mini"
+          GPT_5_NANO: :"gpt-5-nano"
+          GPT_5_2025_08_07: :"gpt-5-2025-08-07"
+          GPT_5_MINI_2025_08_07: :"gpt-5-mini-2025-08-07"
+          GPT_5_NANO_2025_08_07: :"gpt-5-nano-2025-08-07"
+          GPT_5_CHAT_LATEST: :"gpt-5-chat-latest"
+          GPT_4_1: :"gpt-4.1"
+          GPT_4_1_MINI: :"gpt-4.1-mini"
+          GPT_4_1_NANO: :"gpt-4.1-nano"
+          GPT_4_1_2025_04_14: :"gpt-4.1-2025-04-14"
+          GPT_4_1_MINI_2025_04_14: :"gpt-4.1-mini-2025-04-14"
+          GPT_4_1_NANO_2025_04_14: :"gpt-4.1-nano-2025-04-14"
+          O4_MINI: :"o4-mini"
+          O4_MINI_2025_04_16: :"o4-mini-2025-04-16"
+          O3: :o3
+          O3_2025_04_16: :"o3-2025-04-16"
+          O3_MINI: :"o3-mini"
+          O3_MINI_2025_01_31: :"o3-mini-2025-01-31"
+          O1: :o1
+          O1_2024_12_17: :"o1-2024-12-17"
+          O1_PREVIEW: :"o1-preview"
+          O1_PREVIEW_2024_09_12: :"o1-preview-2024-09-12"
+          O1_MINI: :"o1-mini"
+          O1_MINI_2024_09_12: :"o1-mini-2024-09-12"
+          GPT_4O: :"gpt-4o"
+          GPT_4O_2024_11_20: :"gpt-4o-2024-11-20"
+          GPT_4O_2024_08_06: :"gpt-4o-2024-08-06"
+          GPT_4O_2024_05_13: :"gpt-4o-2024-05-13"
+          GPT_4O_AUDIO_PREVIEW: :"gpt-4o-audio-preview"
+          GPT_4O_AUDIO_PREVIEW_2024_10_01: :"gpt-4o-audio-preview-2024-10-01"
+          GPT_4O_AUDIO_PREVIEW_2024_12_17: :"gpt-4o-audio-preview-2024-12-17"
+          GPT_4O_AUDIO_PREVIEW_2025_06_03: :"gpt-4o-audio-preview-2025-06-03"
+          GPT_4O_MINI_AUDIO_PREVIEW: :"gpt-4o-mini-audio-preview"
+          GPT_4O_MINI_AUDIO_PREVIEW_2024_12_17: :"gpt-4o-mini-audio-preview-2024-12-17"
+          GPT_4O_SEARCH_PREVIEW: :"gpt-4o-search-preview"
+          GPT_4O_MINI_SEARCH_PREVIEW: :"gpt-4o-mini-search-preview"
+          GPT_4O_SEARCH_PREVIEW_2025_03_11: :"gpt-4o-search-preview-2025-03-11"
+          GPT_4O_MINI_SEARCH_PREVIEW_2025_03_11: :"gpt-4o-mini-search-preview-2025-03-11"
+          CHATGPT_4O_LATEST: :"chatgpt-4o-latest"
+          CODEX_MINI_LATEST: :"codex-mini-latest"
+          GPT_4O_MINI: :"gpt-4o-mini"
+          GPT_4O_MINI_2024_07_18: :"gpt-4o-mini-2024-07-18"
+          GPT_4_TURBO: :"gpt-4-turbo"
+          GPT_4_TURBO_2024_04_09: :"gpt-4-turbo-2024-04-09"
+          GPT_4_0125_PREVIEW: :"gpt-4-0125-preview"
+          GPT_4_TURBO_PREVIEW: :"gpt-4-turbo-preview"
+          GPT_4_1106_PREVIEW: :"gpt-4-1106-preview"
+          GPT_4_VISION_PREVIEW: :"gpt-4-vision-preview"
+          GPT_4: :"gpt-4"
+          GPT_4_0314: :"gpt-4-0314"
+          GPT_4_0613: :"gpt-4-0613"
+          GPT_4_32K: :"gpt-4-32k"
+          GPT_4_32K_0314: :"gpt-4-32k-0314"
+          GPT_4_32K_0613: :"gpt-4-32k-0613"
+          GPT_3_5_TURBO: :"gpt-3.5-turbo"
+          GPT_3_5_TURBO_16K: :"gpt-3.5-turbo-16k"
+          GPT_3_5_TURBO_0301: :"gpt-3.5-turbo-0301"
+          GPT_3_5_TURBO_0613: :"gpt-3.5-turbo-0613"
+          GPT_3_5_TURBO_1106: :"gpt-3.5-turbo-1106"
+          GPT_3_5_TURBO_0125: :"gpt-3.5-turbo-0125"
+          GPT_3_5_TURBO_16K_0613: :"gpt-3.5-turbo-16k-0613"
+          O1_PRO: :"o1-pro"
+          O1_PRO_2025_03_19: :"o1-pro-2025-03-19"
+          O3_PRO: :"o3-pro"
+          O3_PRO_2025_06_10: :"o3-pro-2025-06-10"
+          O3_DEEP_RESEARCH: :"o3-deep-research"
+          O3_DEEP_RESEARCH_2025_06_26: :"o3-deep-research-2025-06-26"
+          O4_MINI_DEEP_RESEARCH: :"o4-mini-deep-research"
+          O4_MINI_DEEP_RESEARCH_2025_06_26: :"o4-mini-deep-research-2025-06-26"
+          COMPUTER_USE_PREVIEW: :"computer-use-preview"
+          COMPUTER_USE_PREVIEW_2025_03_11: :"computer-use-preview-2025-03-11"
+          GPT_5_CODEX: :"gpt-5-codex"
+          GPT_5_PRO: :"gpt-5-pro"
+          GPT_5_PRO_2025_10_06: :"gpt-5-pro-2025-10-06"
+          GPT_5_1_CODEX_MAX: :"gpt-5.1-codex-max"
+        end
+      end
+    end
+  end
+end
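ResponseCompactParams mirrors the shape of a normal Responses request (input, instructions, model, previous_response_id), and CompactedResponse is the result type. A hedged sketch of how the new compaction call might look; the compact method name on the Responses resource is inferred from the params class and the expanded responses resource, not shown in this diff:

    # Hypothetical: compact the history behind an existing response so the
    # conversation state takes fewer tokens on the next turn.
    compacted = client.responses.compact(
      previous_response_id: "resp_abc123", # placeholder ID
      model: :"gpt-5.1"
    )

    puts compacted.object              # => :"response.compaction"
    puts compacted.usage.total_tokens  # tokens spent producing the compaction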
data/sig/openai/models/responses/response_compaction_item.rbs

@@ -0,0 +1,39 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_compaction_item =
+        {
+          id: String,
+          encrypted_content: String,
+          type: :compaction,
+          created_by: String
+        }
+
+      class ResponseCompactionItem < OpenAI::Internal::Type::BaseModel
+        attr_accessor id: String
+
+        attr_accessor encrypted_content: String
+
+        attr_accessor type: :compaction
+
+        attr_reader created_by: String?
+
+        def created_by=: (String) -> String
+
+        def initialize: (
+          id: String,
+          encrypted_content: String,
+          ?created_by: String,
+          ?type: :compaction
+        ) -> void
+
+        def to_hash: -> {
+          id: String,
+          encrypted_content: String,
+          type: :compaction,
+          created_by: String
+        }
+      end
+    end
+  end
+end
data/sig/openai/models/responses/response_compaction_item_param.rbs

@@ -0,0 +1,28 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_compaction_item_param =
+        { encrypted_content: String, type: :compaction, id: String? }
+
+      class ResponseCompactionItemParam < OpenAI::Internal::Type::BaseModel
+        attr_accessor encrypted_content: String
+
+        attr_accessor type: :compaction
+
+        attr_accessor id: String?
+
+        def initialize: (
+          encrypted_content: String,
+          ?id: String?,
+          ?type: :compaction
+        ) -> void
+
+        def to_hash: -> {
+          encrypted_content: String,
+          type: :compaction,
+          id: String?
+        }
+      end
+    end
+  end
+end
data/sig/openai/models/responses/response_input_item.rbs

@@ -12,6 +12,7 @@ module OpenAI
         | OpenAI::Responses::ResponseFunctionToolCall
         | OpenAI::Responses::ResponseInputItem::FunctionCallOutput
         | OpenAI::Responses::ResponseReasoningItem
+        | OpenAI::Responses::ResponseCompactionItemParam
         | OpenAI::Responses::ResponseInputItem::ImageGenerationCall
         | OpenAI::Responses::ResponseCodeInterpreterToolCall
         | OpenAI::Responses::ResponseInputItem::LocalShellCall
data/sig/openai/models/responses/response_output_item.rbs

@@ -8,6 +8,7 @@ module OpenAI
         | OpenAI::Responses::ResponseFunctionWebSearch
         | OpenAI::Responses::ResponseComputerToolCall
         | OpenAI::Responses::ResponseReasoningItem
+        | OpenAI::Responses::ResponseCompactionItem
         | OpenAI::Responses::ResponseOutputItem::ImageGenerationCall
         | OpenAI::Responses::ResponseCodeInterpreterToolCall
         | OpenAI::Responses::ResponseOutputItem::LocalShellCall