openai 0.27.1 → 0.28.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +17 -0
- data/README.md +1 -1
- data/lib/openai/internal/transport/pooled_net_requester.rb +7 -10
- data/lib/openai/internal/type/base_stream.rb +0 -17
- data/lib/openai/models/batch.rb +23 -1
- data/lib/openai/models/batch_usage.rb +84 -0
- data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -4
- data/lib/openai/models/responses/response_function_call_arguments_done_event.rb +9 -1
- data/lib/openai/models/responses/response_function_call_output_item.rb +26 -0
- data/lib/openai/models/responses/response_function_call_output_item_list.rb +11 -0
- data/lib/openai/models/responses/response_function_tool_call_output_item.rb +47 -4
- data/lib/openai/models/responses/response_input_file_content.rb +52 -0
- data/lib/openai/models/responses/response_input_image_content.rb +65 -0
- data/lib/openai/models/responses/response_input_item.rb +19 -4
- data/lib/openai/models/responses/response_input_text_content.rb +28 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/rbi/openai/internal/type/base_stream.rbi +0 -15
- data/rbi/openai/models/batch.rbi +35 -1
- data/rbi/openai/models/batch_usage.rbi +139 -0
- data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +77 -5
- data/rbi/openai/models/responses/response_function_call_arguments_done_event.rbi +8 -0
- data/rbi/openai/models/responses/response_function_call_output_item.rbi +31 -0
- data/rbi/openai/models/responses/response_function_call_output_item_list.rbi +15 -0
- data/rbi/openai/models/responses/response_function_tool_call_output_item.rbi +72 -5
- data/rbi/openai/models/responses/response_input_file_content.rbi +75 -0
- data/rbi/openai/models/responses/response_input_image_content.rbi +125 -0
- data/rbi/openai/models/responses/response_input_item.rbi +36 -5
- data/rbi/openai/models/responses/response_input_text_content.rbi +39 -0
- data/rbi/openai/models.rbi +2 -0
- data/sig/openai/internal/type/base_stream.rbs +0 -4
- data/sig/openai/models/batch.rbs +16 -2
- data/sig/openai/models/batch_usage.rbs +60 -0
- data/sig/openai/models/responses/response_custom_tool_call_output.rbs +27 -4
- data/sig/openai/models/responses/response_function_call_arguments_done_event.rbs +5 -0
- data/sig/openai/models/responses/response_function_call_output_item.rbs +16 -0
- data/sig/openai/models/responses/response_function_call_output_item_list.rbs +10 -0
- data/sig/openai/models/responses/response_function_tool_call_output_item.rbs +27 -4
- data/sig/openai/models/responses/response_input_file_content.rbs +42 -0
- data/sig/openai/models/responses/response_input_image_content.rbs +49 -0
- data/sig/openai/models/responses/response_input_item.rbs +14 -4
- data/sig/openai/models/responses/response_input_text_content.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- metadata +19 -1
data/lib/openai/models.rb
CHANGED
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -99,6 +99,7 @@ require_relative "openai/models/batch_error"
 require_relative "openai/models/batch_list_params"
 require_relative "openai/models/batch_request_counts"
 require_relative "openai/models/batch_retrieve_params"
+require_relative "openai/models/batch_usage"
 require_relative "openai/models/beta/assistant"
 require_relative "openai/models/beta/assistant_create_params"
 require_relative "openai/models/beta/assistant_deleted"
@@ -537,6 +538,8 @@ require_relative "openai/models/responses/response_format_text_config"
 require_relative "openai/models/responses/response_format_text_json_schema_config"
 require_relative "openai/models/responses/response_function_call_arguments_delta_event"
 require_relative "openai/models/responses/response_function_call_arguments_done_event"
+require_relative "openai/models/responses/response_function_call_output_item"
+require_relative "openai/models/responses/response_function_call_output_item_list"
 require_relative "openai/models/responses/response_function_tool_call_item"
 require_relative "openai/models/responses/response_function_tool_call_output_item"
 require_relative "openai/models/responses/response_function_web_search"
@@ -550,9 +553,12 @@ require_relative "openai/models/responses/response_in_progress_event"
 require_relative "openai/models/responses/response_input"
 require_relative "openai/models/responses/response_input_audio"
 require_relative "openai/models/responses/response_input_content"
+require_relative "openai/models/responses/response_input_file_content"
+require_relative "openai/models/responses/response_input_image_content"
 require_relative "openai/models/responses/response_input_item"
 require_relative "openai/models/responses/response_input_message_content_list"
 require_relative "openai/models/responses/response_input_message_item"
+require_relative "openai/models/responses/response_input_text_content"
 require_relative "openai/models/responses/response_item"
 require_relative "openai/models/responses/response_item_list"
 require_relative "openai/models/responses/response_mcp_call_arguments_delta_event"
data/rbi/openai/internal/type/base_stream.rbi
CHANGED
@@ -12,21 +12,6 @@ module OpenAI
         Message = type_member(:in)
         Elem = type_member(:out)
 
-        class << self
-          # Attempt to close the underlying transport when the stream itself is garbage
-          # collected.
-          #
-          # This should not be relied upon for resource clean up, as the garbage collector
-          # is not guaranteed to run.
-          sig do
-            params(stream: T::Enumerable[T.anything]).returns(
-              T.proc.params(arg0: Integer).void
-            )
-          end
-          def defer_closing(stream)
-          end
-        end
-
         sig { returns(Integer) }
         attr_reader :status
 
data/rbi/openai/models/batch.rbi
CHANGED
@@ -110,6 +110,17 @@ module OpenAI
       sig { returns(T.nilable(T::Hash[Symbol, String])) }
       attr_accessor :metadata
 
+      # Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI offers a
+      # wide range of models with different capabilities, performance characteristics,
+      # and price points. Refer to the
+      # [model guide](https://platform.openai.com/docs/models) to browse and compare
+      # available models.
+      sig { returns(T.nilable(String)) }
+      attr_reader :model
+
+      sig { params(model: String).void }
+      attr_writer :model
+
       # The ID of the file containing the outputs of successfully executed requests.
       sig { returns(T.nilable(String)) }
       attr_reader :output_file_id
@@ -124,6 +135,15 @@ module OpenAI
       sig { params(request_counts: OpenAI::BatchRequestCounts::OrHash).void }
       attr_writer :request_counts
 
+      # Represents token usage details including input tokens, output tokens, a
+      # breakdown of output tokens, and the total tokens used. Only populated on batches
+      # created after September 7, 2025.
+      sig { returns(T.nilable(OpenAI::BatchUsage)) }
+      attr_reader :usage
+
+      sig { params(usage: OpenAI::BatchUsage::OrHash).void }
+      attr_writer :usage
+
       sig do
         params(
           id: String,
@@ -143,8 +163,10 @@ module OpenAI
           finalizing_at: Integer,
           in_progress_at: Integer,
           metadata: T.nilable(T::Hash[Symbol, String]),
+          model: String,
           output_file_id: String,
           request_counts: OpenAI::BatchRequestCounts::OrHash,
+          usage: OpenAI::BatchUsage::OrHash,
           object: Symbol
         ).returns(T.attached_class)
       end
@@ -186,10 +208,20 @@ module OpenAI
         # Keys are strings with a maximum length of 64 characters. Values are strings with
         # a maximum length of 512 characters.
         metadata: nil,
+        # Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI offers a
+        # wide range of models with different capabilities, performance characteristics,
+        # and price points. Refer to the
+        # [model guide](https://platform.openai.com/docs/models) to browse and compare
+        # available models.
+        model: nil,
        # The ID of the file containing the outputs of successfully executed requests.
         output_file_id: nil,
        # The request counts for different statuses within the batch.
         request_counts: nil,
+        # Represents token usage details including input tokens, output tokens, a
+        # breakdown of output tokens, and the total tokens used. Only populated on batches
+        # created after September 7, 2025.
+        usage: nil,
        # The object type, which is always `batch`.
         object: :batch
       )
@@ -216,8 +248,10 @@ module OpenAI
            finalizing_at: Integer,
            in_progress_at: Integer,
            metadata: T.nilable(T::Hash[Symbol, String]),
+           model: String,
            output_file_id: String,
-           request_counts: OpenAI::BatchRequestCounts
+           request_counts: OpenAI::BatchRequestCounts,
+           usage: OpenAI::BatchUsage
          }
        )
      end
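
Taken together, these additions mean a retrieved batch can now report which model processed it and how many tokens it consumed. A minimal sketch of reading the new fields, assuming the gem's usual client setup (the batch ID is a placeholder):

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
    batch = client.batches.retrieve("batch_123") # placeholder ID

    puts batch.model # e.g. "gpt-5-2025-08-07"

    if (usage = batch.usage) # nil for batches created before September 7, 2025
      puts usage.total_tokens
      puts usage.input_tokens_details.cached_tokens
      puts usage.output_tokens_details.reasoning_tokens
    end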
data/rbi/openai/models/batch_usage.rbi
ADDED
@@ -0,0 +1,139 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class BatchUsage < OpenAI::Internal::Type::BaseModel
+      OrHash =
+        T.type_alias { T.any(OpenAI::BatchUsage, OpenAI::Internal::AnyHash) }
+
+      # The number of input tokens.
+      sig { returns(Integer) }
+      attr_accessor :input_tokens
+
+      # A detailed breakdown of the input tokens.
+      sig { returns(OpenAI::BatchUsage::InputTokensDetails) }
+      attr_reader :input_tokens_details
+
+      sig do
+        params(
+          input_tokens_details: OpenAI::BatchUsage::InputTokensDetails::OrHash
+        ).void
+      end
+      attr_writer :input_tokens_details
+
+      # The number of output tokens.
+      sig { returns(Integer) }
+      attr_accessor :output_tokens
+
+      # A detailed breakdown of the output tokens.
+      sig { returns(OpenAI::BatchUsage::OutputTokensDetails) }
+      attr_reader :output_tokens_details
+
+      sig do
+        params(
+          output_tokens_details: OpenAI::BatchUsage::OutputTokensDetails::OrHash
+        ).void
+      end
+      attr_writer :output_tokens_details
+
+      # The total number of tokens used.
+      sig { returns(Integer) }
+      attr_accessor :total_tokens
+
+      # Represents token usage details including input tokens, output tokens, a
+      # breakdown of output tokens, and the total tokens used. Only populated on batches
+      # created after September 7, 2025.
+      sig do
+        params(
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::BatchUsage::InputTokensDetails::OrHash,
+          output_tokens: Integer,
+          output_tokens_details:
+            OpenAI::BatchUsage::OutputTokensDetails::OrHash,
+          total_tokens: Integer
+        ).returns(T.attached_class)
+      end
+      def self.new(
+        # The number of input tokens.
+        input_tokens:,
+        # A detailed breakdown of the input tokens.
+        input_tokens_details:,
+        # The number of output tokens.
+        output_tokens:,
+        # A detailed breakdown of the output tokens.
+        output_tokens_details:,
+        # The total number of tokens used.
+        total_tokens:
+      )
+      end
+
+      sig do
+        override.returns(
+          {
+            input_tokens: Integer,
+            input_tokens_details: OpenAI::BatchUsage::InputTokensDetails,
+            output_tokens: Integer,
+            output_tokens_details: OpenAI::BatchUsage::OutputTokensDetails,
+            total_tokens: Integer
+          }
+        )
+      end
+      def to_hash
+      end
+
+      class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::BatchUsage::InputTokensDetails,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The number of tokens that were retrieved from the cache.
+        # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+        sig { returns(Integer) }
+        attr_accessor :cached_tokens
+
+        # A detailed breakdown of the input tokens.
+        sig { params(cached_tokens: Integer).returns(T.attached_class) }
+        def self.new(
+          # The number of tokens that were retrieved from the cache.
+          # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+          cached_tokens:
+        )
+        end
+
+        sig { override.returns({ cached_tokens: Integer }) }
+        def to_hash
+        end
+      end
+
+      class OutputTokensDetails < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::BatchUsage::OutputTokensDetails,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The number of reasoning tokens.
+        sig { returns(Integer) }
+        attr_accessor :reasoning_tokens
+
+        # A detailed breakdown of the output tokens.
+        sig { params(reasoning_tokens: Integer).returns(T.attached_class) }
+        def self.new(
+          # The number of reasoning tokens.
+          reasoning_tokens:
+        )
+        end
+
+        sig { override.returns({ reasoning_tokens: Integer }) }
+        def to_hash
+        end
+      end
+    end
+  end
+end
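
Because every sub-model accepts its `OrHash` form, a `BatchUsage` can also be built from plain hashes, which is convenient in tests and fixtures. A small sketch with made-up token counts:

    usage = OpenAI::BatchUsage.new(
      input_tokens: 1_200,
      input_tokens_details: { cached_tokens: 200 },
      output_tokens: 800,
      output_tokens_details: { reasoning_tokens: 300 },
      total_tokens: 2_000
    )

    usage.to_hash # => a plain hash mirroring the structure above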
data/rbi/openai/models/responses/response_custom_tool_call_output.rbi
CHANGED
@@ -16,8 +16,13 @@ module OpenAI
        sig { returns(String) }
        attr_accessor :call_id
 
-        # The output from the custom tool call generated by your code.
-
+        # The output from the custom tool call generated by your code. Can be a string or
+        # an list of output content.
+        sig do
+          returns(
+            OpenAI::Responses::ResponseCustomToolCallOutput::Output::Variants
+          )
+        end
        attr_accessor :output
 
        # The type of the custom tool call output. Always `custom_tool_call_output`.
@@ -35,7 +40,8 @@ module OpenAI
        sig do
          params(
            call_id: String,
-            output:
+            output:
+              OpenAI::Responses::ResponseCustomToolCallOutput::Output::Variants,
            id: String,
            type: Symbol
          ).returns(T.attached_class)
@@ -43,7 +49,8 @@ module OpenAI
        def self.new(
          # The call ID, used to map this custom tool call output to a custom tool call.
          call_id:,
-          # The output from the custom tool call generated by your code.
+          # The output from the custom tool call generated by your code. Can be a string or
+          # an list of output content.
          output:,
          # The unique ID of the custom tool call output in the OpenAI platform.
          id: nil,
@@ -54,11 +61,76 @@ module OpenAI
 
        sig do
          override.returns(
-            {
+            {
+              call_id: String,
+              output:
+                OpenAI::Responses::ResponseCustomToolCallOutput::Output::Variants,
+              type: Symbol,
+              id: String
+            }
          )
        end
        def to_hash
        end
+
+        # The output from the custom tool call generated by your code. Can be a string or
+        # an list of output content.
+        module Output
+          extend OpenAI::Internal::Type::Union
+
+          Variants =
+            T.type_alias do
+              T.any(
+                String,
+                T::Array[
+                  OpenAI::Responses::ResponseCustomToolCallOutput::Output::OutputContentList::Variants
+                ]
+              )
+            end
+
+          # A text input to the model.
+          module OutputContentList
+            extend OpenAI::Internal::Type::Union
+
+            Variants =
+              T.type_alias do
+                T.any(
+                  OpenAI::Responses::ResponseInputText,
+                  OpenAI::Responses::ResponseInputImage,
+                  OpenAI::Responses::ResponseInputFile
+                )
+              end
+
+            sig do
+              override.returns(
+                T::Array[
+                  OpenAI::Responses::ResponseCustomToolCallOutput::Output::OutputContentList::Variants
+                ]
+              )
+            end
+            def self.variants
+            end
+          end
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::Responses::ResponseCustomToolCallOutput::Output::Variants
+              ]
+            )
+          end
+          def self.variants
+          end
+
+          OutputContentListArray =
+            T.let(
+              OpenAI::Internal::Type::ArrayOf[
+                union:
+                  OpenAI::Responses::ResponseCustomToolCallOutput::Output::OutputContentList
+              ],
+              OpenAI::Internal::Type::Converter
+            )
+        end
      end
    end
  end
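
For callers, the widened `output` union means a custom tool result can be returned to the model either as a plain string (as before) or as a list of `input_text` / `input_image` / `input_file` content parts. A hedged request-side sketch, reusing the `client` from the earlier sketch; the IDs and the `previous_response_id` flow are placeholders for whatever your tool loop uses:

    client.responses.create(
      model: "gpt-5",
      previous_response_id: "resp_123", # placeholder
      input: [
        {
          type: "custom_tool_call_output",
          call_id: "call_abc", # placeholder, copied from the model's custom tool call
          output: [
            { type: "input_text", text: "Rendered 3 charts." },
            { type: "input_image", image_url: "https://example.com/chart.png", detail: "auto" }
          ]
        }
      ]
    )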
data/rbi/openai/models/responses/response_function_call_arguments_done_event.rbi
CHANGED
@@ -20,6 +20,10 @@ module OpenAI
        sig { returns(String) }
        attr_accessor :item_id
 
+        # The name of the function that was called.
+        sig { returns(String) }
+        attr_accessor :name
+
        # The index of the output item.
        sig { returns(Integer) }
        attr_accessor :output_index
@@ -36,6 +40,7 @@ module OpenAI
          params(
            arguments: String,
            item_id: String,
+            name: String,
            output_index: Integer,
            sequence_number: Integer,
            type: Symbol
@@ -46,6 +51,8 @@ module OpenAI
          arguments:,
          # The ID of the item.
          item_id:,
+          # The name of the function that was called.
+          name:,
          # The index of the output item.
          output_index:,
          # The sequence number of this event.
@@ -59,6 +66,7 @@ module OpenAI
          {
            arguments: String,
            item_id: String,
+            name: String,
            output_index: Integer,
            sequence_number: Integer,
            type: Symbol
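
Streaming consumers can now read the function name directly off the `response.function_call_arguments.done` event instead of correlating `item_id` back to the output item. A hedged sketch; the streaming helper shown here (`client.responses.stream`) and the `weather_tool` definition are assumptions, so check the gem's README for the exact streaming entry point in your version:

    stream = client.responses.stream(
      model: "gpt-5",
      input: "What's the weather in Oslo?",
      tools: [weather_tool] # placeholder tool definition
    )

    stream.each do |event|
      case event
      when OpenAI::Responses::ResponseFunctionCallArgumentsDoneEvent
        # `name` is new in 0.28.0: the function these completed arguments belong to.
        puts "#{event.name}(#{event.arguments})"
      end
    end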
data/rbi/openai/models/responses/response_function_call_output_item.rbi
ADDED
@@ -0,0 +1,31 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      # A text input to the model.
+      module ResponseFunctionCallOutputItem
+        extend OpenAI::Internal::Type::Union
+
+        Variants =
+          T.type_alias do
+            T.any(
+              OpenAI::Responses::ResponseInputTextContent,
+              OpenAI::Responses::ResponseInputImageContent,
+              OpenAI::Responses::ResponseInputFileContent
+            )
+          end
+
+        sig do
+          override.returns(
+            T::Array[
+              OpenAI::Responses::ResponseFunctionCallOutputItem::Variants
+            ]
+          )
+        end
+        def self.variants
+        end
+      end
+    end
+  end
+end
data/rbi/openai/models/responses/response_function_call_output_item_list.rbi
ADDED
@@ -0,0 +1,15 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      ResponseFunctionCallOutputItemList =
+        T.let(
+          OpenAI::Internal::Type::ArrayOf[
+            union: OpenAI::Responses::ResponseFunctionCallOutputItem
+          ],
+          OpenAI::Internal::Type::Converter
+        )
+    end
+  end
+end
data/rbi/openai/models/responses/response_function_tool_call_output_item.rbi
CHANGED
@@ -20,8 +20,13 @@ module OpenAI
        sig { returns(String) }
        attr_accessor :call_id
 
-        #
-
+        # The output from the function call generated by your code. Can be a string or an
+        # list of output content.
+        sig do
+          returns(
+            OpenAI::Responses::ResponseFunctionToolCallOutputItem::Output::Variants
+          )
+        end
        attr_accessor :output
 
        # The type of the function tool call output. Always `function_call_output`.
@@ -51,7 +56,8 @@ module OpenAI
          params(
            id: String,
            call_id: String,
-            output:
+            output:
+              OpenAI::Responses::ResponseFunctionToolCallOutputItem::Output::Variants,
            status:
              OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::OrSymbol,
            type: Symbol
@@ -62,7 +68,8 @@ module OpenAI
          id:,
          # The unique ID of the function tool call generated by the model.
          call_id:,
-          #
+          # The output from the function call generated by your code. Can be a string or an
+          # list of output content.
          output:,
          # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
          # Populated when items are returned via API.
@@ -77,7 +84,8 @@ module OpenAI
          {
            id: String,
            call_id: String,
-            output:
+            output:
+              OpenAI::Responses::ResponseFunctionToolCallOutputItem::Output::Variants,
            type: Symbol,
            status:
              OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::TaggedSymbol
@@ -87,6 +95,65 @@ module OpenAI
        def to_hash
        end
 
+        # The output from the function call generated by your code. Can be a string or an
+        # list of output content.
+        module Output
+          extend OpenAI::Internal::Type::Union
+
+          Variants =
+            T.type_alias do
+              T.any(
+                String,
+                T::Array[
+                  OpenAI::Responses::ResponseFunctionToolCallOutputItem::Output::OutputContentList::Variants
+                ]
+              )
+            end
+
+          # A text input to the model.
+          module OutputContentList
+            extend OpenAI::Internal::Type::Union
+
+            Variants =
+              T.type_alias do
+                T.any(
+                  OpenAI::Responses::ResponseInputText,
+                  OpenAI::Responses::ResponseInputImage,
+                  OpenAI::Responses::ResponseInputFile
+                )
+              end
+
+            sig do
+              override.returns(
+                T::Array[
+                  OpenAI::Responses::ResponseFunctionToolCallOutputItem::Output::OutputContentList::Variants
+                ]
+              )
+            end
+            def self.variants
+            end
+          end
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::Responses::ResponseFunctionToolCallOutputItem::Output::Variants
+              ]
+            )
+          end
+          def self.variants
+          end
+
+          OutputContentListArray =
+            T.let(
+              OpenAI::Internal::Type::ArrayOf[
+                union:
+                  OpenAI::Responses::ResponseFunctionToolCallOutputItem::Output::OutputContentList
+              ],
+              OpenAI::Internal::Type::Converter
+            )
+        end
+
        # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
        # Populated when items are returned via API.
        module Status
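
The same widening applies when `function_call_output` items are read back from the API: `item.output` may now be an array of content parts rather than a string, so code that assumed a `String` needs a branch. A sketch, where `item` is assumed to be one element taken from an input-items listing (for example `client.responses.input_items.list(response_id)`):

    case item.output
    when String
      puts item.output
    when Array
      # Keep only the text parts; image and file parts carry URLs/IDs instead.
      texts = item.output.grep(OpenAI::Responses::ResponseInputText).map(&:text)
      puts texts.join("\n")
    end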
data/rbi/openai/models/responses/response_input_file_content.rbi
ADDED
@@ -0,0 +1,75 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseInputFileContent < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Responses::ResponseInputFileContent,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The type of the input item. Always `input_file`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # The base64-encoded data of the file to be sent to the model.
+        sig { returns(T.nilable(String)) }
+        attr_accessor :file_data
+
+        # The ID of the file to be sent to the model.
+        sig { returns(T.nilable(String)) }
+        attr_accessor :file_id
+
+        # The URL of the file to be sent to the model.
+        sig { returns(T.nilable(String)) }
+        attr_accessor :file_url
+
+        # The name of the file to be sent to the model.
+        sig { returns(T.nilable(String)) }
+        attr_accessor :filename
+
+        # A file input to the model.
+        sig do
+          params(
+            file_data: T.nilable(String),
+            file_id: T.nilable(String),
+            file_url: T.nilable(String),
+            filename: T.nilable(String),
+            type: Symbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The base64-encoded data of the file to be sent to the model.
+          file_data: nil,
+          # The ID of the file to be sent to the model.
+          file_id: nil,
+          # The URL of the file to be sent to the model.
+          file_url: nil,
+          # The name of the file to be sent to the model.
+          filename: nil,
+          # The type of the input item. Always `input_file`.
+          type: :input_file
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              type: Symbol,
+              file_data: T.nilable(String),
+              file_id: T.nilable(String),
+              file_url: T.nilable(String),
+              filename: T.nilable(String)
+            }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
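
`ResponseInputFileContent` mirrors the `input_file` content part; typically just one of `file_id`, `file_url`, or base64 `file_data` (with `filename`) is supplied, and `type` defaults to `:input_file`. A brief sketch of the typed and plain-hash forms:

    part = OpenAI::Responses::ResponseInputFileContent.new(file_id: "file_123") # placeholder ID
    part.to_hash # => a plain hash form of the content part, with type: :input_file

    # The equivalent hash form, accepted wherever an OrHash is expected:
    { type: "input_file", file_url: "https://example.com/report.pdf", filename: "report.pdf" }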