openai 0.13.0 → 0.14.0
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +21 -0
- data/README.md +1 -1
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +34 -10
- data/lib/openai/models/eval_create_params.rb +50 -5
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +50 -5
- data/lib/openai/models/evals/run_cancel_response.rb +48 -5
- data/lib/openai/models/evals/run_create_params.rb +50 -5
- data/lib/openai/models/evals/run_create_response.rb +48 -5
- data/lib/openai/models/evals/run_list_response.rb +48 -5
- data/lib/openai/models/evals/run_retrieve_response.rb +48 -5
- data/lib/openai/models/graders/label_model_grader.rb +48 -5
- data/lib/openai/models/graders/score_model_grader.rb +48 -5
- data/lib/openai/models/image_edit_completed_event.rb +198 -0
- data/lib/openai/models/image_edit_params.rb +36 -1
- data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
- data/lib/openai/models/image_edit_stream_event.rb +21 -0
- data/lib/openai/models/image_gen_completed_event.rb +198 -0
- data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
- data/lib/openai/models/image_gen_stream_event.rb +21 -0
- data/lib/openai/models/image_generate_params.rb +13 -1
- data/lib/openai/models/images_response.rb +3 -0
- data/lib/openai/models/responses/response_output_refusal.rb +2 -2
- data/lib/openai/models/responses/tool.rb +30 -1
- data/lib/openai/models.rb +12 -0
- data/lib/openai/resources/images.rb +140 -2
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/rbi/openai/helpers/structured_output/json_schema_converter.rbi +4 -0
- data/rbi/openai/models/eval_create_params.rbi +76 -7
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +76 -7
- data/rbi/openai/models/evals/run_cancel_response.rbi +70 -5
- data/rbi/openai/models/evals/run_create_params.rbi +76 -7
- data/rbi/openai/models/evals/run_create_response.rbi +70 -5
- data/rbi/openai/models/evals/run_list_response.rbi +70 -5
- data/rbi/openai/models/evals/run_retrieve_response.rbi +70 -5
- data/rbi/openai/models/graders/label_model_grader.rbi +74 -7
- data/rbi/openai/models/graders/score_model_grader.rbi +74 -7
- data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
- data/rbi/openai/models/image_edit_params.rbi +51 -0
- data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
- data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
- data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
- data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
- data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
- data/rbi/openai/models/image_generate_params.rbi +12 -0
- data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
- data/rbi/openai/models/responses/tool.rbi +61 -0
- data/rbi/openai/models.rbi +12 -0
- data/rbi/openai/resources/images.rbi +225 -0
- data/sig/openai/models/eval_create_params.rbs +29 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +29 -0
- data/sig/openai/models/evals/run_cancel_response.rbs +33 -0
- data/sig/openai/models/evals/run_create_params.rbs +33 -0
- data/sig/openai/models/evals/run_create_response.rbs +33 -0
- data/sig/openai/models/evals/run_list_response.rbs +33 -0
- data/sig/openai/models/evals/run_retrieve_response.rbs +33 -0
- data/sig/openai/models/graders/label_model_grader.rbs +29 -0
- data/sig/openai/models/graders/score_model_grader.rbs +29 -0
- data/sig/openai/models/image_edit_completed_event.rbs +150 -0
- data/sig/openai/models/image_edit_params.rbs +21 -0
- data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_edit_stream_event.rbs +12 -0
- data/sig/openai/models/image_gen_completed_event.rbs +150 -0
- data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_gen_stream_event.rbs +12 -0
- data/sig/openai/models/image_generate_params.rbs +5 -0
- data/sig/openai/models/responses/tool.rbs +16 -0
- data/sig/openai/models.rbs +12 -0
- data/sig/openai/resources/images.rbs +38 -0
- metadata +20 -2
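
In summary, 0.14.0 adds streaming support for image editing and generation (the new `image_edit_*` and `image_gen_*` event models plus the expanded `resources/images.rb`), the `input_fidelity` and `partial_images` parameters on image edits, and an `InputImage` content variant across the eval and grader models. The sketch below shows how the streaming surface might be consumed; it is hedged, since the streaming entry-point name is inferred from the resource diff rather than shown in it:

# Hypothetical consumption of the new image-edit streaming events.
# Assumptions: `edit_stream_raw` as the streaming entry point (inferred from
# the +140-line change to resources/images.rb) and `partial_image_index` as a
# field on the partial-image event; the event classes themselves are in this diff.
require "openai"
require "base64"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

stream = client.images.edit_stream_raw( # assumed name; verify against the gem
  image: File.open("photo.png", "rb"),
  prompt: "Add a small sailboat on the horizon",
  model: :"gpt-image-1",
  partial_images: 2 # new in 0.14.0: 0-3 partial frames while streaming
)

stream.each do |event|
  case event
  when OpenAI::ImageEditPartialImageEvent
    # `partial_image_index` is assumed from the API event shape, not this diff.
    File.binwrite("partial_#{event.partial_image_index}.png",
                  Base64.decode64(event.b64_json))
  when OpenAI::ImageEditCompletedEvent
    File.binwrite("final.png", Base64.decode64(event.b64_json))
  end
end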
data/rbi/openai/models/graders/score_model_grader.rbi

@@ -92,13 +92,15 @@ module OpenAI
             )
           end
 
-          #
+          # Inputs to the model - can contain template strings.
           sig do
             returns(
               T.any(
                 String,
                 OpenAI::Responses::ResponseInputText,
-                OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText
+                OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText,
+                OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage,
+                T::Array[T.anything]
               )
             )
           end
@@ -139,14 +141,16 @@ module OpenAI
                 T.any(
                   String,
                   OpenAI::Responses::ResponseInputText::OrHash,
-                  OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText::OrHash
+                  OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText::OrHash,
+                  OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage::OrHash,
+                  T::Array[T.anything]
                 ),
               role: OpenAI::Graders::ScoreModelGrader::Input::Role::OrSymbol,
               type: OpenAI::Graders::ScoreModelGrader::Input::Type::OrSymbol
             ).returns(T.attached_class)
           end
           def self.new(
-            #
+            # Inputs to the model - can contain template strings.
             content:,
             # The role of the message input. One of `user`, `assistant`, `system`, or
             # `developer`.
@@ -163,7 +167,9 @@ module OpenAI
               T.any(
                 String,
                 OpenAI::Responses::ResponseInputText,
-                OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText
+                OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText,
+                OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage,
+                T::Array[T.anything]
               ),
             role: OpenAI::Graders::ScoreModelGrader::Input::Role::OrSymbol,
             type: OpenAI::Graders::ScoreModelGrader::Input::Type::OrSymbol
@@ -173,7 +179,7 @@ module OpenAI
           def to_hash
           end
 
-          #
+          # Inputs to the model - can contain template strings.
           module Content
             extend OpenAI::Internal::Type::Union
 
@@ -182,7 +188,9 @@ module OpenAI
               T.any(
                 String,
                 OpenAI::Responses::ResponseInputText,
-                OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText
+                OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText,
+                OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage,
+                T::Array[T.anything]
               )
             end
 
@@ -220,6 +228,57 @@ module OpenAI
               end
             end
 
+            class InputImage < OpenAI::Internal::Type::BaseModel
+              OrHash =
+                T.type_alias do
+                  T.any(
+                    OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage,
+                    OpenAI::Internal::AnyHash
+                  )
+                end
+
+              # The URL of the image input.
+              sig { returns(String) }
+              attr_accessor :image_url
+
+              # The type of the image input. Always `input_image`.
+              sig { returns(Symbol) }
+              attr_accessor :type
+
+              # The detail level of the image to be sent to the model. One of `high`, `low`, or
+              # `auto`. Defaults to `auto`.
+              sig { returns(T.nilable(String)) }
+              attr_reader :detail
+
+              sig { params(detail: String).void }
+              attr_writer :detail
+
+              # An image input to the model.
+              sig do
+                params(image_url: String, detail: String, type: Symbol).returns(
+                  T.attached_class
+                )
+              end
+              def self.new(
+                # The URL of the image input.
+                image_url:,
+                # The detail level of the image to be sent to the model. One of `high`, `low`, or
+                # `auto`. Defaults to `auto`.
+                detail: nil,
+                # The type of the image input. Always `input_image`.
+                type: :input_image
+              )
+              end
+
+              sig do
+                override.returns(
+                  { image_url: String, type: Symbol, detail: String }
+                )
+              end
+              def to_hash
+              end
+            end
+
             sig do
               override.returns(
                 T::Array[
@@ -229,6 +288,14 @@ module OpenAI
              end
              def self.variants
              end
+
+              AnArrayOfInputTextAndInputImageArray =
+                T.let(
+                  OpenAI::Internal::Type::ArrayOf[
+                    OpenAI::Internal::Type::Unknown
+                  ],
+                  OpenAI::Internal::Type::Converter
+                )
            end
 
            # The role of the message input. One of `user`, `assistant`, `system`, or
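
The net effect of these hunks: a grader message's `content` may now be an image, or an arbitrary array, in addition to plain text and output text. A minimal sketch of the new `InputImage` variant, assuming `Input.new` accepts `role:` and `content:` with `type:` defaulted, as typed above:

# Sketch: grader input content using the new InputImage variant
# (shapes taken from the rbi hunks above; surrounding grader fields omitted).
image_part = OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage.new(
  image_url: "https://example.com/render.png",
  detail: "low" # optional: "high", "low", or "auto" (default "auto")
)

message = OpenAI::Graders::ScoreModelGrader::Input.new(
  role: :user,
  content: image_part # a String, text part, InputImage, or mixed array also works
)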
data/rbi/openai/models/image_edit_completed_event.rbi (new file)

@@ -0,0 +1,346 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
+      OrHash =
+        T.type_alias do
+          T.any(OpenAI::ImageEditCompletedEvent, OpenAI::Internal::AnyHash)
+        end
+
+      # Base64-encoded final edited image data, suitable for rendering as an image.
+      sig { returns(String) }
+      attr_accessor :b64_json
+
+      # The background setting for the edited image.
+      sig { returns(OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol) }
+      attr_accessor :background
+
+      # The Unix timestamp when the event was created.
+      sig { returns(Integer) }
+      attr_accessor :created_at
+
+      # The output format for the edited image.
+      sig do
+        returns(OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol)
+      end
+      attr_accessor :output_format
+
+      # The quality setting for the edited image.
+      sig { returns(OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) }
+      attr_accessor :quality
+
+      # The size of the edited image.
+      sig { returns(OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol) }
+      attr_accessor :size
+
+      # The type of the event. Always `image_edit.completed`.
+      sig { returns(Symbol) }
+      attr_accessor :type
+
+      # For `gpt-image-1` only, the token usage information for the image generation.
+      sig { returns(OpenAI::ImageEditCompletedEvent::Usage) }
+      attr_reader :usage
+
+      sig { params(usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash).void }
+      attr_writer :usage
+
+      # Emitted when image editing has completed and the final image is available.
+      sig do
+        params(
+          b64_json: String,
+          background: OpenAI::ImageEditCompletedEvent::Background::OrSymbol,
+          created_at: Integer,
+          output_format:
+            OpenAI::ImageEditCompletedEvent::OutputFormat::OrSymbol,
+          quality: OpenAI::ImageEditCompletedEvent::Quality::OrSymbol,
+          size: OpenAI::ImageEditCompletedEvent::Size::OrSymbol,
+          usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash,
+          type: Symbol
+        ).returns(T.attached_class)
+      end
+      def self.new(
+        # Base64-encoded final edited image data, suitable for rendering as an image.
+        b64_json:,
+        # The background setting for the edited image.
+        background:,
+        # The Unix timestamp when the event was created.
+        created_at:,
+        # The output format for the edited image.
+        output_format:,
+        # The quality setting for the edited image.
+        quality:,
+        # The size of the edited image.
+        size:,
+        # For `gpt-image-1` only, the token usage information for the image generation.
+        usage:,
+        # The type of the event. Always `image_edit.completed`.
+        type: :"image_edit.completed"
+      )
+      end
+
+      sig do
+        override.returns(
+          {
+            b64_json: String,
+            background:
+              OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol,
+            created_at: Integer,
+            output_format:
+              OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol,
+            quality: OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol,
+            size: OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol,
+            type: Symbol,
+            usage: OpenAI::ImageEditCompletedEvent::Usage
+          }
+        )
+      end
+      def to_hash
+      end
+
+      # The background setting for the edited image.
+      module Background
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol =
+          T.type_alias do
+            T.all(Symbol, OpenAI::ImageEditCompletedEvent::Background)
+          end
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        TRANSPARENT =
+          T.let(
+            :transparent,
+            OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
+          )
+        OPAQUE =
+          T.let(
+            :opaque,
+            OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
+          )
+        AUTO =
+          T.let(
+            :auto,
+            OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
+          )
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol]
+          )
+        end
+        def self.values
+        end
+      end
+
+      # The output format for the edited image.
+      module OutputFormat
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol =
+          T.type_alias do
+            T.all(Symbol, OpenAI::ImageEditCompletedEvent::OutputFormat)
+          end
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        PNG =
+          T.let(
+            :png,
+            OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
+          )
+        WEBP =
+          T.let(
+            :webp,
+            OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
+          )
+        JPEG =
+          T.let(
+            :jpeg,
+            OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
+          )
+
+        sig do
+          override.returns(
+            T::Array[
+              OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
+            ]
+          )
+        end
+        def self.values
+        end
+      end
+
+      # The quality setting for the edited image.
+      module Quality
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol =
+          T.type_alias do
+            T.all(Symbol, OpenAI::ImageEditCompletedEvent::Quality)
+          end
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        LOW =
+          T.let(:low, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
+        MEDIUM =
+          T.let(:medium, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
+        HIGH =
+          T.let(:high, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
+        AUTO =
+          T.let(:auto, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol]
+          )
+        end
+        def self.values
+        end
+      end
+
+      # The size of the edited image.
+      module Size
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol =
+          T.type_alias { T.all(Symbol, OpenAI::ImageEditCompletedEvent::Size) }
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        SIZE_1024X1024 =
+          T.let(
+            :"1024x1024",
+            OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
+          )
+        SIZE_1024X1536 =
+          T.let(
+            :"1024x1536",
+            OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
+          )
+        SIZE_1536X1024 =
+          T.let(
+            :"1536x1024",
+            OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
+          )
+        AUTO = T.let(:auto, OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol)
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol]
+          )
+        end
+        def self.values
+        end
+      end
+
+      class Usage < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::ImageEditCompletedEvent::Usage,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The number of tokens (images and text) in the input prompt.
+        sig { returns(Integer) }
+        attr_accessor :input_tokens
+
+        # The input tokens detailed information for the image generation.
+        sig do
+          returns(OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails)
+        end
+        attr_reader :input_tokens_details
+
+        sig do
+          params(
+            input_tokens_details:
+              OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash
+          ).void
+        end
+        attr_writer :input_tokens_details
+
+        # The number of image tokens in the output image.
+        sig { returns(Integer) }
+        attr_accessor :output_tokens
+
+        # The total number of tokens (images and text) used for the image generation.
+        sig { returns(Integer) }
+        attr_accessor :total_tokens
+
+        # For `gpt-image-1` only, the token usage information for the image generation.
+        sig do
+          params(
+            input_tokens: Integer,
+            input_tokens_details:
+              OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash,
+            output_tokens: Integer,
+            total_tokens: Integer
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The number of tokens (images and text) in the input prompt.
+          input_tokens:,
+          # The input tokens detailed information for the image generation.
+          input_tokens_details:,
+          # The number of image tokens in the output image.
+          output_tokens:,
+          # The total number of tokens (images and text) used for the image generation.
+          total_tokens:
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              input_tokens: Integer,
+              input_tokens_details:
+                OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+              output_tokens: Integer,
+              total_tokens: Integer
+            }
+          )
+        end
+        def to_hash
+        end
+
+        class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # The number of image tokens in the input prompt.
+          sig { returns(Integer) }
+          attr_accessor :image_tokens
+
+          # The number of text tokens in the input prompt.
+          sig { returns(Integer) }
+          attr_accessor :text_tokens
+
+          # The input tokens detailed information for the image generation.
+          sig do
+            params(image_tokens: Integer, text_tokens: Integer).returns(
+              T.attached_class
+            )
+          end
+          def self.new(
+            # The number of image tokens in the input prompt.
+            image_tokens:,
+            # The number of text tokens in the input prompt.
+            text_tokens:
+          )
+          end
+
+          sig do
+            override.returns({ image_tokens: Integer, text_tokens: Integer })
+          end
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
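
For orientation, a short sketch of handling the completed event typed above; the arithmetic check reflects the documented meaning of `total_tokens` (input plus output tokens) and is an assumption, not an SDK guarantee:

require "base64"

# Sketch: persisting the final image and reporting gpt-image-1 token usage,
# using only fields declared in the rbi above.
def on_image_edit_completed(event) # OpenAI::ImageEditCompletedEvent
  File.binwrite("edit.#{event.output_format}", Base64.decode64(event.b64_json))

  u = event.usage
  d = u.input_tokens_details
  puts "input: #{u.input_tokens} (#{d.image_tokens} image + #{d.text_tokens} text)"
  puts "output: #{u.output_tokens}, total: #{u.total_tokens}"
  # Assumption: totals are additive; warn rather than fail if they are not.
  warn "unexpected totals" if u.total_tokens != u.input_tokens + u.output_tokens
end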
data/rbi/openai/models/image_edit_params.rbi

@@ -36,6 +36,14 @@ module OpenAI
       sig { returns(T.nilable(OpenAI::ImageEditParams::Background::OrSymbol)) }
       attr_accessor :background
 
+      # Control how much effort the model will exert to match the style and features,
+      # especially facial features, of input images. This parameter is only supported
+      # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+      sig do
+        returns(T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol))
+      end
+      attr_accessor :input_fidelity
+
       # An additional image whose fully transparent areas (e.g. where alpha is zero)
       # indicate where `image` should be edited. If there are multiple images provided,
       # the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -70,6 +78,12 @@ module OpenAI
       end
       attr_accessor :output_format
 
+      # The number of partial images to generate. This parameter is used for streaming
+      # responses that return partial images. Value must be between 0 and 3. When set to
+      # 0, the response will be a single image sent in one streaming event.
+      sig { returns(T.nilable(Integer)) }
+      attr_accessor :partial_images
+
       # The quality of the image that will be generated. `high`, `medium` and `low` are
       # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
       # Defaults to `auto`.
@@ -105,12 +119,15 @@ module OpenAI
           image: OpenAI::ImageEditParams::Image::Variants,
           prompt: String,
           background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+          input_fidelity:
+            T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
           mask: OpenAI::Internal::FileInput,
           model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
           n: T.nilable(Integer),
           output_compression: T.nilable(Integer),
           output_format:
             T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+          partial_images: T.nilable(Integer),
           quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
           response_format:
             T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -139,6 +156,10 @@ module OpenAI
         # If `transparent`, the output format needs to support transparency, so it should
         # be set to either `png` (default value) or `webp`.
         background: nil,
+        # Control how much effort the model will exert to match the style and features,
+        # especially facial features, of input images. This parameter is only supported
+        # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+        input_fidelity: nil,
         # An additional image whose fully transparent areas (e.g. where alpha is zero)
         # indicate where `image` should be edited. If there are multiple images provided,
         # the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -158,6 +179,10 @@ module OpenAI
         # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
         # default value is `png`.
         output_format: nil,
+        # The number of partial images to generate. This parameter is used for streaming
+        # responses that return partial images. Value must be between 0 and 3. When set to
+        # 0, the response will be a single image sent in one streaming event.
+        partial_images: nil,
         # The quality of the image that will be generated. `high`, `medium` and `low` are
         # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
         # Defaults to `auto`.
@@ -186,12 +211,15 @@ module OpenAI
             prompt: String,
             background:
               T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+            input_fidelity:
+              T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
             mask: OpenAI::Internal::FileInput,
             model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
             n: T.nilable(Integer),
             output_compression: T.nilable(Integer),
             output_format:
               T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+            partial_images: T.nilable(Integer),
             quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
             response_format:
               T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -258,6 +286,29 @@ module OpenAI
         end
       end
 
+      # Control how much effort the model will exert to match the style and features,
+      # especially facial features, of input images. This parameter is only supported
+      # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+      module InputFidelity
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol =
+          T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::InputFidelity) }
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        HIGH =
+          T.let(:high, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol)
+        LOW = T.let(:low, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol)
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::ImageEditParams::InputFidelity::TaggedSymbol]
+          )
+        end
+        def self.values
+        end
+      end
+
       # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
       # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
       # is used.