openai 0.13.1 → 0.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/image_edit_completed_event.rb +198 -0
  5. data/lib/openai/models/image_edit_params.rb +36 -1
  6. data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
  7. data/lib/openai/models/image_edit_stream_event.rb +21 -0
  8. data/lib/openai/models/image_gen_completed_event.rb +198 -0
  9. data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
  10. data/lib/openai/models/image_gen_stream_event.rb +21 -0
  11. data/lib/openai/models/image_generate_params.rb +13 -1
  12. data/lib/openai/models/images_response.rb +3 -0
  13. data/lib/openai/models/responses/response_output_refusal.rb +2 -2
  14. data/lib/openai/models/responses/tool.rb +30 -1
  15. data/lib/openai/models.rb +12 -0
  16. data/lib/openai/resources/images.rb +140 -2
  17. data/lib/openai/version.rb +1 -1
  18. data/lib/openai.rb +6 -0
  19. data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
  20. data/rbi/openai/models/image_edit_params.rbi +51 -0
  21. data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
  22. data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
  23. data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
  24. data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
  25. data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
  26. data/rbi/openai/models/image_generate_params.rbi +12 -0
  27. data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
  28. data/rbi/openai/models/responses/tool.rbi +61 -0
  29. data/rbi/openai/models.rbi +12 -0
  30. data/rbi/openai/resources/images.rbi +225 -0
  31. data/sig/openai/models/image_edit_completed_event.rbs +150 -0
  32. data/sig/openai/models/image_edit_params.rbs +21 -0
  33. data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
  34. data/sig/openai/models/image_edit_stream_event.rbs +12 -0
  35. data/sig/openai/models/image_gen_completed_event.rbs +150 -0
  36. data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
  37. data/sig/openai/models/image_gen_stream_event.rbs +12 -0
  38. data/sig/openai/models/image_generate_params.rbs +5 -0
  39. data/sig/openai/models/responses/tool.rbs +16 -0
  40. data/sig/openai/models.rbs +12 -0
  41. data/sig/openai/resources/images.rbs +38 -0
  42. metadata +20 -2
@@ -0,0 +1,346 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
6
+ OrHash =
7
+ T.type_alias do
8
+ T.any(OpenAI::ImageEditCompletedEvent, OpenAI::Internal::AnyHash)
9
+ end
10
+
11
+ # Base64-encoded final edited image data, suitable for rendering as an image.
12
+ sig { returns(String) }
13
+ attr_accessor :b64_json
14
+
15
+ # The background setting for the edited image.
16
+ sig { returns(OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol) }
17
+ attr_accessor :background
18
+
19
+ # The Unix timestamp when the event was created.
20
+ sig { returns(Integer) }
21
+ attr_accessor :created_at
22
+
23
+ # The output format for the edited image.
24
+ sig do
25
+ returns(OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol)
26
+ end
27
+ attr_accessor :output_format
28
+
29
+ # The quality setting for the edited image.
30
+ sig { returns(OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) }
31
+ attr_accessor :quality
32
+
33
+ # The size of the edited image.
34
+ sig { returns(OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol) }
35
+ attr_accessor :size
36
+
37
+ # The type of the event. Always `image_edit.completed`.
38
+ sig { returns(Symbol) }
39
+ attr_accessor :type
40
+
41
+ # For `gpt-image-1` only, the token usage information for the image generation.
42
+ sig { returns(OpenAI::ImageEditCompletedEvent::Usage) }
43
+ attr_reader :usage
44
+
45
+ sig { params(usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash).void }
46
+ attr_writer :usage
47
+
48
+ # Emitted when image editing has completed and the final image is available.
49
+ sig do
50
+ params(
51
+ b64_json: String,
52
+ background: OpenAI::ImageEditCompletedEvent::Background::OrSymbol,
53
+ created_at: Integer,
54
+ output_format:
55
+ OpenAI::ImageEditCompletedEvent::OutputFormat::OrSymbol,
56
+ quality: OpenAI::ImageEditCompletedEvent::Quality::OrSymbol,
57
+ size: OpenAI::ImageEditCompletedEvent::Size::OrSymbol,
58
+ usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash,
59
+ type: Symbol
60
+ ).returns(T.attached_class)
61
+ end
62
+ def self.new(
63
+ # Base64-encoded final edited image data, suitable for rendering as an image.
64
+ b64_json:,
65
+ # The background setting for the edited image.
66
+ background:,
67
+ # The Unix timestamp when the event was created.
68
+ created_at:,
69
+ # The output format for the edited image.
70
+ output_format:,
71
+ # The quality setting for the edited image.
72
+ quality:,
73
+ # The size of the edited image.
74
+ size:,
75
+ # For `gpt-image-1` only, the token usage information for the image generation.
76
+ usage:,
77
+ # The type of the event. Always `image_edit.completed`.
78
+ type: :"image_edit.completed"
79
+ )
80
+ end
81
+
82
+ sig do
83
+ override.returns(
84
+ {
85
+ b64_json: String,
86
+ background:
87
+ OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol,
88
+ created_at: Integer,
89
+ output_format:
90
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol,
91
+ quality: OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol,
92
+ size: OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol,
93
+ type: Symbol,
94
+ usage: OpenAI::ImageEditCompletedEvent::Usage
95
+ }
96
+ )
97
+ end
98
+ def to_hash
99
+ end
100
+
101
+ # The background setting for the edited image.
102
+ module Background
103
+ extend OpenAI::Internal::Type::Enum
104
+
105
+ TaggedSymbol =
106
+ T.type_alias do
107
+ T.all(Symbol, OpenAI::ImageEditCompletedEvent::Background)
108
+ end
109
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
110
+
111
+ TRANSPARENT =
112
+ T.let(
113
+ :transparent,
114
+ OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
115
+ )
116
+ OPAQUE =
117
+ T.let(
118
+ :opaque,
119
+ OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
120
+ )
121
+ AUTO =
122
+ T.let(
123
+ :auto,
124
+ OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
125
+ )
126
+
127
+ sig do
128
+ override.returns(
129
+ T::Array[OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol]
130
+ )
131
+ end
132
+ def self.values
133
+ end
134
+ end
135
+
136
+ # The output format for the edited image.
137
+ module OutputFormat
138
+ extend OpenAI::Internal::Type::Enum
139
+
140
+ TaggedSymbol =
141
+ T.type_alias do
142
+ T.all(Symbol, OpenAI::ImageEditCompletedEvent::OutputFormat)
143
+ end
144
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
145
+
146
+ PNG =
147
+ T.let(
148
+ :png,
149
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
150
+ )
151
+ WEBP =
152
+ T.let(
153
+ :webp,
154
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
155
+ )
156
+ JPEG =
157
+ T.let(
158
+ :jpeg,
159
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
160
+ )
161
+
162
+ sig do
163
+ override.returns(
164
+ T::Array[
165
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
166
+ ]
167
+ )
168
+ end
169
+ def self.values
170
+ end
171
+ end
172
+
173
+ # The quality setting for the edited image.
174
+ module Quality
175
+ extend OpenAI::Internal::Type::Enum
176
+
177
+ TaggedSymbol =
178
+ T.type_alias do
179
+ T.all(Symbol, OpenAI::ImageEditCompletedEvent::Quality)
180
+ end
181
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
182
+
183
+ LOW =
184
+ T.let(:low, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
185
+ MEDIUM =
186
+ T.let(:medium, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
187
+ HIGH =
188
+ T.let(:high, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
189
+ AUTO =
190
+ T.let(:auto, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
191
+
192
+ sig do
193
+ override.returns(
194
+ T::Array[OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol]
195
+ )
196
+ end
197
+ def self.values
198
+ end
199
+ end
200
+
201
+ # The size of the edited image.
202
+ module Size
203
+ extend OpenAI::Internal::Type::Enum
204
+
205
+ TaggedSymbol =
206
+ T.type_alias { T.all(Symbol, OpenAI::ImageEditCompletedEvent::Size) }
207
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
208
+
209
+ SIZE_1024X1024 =
210
+ T.let(
211
+ :"1024x1024",
212
+ OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
213
+ )
214
+ SIZE_1024X1536 =
215
+ T.let(
216
+ :"1024x1536",
217
+ OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
218
+ )
219
+ SIZE_1536X1024 =
220
+ T.let(
221
+ :"1536x1024",
222
+ OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
223
+ )
224
+ AUTO = T.let(:auto, OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol)
225
+
226
+ sig do
227
+ override.returns(
228
+ T::Array[OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol]
229
+ )
230
+ end
231
+ def self.values
232
+ end
233
+ end
234
+
235
+ class Usage < OpenAI::Internal::Type::BaseModel
236
+ OrHash =
237
+ T.type_alias do
238
+ T.any(
239
+ OpenAI::ImageEditCompletedEvent::Usage,
240
+ OpenAI::Internal::AnyHash
241
+ )
242
+ end
243
+
244
+ # The number of tokens (images and text) in the input prompt.
245
+ sig { returns(Integer) }
246
+ attr_accessor :input_tokens
247
+
248
+ # The input tokens detailed information for the image generation.
249
+ sig do
250
+ returns(OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails)
251
+ end
252
+ attr_reader :input_tokens_details
253
+
254
+ sig do
255
+ params(
256
+ input_tokens_details:
257
+ OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash
258
+ ).void
259
+ end
260
+ attr_writer :input_tokens_details
261
+
262
+ # The number of image tokens in the output image.
263
+ sig { returns(Integer) }
264
+ attr_accessor :output_tokens
265
+
266
+ # The total number of tokens (images and text) used for the image generation.
267
+ sig { returns(Integer) }
268
+ attr_accessor :total_tokens
269
+
270
+ # For `gpt-image-1` only, the token usage information for the image generation.
271
+ sig do
272
+ params(
273
+ input_tokens: Integer,
274
+ input_tokens_details:
275
+ OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash,
276
+ output_tokens: Integer,
277
+ total_tokens: Integer
278
+ ).returns(T.attached_class)
279
+ end
280
+ def self.new(
281
+ # The number of tokens (images and text) in the input prompt.
282
+ input_tokens:,
283
+ # The input tokens detailed information for the image generation.
284
+ input_tokens_details:,
285
+ # The number of image tokens in the output image.
286
+ output_tokens:,
287
+ # The total number of tokens (images and text) used for the image generation.
288
+ total_tokens:
289
+ )
290
+ end
291
+
292
+ sig do
293
+ override.returns(
294
+ {
295
+ input_tokens: Integer,
296
+ input_tokens_details:
297
+ OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
298
+ output_tokens: Integer,
299
+ total_tokens: Integer
300
+ }
301
+ )
302
+ end
303
+ def to_hash
304
+ end
305
+
306
+ class InputTokensDetails < OpenAI::Internal::Type::BaseModel
307
+ OrHash =
308
+ T.type_alias do
309
+ T.any(
310
+ OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
311
+ OpenAI::Internal::AnyHash
312
+ )
313
+ end
314
+
315
+ # The number of image tokens in the input prompt.
316
+ sig { returns(Integer) }
317
+ attr_accessor :image_tokens
318
+
319
+ # The number of text tokens in the input prompt.
320
+ sig { returns(Integer) }
321
+ attr_accessor :text_tokens
322
+
323
+ # The input tokens detailed information for the image generation.
324
+ sig do
325
+ params(image_tokens: Integer, text_tokens: Integer).returns(
326
+ T.attached_class
327
+ )
328
+ end
329
+ def self.new(
330
+ # The number of image tokens in the input prompt.
331
+ image_tokens:,
332
+ # The number of text tokens in the input prompt.
333
+ text_tokens:
334
+ )
335
+ end
336
+
337
+ sig do
338
+ override.returns({ image_tokens: Integer, text_tokens: Integer })
339
+ end
340
+ def to_hash
341
+ end
342
+ end
343
+ end
344
+ end
345
+ end
346
+ end
@@ -36,6 +36,14 @@ module OpenAI
36
36
  sig { returns(T.nilable(OpenAI::ImageEditParams::Background::OrSymbol)) }
37
37
  attr_accessor :background
38
38
 
39
+ # Control how much effort the model will exert to match the style and features,
40
+ # especially facial features, of input images. This parameter is only supported
41
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
42
+ sig do
43
+ returns(T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol))
44
+ end
45
+ attr_accessor :input_fidelity
46
+
39
47
  # An additional image whose fully transparent areas (e.g. where alpha is zero)
40
48
  # indicate where `image` should be edited. If there are multiple images provided,
41
49
  # the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -70,6 +78,12 @@ module OpenAI
70
78
  end
71
79
  attr_accessor :output_format
72
80
 
81
+ # The number of partial images to generate. This parameter is used for streaming
82
+ # responses that return partial images. Value must be between 0 and 3. When set to
83
+ # 0, the response will be a single image sent in one streaming event.
84
+ sig { returns(T.nilable(Integer)) }
85
+ attr_accessor :partial_images
86
+
73
87
  # The quality of the image that will be generated. `high`, `medium` and `low` are
74
88
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
75
89
  # Defaults to `auto`.
@@ -105,12 +119,15 @@ module OpenAI
105
119
  image: OpenAI::ImageEditParams::Image::Variants,
106
120
  prompt: String,
107
121
  background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
122
+ input_fidelity:
123
+ T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
108
124
  mask: OpenAI::Internal::FileInput,
109
125
  model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
110
126
  n: T.nilable(Integer),
111
127
  output_compression: T.nilable(Integer),
112
128
  output_format:
113
129
  T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
130
+ partial_images: T.nilable(Integer),
114
131
  quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
115
132
  response_format:
116
133
  T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -139,6 +156,10 @@ module OpenAI
139
156
  # If `transparent`, the output format needs to support transparency, so it should
140
157
  # be set to either `png` (default value) or `webp`.
141
158
  background: nil,
159
+ # Control how much effort the model will exert to match the style and features,
160
+ # especially facial features, of input images. This parameter is only supported
161
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
162
+ input_fidelity: nil,
142
163
  # An additional image whose fully transparent areas (e.g. where alpha is zero)
143
164
  # indicate where `image` should be edited. If there are multiple images provided,
144
165
  # the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -158,6 +179,10 @@ module OpenAI
158
179
  # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
159
180
  # default value is `png`.
160
181
  output_format: nil,
182
+ # The number of partial images to generate. This parameter is used for streaming
183
+ # responses that return partial images. Value must be between 0 and 3. When set to
184
+ # 0, the response will be a single image sent in one streaming event.
185
+ partial_images: nil,
161
186
  # The quality of the image that will be generated. `high`, `medium` and `low` are
162
187
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
163
188
  # Defaults to `auto`.
@@ -186,12 +211,15 @@ module OpenAI
186
211
  prompt: String,
187
212
  background:
188
213
  T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
214
+ input_fidelity:
215
+ T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
189
216
  mask: OpenAI::Internal::FileInput,
190
217
  model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
191
218
  n: T.nilable(Integer),
192
219
  output_compression: T.nilable(Integer),
193
220
  output_format:
194
221
  T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
222
+ partial_images: T.nilable(Integer),
195
223
  quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
196
224
  response_format:
197
225
  T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -258,6 +286,29 @@ module OpenAI
258
286
  end
259
287
  end
260
288
 
289
+ # Control how much effort the model will exert to match the style and features,
290
+ # especially facial features, of input images. This parameter is only supported
291
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
292
+ module InputFidelity
293
+ extend OpenAI::Internal::Type::Enum
294
+
295
+ TaggedSymbol =
296
+ T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::InputFidelity) }
297
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
298
+
299
+ HIGH =
300
+ T.let(:high, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol)
301
+ LOW = T.let(:low, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol)
302
+
303
+ sig do
304
+ override.returns(
305
+ T::Array[OpenAI::ImageEditParams::InputFidelity::TaggedSymbol]
306
+ )
307
+ end
308
+ def self.values
309
+ end
310
+ end
311
+
261
312
  # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
262
313
  # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
263
314
  # is used.
@@ -0,0 +1,249 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel
6
+ OrHash =
7
+ T.type_alias do
8
+ T.any(OpenAI::ImageEditPartialImageEvent, OpenAI::Internal::AnyHash)
9
+ end
10
+
11
+ # Base64-encoded partial image data, suitable for rendering as an image.
12
+ sig { returns(String) }
13
+ attr_accessor :b64_json
14
+
15
+ # The background setting for the requested edited image.
16
+ sig do
17
+ returns(OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol)
18
+ end
19
+ attr_accessor :background
20
+
21
+ # The Unix timestamp when the event was created.
22
+ sig { returns(Integer) }
23
+ attr_accessor :created_at
24
+
25
+ # The output format for the requested edited image.
26
+ sig do
27
+ returns(OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol)
28
+ end
29
+ attr_accessor :output_format
30
+
31
+ # 0-based index for the partial image (streaming).
32
+ sig { returns(Integer) }
33
+ attr_accessor :partial_image_index
34
+
35
+ # The quality setting for the requested edited image.
36
+ sig { returns(OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol) }
37
+ attr_accessor :quality
38
+
39
+ # The size of the requested edited image.
40
+ sig { returns(OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol) }
41
+ attr_accessor :size
42
+
43
+ # The type of the event. Always `image_edit.partial_image`.
44
+ sig { returns(Symbol) }
45
+ attr_accessor :type
46
+
47
+ # Emitted when a partial image is available during image editing streaming.
48
+ sig do
49
+ params(
50
+ b64_json: String,
51
+ background: OpenAI::ImageEditPartialImageEvent::Background::OrSymbol,
52
+ created_at: Integer,
53
+ output_format:
54
+ OpenAI::ImageEditPartialImageEvent::OutputFormat::OrSymbol,
55
+ partial_image_index: Integer,
56
+ quality: OpenAI::ImageEditPartialImageEvent::Quality::OrSymbol,
57
+ size: OpenAI::ImageEditPartialImageEvent::Size::OrSymbol,
58
+ type: Symbol
59
+ ).returns(T.attached_class)
60
+ end
61
+ def self.new(
62
+ # Base64-encoded partial image data, suitable for rendering as an image.
63
+ b64_json:,
64
+ # The background setting for the requested edited image.
65
+ background:,
66
+ # The Unix timestamp when the event was created.
67
+ created_at:,
68
+ # The output format for the requested edited image.
69
+ output_format:,
70
+ # 0-based index for the partial image (streaming).
71
+ partial_image_index:,
72
+ # The quality setting for the requested edited image.
73
+ quality:,
74
+ # The size of the requested edited image.
75
+ size:,
76
+ # The type of the event. Always `image_edit.partial_image`.
77
+ type: :"image_edit.partial_image"
78
+ )
79
+ end
80
+
81
+ sig do
82
+ override.returns(
83
+ {
84
+ b64_json: String,
85
+ background:
86
+ OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol,
87
+ created_at: Integer,
88
+ output_format:
89
+ OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol,
90
+ partial_image_index: Integer,
91
+ quality: OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol,
92
+ size: OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol,
93
+ type: Symbol
94
+ }
95
+ )
96
+ end
97
+ def to_hash
98
+ end
99
+
100
+ # The background setting for the requested edited image.
101
+ module Background
102
+ extend OpenAI::Internal::Type::Enum
103
+
104
+ TaggedSymbol =
105
+ T.type_alias do
106
+ T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Background)
107
+ end
108
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
109
+
110
+ TRANSPARENT =
111
+ T.let(
112
+ :transparent,
113
+ OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol
114
+ )
115
+ OPAQUE =
116
+ T.let(
117
+ :opaque,
118
+ OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol
119
+ )
120
+ AUTO =
121
+ T.let(
122
+ :auto,
123
+ OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol
124
+ )
125
+
126
+ sig do
127
+ override.returns(
128
+ T::Array[
129
+ OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol
130
+ ]
131
+ )
132
+ end
133
+ def self.values
134
+ end
135
+ end
136
+
137
+ # The output format for the requested edited image.
138
+ module OutputFormat
139
+ extend OpenAI::Internal::Type::Enum
140
+
141
+ TaggedSymbol =
142
+ T.type_alias do
143
+ T.all(Symbol, OpenAI::ImageEditPartialImageEvent::OutputFormat)
144
+ end
145
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
146
+
147
+ PNG =
148
+ T.let(
149
+ :png,
150
+ OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol
151
+ )
152
+ WEBP =
153
+ T.let(
154
+ :webp,
155
+ OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol
156
+ )
157
+ JPEG =
158
+ T.let(
159
+ :jpeg,
160
+ OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol
161
+ )
162
+
163
+ sig do
164
+ override.returns(
165
+ T::Array[
166
+ OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol
167
+ ]
168
+ )
169
+ end
170
+ def self.values
171
+ end
172
+ end
173
+
174
+ # The quality setting for the requested edited image.
175
+ module Quality
176
+ extend OpenAI::Internal::Type::Enum
177
+
178
+ TaggedSymbol =
179
+ T.type_alias do
180
+ T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Quality)
181
+ end
182
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
183
+
184
+ LOW =
185
+ T.let(:low, OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol)
186
+ MEDIUM =
187
+ T.let(
188
+ :medium,
189
+ OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol
190
+ )
191
+ HIGH =
192
+ T.let(
193
+ :high,
194
+ OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol
195
+ )
196
+ AUTO =
197
+ T.let(
198
+ :auto,
199
+ OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol
200
+ )
201
+
202
+ sig do
203
+ override.returns(
204
+ T::Array[OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol]
205
+ )
206
+ end
207
+ def self.values
208
+ end
209
+ end
210
+
211
+ # The size of the requested edited image.
212
+ module Size
213
+ extend OpenAI::Internal::Type::Enum
214
+
215
+ TaggedSymbol =
216
+ T.type_alias do
217
+ T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Size)
218
+ end
219
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
220
+
221
+ SIZE_1024X1024 =
222
+ T.let(
223
+ :"1024x1024",
224
+ OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol
225
+ )
226
+ SIZE_1024X1536 =
227
+ T.let(
228
+ :"1024x1536",
229
+ OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol
230
+ )
231
+ SIZE_1536X1024 =
232
+ T.let(
233
+ :"1536x1024",
234
+ OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol
235
+ )
236
+ AUTO =
237
+ T.let(:auto, OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol)
238
+
239
+ sig do
240
+ override.returns(
241
+ T::Array[OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol]
242
+ )
243
+ end
244
+ def self.values
245
+ end
246
+ end
247
+ end
248
+ end
249
+ end