openai 0.13.1 → 0.14.0

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/image_edit_completed_event.rb +198 -0
  5. data/lib/openai/models/image_edit_params.rb +36 -1
  6. data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
  7. data/lib/openai/models/image_edit_stream_event.rb +21 -0
  8. data/lib/openai/models/image_gen_completed_event.rb +198 -0
  9. data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
  10. data/lib/openai/models/image_gen_stream_event.rb +21 -0
  11. data/lib/openai/models/image_generate_params.rb +13 -1
  12. data/lib/openai/models/images_response.rb +3 -0
  13. data/lib/openai/models/responses/response_output_refusal.rb +2 -2
  14. data/lib/openai/models/responses/tool.rb +30 -1
  15. data/lib/openai/models.rb +12 -0
  16. data/lib/openai/resources/images.rb +140 -2
  17. data/lib/openai/version.rb +1 -1
  18. data/lib/openai.rb +6 -0
  19. data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
  20. data/rbi/openai/models/image_edit_params.rbi +51 -0
  21. data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
  22. data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
  23. data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
  24. data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
  25. data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
  26. data/rbi/openai/models/image_generate_params.rbi +12 -0
  27. data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
  28. data/rbi/openai/models/responses/tool.rbi +61 -0
  29. data/rbi/openai/models.rbi +12 -0
  30. data/rbi/openai/resources/images.rbi +225 -0
  31. data/sig/openai/models/image_edit_completed_event.rbs +150 -0
  32. data/sig/openai/models/image_edit_params.rbs +21 -0
  33. data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
  34. data/sig/openai/models/image_edit_stream_event.rbs +12 -0
  35. data/sig/openai/models/image_gen_completed_event.rbs +150 -0
  36. data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
  37. data/sig/openai/models/image_gen_stream_event.rbs +12 -0
  38. data/sig/openai/models/image_generate_params.rbs +5 -0
  39. data/sig/openai/models/responses/tool.rbs +16 -0
  40. data/sig/openai/models.rbs +12 -0
  41. data/sig/openai/resources/images.rbs +38 -0
  42. metadata +20 -2
data/rbi/openai/models/responses/tool.rbi

@@ -565,6 +565,18 @@ module OpenAI
         end
         attr_writer :background

+        # Control how much effort the model will exert to match the style and features,
+        # especially facial features, of input images. This parameter is only supported
+        # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+        sig do
+          returns(
+            T.nilable(
+              OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol
+            )
+          )
+        end
+        attr_accessor :input_fidelity
+
         # Optional mask for inpainting. Contains `image_url` (string, optional) and
         # `file_id` (string, optional).
         sig do
@@ -695,6 +707,10 @@ module OpenAI
           params(
             background:
               OpenAI::Responses::Tool::ImageGeneration::Background::OrSymbol,
+            input_fidelity:
+              T.nilable(
+                OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol
+              ),
             input_image_mask:
               OpenAI::Responses::Tool::ImageGeneration::InputImageMask::OrHash,
             model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol,
@@ -714,6 +730,10 @@ module OpenAI
           # Background type for the generated image. One of `transparent`, `opaque`, or
           # `auto`. Default: `auto`.
           background: nil,
+          # Control how much effort the model will exert to match the style and features,
+          # especially facial features, of input images. This parameter is only supported
+          # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+          input_fidelity: nil,
           # Optional mask for inpainting. Contains `image_url` (string, optional) and
           # `file_id` (string, optional).
           input_image_mask: nil,
@@ -746,6 +766,10 @@ module OpenAI
             type: Symbol,
             background:
               OpenAI::Responses::Tool::ImageGeneration::Background::OrSymbol,
+            input_fidelity:
+              T.nilable(
+                OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol
+              ),
             input_image_mask:
               OpenAI::Responses::Tool::ImageGeneration::InputImageMask,
             model:
@@ -806,6 +830,43 @@ module OpenAI
           end
         end

+        # Control how much effort the model will exert to match the style and features,
+        # especially facial features, of input images. This parameter is only supported
+        # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+        module InputFidelity
+          extend OpenAI::Internal::Type::Enum
+
+          TaggedSymbol =
+            T.type_alias do
+              T.all(
+                Symbol,
+                OpenAI::Responses::Tool::ImageGeneration::InputFidelity
+              )
+            end
+          OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+          HIGH =
+            T.let(
+              :high,
+              OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol
+            )
+          LOW =
+            T.let(
+              :low,
+              OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol
+            )
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol
+              ]
+            )
+          end
+          def self.values
+          end
+        end
+
        class InputImageMask < OpenAI::Internal::Type::BaseModel
          OrHash =
            T.type_alias do
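The new `InputFidelity` enum surfaces as an option on the Responses API's image generation tool. A minimal sketch, assuming a hypothetical prompt and a placeholder model name (the tool hash is coerced into the typed model above):

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Sketch: enable the image_generation tool with the new input_fidelity
# option. :high asks the model to work harder at preserving the style and
# (especially facial) features of any input images; the default is :low.
response = client.responses.create(
  model: "gpt-4.1", # placeholder; any Responses-capable model
  input: "Render the attached photo as a watercolor portrait.",
  tools: [{ type: :image_generation, input_fidelity: :high }]
)
puts response.id

The same parameter also gets a top-level alias block in data/rbi/openai/models.rbi, alongside aliases for the new streaming event models: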
@@ -115,10 +115,22 @@ module OpenAI

  ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams

+ ImageEditCompletedEvent = OpenAI::Models::ImageEditCompletedEvent
+
  ImageEditParams = OpenAI::Models::ImageEditParams

+ ImageEditPartialImageEvent = OpenAI::Models::ImageEditPartialImageEvent
+
+ ImageEditStreamEvent = OpenAI::Models::ImageEditStreamEvent
+
+ ImageGenCompletedEvent = OpenAI::Models::ImageGenCompletedEvent
+
  ImageGenerateParams = OpenAI::Models::ImageGenerateParams

+ ImageGenPartialImageEvent = OpenAI::Models::ImageGenPartialImageEvent
+
+ ImageGenStreamEvent = OpenAI::Models::ImageGenStreamEvent
+
  ImageModel = OpenAI::Models::ImageModel

  ImagesResponse = OpenAI::Models::ImagesResponse
data/rbi/openai/resources/images.rbi

@@ -42,6 +42,8 @@ module OpenAI
        )
      end

+     # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+     #
      # Creates an edited or extended image given one or more source images and a
      # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
      sig do
@@ -49,17 +51,21 @@ module OpenAI
          image: OpenAI::ImageEditParams::Image::Variants,
          prompt: String,
          background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+         input_fidelity:
+           T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
          mask: OpenAI::Internal::FileInput,
          model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
          n: T.nilable(Integer),
          output_compression: T.nilable(Integer),
          output_format:
            T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+         partial_images: T.nilable(Integer),
          quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
          response_format:
            T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
          size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol),
          user: String,
+         stream: T.noreturn,
          request_options: OpenAI::RequestOptions::OrHash
        ).returns(OpenAI::ImagesResponse)
      end
@@ -83,6 +89,10 @@ module OpenAI
        # If `transparent`, the output format needs to support transparency, so it should
        # be set to either `png` (default value) or `webp`.
        background: nil,
+       # Control how much effort the model will exert to match the style and features,
+       # especially facial features, of input images. This parameter is only supported
+       # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+       input_fidelity: nil,
        # An additional image whose fully transparent areas (e.g. where alpha is zero)
        # indicate where `image` should be edited. If there are multiple images provided,
        # the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -102,6 +112,10 @@ module OpenAI
        # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
        # default value is `png`.
        output_format: nil,
+       # The number of partial images to generate. This parameter is used for streaming
+       # responses that return partial images. Value must be between 0 and 3. When set to
+       # 0, the response will be a single image sent in one streaming event.
+       partial_images: nil,
        # The quality of the image that will be generated. `high`, `medium` and `low` are
        # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
        # Defaults to `auto`.
@@ -119,10 +133,115 @@ module OpenAI
        # and detect abuse.
        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
        user: nil,
+       # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for
+       # streaming and non-streaming use cases, respectively.
+       stream: false,
        request_options: {}
      )
      end

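With the new parameters in place, a non-streaming edit call looks like this. A minimal sketch, assuming a local `portrait.png` as the placeholder input file:

require "base64"
require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

edited = client.images.edit(
  image: Pathname("portrait.png"), # placeholder input file
  prompt: "Add a red scarf",
  model: "gpt-image-1", # input_fidelity is only supported for gpt-image-1
  input_fidelity: :high
)

# gpt-image-1 always returns base64-encoded image data.
File.binwrite("edited.png", Base64.decode64(edited.data.first.b64_json))

The same hunk continues with the streaming counterpart, `edit_stream_raw`: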
+     # See {OpenAI::Resources::Images#edit} for non-streaming counterpart.
+     #
+     # Creates an edited or extended image given one or more source images and a
+     # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
+     sig do
+       params(
+         image: OpenAI::ImageEditParams::Image::Variants,
+         prompt: String,
+         background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+         input_fidelity:
+           T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
+         mask: OpenAI::Internal::FileInput,
+         model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
+         n: T.nilable(Integer),
+         output_compression: T.nilable(Integer),
+         output_format:
+           T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+         partial_images: T.nilable(Integer),
+         quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
+         response_format:
+           T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
+         size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol),
+         user: String,
+         stream: T.noreturn,
+         request_options: OpenAI::RequestOptions::OrHash
+       ).returns(
+         OpenAI::Internal::Stream[OpenAI::ImageEditStreamEvent::Variants]
+       )
+     end
+     def edit_stream_raw(
+       # The image(s) to edit. Must be a supported image file or an array of images.
+       #
+       # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+       # 50MB. You can provide up to 16 images.
+       #
+       # For `dall-e-2`, you can only provide one image, and it should be a square `png`
+       # file less than 4MB.
+       image:,
+       # A text description of the desired image(s). The maximum length is 1000
+       # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+       prompt:,
+       # Allows to set transparency for the background of the generated image(s). This
+       # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+       # `opaque` or `auto` (default value). When `auto` is used, the model will
+       # automatically determine the best background for the image.
+       #
+       # If `transparent`, the output format needs to support transparency, so it should
+       # be set to either `png` (default value) or `webp`.
+       background: nil,
+       # Control how much effort the model will exert to match the style and features,
+       # especially facial features, of input images. This parameter is only supported
+       # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+       input_fidelity: nil,
+       # An additional image whose fully transparent areas (e.g. where alpha is zero)
+       # indicate where `image` should be edited. If there are multiple images provided,
+       # the mask will be applied on the first image. Must be a valid PNG file, less than
+       # 4MB, and have the same dimensions as `image`.
+       mask: nil,
+       # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+       # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+       # is used.
+       model: nil,
+       # The number of images to generate. Must be between 1 and 10.
+       n: nil,
+       # The compression level (0-100%) for the generated images. This parameter is only
+       # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+       # defaults to 100.
+       output_compression: nil,
+       # The format in which the generated images are returned. This parameter is only
+       # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+       # default value is `png`.
+       output_format: nil,
+       # The number of partial images to generate. This parameter is used for streaming
+       # responses that return partial images. Value must be between 0 and 3. When set to
+       # 0, the response will be a single image sent in one streaming event.
+       partial_images: nil,
+       # The quality of the image that will be generated. `high`, `medium` and `low` are
+       # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+       # Defaults to `auto`.
+       quality: nil,
+       # The format in which the generated images are returned. Must be one of `url` or
+       # `b64_json`. URLs are only valid for 60 minutes after the image has been
+       # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+       # will always return base64-encoded images.
+       response_format: nil,
+       # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+       # (landscape), `1024x1536` (portrait), or `auto` (default value) for
+       # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+       size: nil,
+       # A unique identifier representing your end-user, which can help OpenAI to monitor
+       # and detect abuse.
+       # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+       user: nil,
+       # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for
+       # streaming and non-streaming use cases, respectively.
+       stream: true,
+       request_options: {}
+     )
+     end
+
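Continuing the sketch above, the stream yields typed events you can dispatch on. `partial_image_index` on the partial event is an assumption carried over from the API's partial-image events, which the new `ImageEditPartialImageEvent` model wraps:

stream = client.images.edit_stream_raw(
  image: Pathname("portrait.png"),
  prompt: "Make the background a sunset",
  model: "gpt-image-1",
  partial_images: 2 # up to two preview frames before the final image
)

stream.each do |event|
  case event
  when OpenAI::Models::ImageEditPartialImageEvent
    # partial_image_index is assumed from the API's partial-image events
    File.binwrite("preview_#{event.partial_image_index}.png",
                  Base64.decode64(event.b64_json))
  when OpenAI::Models::ImageEditCompletedEvent
    File.binwrite("final.png", Base64.decode64(event.b64_json))
  end
end

Next, `generate` picks up the same streaming plumbing: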
+     # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+     #
      # Creates an image given a prompt.
      # [Learn more](https://platform.openai.com/docs/guides/images).
      sig do
@@ -137,12 +256,14 @@ module OpenAI
          output_compression: T.nilable(Integer),
          output_format:
            T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol),
+         partial_images: T.nilable(Integer),
          quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol),
          response_format:
            T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol),
          size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol),
          style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol),
          user: String,
+         stream: T.noreturn,
          request_options: OpenAI::RequestOptions::OrHash
        ).returns(OpenAI::ImagesResponse)
      end
@@ -176,6 +297,107 @@ module OpenAI
        # The format in which the generated images are returned. This parameter is only
        # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
        output_format: nil,
+       # The number of partial images to generate. This parameter is used for streaming
+       # responses that return partial images. Value must be between 0 and 3. When set to
+       # 0, the response will be a single image sent in one streaming event.
+       partial_images: nil,
+       # The quality of the image that will be generated.
+       #
+       # - `auto` (default value) will automatically select the best quality for the
+       #   given model.
+       # - `high`, `medium` and `low` are supported for `gpt-image-1`.
+       # - `hd` and `standard` are supported for `dall-e-3`.
+       # - `standard` is the only option for `dall-e-2`.
+       quality: nil,
+       # The format in which generated images with `dall-e-2` and `dall-e-3` are
+       # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+       # after the image has been generated. This parameter isn't supported for
+       # `gpt-image-1` which will always return base64-encoded images.
+       response_format: nil,
+       # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+       # (landscape), `1024x1536` (portrait), or `auto` (default value) for
+       # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+       # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+       size: nil,
+       # The style of the generated images. This parameter is only supported for
+       # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+       # towards generating hyper-real and dramatic images. Natural causes the model to
+       # produce more natural, less hyper-real looking images.
+       style: nil,
+       # A unique identifier representing your end-user, which can help OpenAI to monitor
+       # and detect abuse.
+       # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+       user: nil,
+       # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate`
+       # for streaming and non-streaming use cases, respectively.
+       stream: false,
+       request_options: {}
+     )
+     end
+
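The quality/size/style combinations documented above can be exercised directly. A small sketch, reusing the client from earlier and picking arbitrary dall-e-3 options:

image = client.images.generate(
  prompt: "A minimalist poster of a lighthouse",
  model: "dall-e-3",
  quality: :hd,       # `hd`/`standard` are the dall-e-3 options
  size: :"1792x1024",
  style: :natural
)
puts image.data.first.url # dall-e-2/3 return URLs by default

The diff continues with `generate_stream_raw`: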
+     # See {OpenAI::Resources::Images#generate} for non-streaming counterpart.
+     #
+     # Creates an image given a prompt.
+     # [Learn more](https://platform.openai.com/docs/guides/images).
+     sig do
+       params(
+         prompt: String,
+         background:
+           T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol),
+         model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
+         moderation:
+           T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol),
+         n: T.nilable(Integer),
+         output_compression: T.nilable(Integer),
+         output_format:
+           T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol),
+         partial_images: T.nilable(Integer),
+         quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol),
+         response_format:
+           T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol),
+         size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol),
+         style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol),
+         user: String,
+         stream: T.noreturn,
+         request_options: OpenAI::RequestOptions::OrHash
+       ).returns(
+         OpenAI::Internal::Stream[OpenAI::ImageGenStreamEvent::Variants]
+       )
+     end
+     def generate_stream_raw(
+       # A text description of the desired image(s). The maximum length is 32000
+       # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+       # for `dall-e-3`.
+       prompt:,
+       # Allows to set transparency for the background of the generated image(s). This
+       # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+       # `opaque` or `auto` (default value). When `auto` is used, the model will
+       # automatically determine the best background for the image.
+       #
+       # If `transparent`, the output format needs to support transparency, so it should
+       # be set to either `png` (default value) or `webp`.
+       background: nil,
+       # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+       # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+       # `gpt-image-1` is used.
+       model: nil,
+       # Control the content-moderation level for images generated by `gpt-image-1`. Must
+       # be either `low` for less restrictive filtering or `auto` (default value).
+       moderation: nil,
+       # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+       # `n=1` is supported.
+       n: nil,
+       # The compression level (0-100%) for the generated images. This parameter is only
+       # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+       # defaults to 100.
+       output_compression: nil,
+       # The format in which the generated images are returned. This parameter is only
+       # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+       output_format: nil,
+       # The number of partial images to generate. This parameter is used for streaming
+       # responses that return partial images. Value must be between 0 and 3. When set to
+       # 0, the response will be a single image sent in one streaming event.
+       partial_images: nil,
        # The quality of the image that will be generated.
        #
        # - `auto` (default value) will automatically select the best quality for the
@@ -203,6 +425,9 @@ module OpenAI
        # and detect abuse.
        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
        user: nil,
+       # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate`
+       # for streaming and non-streaming use cases, respectively.
+       stream: true,
        request_options: {}
      )
      end
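Streaming generation mirrors the edit flow. A minimal sketch (prompt and sizes are placeholder choices):

stream = client.images.generate_stream_raw(
  prompt: "A watercolor lighthouse at dawn",
  model: "gpt-image-1",
  partial_images: 3,
  size: :"1024x1536"
)

# Events arrive as ImageGenPartialImageEvent / ImageGenCompletedEvent variants.
stream.each { |event| puts event.type }

The new RBS signatures below spell out the event payloads in detail: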
data/sig/openai/models/image_edit_completed_event.rbs (new file)

@@ -0,0 +1,150 @@
+ module OpenAI
+   module Models
+     type image_edit_completed_event =
+       {
+         :b64_json => String,
+         background: OpenAI::Models::ImageEditCompletedEvent::background,
+         created_at: Integer,
+         output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+         quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+         size: OpenAI::Models::ImageEditCompletedEvent::size,
+         type: :"image_edit.completed",
+         usage: OpenAI::ImageEditCompletedEvent::Usage
+       }
+
+     class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
+       attr_accessor b64_json: String
+
+       attr_accessor background: OpenAI::Models::ImageEditCompletedEvent::background
+
+       attr_accessor created_at: Integer
+
+       attr_accessor output_format: OpenAI::Models::ImageEditCompletedEvent::output_format
+
+       attr_accessor quality: OpenAI::Models::ImageEditCompletedEvent::quality
+
+       attr_accessor size: OpenAI::Models::ImageEditCompletedEvent::size
+
+       attr_accessor type: :"image_edit.completed"
+
+       attr_accessor usage: OpenAI::ImageEditCompletedEvent::Usage
+
+       def initialize: (
+         b64_json: String,
+         background: OpenAI::Models::ImageEditCompletedEvent::background,
+         created_at: Integer,
+         output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+         quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+         size: OpenAI::Models::ImageEditCompletedEvent::size,
+         usage: OpenAI::ImageEditCompletedEvent::Usage,
+         ?type: :"image_edit.completed"
+       ) -> void
+
+       def to_hash: -> {
+         :b64_json => String,
+         background: OpenAI::Models::ImageEditCompletedEvent::background,
+         created_at: Integer,
+         output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+         quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+         size: OpenAI::Models::ImageEditCompletedEvent::size,
+         type: :"image_edit.completed",
+         usage: OpenAI::ImageEditCompletedEvent::Usage
+       }
+
+       type background = :transparent | :opaque | :auto
+
+       module Background
+         extend OpenAI::Internal::Type::Enum
+
+         TRANSPARENT: :transparent
+         OPAQUE: :opaque
+         AUTO: :auto
+
+         def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::background]
+       end
+
+       type output_format = :png | :webp | :jpeg
+
+       module OutputFormat
+         extend OpenAI::Internal::Type::Enum
+
+         PNG: :png
+         WEBP: :webp
+         JPEG: :jpeg
+
+         def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::output_format]
+       end
+
+       type quality = :low | :medium | :high | :auto
+
+       module Quality
+         extend OpenAI::Internal::Type::Enum
+
+         LOW: :low
+         MEDIUM: :medium
+         HIGH: :high
+         AUTO: :auto
+
+         def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::quality]
+       end
+
+       type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto
+
+       module Size
+         extend OpenAI::Internal::Type::Enum
+
+         SIZE_1024X1024: :"1024x1024"
+         SIZE_1024X1536: :"1024x1536"
+         SIZE_1536X1024: :"1536x1024"
+         AUTO: :auto
+
+         def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::size]
+       end
+
+       type usage =
+         {
+           input_tokens: Integer,
+           input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+           output_tokens: Integer,
+           total_tokens: Integer
+         }
+
+       class Usage < OpenAI::Internal::Type::BaseModel
+         attr_accessor input_tokens: Integer
+
+         attr_accessor input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails
+
+         attr_accessor output_tokens: Integer
+
+         attr_accessor total_tokens: Integer
+
+         def initialize: (
+           input_tokens: Integer,
+           input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+           output_tokens: Integer,
+           total_tokens: Integer
+         ) -> void
+
+         def to_hash: -> {
+           input_tokens: Integer,
+           input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+           output_tokens: Integer,
+           total_tokens: Integer
+         }
+
+         type input_tokens_details =
+           { image_tokens: Integer, text_tokens: Integer }
+
+         class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+           attr_accessor image_tokens: Integer
+
+           attr_accessor text_tokens: Integer
+
+           def initialize: (image_tokens: Integer, text_tokens: Integer) -> void
+
+           def to_hash: -> { image_tokens: Integer, text_tokens: Integer }
+         end
+       end
+     end
+   end
+ end
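The completed event's `usage` block makes token accounting straightforward. A small sketch against a fresh stream from `edit_stream_raw` (inputs are placeholders, as above):

stream = client.images.edit_stream_raw(
  image: Pathname("portrait.png"),
  prompt: "Add a red scarf",
  model: "gpt-image-1"
)

stream.each do |event|
  next unless event.type == :"image_edit.completed"

  usage = event.usage
  details = usage.input_tokens_details
  puts "input #{usage.input_tokens} (#{details.image_tokens} image, " \
       "#{details.text_tokens} text) / output #{usage.output_tokens} / " \
       "total #{usage.total_tokens}"
end

Finally, the plain RBS params model picks up the same two fields: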
data/sig/openai/models/image_edit_params.rbs

@@ -5,11 +5,13 @@ module OpenAI
        image: OpenAI::Models::ImageEditParams::image,
        prompt: String,
        background: OpenAI::Models::ImageEditParams::background?,
+       input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
        mask: OpenAI::Internal::file_input,
        model: OpenAI::Models::ImageEditParams::model?,
        n: Integer?,
        output_compression: Integer?,
        output_format: OpenAI::Models::ImageEditParams::output_format?,
+       partial_images: Integer?,
        quality: OpenAI::Models::ImageEditParams::quality?,
        response_format: OpenAI::Models::ImageEditParams::response_format?,
        size: OpenAI::Models::ImageEditParams::size?,
@@ -27,6 +29,8 @@ module OpenAI

      attr_accessor background: OpenAI::Models::ImageEditParams::background?

+     attr_accessor input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?
+
      attr_reader mask: OpenAI::Internal::file_input?

      def mask=: (OpenAI::Internal::file_input) -> OpenAI::Internal::file_input
@@ -39,6 +43,8 @@ module OpenAI

      attr_accessor output_format: OpenAI::Models::ImageEditParams::output_format?

+     attr_accessor partial_images: Integer?
+
      attr_accessor quality: OpenAI::Models::ImageEditParams::quality?

      attr_accessor response_format: OpenAI::Models::ImageEditParams::response_format?
@@ -53,11 +59,13 @@ module OpenAI
        image: OpenAI::Models::ImageEditParams::image,
        prompt: String,
        ?background: OpenAI::Models::ImageEditParams::background?,
+       ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
        ?mask: OpenAI::Internal::file_input,
        ?model: OpenAI::Models::ImageEditParams::model?,
        ?n: Integer?,
        ?output_compression: Integer?,
        ?output_format: OpenAI::Models::ImageEditParams::output_format?,
+       ?partial_images: Integer?,
        ?quality: OpenAI::Models::ImageEditParams::quality?,
        ?response_format: OpenAI::Models::ImageEditParams::response_format?,
        ?size: OpenAI::Models::ImageEditParams::size?,
@@ -69,11 +77,13 @@ module OpenAI
        image: OpenAI::Models::ImageEditParams::image,
        prompt: String,
        background: OpenAI::Models::ImageEditParams::background?,
+       input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
        mask: OpenAI::Internal::file_input,
        model: OpenAI::Models::ImageEditParams::model?,
        n: Integer?,
        output_compression: Integer?,
        output_format: OpenAI::Models::ImageEditParams::output_format?,
+       partial_images: Integer?,
        quality: OpenAI::Models::ImageEditParams::quality?,
        response_format: OpenAI::Models::ImageEditParams::response_format?,
        size: OpenAI::Models::ImageEditParams::size?,
@@ -104,6 +114,17 @@ module OpenAI
      def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::background]
    end

+   type input_fidelity = :high | :low
+
+   module InputFidelity
+     extend OpenAI::Internal::Type::Enum
+
+     HIGH: :high
+     LOW: :low
+
+     def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::input_fidelity]
+   end
+
    type model = String | OpenAI::Models::image_model

    module Model