openai 0.40.0 → 0.41.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +15 -0
  3. data/README.md +1 -1
  4. data/lib/openai/internal/transport/pooled_net_requester.rb +12 -10
  5. data/lib/openai/models/graders/grader_input_item.rb +87 -0
  6. data/lib/openai/models/graders/grader_inputs.rb +0 -80
  7. data/lib/openai/models/image.rb +6 -6
  8. data/lib/openai/models/image_edit_completed_event.rb +5 -3
  9. data/lib/openai/models/image_edit_params.rb +34 -32
  10. data/lib/openai/models/image_gen_completed_event.rb +5 -3
  11. data/lib/openai/models/image_generate_params.rb +38 -36
  12. data/lib/openai/models/image_model.rb +1 -0
  13. data/lib/openai/models/images_response.rb +31 -1
  14. data/lib/openai/models/responses/tool.rb +22 -8
  15. data/lib/openai/resources/images.rb +6 -6
  16. data/lib/openai/version.rb +1 -1
  17. data/lib/openai.rb +1 -0
  18. data/rbi/openai/models/graders/grader_input_item.rbi +112 -0
  19. data/rbi/openai/models/graders/grader_inputs.rbi +0 -105
  20. data/rbi/openai/models/image.rbi +10 -10
  21. data/rbi/openai/models/image_edit_completed_event.rbi +6 -3
  22. data/rbi/openai/models/image_edit_params.rbi +49 -46
  23. data/rbi/openai/models/image_gen_completed_event.rbi +6 -3
  24. data/rbi/openai/models/image_generate_params.rbi +54 -51
  25. data/rbi/openai/models/image_model.rbi +1 -0
  26. data/rbi/openai/models/images_response.rbi +61 -3
  27. data/rbi/openai/models/responses/tool.rbi +38 -16
  28. data/rbi/openai/resources/images.rbi +72 -68
  29. data/sig/openai/models/graders/grader_input_item.rbs +55 -0
  30. data/sig/openai/models/graders/grader_inputs.rbs +0 -50
  31. data/sig/openai/models/image_model.rbs +6 -1
  32. data/sig/openai/models/images_response.rbs +25 -3
  33. data/sig/openai/models/responses/tool.rbs +4 -4
  34. metadata +5 -2
@@ -38,7 +38,8 @@ module OpenAI
  sig { returns(Symbol) }
  attr_accessor :type

- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  sig { returns(OpenAI::ImageEditCompletedEvent::Usage) }
  attr_reader :usage

@@ -72,7 +73,8 @@ module OpenAI
  quality:,
  # The size of the edited image.
  size:,
- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  usage:,
  # The type of the event. Always `image_edit.completed`.
  type: :"image_edit.completed"
@@ -267,7 +269,8 @@ module OpenAI
  sig { returns(Integer) }
  attr_accessor :total_tokens

- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  sig do
  params(
  input_tokens: Integer,
@@ -13,7 +13,8 @@ module OpenAI

  # The image(s) to edit. Must be a supported image file or an array of images.
  #
- # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ # For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
+ # `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
  # 50MB. You can provide up to 16 images.
  #
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
@@ -22,14 +23,14 @@ module OpenAI
  attr_accessor :image

  # A text description of the desired image(s). The maximum length is 1000
- # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ # characters for `dall-e-2`, and 32000 characters for the GPT image models.
  sig { returns(String) }
  attr_accessor :prompt

  # Allows to set transparency for the background of the generated image(s). This
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- # `opaque` or `auto` (default value). When `auto` is used, the model will
- # automatically determine the best background for the image.
+ # parameter is only supported for the GPT image models. Must be one of
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ # model will automatically determine the best background for the image.
  #
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
@@ -55,9 +56,9 @@ module OpenAI
  sig { params(mask: OpenAI::Internal::FileInput).void }
  attr_writer :mask

- # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
- # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
- # is used.
+ # The model to use for image generation. Only `dall-e-2` and the GPT image models
+ # are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
+ # image models is used.
  sig { returns(T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol))) }
  attr_accessor :model

@@ -66,13 +67,13 @@ module OpenAI
  attr_accessor :n

  # The compression level (0-100%) for the generated images. This parameter is only
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
  # defaults to 100.
  sig { returns(T.nilable(Integer)) }
  attr_accessor :output_compression

  # The format in which the generated images are returned. This parameter is only
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
  # default value is `png`.
  sig do
  returns(T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol))
@@ -89,23 +90,23 @@ module OpenAI
  attr_accessor :partial_images

  # The quality of the image that will be generated. `high`, `medium` and `low` are
- # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
- # Defaults to `auto`.
+ # only supported for the GPT image models. `dall-e-2` only supports `standard`
+ # quality. Defaults to `auto`.
  sig { returns(T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol)) }
  attr_accessor :quality

  # The format in which the generated images are returned. Must be one of `url` or
  # `b64_json`. URLs are only valid for 60 minutes after the image has been
- # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
- # will always return base64-encoded images.
+ # generated. This parameter is only supported for `dall-e-2`, as the GPT image
+ # models always return base64-encoded images.
  sig do
  returns(T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol))
  end
  attr_accessor :response_format

  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
- # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ # models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
  sig { returns(T.nilable(OpenAI::ImageEditParams::Size::OrSymbol)) }
  attr_accessor :size

@@ -143,19 +144,20 @@ module OpenAI
  def self.new(
  # The image(s) to edit. Must be a supported image file or an array of images.
  #
- # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ # For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
+ # `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
  # 50MB. You can provide up to 16 images.
  #
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
  # file less than 4MB.
  image:,
  # A text description of the desired image(s). The maximum length is 1000
- # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ # characters for `dall-e-2`, and 32000 characters for the GPT image models.
  prompt:,
  # Allows to set transparency for the background of the generated image(s). This
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- # `opaque` or `auto` (default value). When `auto` is used, the model will
- # automatically determine the best background for the image.
+ # parameter is only supported for the GPT image models. Must be one of
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ # model will automatically determine the best background for the image.
  #
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
@@ -170,18 +172,18 @@ module OpenAI
  # the mask will be applied on the first image. Must be a valid PNG file, less than
  # 4MB, and have the same dimensions as `image`.
  mask: nil,
- # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
- # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
- # is used.
+ # The model to use for image generation. Only `dall-e-2` and the GPT image models
+ # are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
+ # image models is used.
  model: nil,
  # The number of images to generate. Must be between 1 and 10.
  n: nil,
  # The compression level (0-100%) for the generated images. This parameter is only
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
  # defaults to 100.
  output_compression: nil,
  # The format in which the generated images are returned. This parameter is only
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
  # default value is `png`.
  output_format: nil,
  # The number of partial images to generate. This parameter is used for streaming
@@ -192,17 +194,17 @@ module OpenAI
  # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
- # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
- # Defaults to `auto`.
+ # only supported for the GPT image models. `dall-e-2` only supports `standard`
+ # quality. Defaults to `auto`.
  quality: nil,
  # The format in which the generated images are returned. Must be one of `url` or
  # `b64_json`. URLs are only valid for 60 minutes after the image has been
- # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
- # will always return base64-encoded images.
+ # generated. This parameter is only supported for `dall-e-2`, as the GPT image
+ # models always return base64-encoded images.
  response_format: nil,
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
- # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ # models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
  size: nil,
  # A unique identifier representing your end-user, which can help OpenAI to monitor
  # and detect abuse.
@@ -242,7 +244,8 @@ module OpenAI

  # The image(s) to edit. Must be a supported image file or an array of images.
  #
- # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ # For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
+ # `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
  # 50MB. You can provide up to 16 images.
  #
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
@@ -266,9 +269,9 @@ module OpenAI
  end

  # Allows to set transparency for the background of the generated image(s). This
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- # `opaque` or `auto` (default value). When `auto` is used, the model will
- # automatically determine the best background for the image.
+ # parameter is only supported for the GPT image models. Must be one of
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ # model will automatically determine the best background for the image.
  #
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
@@ -318,9 +321,9 @@ module OpenAI
  end
  end

- # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
- # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
- # is used.
+ # The model to use for image generation. Only `dall-e-2` and the GPT image models
+ # are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
+ # image models is used.
  module Model
  extend OpenAI::Internal::Type::Union

@@ -335,7 +338,7 @@ module OpenAI
  end

  # The format in which the generated images are returned. This parameter is only
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
  # default value is `png`.
  module OutputFormat
  extend OpenAI::Internal::Type::Enum
@@ -358,8 +361,8 @@ module OpenAI
  end

  # The quality of the image that will be generated. `high`, `medium` and `low` are
- # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
- # Defaults to `auto`.
+ # only supported for the GPT image models. `dall-e-2` only supports `standard`
+ # quality. Defaults to `auto`.
  module Quality
  extend OpenAI::Internal::Type::Enum

@@ -385,8 +388,8 @@ module OpenAI

  # The format in which the generated images are returned. Must be one of `url` or
  # `b64_json`. URLs are only valid for 60 minutes after the image has been
- # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
- # will always return base64-encoded images.
+ # generated. This parameter is only supported for `dall-e-2`, as the GPT image
+ # models always return base64-encoded images.
  module ResponseFormat
  extend OpenAI::Internal::Type::Enum

@@ -413,8 +416,8 @@ module OpenAI
  end

  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
- # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ # models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
  module Size
  extend OpenAI::Internal::Type::Enum

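The ImageEditParams hunks above widen every `gpt-image-1`-specific doc comment to cover the whole GPT image family. A minimal sketch of calling the edit endpoint with one of these models — the client setup, file name, and prompt are illustrative, not taken from this diff:

  require "openai"
  require "pathname"

  client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

  # GPT image models accept png/webp/jpg inputs and always return base64 data,
  # so no `response_format` is passed here.
  edited = client.images.edit(
    image: Pathname("sketch.png"),  # hypothetical local file
    prompt: "Recolor this sketch as a watercolor painting",
    model: :"gpt-image-1-mini",
    output_format: :webp,
    output_compression: 80
  )
  File.binwrite("edited.webp", edited.data.first.b64_json.unpack1("m"))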
@@ -38,7 +38,8 @@ module OpenAI
  sig { returns(Symbol) }
  attr_accessor :type

- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  sig { returns(OpenAI::ImageGenCompletedEvent::Usage) }
  attr_reader :usage

@@ -71,7 +72,8 @@ module OpenAI
  quality:,
  # The size of the generated image.
  size:,
- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  usage:,
  # The type of the event. Always `image_generation.completed`.
  type: :"image_generation.completed"
@@ -260,7 +262,8 @@ module OpenAI
  sig { returns(Integer) }
  attr_accessor :total_tokens

- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  sig do
  params(
  input_tokens: Integer,
@@ -12,15 +12,15 @@ module OpenAI
  end

  # A text description of the desired image(s). The maximum length is 32000
- # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
- # for `dall-e-3`.
+ # characters for the GPT image models, 1000 characters for `dall-e-2` and 4000
+ # characters for `dall-e-3`.
  sig { returns(String) }
  attr_accessor :prompt

  # Allows to set transparency for the background of the generated image(s). This
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- # `opaque` or `auto` (default value). When `auto` is used, the model will
- # automatically determine the best background for the image.
+ # parameter is only supported for the GPT image models. Must be one of
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ # model will automatically determine the best background for the image.
  #
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
@@ -29,14 +29,15 @@ module OpenAI
  end
  attr_accessor :background

- # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
- # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
- # `gpt-image-1` is used.
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
+ # image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
+ # `dall-e-2` unless a parameter specific to the GPT image models is used.
  sig { returns(T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol))) }
  attr_accessor :model

- # Control the content-moderation level for images generated by `gpt-image-1`. Must
- # be either `low` for less restrictive filtering or `auto` (default value).
+ # Control the content-moderation level for images generated by the GPT image
+ # models. Must be either `low` for less restrictive filtering or `auto` (default
+ # value).
  sig do
  returns(T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol))
  end
@@ -48,13 +49,13 @@ module OpenAI
  attr_accessor :n

  # The compression level (0-100%) for the generated images. This parameter is only
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
  # defaults to 100.
  sig { returns(T.nilable(Integer)) }
  attr_accessor :output_compression

  # The format in which the generated images are returned. This parameter is only
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
  sig do
  returns(T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol))
  end
@@ -73,7 +74,7 @@ module OpenAI
  #
  # - `auto` (default value) will automatically select the best quality for the
  # given model.
- # - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ # - `high`, `medium` and `low` are supported for the GPT image models.
  # - `hd` and `standard` are supported for `dall-e-3`.
  # - `standard` is the only option for `dall-e-2`.
  sig { returns(T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol)) }
@@ -81,8 +82,8 @@ module OpenAI

  # The format in which generated images with `dall-e-2` and `dall-e-3` are
  # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
- # after the image has been generated. This parameter isn't supported for
- # `gpt-image-1` which will always return base64-encoded images.
+ # after the image has been generated. This parameter isn't supported for the GPT
+ # image models, which always return base64-encoded images.
  sig do
  returns(
  T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol)
@@ -91,9 +92,9 @@ module OpenAI
  attr_accessor :response_format

  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
- # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
- # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ # models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
+ # `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
  sig { returns(T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol)) }
  attr_accessor :size

@@ -137,33 +138,34 @@ module OpenAI
  end
  def self.new(
  # A text description of the desired image(s). The maximum length is 32000
- # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
- # for `dall-e-3`.
+ # characters for the GPT image models, 1000 characters for `dall-e-2` and 4000
+ # characters for `dall-e-3`.
  prompt:,
  # Allows to set transparency for the background of the generated image(s). This
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- # `opaque` or `auto` (default value). When `auto` is used, the model will
- # automatically determine the best background for the image.
+ # parameter is only supported for the GPT image models. Must be one of
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ # model will automatically determine the best background for the image.
  #
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
  background: nil,
- # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
- # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
- # `gpt-image-1` is used.
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
+ # image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
+ # `dall-e-2` unless a parameter specific to the GPT image models is used.
  model: nil,
- # Control the content-moderation level for images generated by `gpt-image-1`. Must
- # be either `low` for less restrictive filtering or `auto` (default value).
+ # Control the content-moderation level for images generated by the GPT image
+ # models. Must be either `low` for less restrictive filtering or `auto` (default
+ # value).
  moderation: nil,
  # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
  # `n=1` is supported.
  n: nil,
  # The compression level (0-100%) for the generated images. This parameter is only
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
  # defaults to 100.
  output_compression: nil,
  # The format in which the generated images are returned. This parameter is only
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
  output_format: nil,
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
@@ -176,19 +178,19 @@ module OpenAI
  #
  # - `auto` (default value) will automatically select the best quality for the
  # given model.
- # - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ # - `high`, `medium` and `low` are supported for the GPT image models.
  # - `hd` and `standard` are supported for `dall-e-3`.
  # - `standard` is the only option for `dall-e-2`.
  quality: nil,
  # The format in which generated images with `dall-e-2` and `dall-e-3` are
  # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
- # after the image has been generated. This parameter isn't supported for
- # `gpt-image-1` which will always return base64-encoded images.
+ # after the image has been generated. This parameter isn't supported for the GPT
+ # image models, which always return base64-encoded images.
  response_format: nil,
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
- # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
- # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ # models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
+ # `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
  size: nil,
  # The style of the generated images. This parameter is only supported for
  # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
@@ -231,9 +233,9 @@ module OpenAI
  end

  # Allows to set transparency for the background of the generated image(s). This
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- # `opaque` or `auto` (default value). When `auto` is used, the model will
- # automatically determine the best background for the image.
+ # parameter is only supported for the GPT image models. Must be one of
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ # model will automatically determine the best background for the image.
  #
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
@@ -265,9 +267,9 @@ module OpenAI
  end
  end

- # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
- # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
- # `gpt-image-1` is used.
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
+ # image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
+ # `dall-e-2` unless a parameter specific to the GPT image models is used.
  module Model
  extend OpenAI::Internal::Type::Union

@@ -283,8 +285,9 @@ module OpenAI
  end
  end

- # Control the content-moderation level for images generated by `gpt-image-1`. Must
- # be either `low` for less restrictive filtering or `auto` (default value).
+ # Control the content-moderation level for images generated by the GPT image
+ # models. Must be either `low` for less restrictive filtering or `auto` (default
+ # value).
  module Moderation
  extend OpenAI::Internal::Type::Enum

@@ -308,7 +311,7 @@ module OpenAI
  end

  # The format in which the generated images are returned. This parameter is only
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
  module OutputFormat
  extend OpenAI::Internal::Type::Enum

@@ -338,7 +341,7 @@ module OpenAI
  #
  # - `auto` (default value) will automatically select the best quality for the
  # given model.
- # - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ # - `high`, `medium` and `low` are supported for the GPT image models.
  # - `hd` and `standard` are supported for `dall-e-3`.
  # - `standard` is the only option for `dall-e-2`.
  module Quality
@@ -368,8 +371,8 @@ module OpenAI

  # The format in which generated images with `dall-e-2` and `dall-e-3` are
  # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
- # after the image has been generated. This parameter isn't supported for
- # `gpt-image-1` which will always return base64-encoded images.
+ # after the image has been generated. This parameter isn't supported for the GPT
+ # image models, which always return base64-encoded images.
  module ResponseFormat
  extend OpenAI::Internal::Type::Enum

@@ -397,9 +400,9 @@ module OpenAI
  end

  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
- # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
- # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ # models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
+ # `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
  module Size
  extend OpenAI::Internal::Type::Enum

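The same renaming runs through ImageGenerateParams. A sketch of a generate call that leans on the GPT-image-only parameters (`background`, `output_format`, `quality`); the prompt and values are illustrative, and `client` is the one constructed in the edit sketch above:

  response = client.images.generate(
    prompt: "A watercolor fox in a misty forest",
    model: :"gpt-image-1.5",
    background: :transparent,  # GPT image models only
    output_format: :png,       # png keeps the transparent background
    quality: :high,
    size: :"1024x1024"
  )
  File.binwrite("fox.png", response.data.first.b64_json.unpack1("m"))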
@@ -8,6 +8,7 @@ module OpenAI
  TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ImageModel) }
  OrSymbol = T.type_alias { T.any(Symbol, String) }

+ GPT_IMAGE_1_5 = T.let(:"gpt-image-1.5", OpenAI::ImageModel::TaggedSymbol)
  DALL_E_2 = T.let(:"dall-e-2", OpenAI::ImageModel::TaggedSymbol)
  DALL_E_3 = T.let(:"dall-e-3", OpenAI::ImageModel::TaggedSymbol)
  GPT_IMAGE_1 = T.let(:"gpt-image-1", OpenAI::ImageModel::TaggedSymbol)
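Because `gpt-image-1.5` is now a member of the ImageModel enum, the model can also be referenced through the generated constant rather than a bare symbol (same hypothetical `client` as in the sketches above):

  client.images.generate(
    prompt: "A watercolor fox in a misty forest",
    model: OpenAI::ImageModel::GPT_IMAGE_1_5
  )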
@@ -232,6 +232,20 @@ module OpenAI
  sig { returns(Integer) }
  attr_accessor :total_tokens

+ # The output token details for the image generation.
+ sig do
+ returns(T.nilable(OpenAI::ImagesResponse::Usage::OutputTokensDetails))
+ end
+ attr_reader :output_tokens_details
+
+ sig do
+ params(
+ output_tokens_details:
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails::OrHash
+ ).void
+ end
+ attr_writer :output_tokens_details
+
  # For `gpt-image-1` only, the token usage information for the image generation.
  sig do
  params(
@@ -239,7 +253,9 @@ module OpenAI
  input_tokens_details:
  OpenAI::ImagesResponse::Usage::InputTokensDetails::OrHash,
  output_tokens: Integer,
- total_tokens: Integer
+ total_tokens: Integer,
+ output_tokens_details:
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails::OrHash
  ).returns(T.attached_class)
  end
  def self.new(
@@ -250,7 +266,9 @@ module OpenAI
  # The number of output tokens generated by the model.
  output_tokens:,
  # The total number of tokens (images and text) used for the image generation.
- total_tokens:
+ total_tokens:,
+ # The output token details for the image generation.
+ output_tokens_details: nil
  )
  end

@@ -261,7 +279,9 @@ module OpenAI
  input_tokens_details:
  OpenAI::ImagesResponse::Usage::InputTokensDetails,
  output_tokens: Integer,
- total_tokens: Integer
+ total_tokens: Integer,
+ output_tokens_details:
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails
  }
  )
  end
@@ -305,6 +325,44 @@ module OpenAI
  def to_hash
  end
  end
+
+ class OutputTokensDetails < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The number of image output tokens generated by the model.
+ sig { returns(Integer) }
+ attr_accessor :image_tokens
+
+ # The number of text output tokens generated by the model.
+ sig { returns(Integer) }
+ attr_accessor :text_tokens
+
+ # The output token details for the image generation.
+ sig do
+ params(image_tokens: Integer, text_tokens: Integer).returns(
+ T.attached_class
+ )
+ end
+ def self.new(
+ # The number of image output tokens generated by the model.
+ image_tokens:,
+ # The number of text output tokens generated by the model.
+ text_tokens:
+ )
+ end
+
+ sig do
+ override.returns({ image_tokens: Integer, text_tokens: Integer })
+ end
+ def to_hash
+ end
+ end
  end
  end
  end
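With the new `output_tokens_details` field on ImagesResponse::Usage, token accounting for an image call can be broken down by output type. A sketch of reading it from the `response` in the generate example above — both `usage` and `output_tokens_details` are optional, so guard for nil:

  if (details = response.usage&.output_tokens_details)
    puts "image output tokens: #{details.image_tokens}"
    puts "text output tokens:  #{details.text_tokens}"
    puts "total tokens:        #{response.usage.total_tokens}"
  end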