openai 0.40.0 → 0.41.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +15 -0
  3. data/README.md +1 -1
  4. data/lib/openai/internal/transport/pooled_net_requester.rb +12 -10
  5. data/lib/openai/models/graders/grader_input_item.rb +87 -0
  6. data/lib/openai/models/graders/grader_inputs.rb +0 -80
  7. data/lib/openai/models/image.rb +6 -6
  8. data/lib/openai/models/image_edit_completed_event.rb +5 -3
  9. data/lib/openai/models/image_edit_params.rb +34 -32
  10. data/lib/openai/models/image_gen_completed_event.rb +5 -3
  11. data/lib/openai/models/image_generate_params.rb +38 -36
  12. data/lib/openai/models/image_model.rb +1 -0
  13. data/lib/openai/models/images_response.rb +31 -1
  14. data/lib/openai/models/responses/tool.rb +22 -8
  15. data/lib/openai/resources/images.rb +6 -6
  16. data/lib/openai/version.rb +1 -1
  17. data/lib/openai.rb +1 -0
  18. data/rbi/openai/models/graders/grader_input_item.rbi +112 -0
  19. data/rbi/openai/models/graders/grader_inputs.rbi +0 -105
  20. data/rbi/openai/models/image.rbi +10 -10
  21. data/rbi/openai/models/image_edit_completed_event.rbi +6 -3
  22. data/rbi/openai/models/image_edit_params.rbi +49 -46
  23. data/rbi/openai/models/image_gen_completed_event.rbi +6 -3
  24. data/rbi/openai/models/image_generate_params.rbi +54 -51
  25. data/rbi/openai/models/image_model.rbi +1 -0
  26. data/rbi/openai/models/images_response.rbi +61 -3
  27. data/rbi/openai/models/responses/tool.rbi +38 -16
  28. data/rbi/openai/resources/images.rbi +72 -68
  29. data/sig/openai/models/graders/grader_input_item.rbs +55 -0
  30. data/sig/openai/models/graders/grader_inputs.rbs +0 -50
  31. data/sig/openai/models/image_model.rbs +6 -1
  32. data/sig/openai/models/images_response.rbs +25 -3
  33. data/sig/openai/models/responses/tool.rbs +4 -4
  34. metadata +5 -2
@@ -888,7 +888,10 @@ module OpenAI
888
888
  sig do
889
889
  returns(
890
890
  T.nilable(
891
- OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
891
+ T.any(
892
+ String,
893
+ OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
894
+ )
892
895
  )
893
896
  )
894
897
  end
@@ -896,7 +899,11 @@ module OpenAI
896
899
 
897
900
  sig do
898
901
  params(
899
- model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
902
+ model:
903
+ T.any(
904
+ String,
905
+ OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
906
+ )
900
907
  ).void
901
908
  end
902
909
  attr_writer :model
@@ -990,7 +997,7 @@ module OpenAI
990
997
  end
991
998
  attr_writer :size
992
999
 
993
- # A tool that generates images using a model like `gpt-image-1`.
1000
+ # A tool that generates images using the GPT image models.
994
1001
  sig do
995
1002
  params(
996
1003
  background:
@@ -1001,7 +1008,11 @@ module OpenAI
1001
1008
  ),
1002
1009
  input_image_mask:
1003
1010
  OpenAI::Responses::Tool::ImageGeneration::InputImageMask::OrHash,
1004
- model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol,
1011
+ model:
1012
+ T.any(
1013
+ String,
1014
+ OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
1015
+ ),
1005
1016
  moderation:
1006
1017
  OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol,
1007
1018
  output_compression: Integer,
@@ -1062,7 +1073,10 @@ module OpenAI
1062
1073
  input_image_mask:
1063
1074
  OpenAI::Responses::Tool::ImageGeneration::InputImageMask,
1064
1075
  model:
1065
- OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol,
1076
+ T.any(
1077
+ String,
1078
+ OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
1079
+ ),
1066
1080
  moderation:
1067
1081
  OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol,
1068
1082
  output_compression: Integer,
@@ -1202,7 +1216,25 @@ module OpenAI
1202
1216
 
1203
1217
  # The image generation model to use. Default: `gpt-image-1`.
1204
1218
  module Model
1205
- extend OpenAI::Internal::Type::Enum
1219
+ extend OpenAI::Internal::Type::Union
1220
+
1221
+ Variants =
1222
+ T.type_alias do
1223
+ T.any(
1224
+ String,
1225
+ OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
1226
+ )
1227
+ end
1228
+
1229
+ sig do
1230
+ override.returns(
1231
+ T::Array[
1232
+ OpenAI::Responses::Tool::ImageGeneration::Model::Variants
1233
+ ]
1234
+ )
1235
+ end
1236
+ def self.variants
1237
+ end
1206
1238
 
1207
1239
  TaggedSymbol =
1208
1240
  T.type_alias do
@@ -1220,16 +1252,6 @@ module OpenAI
1220
1252
  :"gpt-image-1-mini",
1221
1253
  OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
1222
1254
  )
1223
-
1224
- sig do
1225
- override.returns(
1226
- T::Array[
1227
- OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
1228
- ]
1229
- )
1230
- end
1231
- def self.values
1232
- end
1233
1255
  end
1234
1256
 
1235
1257
  # Moderation level for the generated image. Default: `auto`.
@@ -72,19 +72,20 @@ module OpenAI
72
72
  def edit(
73
73
  # The image(s) to edit. Must be a supported image file or an array of images.
74
74
  #
75
- # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
75
+ # For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
76
+ # `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
76
77
  # 50MB. You can provide up to 16 images.
77
78
  #
78
79
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
79
80
  # file less than 4MB.
80
81
  image:,
81
82
  # A text description of the desired image(s). The maximum length is 1000
82
- # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
83
+ # characters for `dall-e-2`, and 32000 characters for the GPT image models.
83
84
  prompt:,
84
85
  # Allows to set transparency for the background of the generated image(s). This
85
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
86
- # `opaque` or `auto` (default value). When `auto` is used, the model will
87
- # automatically determine the best background for the image.
86
+ # parameter is only supported for the GPT image models. Must be one of
87
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
88
+ # model will automatically determine the best background for the image.
88
89
  #
89
90
  # If `transparent`, the output format needs to support transparency, so it should
90
91
  # be set to either `png` (default value) or `webp`.
@@ -99,18 +100,18 @@ module OpenAI
99
100
  # the mask will be applied on the first image. Must be a valid PNG file, less than
100
101
  # 4MB, and have the same dimensions as `image`.
101
102
  mask: nil,
102
- # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
103
- # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
104
- # is used.
103
+ # The model to use for image generation. Only `dall-e-2` and the GPT image models
104
+ # are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
105
+ # image models is used.
105
106
  model: nil,
106
107
  # The number of images to generate. Must be between 1 and 10.
107
108
  n: nil,
108
109
  # The compression level (0-100%) for the generated images. This parameter is only
109
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
110
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
110
111
  # defaults to 100.
111
112
  output_compression: nil,
112
113
  # The format in which the generated images are returned. This parameter is only
113
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
114
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
114
115
  # default value is `png`.
115
116
  output_format: nil,
116
117
  # The number of partial images to generate. This parameter is used for streaming
@@ -121,17 +122,17 @@ module OpenAI
121
122
  # are generated if the full image is generated more quickly.
122
123
  partial_images: nil,
123
124
  # The quality of the image that will be generated. `high`, `medium` and `low` are
124
- # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
125
- # Defaults to `auto`.
125
+ # only supported for the GPT image models. `dall-e-2` only supports `standard`
126
+ # quality. Defaults to `auto`.
126
127
  quality: nil,
127
128
  # The format in which the generated images are returned. Must be one of `url` or
128
129
  # `b64_json`. URLs are only valid for 60 minutes after the image has been
129
- # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
130
- # will always return base64-encoded images.
130
+ # generated. This parameter is only supported for `dall-e-2`, as the GPT image
131
+ # models always return base64-encoded images.
131
132
  response_format: nil,
132
133
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
133
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
134
- # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
134
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
135
+ # models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
135
136
  size: nil,
136
137
  # A unique identifier representing your end-user, which can help OpenAI to monitor
137
138
  # and detect abuse.
@@ -176,19 +177,20 @@ module OpenAI
176
177
  def edit_stream_raw(
177
178
  # The image(s) to edit. Must be a supported image file or an array of images.
178
179
  #
179
- # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
180
+ # For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
181
+ # `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
180
182
  # 50MB. You can provide up to 16 images.
181
183
  #
182
184
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
183
185
  # file less than 4MB.
184
186
  image:,
185
187
  # A text description of the desired image(s). The maximum length is 1000
186
- # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
188
+ # characters for `dall-e-2`, and 32000 characters for the GPT image models.
187
189
  prompt:,
188
190
  # Allows to set transparency for the background of the generated image(s). This
189
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
190
- # `opaque` or `auto` (default value). When `auto` is used, the model will
191
- # automatically determine the best background for the image.
191
+ # parameter is only supported for the GPT image models. Must be one of
192
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
193
+ # model will automatically determine the best background for the image.
192
194
  #
193
195
  # If `transparent`, the output format needs to support transparency, so it should
194
196
  # be set to either `png` (default value) or `webp`.
@@ -203,18 +205,18 @@ module OpenAI
203
205
  # the mask will be applied on the first image. Must be a valid PNG file, less than
204
206
  # 4MB, and have the same dimensions as `image`.
205
207
  mask: nil,
206
- # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
207
- # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
208
- # is used.
208
+ # The model to use for image generation. Only `dall-e-2` and the GPT image models
209
+ # are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
210
+ # image models is used.
209
211
  model: nil,
210
212
  # The number of images to generate. Must be between 1 and 10.
211
213
  n: nil,
212
214
  # The compression level (0-100%) for the generated images. This parameter is only
213
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
215
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
214
216
  # defaults to 100.
215
217
  output_compression: nil,
216
218
  # The format in which the generated images are returned. This parameter is only
217
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
219
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
218
220
  # default value is `png`.
219
221
  output_format: nil,
220
222
  # The number of partial images to generate. This parameter is used for streaming
@@ -225,17 +227,17 @@ module OpenAI
225
227
  # are generated if the full image is generated more quickly.
226
228
  partial_images: nil,
227
229
  # The quality of the image that will be generated. `high`, `medium` and `low` are
228
- # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
229
- # Defaults to `auto`.
230
+ # only supported for the GPT image models. `dall-e-2` only supports `standard`
231
+ # quality. Defaults to `auto`.
230
232
  quality: nil,
231
233
  # The format in which the generated images are returned. Must be one of `url` or
232
234
  # `b64_json`. URLs are only valid for 60 minutes after the image has been
233
- # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
234
- # will always return base64-encoded images.
235
+ # generated. This parameter is only supported for `dall-e-2`, as the GPT image
236
+ # models always return base64-encoded images.
235
237
  response_format: nil,
236
238
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
237
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
238
- # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
239
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
240
+ # models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
239
241
  size: nil,
240
242
  # A unique identifier representing your end-user, which can help OpenAI to monitor
241
243
  # and detect abuse.
@@ -277,33 +279,34 @@ module OpenAI
277
279
  end
278
280
  def generate(
279
281
  # A text description of the desired image(s). The maximum length is 32000
280
- # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
281
- # for `dall-e-3`.
282
+ # characters for the GPT image models, 1000 characters for `dall-e-2` and 4000
283
+ # characters for `dall-e-3`.
282
284
  prompt:,
283
285
  # Allows to set transparency for the background of the generated image(s). This
284
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
285
- # `opaque` or `auto` (default value). When `auto` is used, the model will
286
- # automatically determine the best background for the image.
286
+ # parameter is only supported for the GPT image models. Must be one of
287
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
288
+ # model will automatically determine the best background for the image.
287
289
  #
288
290
  # If `transparent`, the output format needs to support transparency, so it should
289
291
  # be set to either `png` (default value) or `webp`.
290
292
  background: nil,
291
- # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
292
- # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
293
- # `gpt-image-1` is used.
293
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
294
+ # image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
295
+ # `dall-e-2` unless a parameter specific to the GPT image models is used.
294
296
  model: nil,
295
- # Control the content-moderation level for images generated by `gpt-image-1`. Must
296
- # be either `low` for less restrictive filtering or `auto` (default value).
297
+ # Control the content-moderation level for images generated by the GPT image
298
+ # models. Must be either `low` for less restrictive filtering or `auto` (default
299
+ # value).
297
300
  moderation: nil,
298
301
  # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
299
302
  # `n=1` is supported.
300
303
  n: nil,
301
304
  # The compression level (0-100%) for the generated images. This parameter is only
302
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
305
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
303
306
  # defaults to 100.
304
307
  output_compression: nil,
305
308
  # The format in which the generated images are returned. This parameter is only
306
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
309
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
307
310
  output_format: nil,
308
311
  # The number of partial images to generate. This parameter is used for streaming
309
312
  # responses that return partial images. Value must be between 0 and 3. When set to
@@ -316,19 +319,19 @@ module OpenAI
316
319
  #
317
320
  # - `auto` (default value) will automatically select the best quality for the
318
321
  # given model.
319
- # - `high`, `medium` and `low` are supported for `gpt-image-1`.
322
+ # - `high`, `medium` and `low` are supported for the GPT image models.
320
323
  # - `hd` and `standard` are supported for `dall-e-3`.
321
324
  # - `standard` is the only option for `dall-e-2`.
322
325
  quality: nil,
323
326
  # The format in which generated images with `dall-e-2` and `dall-e-3` are
324
327
  # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
325
- # after the image has been generated. This parameter isn't supported for
326
- # `gpt-image-1` which will always return base64-encoded images.
328
+ # after the image has been generated. This parameter isn't supported for the GPT
329
+ # image models, which always return base64-encoded images.
327
330
  response_format: nil,
328
331
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
329
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
330
- # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
331
- # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
332
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
333
+ # models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
334
+ # `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
332
335
  size: nil,
333
336
  # The style of the generated images. This parameter is only supported for
334
337
  # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
@@ -377,33 +380,34 @@ module OpenAI
377
380
  end
378
381
  def generate_stream_raw(
379
382
  # A text description of the desired image(s). The maximum length is 32000
380
- # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
381
- # for `dall-e-3`.
383
+ # characters for the GPT image models, 1000 characters for `dall-e-2` and 4000
384
+ # characters for `dall-e-3`.
382
385
  prompt:,
383
386
  # Allows to set transparency for the background of the generated image(s). This
384
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
385
- # `opaque` or `auto` (default value). When `auto` is used, the model will
386
- # automatically determine the best background for the image.
387
+ # parameter is only supported for the GPT image models. Must be one of
388
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
389
+ # model will automatically determine the best background for the image.
387
390
  #
388
391
  # If `transparent`, the output format needs to support transparency, so it should
389
392
  # be set to either `png` (default value) or `webp`.
390
393
  background: nil,
391
- # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
392
- # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
393
- # `gpt-image-1` is used.
394
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
395
+ # image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
396
+ # `dall-e-2` unless a parameter specific to the GPT image models is used.
394
397
  model: nil,
395
- # Control the content-moderation level for images generated by `gpt-image-1`. Must
396
- # be either `low` for less restrictive filtering or `auto` (default value).
398
+ # Control the content-moderation level for images generated by the GPT image
399
+ # models. Must be either `low` for less restrictive filtering or `auto` (default
400
+ # value).
397
401
  moderation: nil,
398
402
  # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
399
403
  # `n=1` is supported.
400
404
  n: nil,
401
405
  # The compression level (0-100%) for the generated images. This parameter is only
402
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
406
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
403
407
  # defaults to 100.
404
408
  output_compression: nil,
405
409
  # The format in which the generated images are returned. This parameter is only
406
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
410
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
407
411
  output_format: nil,
408
412
  # The number of partial images to generate. This parameter is used for streaming
409
413
  # responses that return partial images. Value must be between 0 and 3. When set to
@@ -416,19 +420,19 @@ module OpenAI
416
420
  #
417
421
  # - `auto` (default value) will automatically select the best quality for the
418
422
  # given model.
419
- # - `high`, `medium` and `low` are supported for `gpt-image-1`.
423
+ # - `high`, `medium` and `low` are supported for the GPT image models.
420
424
  # - `hd` and `standard` are supported for `dall-e-3`.
421
425
  # - `standard` is the only option for `dall-e-2`.
422
426
  quality: nil,
423
427
  # The format in which generated images with `dall-e-2` and `dall-e-3` are
424
428
  # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
425
- # after the image has been generated. This parameter isn't supported for
426
- # `gpt-image-1` which will always return base64-encoded images.
429
+ # after the image has been generated. This parameter isn't supported for the GPT
430
+ # image models, which always return base64-encoded images.
427
431
  response_format: nil,
428
432
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
429
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
430
- # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
431
- # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
433
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
434
+ # models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
435
+ # `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
432
436
  size: nil,
433
437
  # The style of the generated images. This parameter is only supported for
434
438
  # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
@@ -0,0 +1,55 @@
1
+ module OpenAI
2
+ module Models
3
+ module Graders
4
+ type grader_input_item =
5
+ String
6
+ | OpenAI::Responses::ResponseInputText
7
+ | OpenAI::Graders::GraderInputItem::OutputText
8
+ | OpenAI::Graders::GraderInputItem::InputImage
9
+ | OpenAI::Responses::ResponseInputAudio
10
+
11
+ module GraderInputItem
12
+ extend OpenAI::Internal::Type::Union
13
+
14
+ type output_text = { text: String, type: :output_text }
15
+
16
+ class OutputText < OpenAI::Internal::Type::BaseModel
17
+ attr_accessor text: String
18
+
19
+ attr_accessor type: :output_text
20
+
21
+ def initialize: (text: String, ?type: :output_text) -> void
22
+
23
+ def to_hash: -> { text: String, type: :output_text }
24
+ end
25
+
26
+ type input_image =
27
+ { image_url: String, type: :input_image, detail: String }
28
+
29
+ class InputImage < OpenAI::Internal::Type::BaseModel
30
+ attr_accessor image_url: String
31
+
32
+ attr_accessor type: :input_image
33
+
34
+ attr_reader detail: String?
35
+
36
+ def detail=: (String) -> String
37
+
38
+ def initialize: (
39
+ image_url: String,
40
+ ?detail: String,
41
+ ?type: :input_image
42
+ ) -> void
43
+
44
+ def to_hash: -> {
45
+ image_url: String,
46
+ type: :input_image,
47
+ detail: String
48
+ }
49
+ end
50
+
51
+ def self?.variants: -> ::Array[OpenAI::Models::Graders::grader_input_item]
52
+ end
53
+ end
54
+ end
55
+ end
@@ -3,56 +3,6 @@ module OpenAI
3
3
  GraderInputs: OpenAI::Internal::Type::Converter
4
4
 
5
5
  module Graders
6
- type grader_input_item =
7
- String
8
- | OpenAI::Responses::ResponseInputText
9
- | OpenAI::Graders::GraderInputItem::OutputText
10
- | OpenAI::Graders::GraderInputItem::InputImage
11
- | OpenAI::Responses::ResponseInputAudio
12
-
13
- module GraderInputItem
14
- extend OpenAI::Internal::Type::Union
15
-
16
- type output_text = { text: String, type: :output_text }
17
-
18
- class OutputText < OpenAI::Internal::Type::BaseModel
19
- attr_accessor text: String
20
-
21
- attr_accessor type: :output_text
22
-
23
- def initialize: (text: String, ?type: :output_text) -> void
24
-
25
- def to_hash: -> { text: String, type: :output_text }
26
- end
27
-
28
- type input_image =
29
- { image_url: String, type: :input_image, detail: String }
30
-
31
- class InputImage < OpenAI::Internal::Type::BaseModel
32
- attr_accessor image_url: String
33
-
34
- attr_accessor type: :input_image
35
-
36
- attr_reader detail: String?
37
-
38
- def detail=: (String) -> String
39
-
40
- def initialize: (
41
- image_url: String,
42
- ?detail: String,
43
- ?type: :input_image
44
- ) -> void
45
-
46
- def to_hash: -> {
47
- image_url: String,
48
- type: :input_image,
49
- detail: String
50
- }
51
- end
52
-
53
- def self?.variants: -> ::Array[OpenAI::Models::Graders::grader_input_item]
54
- end
55
-
56
6
  type grader_inputs = ::Array[OpenAI::Models::Graders::grader_input_item]
57
7
 
58
8
  GraderInputs: OpenAI::Internal::Type::Converter
@@ -1,11 +1,16 @@
1
1
  module OpenAI
2
2
  module Models
3
3
  type image_model =
4
- :"dall-e-2" | :"dall-e-3" | :"gpt-image-1" | :"gpt-image-1-mini"
4
+ :"gpt-image-1.5"
5
+ | :"dall-e-2"
6
+ | :"dall-e-3"
7
+ | :"gpt-image-1"
8
+ | :"gpt-image-1-mini"
5
9
 
6
10
  module ImageModel
7
11
  extend OpenAI::Internal::Type::Enum
8
12
 
13
+ GPT_IMAGE_1_5: :"gpt-image-1.5"
9
14
  DALL_E_2: :"dall-e-2"
10
15
  DALL_E_3: :"dall-e-3"
11
16
  GPT_IMAGE_1: :"gpt-image-1"
@@ -120,7 +120,8 @@ module OpenAI
120
120
  input_tokens: Integer,
121
121
  input_tokens_details: OpenAI::ImagesResponse::Usage::InputTokensDetails,
122
122
  output_tokens: Integer,
123
- total_tokens: Integer
123
+ total_tokens: Integer,
124
+ output_tokens_details: OpenAI::ImagesResponse::Usage::OutputTokensDetails
124
125
  }
125
126
 
126
127
  class Usage < OpenAI::Internal::Type::BaseModel
@@ -132,18 +133,26 @@ module OpenAI
132
133
 
133
134
  attr_accessor total_tokens: Integer
134
135
 
136
+ attr_reader output_tokens_details: OpenAI::ImagesResponse::Usage::OutputTokensDetails?
137
+
138
+ def output_tokens_details=: (
139
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails
140
+ ) -> OpenAI::ImagesResponse::Usage::OutputTokensDetails
141
+
135
142
  def initialize: (
136
143
  input_tokens: Integer,
137
144
  input_tokens_details: OpenAI::ImagesResponse::Usage::InputTokensDetails,
138
145
  output_tokens: Integer,
139
- total_tokens: Integer
146
+ total_tokens: Integer,
147
+ ?output_tokens_details: OpenAI::ImagesResponse::Usage::OutputTokensDetails
140
148
  ) -> void
141
149
 
142
150
  def to_hash: -> {
143
151
  input_tokens: Integer,
144
152
  input_tokens_details: OpenAI::ImagesResponse::Usage::InputTokensDetails,
145
153
  output_tokens: Integer,
146
- total_tokens: Integer
154
+ total_tokens: Integer,
155
+ output_tokens_details: OpenAI::ImagesResponse::Usage::OutputTokensDetails
147
156
  }
148
157
 
149
158
  type input_tokens_details =
@@ -158,6 +167,19 @@ module OpenAI
158
167
 
159
168
  def to_hash: -> { image_tokens: Integer, text_tokens: Integer }
160
169
  end
170
+
171
+ type output_tokens_details =
172
+ { image_tokens: Integer, text_tokens: Integer }
173
+
174
+ class OutputTokensDetails < OpenAI::Internal::Type::BaseModel
175
+ attr_accessor image_tokens: Integer
176
+
177
+ attr_accessor text_tokens: Integer
178
+
179
+ def initialize: (image_tokens: Integer, text_tokens: Integer) -> void
180
+
181
+ def to_hash: -> { image_tokens: Integer, text_tokens: Integer }
182
+ end
161
183
  end
162
184
  end
163
185
  end
@@ -442,15 +442,15 @@ module OpenAI
442
442
  def to_hash: -> { file_id: String, image_url: String }
443
443
  end
444
444
 
445
- type model = :"gpt-image-1" | :"gpt-image-1-mini"
445
+ type model = String | :"gpt-image-1" | :"gpt-image-1-mini"
446
446
 
447
447
  module Model
448
- extend OpenAI::Internal::Type::Enum
448
+ extend OpenAI::Internal::Type::Union
449
+
450
+ def self?.variants: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::model]
449
451
 
450
452
  GPT_IMAGE_1: :"gpt-image-1"
451
453
  GPT_IMAGE_1_MINI: :"gpt-image-1-mini"
452
-
453
- def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::model]
454
454
  end
455
455
 
456
456
  type moderation = :auto | :low
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: openai
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.40.0
4
+ version: 0.41.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - OpenAI
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2025-12-15 00:00:00.000000000 Z
11
+ date: 2025-12-17 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: base64
@@ -399,6 +399,7 @@ files:
399
399
  - lib/openai/models/fine_tuning/supervised_method.rb
400
400
  - lib/openai/models/function_definition.rb
401
401
  - lib/openai/models/function_parameters.rb
402
+ - lib/openai/models/graders/grader_input_item.rb
402
403
  - lib/openai/models/graders/grader_inputs.rb
403
404
  - lib/openai/models/graders/label_model_grader.rb
404
405
  - lib/openai/models/graders/multi_grader.rb
@@ -1147,6 +1148,7 @@ files:
1147
1148
  - rbi/openai/models/fine_tuning/supervised_method.rbi
1148
1149
  - rbi/openai/models/function_definition.rbi
1149
1150
  - rbi/openai/models/function_parameters.rbi
1151
+ - rbi/openai/models/graders/grader_input_item.rbi
1150
1152
  - rbi/openai/models/graders/grader_inputs.rbi
1151
1153
  - rbi/openai/models/graders/label_model_grader.rbi
1152
1154
  - rbi/openai/models/graders/multi_grader.rbi
@@ -1885,6 +1887,7 @@ files:
1885
1887
  - sig/openai/models/fine_tuning/supervised_method.rbs
1886
1888
  - sig/openai/models/function_definition.rbs
1887
1889
  - sig/openai/models/function_parameters.rbs
1890
+ - sig/openai/models/graders/grader_input_item.rbs
1888
1891
  - sig/openai/models/graders/grader_inputs.rbs
1889
1892
  - sig/openai/models/graders/label_model_grader.rbs
1890
1893
  - sig/openai/models/graders/multi_grader.rbs