openai 0.39.0 → 0.41.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/README.md +1 -1
  4. data/lib/openai/internal/transport/pooled_net_requester.rb +12 -10
  5. data/lib/openai/models/eval_create_params.rb +12 -13
  6. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +12 -13
  7. data/lib/openai/models/evals/run_cancel_response.rb +12 -13
  8. data/lib/openai/models/evals/run_create_params.rb +12 -13
  9. data/lib/openai/models/evals/run_create_response.rb +12 -13
  10. data/lib/openai/models/evals/run_list_response.rb +12 -13
  11. data/lib/openai/models/evals/run_retrieve_response.rb +12 -13
  12. data/lib/openai/models/graders/grader_input_item.rb +87 -0
  13. data/lib/openai/models/graders/grader_inputs.rb +13 -0
  14. data/lib/openai/models/graders/label_model_grader.rb +12 -13
  15. data/lib/openai/models/graders/score_model_grader.rb +12 -13
  16. data/lib/openai/models/image.rb +6 -6
  17. data/lib/openai/models/image_edit_completed_event.rb +5 -3
  18. data/lib/openai/models/image_edit_params.rb +34 -32
  19. data/lib/openai/models/image_gen_completed_event.rb +5 -3
  20. data/lib/openai/models/image_generate_params.rb +38 -36
  21. data/lib/openai/models/image_model.rb +1 -0
  22. data/lib/openai/models/images_response.rb +31 -1
  23. data/lib/openai/models/responses/tool.rb +22 -8
  24. data/lib/openai/models/video_model.rb +3 -0
  25. data/lib/openai/resources/images.rb +6 -6
  26. data/lib/openai/version.rb +1 -1
  27. data/lib/openai.rb +2 -0
  28. data/rbi/openai/models/eval_create_params.rbi +35 -16
  29. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +35 -16
  30. data/rbi/openai/models/evals/run_cancel_response.rbi +17 -14
  31. data/rbi/openai/models/evals/run_create_params.rbi +35 -16
  32. data/rbi/openai/models/evals/run_create_response.rbi +17 -14
  33. data/rbi/openai/models/evals/run_list_response.rbi +17 -14
  34. data/rbi/openai/models/evals/run_retrieve_response.rbi +17 -14
  35. data/rbi/openai/models/graders/grader_input_item.rbi +112 -0
  36. data/rbi/openai/models/graders/grader_inputs.rbi +18 -0
  37. data/rbi/openai/models/graders/label_model_grader.rbi +35 -16
  38. data/rbi/openai/models/graders/score_model_grader.rbi +35 -16
  39. data/rbi/openai/models/image.rbi +10 -10
  40. data/rbi/openai/models/image_edit_completed_event.rbi +6 -3
  41. data/rbi/openai/models/image_edit_params.rbi +49 -46
  42. data/rbi/openai/models/image_gen_completed_event.rbi +6 -3
  43. data/rbi/openai/models/image_generate_params.rbi +54 -51
  44. data/rbi/openai/models/image_model.rbi +1 -0
  45. data/rbi/openai/models/images_response.rbi +61 -3
  46. data/rbi/openai/models/responses/tool.rbi +38 -16
  47. data/rbi/openai/models/video_model.rbi +6 -0
  48. data/rbi/openai/resources/images.rbi +72 -68
  49. data/sig/openai/models/eval_create_params.rbs +1 -3
  50. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +1 -3
  51. data/sig/openai/models/evals/run_cancel_response.rbs +1 -3
  52. data/sig/openai/models/evals/run_create_params.rbs +1 -3
  53. data/sig/openai/models/evals/run_create_response.rbs +1 -3
  54. data/sig/openai/models/evals/run_list_response.rbs +1 -3
  55. data/sig/openai/models/evals/run_retrieve_response.rbs +1 -3
  56. data/sig/openai/models/graders/grader_input_item.rbs +55 -0
  57. data/sig/openai/models/graders/grader_inputs.rbs +11 -0
  58. data/sig/openai/models/graders/label_model_grader.rbs +1 -3
  59. data/sig/openai/models/graders/score_model_grader.rbs +1 -3
  60. data/sig/openai/models/image_model.rbs +6 -1
  61. data/sig/openai/models/images_response.rbs +25 -3
  62. data/sig/openai/models/responses/tool.rbs +4 -4
  63. data/sig/openai/models/video_model.rbs +9 -1
  64. metadata +8 -2
@@ -12,15 +12,15 @@ module OpenAI
12
12
  end
13
13
 
14
14
  # A text description of the desired image(s). The maximum length is 32000
15
- # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
16
- # for `dall-e-3`.
15
+ # characters for the GPT image models, 1000 characters for `dall-e-2` and 4000
16
+ # characters for `dall-e-3`.
17
17
  sig { returns(String) }
18
18
  attr_accessor :prompt
19
19
 
20
20
  # Allows to set transparency for the background of the generated image(s). This
21
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
22
- # `opaque` or `auto` (default value). When `auto` is used, the model will
23
- # automatically determine the best background for the image.
21
+ # parameter is only supported for the GPT image models. Must be one of
22
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
23
+ # model will automatically determine the best background for the image.
24
24
  #
25
25
  # If `transparent`, the output format needs to support transparency, so it should
26
26
  # be set to either `png` (default value) or `webp`.
@@ -29,14 +29,15 @@ module OpenAI
29
29
  end
30
30
  attr_accessor :background
31
31
 
32
- # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
33
- # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
34
- # `gpt-image-1` is used.
32
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
33
+ # image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
34
+ # `dall-e-2` unless a parameter specific to the GPT image models is used.
35
35
  sig { returns(T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol))) }
36
36
  attr_accessor :model
37
37
 
38
- # Control the content-moderation level for images generated by `gpt-image-1`. Must
39
- # be either `low` for less restrictive filtering or `auto` (default value).
38
+ # Control the content-moderation level for images generated by the GPT image
39
+ # models. Must be either `low` for less restrictive filtering or `auto` (default
40
+ # value).
40
41
  sig do
41
42
  returns(T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol))
42
43
  end
@@ -48,13 +49,13 @@ module OpenAI
48
49
  attr_accessor :n
49
50
 
50
51
  # The compression level (0-100%) for the generated images. This parameter is only
51
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
52
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
52
53
  # defaults to 100.
53
54
  sig { returns(T.nilable(Integer)) }
54
55
  attr_accessor :output_compression
55
56
 
56
57
  # The format in which the generated images are returned. This parameter is only
57
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
58
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
58
59
  sig do
59
60
  returns(T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol))
60
61
  end
@@ -73,7 +74,7 @@ module OpenAI
73
74
  #
74
75
  # - `auto` (default value) will automatically select the best quality for the
75
76
  # given model.
76
- # - `high`, `medium` and `low` are supported for `gpt-image-1`.
77
+ # - `high`, `medium` and `low` are supported for the GPT image models.
77
78
  # - `hd` and `standard` are supported for `dall-e-3`.
78
79
  # - `standard` is the only option for `dall-e-2`.
79
80
  sig { returns(T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol)) }
@@ -81,8 +82,8 @@ module OpenAI
81
82
 
82
83
  # The format in which generated images with `dall-e-2` and `dall-e-3` are
83
84
  # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
84
- # after the image has been generated. This parameter isn't supported for
85
- # `gpt-image-1` which will always return base64-encoded images.
85
+ # after the image has been generated. This parameter isn't supported for the GPT
86
+ # image models, which always return base64-encoded images.
86
87
  sig do
87
88
  returns(
88
89
  T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol)
@@ -91,9 +92,9 @@ module OpenAI
91
92
  attr_accessor :response_format
92
93
 
93
94
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
94
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
95
- # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
96
- # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
95
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
96
+ # models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
97
+ # `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
97
98
  sig { returns(T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol)) }
98
99
  attr_accessor :size
99
100
 
@@ -137,33 +138,34 @@ module OpenAI
137
138
  end
138
139
  def self.new(
139
140
  # A text description of the desired image(s). The maximum length is 32000
140
- # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
141
- # for `dall-e-3`.
141
+ # characters for the GPT image models, 1000 characters for `dall-e-2` and 4000
142
+ # characters for `dall-e-3`.
142
143
  prompt:,
143
144
  # Allows to set transparency for the background of the generated image(s). This
144
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
145
- # `opaque` or `auto` (default value). When `auto` is used, the model will
146
- # automatically determine the best background for the image.
145
+ # parameter is only supported for the GPT image models. Must be one of
146
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
147
+ # model will automatically determine the best background for the image.
147
148
  #
148
149
  # If `transparent`, the output format needs to support transparency, so it should
149
150
  # be set to either `png` (default value) or `webp`.
150
151
  background: nil,
151
- # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
152
- # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
153
- # `gpt-image-1` is used.
152
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
153
+ # image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
154
+ # `dall-e-2` unless a parameter specific to the GPT image models is used.
154
155
  model: nil,
155
- # Control the content-moderation level for images generated by `gpt-image-1`. Must
156
- # be either `low` for less restrictive filtering or `auto` (default value).
156
+ # Control the content-moderation level for images generated by the GPT image
157
+ # models. Must be either `low` for less restrictive filtering or `auto` (default
158
+ # value).
157
159
  moderation: nil,
158
160
  # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
159
161
  # `n=1` is supported.
160
162
  n: nil,
161
163
  # The compression level (0-100%) for the generated images. This parameter is only
162
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
164
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
163
165
  # defaults to 100.
164
166
  output_compression: nil,
165
167
  # The format in which the generated images are returned. This parameter is only
166
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
168
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
167
169
  output_format: nil,
168
170
  # The number of partial images to generate. This parameter is used for streaming
169
171
  # responses that return partial images. Value must be between 0 and 3. When set to
@@ -176,19 +178,19 @@ module OpenAI
176
178
  #
177
179
  # - `auto` (default value) will automatically select the best quality for the
178
180
  # given model.
179
- # - `high`, `medium` and `low` are supported for `gpt-image-1`.
181
+ # - `high`, `medium` and `low` are supported for the GPT image models.
180
182
  # - `hd` and `standard` are supported for `dall-e-3`.
181
183
  # - `standard` is the only option for `dall-e-2`.
182
184
  quality: nil,
183
185
  # The format in which generated images with `dall-e-2` and `dall-e-3` are
184
186
  # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
185
- # after the image has been generated. This parameter isn't supported for
186
- # `gpt-image-1` which will always return base64-encoded images.
187
+ # after the image has been generated. This parameter isn't supported for the GPT
188
+ # image models, which always return base64-encoded images.
187
189
  response_format: nil,
188
190
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
189
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
190
- # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
191
- # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
191
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
192
+ # models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
193
+ # `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
192
194
  size: nil,
193
195
  # The style of the generated images. This parameter is only supported for
194
196
  # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
@@ -231,9 +233,9 @@ module OpenAI
231
233
  end
232
234
 
233
235
  # Allows to set transparency for the background of the generated image(s). This
234
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
235
- # `opaque` or `auto` (default value). When `auto` is used, the model will
236
- # automatically determine the best background for the image.
236
+ # parameter is only supported for the GPT image models. Must be one of
237
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
238
+ # model will automatically determine the best background for the image.
237
239
  #
238
240
  # If `transparent`, the output format needs to support transparency, so it should
239
241
  # be set to either `png` (default value) or `webp`.
@@ -265,9 +267,9 @@ module OpenAI
265
267
  end
266
268
  end
267
269
 
268
- # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
269
- # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
270
- # `gpt-image-1` is used.
270
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
271
+ # image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
272
+ # `dall-e-2` unless a parameter specific to the GPT image models is used.
271
273
  module Model
272
274
  extend OpenAI::Internal::Type::Union
273
275
 
@@ -283,8 +285,9 @@ module OpenAI
283
285
  end
284
286
  end
285
287
 
286
- # Control the content-moderation level for images generated by `gpt-image-1`. Must
287
- # be either `low` for less restrictive filtering or `auto` (default value).
288
+ # Control the content-moderation level for images generated by the GPT image
289
+ # models. Must be either `low` for less restrictive filtering or `auto` (default
290
+ # value).
288
291
  module Moderation
289
292
  extend OpenAI::Internal::Type::Enum
290
293
 
@@ -308,7 +311,7 @@ module OpenAI
308
311
  end
309
312
 
310
313
  # The format in which the generated images are returned. This parameter is only
311
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
314
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
312
315
  module OutputFormat
313
316
  extend OpenAI::Internal::Type::Enum
314
317
 
@@ -338,7 +341,7 @@ module OpenAI
338
341
  #
339
342
  # - `auto` (default value) will automatically select the best quality for the
340
343
  # given model.
341
- # - `high`, `medium` and `low` are supported for `gpt-image-1`.
344
+ # - `high`, `medium` and `low` are supported for the GPT image models.
342
345
  # - `hd` and `standard` are supported for `dall-e-3`.
343
346
  # - `standard` is the only option for `dall-e-2`.
344
347
  module Quality
@@ -368,8 +371,8 @@ module OpenAI
368
371
 
369
372
  # The format in which generated images with `dall-e-2` and `dall-e-3` are
370
373
  # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
371
- # after the image has been generated. This parameter isn't supported for
372
- # `gpt-image-1` which will always return base64-encoded images.
374
+ # after the image has been generated. This parameter isn't supported for the GPT
375
+ # image models, which always return base64-encoded images.
373
376
  module ResponseFormat
374
377
  extend OpenAI::Internal::Type::Enum
375
378
 
@@ -397,9 +400,9 @@ module OpenAI
397
400
  end
398
401
 
399
402
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
400
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
401
- # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
402
- # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
403
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
404
+ # models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
405
+ # `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
403
406
  module Size
404
407
  extend OpenAI::Internal::Type::Enum
405
408
 
@@ -8,6 +8,7 @@ module OpenAI
8
8
  TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ImageModel) }
9
9
  OrSymbol = T.type_alias { T.any(Symbol, String) }
10
10
 
11
+ GPT_IMAGE_1_5 = T.let(:"gpt-image-1.5", OpenAI::ImageModel::TaggedSymbol)
11
12
  DALL_E_2 = T.let(:"dall-e-2", OpenAI::ImageModel::TaggedSymbol)
12
13
  DALL_E_3 = T.let(:"dall-e-3", OpenAI::ImageModel::TaggedSymbol)
13
14
  GPT_IMAGE_1 = T.let(:"gpt-image-1", OpenAI::ImageModel::TaggedSymbol)
@@ -232,6 +232,20 @@ module OpenAI
232
232
  sig { returns(Integer) }
233
233
  attr_accessor :total_tokens
234
234
 
235
+ # The output token details for the image generation.
236
+ sig do
237
+ returns(T.nilable(OpenAI::ImagesResponse::Usage::OutputTokensDetails))
238
+ end
239
+ attr_reader :output_tokens_details
240
+
241
+ sig do
242
+ params(
243
+ output_tokens_details:
244
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails::OrHash
245
+ ).void
246
+ end
247
+ attr_writer :output_tokens_details
248
+
235
249
  # For `gpt-image-1` only, the token usage information for the image generation.
236
250
  sig do
237
251
  params(
@@ -239,7 +253,9 @@ module OpenAI
239
253
  input_tokens_details:
240
254
  OpenAI::ImagesResponse::Usage::InputTokensDetails::OrHash,
241
255
  output_tokens: Integer,
242
- total_tokens: Integer
256
+ total_tokens: Integer,
257
+ output_tokens_details:
258
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails::OrHash
243
259
  ).returns(T.attached_class)
244
260
  end
245
261
  def self.new(
@@ -250,7 +266,9 @@ module OpenAI
250
266
  # The number of output tokens generated by the model.
251
267
  output_tokens:,
252
268
  # The total number of tokens (images and text) used for the image generation.
253
- total_tokens:
269
+ total_tokens:,
270
+ # The output token details for the image generation.
271
+ output_tokens_details: nil
254
272
  )
255
273
  end
256
274
 
@@ -261,7 +279,9 @@ module OpenAI
261
279
  input_tokens_details:
262
280
  OpenAI::ImagesResponse::Usage::InputTokensDetails,
263
281
  output_tokens: Integer,
264
- total_tokens: Integer
282
+ total_tokens: Integer,
283
+ output_tokens_details:
284
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails
265
285
  }
266
286
  )
267
287
  end
@@ -305,6 +325,44 @@ module OpenAI
305
325
  def to_hash
306
326
  end
307
327
  end
328
+
329
+ class OutputTokensDetails < OpenAI::Internal::Type::BaseModel
330
+ OrHash =
331
+ T.type_alias do
332
+ T.any(
333
+ OpenAI::ImagesResponse::Usage::OutputTokensDetails,
334
+ OpenAI::Internal::AnyHash
335
+ )
336
+ end
337
+
338
+ # The number of image output tokens generated by the model.
339
+ sig { returns(Integer) }
340
+ attr_accessor :image_tokens
341
+
342
+ # The number of text output tokens generated by the model.
343
+ sig { returns(Integer) }
344
+ attr_accessor :text_tokens
345
+
346
+ # The output token details for the image generation.
347
+ sig do
348
+ params(image_tokens: Integer, text_tokens: Integer).returns(
349
+ T.attached_class
350
+ )
351
+ end
352
+ def self.new(
353
+ # The number of image output tokens generated by the model.
354
+ image_tokens:,
355
+ # The number of text output tokens generated by the model.
356
+ text_tokens:
357
+ )
358
+ end
359
+
360
+ sig do
361
+ override.returns({ image_tokens: Integer, text_tokens: Integer })
362
+ end
363
+ def to_hash
364
+ end
365
+ end
308
366
  end
309
367
  end
310
368
  end
@@ -888,7 +888,10 @@ module OpenAI
888
888
  sig do
889
889
  returns(
890
890
  T.nilable(
891
- OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
891
+ T.any(
892
+ String,
893
+ OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
894
+ )
892
895
  )
893
896
  )
894
897
  end
@@ -896,7 +899,11 @@ module OpenAI
896
899
 
897
900
  sig do
898
901
  params(
899
- model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
902
+ model:
903
+ T.any(
904
+ String,
905
+ OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
906
+ )
900
907
  ).void
901
908
  end
902
909
  attr_writer :model
@@ -990,7 +997,7 @@ module OpenAI
990
997
  end
991
998
  attr_writer :size
992
999
 
993
- # A tool that generates images using a model like `gpt-image-1`.
1000
+ # A tool that generates images using the GPT image models.
994
1001
  sig do
995
1002
  params(
996
1003
  background:
@@ -1001,7 +1008,11 @@ module OpenAI
1001
1008
  ),
1002
1009
  input_image_mask:
1003
1010
  OpenAI::Responses::Tool::ImageGeneration::InputImageMask::OrHash,
1004
- model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol,
1011
+ model:
1012
+ T.any(
1013
+ String,
1014
+ OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
1015
+ ),
1005
1016
  moderation:
1006
1017
  OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol,
1007
1018
  output_compression: Integer,
@@ -1062,7 +1073,10 @@ module OpenAI
1062
1073
  input_image_mask:
1063
1074
  OpenAI::Responses::Tool::ImageGeneration::InputImageMask,
1064
1075
  model:
1065
- OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol,
1076
+ T.any(
1077
+ String,
1078
+ OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
1079
+ ),
1066
1080
  moderation:
1067
1081
  OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol,
1068
1082
  output_compression: Integer,
@@ -1202,7 +1216,25 @@ module OpenAI
1202
1216
 
1203
1217
  # The image generation model to use. Default: `gpt-image-1`.
1204
1218
  module Model
1205
- extend OpenAI::Internal::Type::Enum
1219
+ extend OpenAI::Internal::Type::Union
1220
+
1221
+ Variants =
1222
+ T.type_alias do
1223
+ T.any(
1224
+ String,
1225
+ OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
1226
+ )
1227
+ end
1228
+
1229
+ sig do
1230
+ override.returns(
1231
+ T::Array[
1232
+ OpenAI::Responses::Tool::ImageGeneration::Model::Variants
1233
+ ]
1234
+ )
1235
+ end
1236
+ def self.variants
1237
+ end
1206
1238
 
1207
1239
  TaggedSymbol =
1208
1240
  T.type_alias do
@@ -1220,16 +1252,6 @@ module OpenAI
1220
1252
  :"gpt-image-1-mini",
1221
1253
  OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
1222
1254
  )
1223
-
1224
- sig do
1225
- override.returns(
1226
- T::Array[
1227
- OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
1228
- ]
1229
- )
1230
- end
1231
- def self.values
1232
- end
1233
1255
  end
1234
1256
 
1235
1257
  # Moderation level for the generated image. Default: `auto`.
@@ -10,6 +10,12 @@ module OpenAI
10
10
 
11
11
  SORA_2 = T.let(:"sora-2", OpenAI::VideoModel::TaggedSymbol)
12
12
  SORA_2_PRO = T.let(:"sora-2-pro", OpenAI::VideoModel::TaggedSymbol)
13
+ SORA_2_2025_10_06 =
14
+ T.let(:"sora-2-2025-10-06", OpenAI::VideoModel::TaggedSymbol)
15
+ SORA_2_PRO_2025_10_06 =
16
+ T.let(:"sora-2-pro-2025-10-06", OpenAI::VideoModel::TaggedSymbol)
17
+ SORA_2_2025_12_08 =
18
+ T.let(:"sora-2-2025-12-08", OpenAI::VideoModel::TaggedSymbol)
13
19
 
14
20
  sig { override.returns(T::Array[OpenAI::VideoModel::TaggedSymbol]) }
15
21
  def self.values