openai 0.39.0 → 0.41.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/README.md +1 -1
  4. data/lib/openai/internal/transport/pooled_net_requester.rb +12 -10
  5. data/lib/openai/models/eval_create_params.rb +12 -13
  6. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +12 -13
  7. data/lib/openai/models/evals/run_cancel_response.rb +12 -13
  8. data/lib/openai/models/evals/run_create_params.rb +12 -13
  9. data/lib/openai/models/evals/run_create_response.rb +12 -13
  10. data/lib/openai/models/evals/run_list_response.rb +12 -13
  11. data/lib/openai/models/evals/run_retrieve_response.rb +12 -13
  12. data/lib/openai/models/graders/grader_input_item.rb +87 -0
  13. data/lib/openai/models/graders/grader_inputs.rb +13 -0
  14. data/lib/openai/models/graders/label_model_grader.rb +12 -13
  15. data/lib/openai/models/graders/score_model_grader.rb +12 -13
  16. data/lib/openai/models/image.rb +6 -6
  17. data/lib/openai/models/image_edit_completed_event.rb +5 -3
  18. data/lib/openai/models/image_edit_params.rb +34 -32
  19. data/lib/openai/models/image_gen_completed_event.rb +5 -3
  20. data/lib/openai/models/image_generate_params.rb +38 -36
  21. data/lib/openai/models/image_model.rb +1 -0
  22. data/lib/openai/models/images_response.rb +31 -1
  23. data/lib/openai/models/responses/tool.rb +22 -8
  24. data/lib/openai/models/video_model.rb +3 -0
  25. data/lib/openai/resources/images.rb +6 -6
  26. data/lib/openai/version.rb +1 -1
  27. data/lib/openai.rb +2 -0
  28. data/rbi/openai/models/eval_create_params.rbi +35 -16
  29. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +35 -16
  30. data/rbi/openai/models/evals/run_cancel_response.rbi +17 -14
  31. data/rbi/openai/models/evals/run_create_params.rbi +35 -16
  32. data/rbi/openai/models/evals/run_create_response.rbi +17 -14
  33. data/rbi/openai/models/evals/run_list_response.rbi +17 -14
  34. data/rbi/openai/models/evals/run_retrieve_response.rbi +17 -14
  35. data/rbi/openai/models/graders/grader_input_item.rbi +112 -0
  36. data/rbi/openai/models/graders/grader_inputs.rbi +18 -0
  37. data/rbi/openai/models/graders/label_model_grader.rbi +35 -16
  38. data/rbi/openai/models/graders/score_model_grader.rbi +35 -16
  39. data/rbi/openai/models/image.rbi +10 -10
  40. data/rbi/openai/models/image_edit_completed_event.rbi +6 -3
  41. data/rbi/openai/models/image_edit_params.rbi +49 -46
  42. data/rbi/openai/models/image_gen_completed_event.rbi +6 -3
  43. data/rbi/openai/models/image_generate_params.rbi +54 -51
  44. data/rbi/openai/models/image_model.rbi +1 -0
  45. data/rbi/openai/models/images_response.rbi +61 -3
  46. data/rbi/openai/models/responses/tool.rbi +38 -16
  47. data/rbi/openai/models/video_model.rbi +6 -0
  48. data/rbi/openai/resources/images.rbi +72 -68
  49. data/sig/openai/models/eval_create_params.rbs +1 -3
  50. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +1 -3
  51. data/sig/openai/models/evals/run_cancel_response.rbs +1 -3
  52. data/sig/openai/models/evals/run_create_params.rbs +1 -3
  53. data/sig/openai/models/evals/run_create_response.rbs +1 -3
  54. data/sig/openai/models/evals/run_list_response.rbs +1 -3
  55. data/sig/openai/models/evals/run_retrieve_response.rbs +1 -3
  56. data/sig/openai/models/graders/grader_input_item.rbs +55 -0
  57. data/sig/openai/models/graders/grader_inputs.rbs +11 -0
  58. data/sig/openai/models/graders/label_model_grader.rbs +1 -3
  59. data/sig/openai/models/graders/score_model_grader.rbs +1 -3
  60. data/sig/openai/models/image_model.rbs +6 -1
  61. data/sig/openai/models/images_response.rbs +25 -3
  62. data/sig/openai/models/responses/tool.rbs +4 -4
  63. data/sig/openai/models/video_model.rbs +9 -1
  64. metadata +8 -2
data/lib/openai/models/graders/grader_input_item.rb
@@ -0,0 +1,87 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     module Graders
+       # A single content item: input text, output text, input image, or input audio.
+       module GraderInputItem
+         extend OpenAI::Internal::Type::Union
+
+         # A text input to the model.
+         variant String
+
+         # A text input to the model.
+         variant -> { OpenAI::Responses::ResponseInputText }
+
+         # A text output from the model.
+         variant -> { OpenAI::Graders::GraderInputItem::OutputText }
+
+         # An image input block used within EvalItem content arrays.
+         variant -> { OpenAI::Graders::GraderInputItem::InputImage }
+
+         # An audio input to the model.
+         variant -> { OpenAI::Responses::ResponseInputAudio }
+
+         class OutputText < OpenAI::Internal::Type::BaseModel
+           # @!attribute text
+           #   The text output from the model.
+           #
+           #   @return [String]
+           required :text, String
+
+           # @!attribute type
+           #   The type of the output text. Always `output_text`.
+           #
+           #   @return [Symbol, :output_text]
+           required :type, const: :output_text
+
+           # @!method initialize(text:, type: :output_text)
+           #   Some parameter documentations has been truncated, see
+           #   {OpenAI::Models::Graders::GraderInputItem::OutputText} for more details.
+           #
+           #   A text output from the model.
+           #
+           #   @param text [String] The text output from the model.
+           #
+           #   @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
+         end
+
+         class InputImage < OpenAI::Internal::Type::BaseModel
+           # @!attribute image_url
+           #   The URL of the image input.
+           #
+           #   @return [String]
+           required :image_url, String
+
+           # @!attribute type
+           #   The type of the image input. Always `input_image`.
+           #
+           #   @return [Symbol, :input_image]
+           required :type, const: :input_image
+
+           # @!attribute detail
+           #   The detail level of the image to be sent to the model. One of `high`, `low`, or
+           #   `auto`. Defaults to `auto`.
+           #
+           #   @return [String, nil]
+           optional :detail, String
+
+           # @!method initialize(image_url:, detail: nil, type: :input_image)
+           #   Some parameter documentations has been truncated, see
+           #   {OpenAI::Models::Graders::GraderInputItem::InputImage} for more details.
+           #
+           #   An image input block used within EvalItem content arrays.
+           #
+           #   @param image_url [String] The URL of the image input.
+           #
+           #   @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
+           #
+           #   @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
+         end
+
+         # @!method self.variants
+         #   @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio)]
+       end
+     end
+   end
+ end
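Note: the new `GraderInputItem` union gives each grader content item a concrete class. A minimal sketch of building the typed items directly, using the keyword constructors documented in the file above (values are illustrative):

  require "openai"

  output = OpenAI::Models::Graders::GraderInputItem::OutputText.new(
    text: "The capital of France is Paris." # type defaults to :output_text
  )

  image = OpenAI::Models::Graders::GraderInputItem::InputImage.new(
    image_url: "https://example.com/diagram.png",
    detail: "low" # optional: "high", "low", or "auto" per the attribute docs
  )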
data/lib/openai/models/graders/grader_inputs.rb
@@ -0,0 +1,13 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     module Graders
+       # @type [OpenAI::Internal::Type::Converter]
+       GraderInputs = OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Graders::GraderInputItem }]
+     end
+
+     # @type [OpenAI::Internal::Type::Converter]
+     GraderInputs = Graders::GraderInputs
+   end
+ end
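`GraderInputs` is an array-of-`GraderInputItem` converter, so an array mixing the variants above is accepted wherever it is referenced. A sketch, assuming the SDK's usual coercion of plain values (the template strings are hypothetical):

  content = [
    "Grade the answer to: {{item.question}}", # plain String variant
    OpenAI::Models::Responses::ResponseInputText.new(text: "Reference: {{item.reference}}"),
    OpenAI::Models::Graders::GraderInputItem::InputImage.new(image_url: "{{item.image_url}}")
  ]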
data/lib/openai/models/graders/label_model_grader.rb
@@ -57,9 +57,10 @@ module OpenAI
 
  class Input < OpenAI::Internal::Type::BaseModel
  # @!attribute content
- # Inputs to the model - can contain template strings.
+ # Inputs to the model - can contain template strings. Supports text, output text,
+ # input images, and input audio, either as a single item or an array of items.
  #
- # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<Object>]
+ # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio>]
  required :content, union: -> { OpenAI::Graders::LabelModelGrader::Input::Content }
 
  # @!attribute role
@@ -85,13 +86,14 @@ module OpenAI
  # `assistant` role are presumed to have been generated by the model in previous
  # interactions.
  #
- # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<Object>] Inputs to the model - can contain template strings.
+ # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio>] Inputs to the model - can contain template strings. Supports text, output text,
  #
  # @param role [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Role] The role of the message input. One of `user`, `assistant`, `system`, or
  #
  # @param type [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Type] The type of the message input. Always `message`.
 
- # Inputs to the model - can contain template strings.
+ # Inputs to the model - can contain template strings. Supports text, output text,
+ # input images, and input audio, either as a single item or an array of items.
  #
  # @see OpenAI::Models::Graders::LabelModelGrader::Input#content
  module Content
@@ -106,14 +108,15 @@ module OpenAI
  # A text output from the model.
  variant -> { OpenAI::Graders::LabelModelGrader::Input::Content::OutputText }
 
- # An image input to the model.
+ # An image input block used within EvalItem content arrays.
  variant -> { OpenAI::Graders::LabelModelGrader::Input::Content::InputImage }
 
  # An audio input to the model.
  variant -> { OpenAI::Responses::ResponseInputAudio }
 
- # A list of inputs, each of which may be either an input text, input image, or input audio object.
- variant -> { OpenAI::Models::Graders::LabelModelGrader::Input::Content::AnArrayOfInputTextInputImageAndInputAudioArray }
+ # A list of inputs, each of which may be either an input text, output text, input
+ # image, or input audio object.
+ variant -> { OpenAI::Graders::GraderInputs }
 
  class OutputText < OpenAI::Internal::Type::BaseModel
  # @!attribute text
@@ -165,7 +168,7 @@ module OpenAI
  # {OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage} for more
  # details.
  #
- # An image input to the model.
+ # An image input block used within EvalItem content arrays.
  #
  # @param image_url [String] The URL of the image input.
  #
@@ -175,11 +178,7 @@ module OpenAI
  end
 
  # @!method self.variants
- # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<Object>)]
-
- # @type [OpenAI::Internal::Type::Converter]
- AnArrayOfInputTextInputImageAndInputAudioArray =
-   OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
+ # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio>)]
  end
 
  # The role of the message input. One of `user`, `assistant`, `system`, or
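With the `Content` union now pointing at `OpenAI::Graders::GraderInputs`, a `LabelModelGrader::Input` message can carry a typed array instead of an untyped one. A hedged sketch (role and template fields are illustrative; `ScoreModelGrader::Input` below accepts the same shapes):

  input = OpenAI::Models::Graders::LabelModelGrader::Input.new(
    role: :user,
    content: [
      OpenAI::Models::Responses::ResponseInputText.new(text: "Question: {{item.question}}"),
      OpenAI::Models::Graders::GraderInputItem::OutputText.new(text: "{{sample.output_text}}"),
      OpenAI::Models::Graders::GraderInputItem::InputImage.new(image_url: "{{item.image_url}}", detail: "auto")
    ]
  )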
data/lib/openai/models/graders/score_model_grader.rb
@@ -61,9 +61,10 @@ module OpenAI
 
  class Input < OpenAI::Internal::Type::BaseModel
  # @!attribute content
- # Inputs to the model - can contain template strings.
+ # Inputs to the model - can contain template strings. Supports text, output text,
+ # input images, and input audio, either as a single item or an array of items.
  #
- # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<Object>]
+ # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio>]
  required :content, union: -> { OpenAI::Graders::ScoreModelGrader::Input::Content }
 
  # @!attribute role
@@ -89,13 +90,14 @@ module OpenAI
  # `assistant` role are presumed to have been generated by the model in previous
  # interactions.
  #
- # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<Object>] Inputs to the model - can contain template strings.
+ # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio>] Inputs to the model - can contain template strings. Supports text, output text,
  #
  # @param role [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Role] The role of the message input. One of `user`, `assistant`, `system`, or
  #
  # @param type [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Type] The type of the message input. Always `message`.
 
- # Inputs to the model - can contain template strings.
+ # Inputs to the model - can contain template strings. Supports text, output text,
+ # input images, and input audio, either as a single item or an array of items.
  #
  # @see OpenAI::Models::Graders::ScoreModelGrader::Input#content
  module Content
@@ -110,14 +112,15 @@ module OpenAI
  # A text output from the model.
  variant -> { OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText }
 
- # An image input to the model.
+ # An image input block used within EvalItem content arrays.
  variant -> { OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage }
 
  # An audio input to the model.
  variant -> { OpenAI::Responses::ResponseInputAudio }
 
- # A list of inputs, each of which may be either an input text, input image, or input audio object.
- variant -> { OpenAI::Models::Graders::ScoreModelGrader::Input::Content::AnArrayOfInputTextInputImageAndInputAudioArray }
+ # A list of inputs, each of which may be either an input text, output text, input
+ # image, or input audio object.
+ variant -> { OpenAI::Graders::GraderInputs }
 
  class OutputText < OpenAI::Internal::Type::BaseModel
  # @!attribute text
@@ -169,7 +172,7 @@ module OpenAI
  # {OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage} for more
  # details.
  #
- # An image input to the model.
+ # An image input block used within EvalItem content arrays.
  #
  # @param image_url [String] The URL of the image input.
  #
@@ -179,11 +182,7 @@ module OpenAI
  end
 
  # @!method self.variants
- # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<Object>)]
-
- # @type [OpenAI::Internal::Type::Converter]
- AnArrayOfInputTextInputImageAndInputAudioArray =
-   OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
+ # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, OpenAI::Models::Responses::ResponseInputAudio, Array<String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio>)]
  end
 
  # The role of the message input. One of `user`, `assistant`, `system`, or
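The same content change applies here; since the SDK generally coerces plain hashes into its models, the array form can also be written without the classes. A sketch with made-up template fields:

  input = OpenAI::Models::Graders::ScoreModelGrader::Input.new(
    role: :user,
    content: [
      { type: :input_text, text: "Rate this answer: {{sample.output_text}}" },
      { type: :input_image, image_url: "{{item.image_url}}", detail: "low" }
    ]
  )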
data/lib/openai/models/image.rb
@@ -4,9 +4,9 @@ module OpenAI
  module Models
  class Image < OpenAI::Internal::Type::BaseModel
  # @!attribute b64_json
- # The base64-encoded JSON of the generated image. Default value for `gpt-image-1`,
- # and only present if `response_format` is set to `b64_json` for `dall-e-2` and
- # `dall-e-3`.
+ # The base64-encoded JSON of the generated image. Returned by default for the GPT
+ # image models, and only present if `response_format` is set to `b64_json` for
+ # `dall-e-2` and `dall-e-3`.
  #
  # @return [String, nil]
  optional :b64_json, String
@@ -19,8 +19,8 @@ module OpenAI
 
  # @!attribute url
  # When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
- # `response_format` is set to `url` (default value). Unsupported for
- # `gpt-image-1`.
+ # `response_format` is set to `url` (default value). Unsupported for the GPT image
+ # models.
  #
  # @return [String, nil]
  optional :url, String
@@ -31,7 +31,7 @@ module OpenAI
  #
  # Represents the content or the URL of an image generated by the OpenAI API.
  #
- # @param b64_json [String] The base64-encoded JSON of the generated image. Default value for `gpt-image-1`,
+ # @param b64_json [String] The base64-encoded JSON of the generated image. Returned by default for the GPT
  #
  # @param revised_prompt [String] For `dall-e-3` only, the revised prompt that was used to generate the image.
  #
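Practical note: for the GPT image models the image comes back in `b64_json`, not `url`. A minimal sketch of reading an `Image` from a response (assumes the response exposes a `data` array of `OpenAI::Models::Image`):

  require "base64"

  image = response.data.first
  if image.b64_json
    File.binwrite("output.png", Base64.decode64(image.b64_json))
  else
    puts image.url # dall-e-2 / dall-e-3 with response_format: "url"
  end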
data/lib/openai/models/image_edit_completed_event.rb
@@ -46,7 +46,8 @@ module OpenAI
  required :type, const: :"image_edit.completed"
 
  # @!attribute usage
- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  #
  # @return [OpenAI::Models::ImageEditCompletedEvent::Usage]
  required :usage, -> { OpenAI::ImageEditCompletedEvent::Usage }
@@ -69,7 +70,7 @@ module OpenAI
  #
  # @param size [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] The size of the edited image.
  #
- # @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation.
+ # @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For the GPT image models only, the token usage information for the image generat
  #
  # @param type [Symbol, :"image_edit.completed"] The type of the event. Always `image_edit.completed`.
 
@@ -161,7 +162,8 @@ module OpenAI
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::ImageEditCompletedEvent::Usage} for more details.
  #
- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  #
  # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
  #
data/lib/openai/models/image_edit_params.rb
@@ -12,7 +12,8 @@ module OpenAI
  # @!attribute image
  # The image(s) to edit. Must be a supported image file or an array of images.
  #
- # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ # For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
+ # `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
  # 50MB. You can provide up to 16 images.
  #
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
@@ -23,16 +24,16 @@ module OpenAI
 
  # @!attribute prompt
  # A text description of the desired image(s). The maximum length is 1000
- # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ # characters for `dall-e-2`, and 32000 characters for the GPT image models.
  #
  # @return [String]
  required :prompt, String
 
  # @!attribute background
  # Allows to set transparency for the background of the generated image(s). This
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- # `opaque` or `auto` (default value). When `auto` is used, the model will
- # automatically determine the best background for the image.
+ # parameter is only supported for the GPT image models. Must be one of
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ # model will automatically determine the best background for the image.
  #
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
@@ -59,9 +60,9 @@ module OpenAI
  optional :mask, OpenAI::Internal::Type::FileInput
 
  # @!attribute model
- # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
- # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
- # is used.
+ # The model to use for image generation. Only `dall-e-2` and the GPT image models
+ # are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
+ # image models is used.
  #
  # @return [String, Symbol, OpenAI::Models::ImageModel, nil]
  optional :model, union: -> { OpenAI::ImageEditParams::Model }, nil?: true
@@ -74,7 +75,7 @@ module OpenAI
 
  # @!attribute output_compression
  # The compression level (0-100%) for the generated images. This parameter is only
- # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # supported for the GPT image models with the `webp` or `jpeg` output formats, and
  # defaults to 100.
  #
  # @return [Integer, nil]
@@ -82,7 +83,7 @@ module OpenAI
 
  # @!attribute output_format
  # The format in which the generated images are returned. This parameter is only
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
  # default value is `png`.
  #
  # @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil]
@@ -101,8 +102,8 @@ module OpenAI
 
  # @!attribute quality
  # The quality of the image that will be generated. `high`, `medium` and `low` are
- # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
- # Defaults to `auto`.
+ # only supported for the GPT image models. `dall-e-2` only supports `standard`
+ # quality. Defaults to `auto`.
  #
  # @return [Symbol, OpenAI::Models::ImageEditParams::Quality, nil]
  optional :quality, enum: -> { OpenAI::ImageEditParams::Quality }, nil?: true
@@ -110,16 +111,16 @@ module OpenAI
  # @!attribute response_format
  # The format in which the generated images are returned. Must be one of `url` or
  # `b64_json`. URLs are only valid for 60 minutes after the image has been
- # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
- # will always return base64-encoded images.
+ # generated. This parameter is only supported for `dall-e-2`, as the GPT image
+ # models always return base64-encoded images.
  #
  # @return [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil]
  optional :response_format, enum: -> { OpenAI::ImageEditParams::ResponseFormat }, nil?: true
 
  # @!attribute size
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
- # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ # models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
  #
  # @return [Symbol, OpenAI::Models::ImageEditParams::Size, nil]
  optional :size, enum: -> { OpenAI::ImageEditParams::Size }, nil?: true
@@ -146,7 +147,7 @@ module OpenAI
  #
  # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
  #
- # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup
+ # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and the GPT image models
  #
  # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
  #
@@ -168,7 +169,8 @@ module OpenAI
 
  # The image(s) to edit. Must be a supported image file or an array of images.
  #
- # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ # For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
+ # `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
  # 50MB. You can provide up to 16 images.
  #
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
@@ -188,9 +190,9 @@ module OpenAI
  end
 
  # Allows to set transparency for the background of the generated image(s). This
- # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- # `opaque` or `auto` (default value). When `auto` is used, the model will
- # automatically determine the best background for the image.
+ # parameter is only supported for the GPT image models. Must be one of
+ # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ # model will automatically determine the best background for the image.
  #
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
@@ -219,15 +221,15 @@ module OpenAI
  # @return [Array<Symbol>]
  end
 
- # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
- # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
- # is used.
+ # The model to use for image generation. Only `dall-e-2` and the GPT image models
+ # are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
+ # image models is used.
  module Model
  extend OpenAI::Internal::Type::Union
 
  variant String
 
- # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
+ # The model to use for image generation. Only `dall-e-2` and the GPT image models are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT image models is used.
  variant enum: -> { OpenAI::ImageModel }
 
  # @!method self.variants
@@ -235,7 +237,7 @@ module OpenAI
  end
 
  # The format in which the generated images are returned. This parameter is only
- # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
  # default value is `png`.
  module OutputFormat
  extend OpenAI::Internal::Type::Enum
@@ -249,8 +251,8 @@ module OpenAI
  end
 
  # The quality of the image that will be generated. `high`, `medium` and `low` are
- # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
- # Defaults to `auto`.
+ # only supported for the GPT image models. `dall-e-2` only supports `standard`
+ # quality. Defaults to `auto`.
  module Quality
  extend OpenAI::Internal::Type::Enum
 
@@ -266,8 +268,8 @@ module OpenAI
 
  # The format in which the generated images are returned. Must be one of `url` or
  # `b64_json`. URLs are only valid for 60 minutes after the image has been
- # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
- # will always return base64-encoded images.
+ # generated. This parameter is only supported for `dall-e-2`, as the GPT image
+ # models always return base64-encoded images.
  module ResponseFormat
  extend OpenAI::Internal::Type::Enum
 
@@ -279,8 +281,8 @@ module OpenAI
  end
 
  # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- # (landscape), `1024x1536` (portrait), or `auto` (default value) for
- # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+ # models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
  module Size
  extend OpenAI::Internal::Type::Enum
 
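Taken together, an edit request against the GPT image models might look like the sketch below (client setup, file path, and prompt are illustrative; the parameter names come from the attributes in this file):

  require "openai"
  require "pathname"

  client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

  edited = client.images.edit(
    image: Pathname("photo.png"),  # png/webp/jpg, up to 16 images for the GPT image models
    prompt: "Add a small sailboat on the lake",
    model: "gpt-image-1",
    background: "transparent",     # GPT image models only
    output_format: "png",          # transparency requires png or webp
    quality: "high",
    size: "1024x1536"
  )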
data/lib/openai/models/image_gen_completed_event.rb
@@ -46,7 +46,8 @@ module OpenAI
  required :type, const: :"image_generation.completed"
 
  # @!attribute usage
- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  #
  # @return [OpenAI::Models::ImageGenCompletedEvent::Usage]
  required :usage, -> { OpenAI::ImageGenCompletedEvent::Usage }
@@ -69,7 +70,7 @@ module OpenAI
  #
  # @param size [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size] The size of the generated image.
  #
- # @param usage [OpenAI::Models::ImageGenCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation.
+ # @param usage [OpenAI::Models::ImageGenCompletedEvent::Usage] For the GPT image models only, the token usage information for the image generat
  #
  # @param type [Symbol, :"image_generation.completed"] The type of the event. Always `image_generation.completed`.
 
@@ -161,7 +162,8 @@ module OpenAI
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::ImageGenCompletedEvent::Usage} for more details.
  #
- # For `gpt-image-1` only, the token usage information for the image generation.
+ # For the GPT image models only, the token usage information for the image
+ # generation.
  #
  # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
  #
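The `usage` block on the completed event is where per-image token accounting lands for the GPT image models. A rough sketch of reading it from a stream of image events (how the stream is obtained is assumed, not taken from this diff):

  # `stream` is assumed to be an enumerable of image streaming events.
  stream.each do |event|
    case event
    when OpenAI::Models::ImageGenCompletedEvent
      puts "image tokens in: #{event.usage.input_tokens}"
    end
  end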