openai 0.40.0 → 0.41.0
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +15 -0
- data/README.md +1 -1
- data/lib/openai/internal/transport/pooled_net_requester.rb +12 -10
- data/lib/openai/models/graders/grader_input_item.rb +87 -0
- data/lib/openai/models/graders/grader_inputs.rb +0 -80
- data/lib/openai/models/image.rb +6 -6
- data/lib/openai/models/image_edit_completed_event.rb +5 -3
- data/lib/openai/models/image_edit_params.rb +34 -32
- data/lib/openai/models/image_gen_completed_event.rb +5 -3
- data/lib/openai/models/image_generate_params.rb +38 -36
- data/lib/openai/models/image_model.rb +1 -0
- data/lib/openai/models/images_response.rb +31 -1
- data/lib/openai/models/responses/tool.rb +22 -8
- data/lib/openai/resources/images.rb +6 -6
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +1 -0
- data/rbi/openai/models/graders/grader_input_item.rbi +112 -0
- data/rbi/openai/models/graders/grader_inputs.rbi +0 -105
- data/rbi/openai/models/image.rbi +10 -10
- data/rbi/openai/models/image_edit_completed_event.rbi +6 -3
- data/rbi/openai/models/image_edit_params.rbi +49 -46
- data/rbi/openai/models/image_gen_completed_event.rbi +6 -3
- data/rbi/openai/models/image_generate_params.rbi +54 -51
- data/rbi/openai/models/image_model.rbi +1 -0
- data/rbi/openai/models/images_response.rbi +61 -3
- data/rbi/openai/models/responses/tool.rbi +38 -16
- data/rbi/openai/resources/images.rbi +72 -68
- data/sig/openai/models/graders/grader_input_item.rbs +55 -0
- data/sig/openai/models/graders/grader_inputs.rbs +0 -50
- data/sig/openai/models/image_model.rbs +6 -1
- data/sig/openai/models/images_response.rbs +25 -3
- data/sig/openai/models/responses/tool.rbs +4 -4
- metadata +5 -2
data/lib/openai/models/image_generate_params.rb CHANGED

@@ -11,17 +11,17 @@ module OpenAI
 
 # @!attribute prompt
 # A text description of the desired image(s). The maximum length is 32000
-# characters for
-# for `dall-e-3`.
+# characters for the GPT image models, 1000 characters for `dall-e-2` and 4000
+# characters for `dall-e-3`.
 #
 # @return [String]
 required :prompt, String
 
 # @!attribute background
 # Allows to set transparency for the background of the generated image(s). This
-# parameter is only supported for
-# `opaque` or `auto` (default value). When `auto` is used, the
-# automatically determine the best background for the image.
+# parameter is only supported for the GPT image models. Must be one of
+# `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+# model will automatically determine the best background for the image.
 #
 # If `transparent`, the output format needs to support transparency, so it should
 # be set to either `png` (default value) or `webp`.

@@ -30,16 +30,17 @@ module OpenAI
 optional :background, enum: -> { OpenAI::ImageGenerateParams::Background }, nil?: true
 
 # @!attribute model
-# The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
-# `gpt-image-1
-# `
+# The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
+# image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
+# `dall-e-2` unless a parameter specific to the GPT image models is used.
 #
 # @return [String, Symbol, OpenAI::Models::ImageModel, nil]
 optional :model, union: -> { OpenAI::ImageGenerateParams::Model }, nil?: true
 
 # @!attribute moderation
-# Control the content-moderation level for images generated by
-# be either `low` for less restrictive filtering or `auto` (default
+# Control the content-moderation level for images generated by the GPT image
+# models. Must be either `low` for less restrictive filtering or `auto` (default
+# value).
 #
 # @return [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil]
 optional :moderation, enum: -> { OpenAI::ImageGenerateParams::Moderation }, nil?: true

@@ -53,7 +54,7 @@ module OpenAI
 
 # @!attribute output_compression
 # The compression level (0-100%) for the generated images. This parameter is only
-# supported for
+# supported for the GPT image models with the `webp` or `jpeg` output formats, and
 # defaults to 100.
 #
 # @return [Integer, nil]

@@ -61,7 +62,7 @@ module OpenAI
 
 # @!attribute output_format
 # The format in which the generated images are returned. This parameter is only
-# supported for
+# supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
 #
 # @return [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil]
 optional :output_format, enum: -> { OpenAI::ImageGenerateParams::OutputFormat }, nil?: true

@@ -82,7 +83,7 @@ module OpenAI
 #
 # - `auto` (default value) will automatically select the best quality for the
 # given model.
-# - `high`, `medium` and `low` are supported for
+# - `high`, `medium` and `low` are supported for the GPT image models.
 # - `hd` and `standard` are supported for `dall-e-3`.
 # - `standard` is the only option for `dall-e-2`.
 #

@@ -92,17 +93,17 @@ module OpenAI
 # @!attribute response_format
 # The format in which generated images with `dall-e-2` and `dall-e-3` are
 # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
-# after the image has been generated. This parameter isn't supported for
-#
+# after the image has been generated. This parameter isn't supported for the GPT
+# image models, which always return base64-encoded images.
 #
 # @return [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil]
 optional :response_format, enum: -> { OpenAI::ImageGenerateParams::ResponseFormat }, nil?: true
 
 # @!attribute size
 # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
-# (landscape), `1024x1536` (portrait), or `auto` (default value) for
-#
-#
+# (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+# models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
+# `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
 #
 # @return [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil]
 optional :size, enum: -> { OpenAI::ImageGenerateParams::Size }, nil?: true

@@ -132,9 +133,9 @@ module OpenAI
 #
 # @param background [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil] Allows to set transparency for the background of the generated image(s).
 #
-# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT i
 #
-# @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by
+# @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by the GPT image model
 #
 # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
 #

@@ -157,9 +158,9 @@ module OpenAI
 # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
 
 # Allows to set transparency for the background of the generated image(s). This
-# parameter is only supported for
-# `opaque` or `auto` (default value). When `auto` is used, the
-# automatically determine the best background for the image.
+# parameter is only supported for the GPT image models. Must be one of
+# `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+# model will automatically determine the best background for the image.
 #
 # If `transparent`, the output format needs to support transparency, so it should
 # be set to either `png` (default value) or `webp`.

@@ -174,23 +175,24 @@ module OpenAI
 # @return [Array<Symbol>]
 end
 
-# The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
-# `gpt-image-1
-# `
+# The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT
+# image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to
+# `dall-e-2` unless a parameter specific to the GPT image models is used.
 module Model
 extend OpenAI::Internal::Type::Union
 
 variant String
 
-# The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1
+# The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT image model (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`). Defaults to `dall-e-2` unless a parameter specific to the GPT image models is used.
 variant enum: -> { OpenAI::ImageModel }
 
 # @!method self.variants
 # @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
 end
 
-# Control the content-moderation level for images generated by
-# be either `low` for less restrictive filtering or `auto` (default
+# Control the content-moderation level for images generated by the GPT image
+# models. Must be either `low` for less restrictive filtering or `auto` (default
+# value).
 module Moderation
 extend OpenAI::Internal::Type::Enum
 

@@ -202,7 +204,7 @@ module OpenAI
 end
 
 # The format in which the generated images are returned. This parameter is only
-# supported for
+# supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`.
 module OutputFormat
 extend OpenAI::Internal::Type::Enum
 

@@ -218,7 +220,7 @@ module OpenAI
 #
 # - `auto` (default value) will automatically select the best quality for the
 # given model.
-# - `high`, `medium` and `low` are supported for
+# - `high`, `medium` and `low` are supported for the GPT image models.
 # - `hd` and `standard` are supported for `dall-e-3`.
 # - `standard` is the only option for `dall-e-2`.
 module Quality

@@ -237,8 +239,8 @@ module OpenAI
 
 # The format in which generated images with `dall-e-2` and `dall-e-3` are
 # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
-# after the image has been generated. This parameter isn't supported for
-#
+# after the image has been generated. This parameter isn't supported for the GPT
+# image models, which always return base64-encoded images.
 module ResponseFormat
 extend OpenAI::Internal::Type::Enum
 

@@ -250,9 +252,9 @@ module OpenAI
 end
 
 # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
-# (landscape), `1024x1536` (portrait), or `auto` (default value) for
-#
-#
+# (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+# models, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of
+# `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
 module Size
 extend OpenAI::Internal::Type::Enum
 
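The net effect of these changes: `Images#generate` now documents the GPT image model family (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`) alongside the parameters that only apply to it. A usage sketch, not part of the diff; it assumes `OPENAI_API_KEY` is set and uses illustrative prompt and size values:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Model is a String/ImageModel union, so newer IDs can be passed as plain strings.
    response = client.images.generate(
      prompt: "A watercolor lighthouse at dawn",
      model: "gpt-image-1-mini",
      background: :transparent,   # GPT-image-only; needs png or webp output
      output_format: :png,
      quality: :high,
      size: :"1024x1536"
    )

    image = response.data&.first
    puts image&.b64_json&.bytesize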
data/lib/openai/models/images_response.rb CHANGED

@@ -151,7 +151,13 @@ module OpenAI
 # @return [Integer]
 required :total_tokens, Integer
 
-# @!
+# @!attribute output_tokens_details
+# The output token details for the image generation.
+#
+# @return [OpenAI::Models::ImagesResponse::Usage::OutputTokensDetails, nil]
+optional :output_tokens_details, -> { OpenAI::ImagesResponse::Usage::OutputTokensDetails }
+
+# @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:, output_tokens_details: nil)
 # For `gpt-image-1` only, the token usage information for the image generation.
 #
 # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.

@@ -161,6 +167,8 @@ module OpenAI
 # @param output_tokens [Integer] The number of output tokens generated by the model.
 #
 # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.
+#
+# @param output_tokens_details [OpenAI::Models::ImagesResponse::Usage::OutputTokensDetails] The output token details for the image generation.
 
 # @see OpenAI::Models::ImagesResponse::Usage#input_tokens_details
 class InputTokensDetails < OpenAI::Internal::Type::BaseModel

@@ -183,6 +191,28 @@ module OpenAI
 #
 # @param text_tokens [Integer] The number of text tokens in the input prompt.
 end
+
+# @see OpenAI::Models::ImagesResponse::Usage#output_tokens_details
+class OutputTokensDetails < OpenAI::Internal::Type::BaseModel
+# @!attribute image_tokens
+# The number of image output tokens generated by the model.
+#
+# @return [Integer]
+required :image_tokens, Integer
+
+# @!attribute text_tokens
+# The number of text output tokens generated by the model.
+#
+# @return [Integer]
+required :text_tokens, Integer
+
+# @!method initialize(image_tokens:, text_tokens:)
+# The output token details for the image generation.
+#
+# @param image_tokens [Integer] The number of image output tokens generated by the model.
+#
+# @param text_tokens [Integer] The number of text output tokens generated by the model.
+end
 end
 end
 end
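When the API reports it, the new breakdown is available on the response usage. A short sketch, assuming `response` comes from an `images.generate` call like the one above:

    usage = response.usage
    if usage && (details = usage.output_tokens_details)
      puts "image output tokens: #{details.image_tokens}"
      puts "text output tokens:  #{details.text_tokens}"
      puts "total tokens:        #{usage.total_tokens}"
    end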
data/lib/openai/models/responses/tool.rb CHANGED

@@ -27,7 +27,7 @@ module OpenAI
 # A tool that runs Python code to help generate a response to a prompt.
 variant :code_interpreter, -> { OpenAI::Responses::Tool::CodeInterpreter }
 
-# A tool that generates images using
+# A tool that generates images using the GPT image models.
 variant :image_generation, -> { OpenAI::Responses::Tool::ImageGeneration }
 
 # A tool that allows the model to execute shell commands in a local environment.

@@ -473,8 +473,8 @@ module OpenAI
 # @!attribute model
 # The image generation model to use. Default: `gpt-image-1`.
 #
-# @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model, nil]
-optional :model,
+# @return [String, Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model, nil]
+optional :model, union: -> { OpenAI::Responses::Tool::ImageGeneration::Model }
 
 # @!attribute moderation
 # Moderation level for the generated image. Default: `auto`.

@@ -520,7 +520,7 @@ module OpenAI
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::Tool::ImageGeneration} for more details.
 #
-# A tool that generates images using
+# A tool that generates images using the GPT image models.
 #
 # @param background [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background] Background type for the generated image. One of `transparent`,
 #

@@ -528,7 +528,7 @@ module OpenAI
 #
 # @param input_image_mask [OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask] Optional mask for inpainting. Contains `image_url`
 #
-# @param model [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model] The image generation model to use. Default: `gpt-image-1`.
+# @param model [String, Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model] The image generation model to use. Default: `gpt-image-1`.
 #
 # @param moderation [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Moderation] Moderation level for the generated image. Default: `auto`.
 #

@@ -606,13 +606,27 @@ module OpenAI
 #
 # @see OpenAI::Models::Responses::Tool::ImageGeneration#model
 module Model
-extend OpenAI::Internal::Type::
+extend OpenAI::Internal::Type::Union
+
+variant String
+
+variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Model::GPT_IMAGE_1 }
+
+variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Model::GPT_IMAGE_1_MINI }
+
+# @!method self.variants
+# @return [Array(String, Symbol)]
+
+define_sorbet_constant!(:Variants) do
+T.type_alias { T.any(String, OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol) }
+end
+
+# @!group
 
 GPT_IMAGE_1 = :"gpt-image-1"
 GPT_IMAGE_1_MINI = :"gpt-image-1-mini"
 
-# @!
-# @return [Array<Symbol>]
+# @!endgroup
 end
 
 # Moderation level for the generated image. Default: `auto`.
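Since `Tool::ImageGeneration#model` is now a union over `String` and the known constants, newer image model IDs can be passed straight through when configuring the Responses API image generation tool. A sketch; the host model name and prompt are illustrative, not taken from the diff:

    response = client.responses.create(
      model: "gpt-4.1",              # illustrative host model
      input: "Draw a paper airplane on a transparent background.",
      tools: [
        {
          type: :image_generation,
          model: "gpt-image-1-mini", # any string is accepted now, not only the enum constants
          background: :transparent
        }
      ]
    )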
data/lib/openai/resources/images.rb CHANGED

@@ -59,7 +59,7 @@ module OpenAI
 #
 # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
 #
-# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and
+# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and the GPT image models
 #
 # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
 #

@@ -118,7 +118,7 @@ module OpenAI
 #
 # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
 #
-# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and
+# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and the GPT image models
 #
 # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
 #

@@ -173,9 +173,9 @@ module OpenAI
 #
 # @param background [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil] Allows to set transparency for the background of the generated image(s).
 #
-# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT i
 #
-# @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by
+# @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by the GPT image model
 #
 # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
 #

@@ -229,9 +229,9 @@ module OpenAI
 #
 # @param background [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil] Allows to set transparency for the background of the generated image(s).
 #
-# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+# @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or a GPT i
 #
-# @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by
+# @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by the GPT image model
 #
 # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
 #
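The `edit` documentation is updated the same way. A sketch of a masked edit; the file names are placeholders:

    edited = client.images.edit(
      image: File.open("product.png", "rb"),
      mask: File.open("product_mask.png", "rb"),  # fully transparent areas mark the region to repaint
      prompt: "Replace the background with a soft studio gradient",
      model: "gpt-image-1"
    )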
data/lib/openai/version.rb CHANGED

data/lib/openai.rb CHANGED

@@ -385,6 +385,7 @@ require_relative "openai/models/fine_tuning/supervised_hyperparameters"
 require_relative "openai/models/fine_tuning/supervised_method"
 require_relative "openai/models/function_definition"
 require_relative "openai/models/function_parameters"
+require_relative "openai/models/graders/grader_input_item"
 require_relative "openai/models/graders/grader_inputs"
 require_relative "openai/models/graders/label_model_grader"
 require_relative "openai/models/graders/multi_grader"
data/rbi/openai/models/graders/grader_input_item.rbi ADDED

@@ -0,0 +1,112 @@
+# typed: strong
+
+module OpenAI
+module Models
+module Graders
+# A single content item: input text, output text, input image, or input audio.
+module GraderInputItem
+extend OpenAI::Internal::Type::Union
+
+Variants =
+T.type_alias do
+T.any(
+String,
+OpenAI::Responses::ResponseInputText,
+OpenAI::Graders::GraderInputItem::OutputText,
+OpenAI::Graders::GraderInputItem::InputImage,
+OpenAI::Responses::ResponseInputAudio
+)
+end
+
+class OutputText < OpenAI::Internal::Type::BaseModel
+OrHash =
+T.type_alias do
+T.any(
+OpenAI::Graders::GraderInputItem::OutputText,
+OpenAI::Internal::AnyHash
+)
+end
+
+# The text output from the model.
+sig { returns(String) }
+attr_accessor :text
+
+# The type of the output text. Always `output_text`.
+sig { returns(Symbol) }
+attr_accessor :type
+
+# A text output from the model.
+sig { params(text: String, type: Symbol).returns(T.attached_class) }
+def self.new(
+# The text output from the model.
+text:,
+# The type of the output text. Always `output_text`.
+type: :output_text
+)
+end
+
+sig { override.returns({ text: String, type: Symbol }) }
+def to_hash
+end
+end
+
+class InputImage < OpenAI::Internal::Type::BaseModel
+OrHash =
+T.type_alias do
+T.any(
+OpenAI::Graders::GraderInputItem::InputImage,
+OpenAI::Internal::AnyHash
+)
+end
+
+# The URL of the image input.
+sig { returns(String) }
+attr_accessor :image_url
+
+# The type of the image input. Always `input_image`.
+sig { returns(Symbol) }
+attr_accessor :type
+
+# The detail level of the image to be sent to the model. One of `high`, `low`, or
+# `auto`. Defaults to `auto`.
+sig { returns(T.nilable(String)) }
+attr_reader :detail
+
+sig { params(detail: String).void }
+attr_writer :detail
+
+# An image input block used within EvalItem content arrays.
+sig do
+params(image_url: String, detail: String, type: Symbol).returns(
+T.attached_class
+)
+end
+def self.new(
+# The URL of the image input.
+image_url:,
+# The detail level of the image to be sent to the model. One of `high`, `low`, or
+# `auto`. Defaults to `auto`.
+detail: nil,
+# The type of the image input. Always `input_image`.
+type: :input_image
+)
+end
+
+sig do
+override.returns(
+{ image_url: String, type: Symbol, detail: String }
+)
+end
+def to_hash
+end
+end
+
+sig do
+override.returns(T::Array[OpenAI::Graders::GraderInputItem::Variants])
+end
+def self.variants
+end
+end
+end
+end
+end
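The RBI above only declares types; the matching runtime classes live in the new `data/lib/openai/models/graders/grader_input_item.rb`. A small construction sketch, with a placeholder URL:

    item = OpenAI::Graders::GraderInputItem::InputImage.new(
      image_url: "https://example.com/chart.png",
      detail: "low"
    )
    item.to_hash  # symbol-keyed hash with type: :input_image filled in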
data/rbi/openai/models/graders/grader_inputs.rbi CHANGED

@@ -6,111 +6,6 @@ module OpenAI
 T.let(Graders::GraderInputs, OpenAI::Internal::Type::Converter)
 
 module Graders
-# A single content item: input text, output text, input image, or input audio.
-module GraderInputItem
-extend OpenAI::Internal::Type::Union
-
-Variants =
-T.type_alias do
-T.any(
-String,
-OpenAI::Responses::ResponseInputText,
-OpenAI::Graders::GraderInputItem::OutputText,
-OpenAI::Graders::GraderInputItem::InputImage,
-OpenAI::Responses::ResponseInputAudio
-)
-end
-
-class OutputText < OpenAI::Internal::Type::BaseModel
-OrHash =
-T.type_alias do
-T.any(
-OpenAI::Graders::GraderInputItem::OutputText,
-OpenAI::Internal::AnyHash
-)
-end
-
-# The text output from the model.
-sig { returns(String) }
-attr_accessor :text
-
-# The type of the output text. Always `output_text`.
-sig { returns(Symbol) }
-attr_accessor :type
-
-# A text output from the model.
-sig { params(text: String, type: Symbol).returns(T.attached_class) }
-def self.new(
-# The text output from the model.
-text:,
-# The type of the output text. Always `output_text`.
-type: :output_text
-)
-end
-
-sig { override.returns({ text: String, type: Symbol }) }
-def to_hash
-end
-end
-
-class InputImage < OpenAI::Internal::Type::BaseModel
-OrHash =
-T.type_alias do
-T.any(
-OpenAI::Graders::GraderInputItem::InputImage,
-OpenAI::Internal::AnyHash
-)
-end
-
-# The URL of the image input.
-sig { returns(String) }
-attr_accessor :image_url
-
-# The type of the image input. Always `input_image`.
-sig { returns(Symbol) }
-attr_accessor :type
-
-# The detail level of the image to be sent to the model. One of `high`, `low`, or
-# `auto`. Defaults to `auto`.
-sig { returns(T.nilable(String)) }
-attr_reader :detail
-
-sig { params(detail: String).void }
-attr_writer :detail
-
-# An image input block used within EvalItem content arrays.
-sig do
-params(image_url: String, detail: String, type: Symbol).returns(
-T.attached_class
-)
-end
-def self.new(
-# The URL of the image input.
-image_url:,
-# The detail level of the image to be sent to the model. One of `high`, `low`, or
-# `auto`. Defaults to `auto`.
-detail: nil,
-# The type of the image input. Always `input_image`.
-type: :input_image
-)
-end
-
-sig do
-override.returns(
-{ image_url: String, type: Symbol, detail: String }
-)
-end
-def to_hash
-end
-end
-
-sig do
-override.returns(T::Array[OpenAI::Graders::GraderInputItem::Variants])
-end
-def self.variants
-end
-end
-
 GraderInputs =
 T.let(
 OpenAI::Internal::Type::ArrayOf[
data/rbi/openai/models/image.rbi CHANGED

@@ -5,9 +5,9 @@ module OpenAI
 class Image < OpenAI::Internal::Type::BaseModel
 OrHash = T.type_alias { T.any(OpenAI::Image, OpenAI::Internal::AnyHash) }
 
-# The base64-encoded JSON of the generated image.
-# and only present if `response_format` is set to `b64_json` for
-# `dall-e-3`.
+# The base64-encoded JSON of the generated image. Returned by default for the GPT
+# image models, and only present if `response_format` is set to `b64_json` for
+# `dall-e-2` and `dall-e-3`.
 sig { returns(T.nilable(String)) }
 attr_reader :b64_json
 

@@ -22,8 +22,8 @@ module OpenAI
 attr_writer :revised_prompt
 
 # When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
-# `response_format` is set to `url` (default value). Unsupported for
-#
+# `response_format` is set to `url` (default value). Unsupported for the GPT image
+# models.
 sig { returns(T.nilable(String)) }
 attr_reader :url
 

@@ -37,15 +37,15 @@ module OpenAI
 )
 end
 def self.new(
-# The base64-encoded JSON of the generated image.
-# and only present if `response_format` is set to `b64_json` for
-# `dall-e-3`.
+# The base64-encoded JSON of the generated image. Returned by default for the GPT
+# image models, and only present if `response_format` is set to `b64_json` for
+# `dall-e-2` and `dall-e-3`.
 b64_json: nil,
 # For `dall-e-3` only, the revised prompt that was used to generate the image.
 revised_prompt: nil,
 # When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
-# `response_format` is set to `url` (default value). Unsupported for
-#
+# `response_format` is set to `url` (default value). Unsupported for the GPT image
+# models.
 url: nil
 )
 end
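In practice this means GPT-image results are read from `b64_json`, while `dall-e-2` and `dall-e-3` still default to URLs. A decoding sketch, with a placeholder output path:

    require "base64"

    image = response.data&.first
    if image&.b64_json
      File.binwrite("generated.png", Base64.decode64(image.b64_json))
    elsif image&.url
      puts "Fetch the image from #{image.url} within 60 minutes"
    end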