openai 0.13.0 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +21 -0
- data/README.md +1 -1
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +34 -10
- data/lib/openai/models/eval_create_params.rb +50 -5
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +50 -5
- data/lib/openai/models/evals/run_cancel_response.rb +48 -5
- data/lib/openai/models/evals/run_create_params.rb +50 -5
- data/lib/openai/models/evals/run_create_response.rb +48 -5
- data/lib/openai/models/evals/run_list_response.rb +48 -5
- data/lib/openai/models/evals/run_retrieve_response.rb +48 -5
- data/lib/openai/models/graders/label_model_grader.rb +48 -5
- data/lib/openai/models/graders/score_model_grader.rb +48 -5
- data/lib/openai/models/image_edit_completed_event.rb +198 -0
- data/lib/openai/models/image_edit_params.rb +36 -1
- data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
- data/lib/openai/models/image_edit_stream_event.rb +21 -0
- data/lib/openai/models/image_gen_completed_event.rb +198 -0
- data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
- data/lib/openai/models/image_gen_stream_event.rb +21 -0
- data/lib/openai/models/image_generate_params.rb +13 -1
- data/lib/openai/models/images_response.rb +3 -0
- data/lib/openai/models/responses/response_output_refusal.rb +2 -2
- data/lib/openai/models/responses/tool.rb +30 -1
- data/lib/openai/models.rb +12 -0
- data/lib/openai/resources/images.rb +140 -2
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/rbi/openai/helpers/structured_output/json_schema_converter.rbi +4 -0
- data/rbi/openai/models/eval_create_params.rbi +76 -7
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +76 -7
- data/rbi/openai/models/evals/run_cancel_response.rbi +70 -5
- data/rbi/openai/models/evals/run_create_params.rbi +76 -7
- data/rbi/openai/models/evals/run_create_response.rbi +70 -5
- data/rbi/openai/models/evals/run_list_response.rbi +70 -5
- data/rbi/openai/models/evals/run_retrieve_response.rbi +70 -5
- data/rbi/openai/models/graders/label_model_grader.rbi +74 -7
- data/rbi/openai/models/graders/score_model_grader.rbi +74 -7
- data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
- data/rbi/openai/models/image_edit_params.rbi +51 -0
- data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
- data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
- data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
- data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
- data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
- data/rbi/openai/models/image_generate_params.rbi +12 -0
- data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
- data/rbi/openai/models/responses/tool.rbi +61 -0
- data/rbi/openai/models.rbi +12 -0
- data/rbi/openai/resources/images.rbi +225 -0
- data/sig/openai/models/eval_create_params.rbs +29 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +29 -0
- data/sig/openai/models/evals/run_cancel_response.rbs +33 -0
- data/sig/openai/models/evals/run_create_params.rbs +33 -0
- data/sig/openai/models/evals/run_create_response.rbs +33 -0
- data/sig/openai/models/evals/run_list_response.rbs +33 -0
- data/sig/openai/models/evals/run_retrieve_response.rbs +33 -0
- data/sig/openai/models/graders/label_model_grader.rbs +29 -0
- data/sig/openai/models/graders/score_model_grader.rbs +29 -0
- data/sig/openai/models/image_edit_completed_event.rbs +150 -0
- data/sig/openai/models/image_edit_params.rbs +21 -0
- data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_edit_stream_event.rbs +12 -0
- data/sig/openai/models/image_gen_completed_event.rbs +150 -0
- data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_gen_stream_event.rbs +12 -0
- data/sig/openai/models/image_generate_params.rbs +5 -0
- data/sig/openai/models/responses/tool.rbs +16 -0
- data/sig/openai/models.rbs +12 -0
- data/sig/openai/resources/images.rbs +38 -0
- metadata +20 -2
data/rbi/openai/resources/images.rbi

@@ -42,6 +42,8 @@ module OpenAI
)
end

+ # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ #
# Creates an edited or extended image given one or more source images and a
# prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
sig do

@@ -49,17 +51,21 @@ module OpenAI
image: OpenAI::ImageEditParams::Image::Variants,
prompt: String,
background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+ input_fidelity:
+ T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
mask: OpenAI::Internal::FileInput,
model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
n: T.nilable(Integer),
output_compression: T.nilable(Integer),
output_format:
T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
response_format:
T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol),
user: String,
+ stream: T.noreturn,
request_options: OpenAI::RequestOptions::OrHash
).returns(OpenAI::ImagesResponse)
end

@@ -83,6 +89,10 @@ module OpenAI
# If `transparent`, the output format needs to support transparency, so it should
# be set to either `png` (default value) or `webp`.
background: nil,
+ # Control how much effort the model will exert to match the style and features,
+ # especially facial features, of input images. This parameter is only supported
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ input_fidelity: nil,
# An additional image whose fully transparent areas (e.g. where alpha is zero)
# indicate where `image` should be edited. If there are multiple images provided,
# the mask will be applied on the first image. Must be a valid PNG file, less than

@@ -102,6 +112,10 @@ module OpenAI
# supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
# default value is `png`.
output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ partial_images: nil,
# The quality of the image that will be generated. `high`, `medium` and `low` are
# only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
# Defaults to `auto`.

@@ -119,10 +133,115 @@ module OpenAI
# and detect abuse.
# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
user: nil,
+ # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for
+ # streaming and non-streaming use cases, respectively.
+ stream: false,
request_options: {}
)
end

+ # See {OpenAI::Resources::Images#edit} for non-streaming counterpart.
+ #
+ # Creates an edited or extended image given one or more source images and a
+ # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
+ sig do
+ params(
+ image: OpenAI::ImageEditParams::Image::Variants,
+ prompt: String,
+ background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+ input_fidelity:
+ T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
+ mask: OpenAI::Internal::FileInput,
+ model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
+ n: T.nilable(Integer),
+ output_compression: T.nilable(Integer),
+ output_format:
+ T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
+ quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
+ response_format:
+ T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
+ size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol),
+ user: String,
+ stream: T.noreturn,
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(
+ OpenAI::Internal::Stream[OpenAI::ImageEditStreamEvent::Variants]
+ )
+ end
+ def edit_stream_raw(
+ # The image(s) to edit. Must be a supported image file or an array of images.
+ #
+ # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ # 50MB. You can provide up to 16 images.
+ #
+ # For `dall-e-2`, you can only provide one image, and it should be a square `png`
+ # file less than 4MB.
+ image:,
+ # A text description of the desired image(s). The maximum length is 1000
+ # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ prompt:,
+ # Allows to set transparency for the background of the generated image(s). This
+ # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ # `opaque` or `auto` (default value). When `auto` is used, the model will
+ # automatically determine the best background for the image.
+ #
+ # If `transparent`, the output format needs to support transparency, so it should
+ # be set to either `png` (default value) or `webp`.
+ background: nil,
+ # Control how much effort the model will exert to match the style and features,
+ # especially facial features, of input images. This parameter is only supported
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ input_fidelity: nil,
+ # An additional image whose fully transparent areas (e.g. where alpha is zero)
+ # indicate where `image` should be edited. If there are multiple images provided,
+ # the mask will be applied on the first image. Must be a valid PNG file, less than
+ # 4MB, and have the same dimensions as `image`.
+ mask: nil,
+ # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ # is used.
+ model: nil,
+ # The number of images to generate. Must be between 1 and 10.
+ n: nil,
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
+ output_compression: nil,
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # default value is `png`.
+ output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ partial_images: nil,
+ # The quality of the image that will be generated. `high`, `medium` and `low` are
+ # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ # Defaults to `auto`.
+ quality: nil,
+ # The format in which the generated images are returned. Must be one of `url` or
+ # `b64_json`. URLs are only valid for 60 minutes after the image has been
+ # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ # will always return base64-encoded images.
+ response_format: nil,
+ # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ size: nil,
+ # A unique identifier representing your end-user, which can help OpenAI to monitor
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ user: nil,
+ # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for
+ # streaming and non-streaming use cases, respectively.
+ stream: true,
+ request_options: {}
+ )
+ end
+
+ # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ #
# Creates an image given a prompt.
# [Learn more](https://platform.openai.com/docs/guides/images).
sig do
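The hunks above add a streaming variant of image editing (`#edit_stream_raw`), plus the new `input_fidelity` and `partial_images` parameters. A minimal usage sketch, not taken from this diff: it assumes an `OPENAI_API_KEY` environment variable, a local `person.png`, and event classes matching the new `image_edit_*_event.rb` model files listed above.

    require "openai"
    require "pathname"

    client = OpenAI::Client.new # api_key defaults to ENV["OPENAI_API_KEY"]

    # Stream an edit; input_fidelity and partial_images are new in 0.14.0.
    stream = client.images.edit_stream_raw(
      image: Pathname("person.png"),
      prompt: "Add a red scarf",
      model: "gpt-image-1",
      input_fidelity: :high,
      partial_images: 2
    )

    stream.each do |event|
      # Events should be instances of the new ImageEditPartialImageEvent /
      # ImageEditCompletedEvent models.
      puts event.class
    end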
@@ -137,12 +256,14 @@ module OpenAI
output_compression: T.nilable(Integer),
output_format:
T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol),
response_format:
T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol),
size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol),
style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol),
user: String,
+ stream: T.noreturn,
request_options: OpenAI::RequestOptions::OrHash
).returns(OpenAI::ImagesResponse)
end

@@ -176,6 +297,107 @@ module OpenAI
# The format in which the generated images are returned. This parameter is only
# supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ partial_images: nil,
+ # The quality of the image that will be generated.
+ #
+ # - `auto` (default value) will automatically select the best quality for the
+ #   given model.
+ # - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ # - `hd` and `standard` are supported for `dall-e-3`.
+ # - `standard` is the only option for `dall-e-2`.
+ quality: nil,
+ # The format in which generated images with `dall-e-2` and `dall-e-3` are
+ # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+ # after the image has been generated. This parameter isn't supported for
+ # `gpt-image-1` which will always return base64-encoded images.
+ response_format: nil,
+ # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+ # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+ size: nil,
+ # The style of the generated images. This parameter is only supported for
+ # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ # towards generating hyper-real and dramatic images. Natural causes the model to
+ # produce more natural, less hyper-real looking images.
+ style: nil,
+ # A unique identifier representing your end-user, which can help OpenAI to monitor
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ user: nil,
+ # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate`
+ # for streaming and non-streaming use cases, respectively.
+ stream: false,
+ request_options: {}
+ )
+ end
+
+ # See {OpenAI::Resources::Images#generate} for non-streaming counterpart.
+ #
+ # Creates an image given a prompt.
+ # [Learn more](https://platform.openai.com/docs/guides/images).
+ sig do
+ params(
+ prompt: String,
+ background:
+ T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol),
+ model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
+ moderation:
+ T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol),
+ n: T.nilable(Integer),
+ output_compression: T.nilable(Integer),
+ output_format:
+ T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
+ quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol),
+ response_format:
+ T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol),
+ size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol),
+ style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol),
+ user: String,
+ stream: T.noreturn,
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(
+ OpenAI::Internal::Stream[OpenAI::ImageGenStreamEvent::Variants]
+ )
+ end
+ def generate_stream_raw(
+ # A text description of the desired image(s). The maximum length is 32000
+ # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ # for `dall-e-3`.
+ prompt:,
+ # Allows to set transparency for the background of the generated image(s). This
+ # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ # `opaque` or `auto` (default value). When `auto` is used, the model will
+ # automatically determine the best background for the image.
+ #
+ # If `transparent`, the output format needs to support transparency, so it should
+ # be set to either `png` (default value) or `webp`.
+ background: nil,
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ # `gpt-image-1` is used.
+ model: nil,
+ # Control the content-moderation level for images generated by `gpt-image-1`. Must
+ # be either `low` for less restrictive filtering or `auto` (default value).
+ moderation: nil,
+ # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ # `n=1` is supported.
+ n: nil,
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
+ output_compression: nil,
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ partial_images: nil,
# The quality of the image that will be generated.
#
# - `auto` (default value) will automatically select the best quality for the

@@ -203,6 +425,9 @@ module OpenAI
# and detect abuse.
# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
user: nil,
+ # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate`
+ # for streaming and non-streaming use cases, respectively.
+ stream: true,
request_options: {}
)
end
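`#generate_stream_raw` is the streaming counterpart for generation. Another rough sketch under the same assumptions, reusing the `client` from the edit example above; the event class names follow the new `image_gen_*_event.rb` model files in the file list.

    # Stream image generation; up to `partial_images` partial frames arrive
    # before the final completed event.
    stream = client.images.generate_stream_raw(
      prompt: "A watercolor fox in the snow",
      model: "gpt-image-1",
      partial_images: 3
    )

    stream.each do |event|
      puts event.class # ImageGenPartialImageEvent or ImageGenCompletedEvent
    end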
data/sig/openai/models/eval_create_params.rbs

@@ -218,6 +218,8 @@ module OpenAI
String
| OpenAI::Responses::ResponseInputText
| OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText
+ | OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage
+ | ::Array[top]

module Content
extend OpenAI::Internal::Type::Union

@@ -234,7 +236,34 @@ module OpenAI
def to_hash: -> { text: String, type: :output_text }
end

+ type input_image =
+ { image_url: String, type: :input_image, detail: String }
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_url: String
+
+ attr_accessor type: :input_image
+
+ attr_reader detail: String?
+
+ def detail=: (String) -> String
+
+ def initialize: (
+ image_url: String,
+ ?detail: String,
+ ?type: :input_image
+ ) -> void
+
+ def to_hash: -> {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+ end
+
def self?.variants: -> ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::content]
+
+ AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
end

type role = :user | :assistant | :system | :developer
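An `InputImage` variant with this same shape is added to each of the eval and grader content unions in the remaining .rbs hunks below. A small sketch of what the new variant accepts, based directly on the signature above; the URL is a placeholder.

    # Construct the new input_image content part added in 0.14.0.
    part = OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage.new(
      image_url: "https://example.com/photo.png", # placeholder URL
      detail: "low"                               # optional per the signature
    )
    part.to_hash
    # => { image_url: "https://example.com/photo.png", type: :input_image, detail: "low" }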
data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs

@@ -234,6 +234,8 @@ module OpenAI
String
| OpenAI::Responses::ResponseInputText
| OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::OutputText
+ | OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::InputImage
+ | ::Array[top]

module Content
extend OpenAI::Internal::Type::Union

@@ -250,7 +252,34 @@ module OpenAI
def to_hash: -> { text: String, type: :output_text }
end

+ type input_image =
+ { image_url: String, type: :input_image, detail: String }
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_url: String
+
+ attr_accessor type: :input_image
+
+ attr_reader detail: String?
+
+ def detail=: (String) -> String
+
+ def initialize: (
+ image_url: String,
+ ?detail: String,
+ ?type: :input_image
+ ) -> void
+
+ def to_hash: -> {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+ end
+
def self?.variants: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::content]
+
+ AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
end

type role = :user | :assistant | :system | :developer
data/sig/openai/models/evals/run_cancel_response.rbs

@@ -350,6 +350,8 @@ module OpenAI
String
| OpenAI::Responses::ResponseInputText
| OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText
+ | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage
+ | ::Array[top]

module Content
extend OpenAI::Internal::Type::Union

@@ -369,7 +371,38 @@ module OpenAI
def to_hash: -> { text: String, type: :output_text }
end

+ type input_image =
+ {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_url: String
+
+ attr_accessor type: :input_image
+
+ attr_reader detail: String?
+
+ def detail=: (String) -> String
+
+ def initialize: (
+ image_url: String,
+ ?detail: String,
+ ?type: :input_image
+ ) -> void
+
+ def to_hash: -> {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+ end
+
def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content]
+
+ AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
end

type role = :user | :assistant | :system | :developer
data/sig/openai/models/evals/run_create_params.rbs

@@ -313,6 +313,8 @@ module OpenAI
String
| OpenAI::Responses::ResponseInputText
| OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText
+ | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage
+ | ::Array[top]

module Content
extend OpenAI::Internal::Type::Union

@@ -332,7 +334,38 @@ module OpenAI
def to_hash: -> { text: String, type: :output_text }
end

+ type input_image =
+ {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_url: String
+
+ attr_accessor type: :input_image
+
+ attr_reader detail: String?
+
+ def detail=: (String) -> String
+
+ def initialize: (
+ image_url: String,
+ ?detail: String,
+ ?type: :input_image
+ ) -> void
+
+ def to_hash: -> {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+ end
+
def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::content]
+
+ AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
end

type role = :user | :assistant | :system | :developer
data/sig/openai/models/evals/run_create_response.rbs

@@ -350,6 +350,8 @@ module OpenAI
String
| OpenAI::Responses::ResponseInputText
| OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText
+ | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage
+ | ::Array[top]

module Content
extend OpenAI::Internal::Type::Union

@@ -369,7 +371,38 @@ module OpenAI
def to_hash: -> { text: String, type: :output_text }
end

+ type input_image =
+ {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_url: String
+
+ attr_accessor type: :input_image
+
+ attr_reader detail: String?
+
+ def detail=: (String) -> String
+
+ def initialize: (
+ image_url: String,
+ ?detail: String,
+ ?type: :input_image
+ ) -> void
+
+ def to_hash: -> {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+ end
+
def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content]
+
+ AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
end

type role = :user | :assistant | :system | :developer
data/sig/openai/models/evals/run_list_response.rbs

@@ -350,6 +350,8 @@ module OpenAI
String
| OpenAI::Responses::ResponseInputText
| OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText
+ | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage
+ | ::Array[top]

module Content
extend OpenAI::Internal::Type::Union

@@ -369,7 +371,38 @@ module OpenAI
def to_hash: -> { text: String, type: :output_text }
end

+ type input_image =
+ {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_url: String
+
+ attr_accessor type: :input_image
+
+ attr_reader detail: String?
+
+ def detail=: (String) -> String
+
+ def initialize: (
+ image_url: String,
+ ?detail: String,
+ ?type: :input_image
+ ) -> void
+
+ def to_hash: -> {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+ end
+
def self?.variants: -> ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content]
+
+ AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
end

type role = :user | :assistant | :system | :developer
data/sig/openai/models/evals/run_retrieve_response.rbs

@@ -350,6 +350,8 @@ module OpenAI
String
| OpenAI::Responses::ResponseInputText
| OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText
+ | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage
+ | ::Array[top]

module Content
extend OpenAI::Internal::Type::Union

@@ -369,7 +371,38 @@ module OpenAI
def to_hash: -> { text: String, type: :output_text }
end

+ type input_image =
+ {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_url: String
+
+ attr_accessor type: :input_image
+
+ attr_reader detail: String?
+
+ def detail=: (String) -> String
+
+ def initialize: (
+ image_url: String,
+ ?detail: String,
+ ?type: :input_image
+ ) -> void
+
+ def to_hash: -> {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+ end
+
def self?.variants: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content]
+
+ AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
end

type role = :user | :assistant | :system | :developer
data/sig/openai/models/graders/label_model_grader.rbs

@@ -78,6 +78,8 @@ module OpenAI
String
| OpenAI::Responses::ResponseInputText
| OpenAI::Graders::LabelModelGrader::Input::Content::OutputText
+ | OpenAI::Graders::LabelModelGrader::Input::Content::InputImage
+ | ::Array[top]

module Content
extend OpenAI::Internal::Type::Union

@@ -94,7 +96,34 @@ module OpenAI
def to_hash: -> { text: String, type: :output_text }
end

+ type input_image =
+ { image_url: String, type: :input_image, detail: String }
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_url: String
+
+ attr_accessor type: :input_image
+
+ attr_reader detail: String?
+
+ def detail=: (String) -> String
+
+ def initialize: (
+ image_url: String,
+ ?detail: String,
+ ?type: :input_image
+ ) -> void
+
+ def to_hash: -> {
+ image_url: String,
+ type: :input_image,
+ detail: String
+ }
+ end
+
def self?.variants: -> ::Array[OpenAI::Models::Graders::LabelModelGrader::Input::content]
+
+ AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
end

type role = :user | :assistant | :system | :developer