openai 0.40.0 → 0.41.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +15 -0
- data/README.md +1 -1
- data/lib/openai/internal/transport/pooled_net_requester.rb +12 -10
- data/lib/openai/models/graders/grader_input_item.rb +87 -0
- data/lib/openai/models/graders/grader_inputs.rb +0 -80
- data/lib/openai/models/image.rb +6 -6
- data/lib/openai/models/image_edit_completed_event.rb +5 -3
- data/lib/openai/models/image_edit_params.rb +34 -32
- data/lib/openai/models/image_gen_completed_event.rb +5 -3
- data/lib/openai/models/image_generate_params.rb +38 -36
- data/lib/openai/models/image_model.rb +1 -0
- data/lib/openai/models/images_response.rb +31 -1
- data/lib/openai/models/responses/tool.rb +22 -8
- data/lib/openai/resources/images.rb +6 -6
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +1 -0
- data/rbi/openai/models/graders/grader_input_item.rbi +112 -0
- data/rbi/openai/models/graders/grader_inputs.rbi +0 -105
- data/rbi/openai/models/image.rbi +10 -10
- data/rbi/openai/models/image_edit_completed_event.rbi +6 -3
- data/rbi/openai/models/image_edit_params.rbi +49 -46
- data/rbi/openai/models/image_gen_completed_event.rbi +6 -3
- data/rbi/openai/models/image_generate_params.rbi +54 -51
- data/rbi/openai/models/image_model.rbi +1 -0
- data/rbi/openai/models/images_response.rbi +61 -3
- data/rbi/openai/models/responses/tool.rbi +38 -16
- data/rbi/openai/resources/images.rbi +72 -68
- data/sig/openai/models/graders/grader_input_item.rbs +55 -0
- data/sig/openai/models/graders/grader_inputs.rbs +0 -50
- data/sig/openai/models/image_model.rbs +6 -1
- data/sig/openai/models/images_response.rbs +25 -3
- data/sig/openai/models/responses/tool.rbs +4 -4
- metadata +5 -2
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 827bd4115ede761270037faa16dc64fe6082a23af8c31f9e0c52cb7d384b7b47
+  data.tar.gz: f9f51366eb2e4e2218677f06af68c9e075aa17eced0fbb51696672df9dcb504e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e74d83fab6b6609be2d2898f275076823f6634f2e447c122b3ef8ab70c8f6d65f743e3f99e0e113ab118b2a9e6b6c3e29b861ba614220e22afd5c204f8a8caa3
+  data.tar.gz: 0ba69a2dfdd8e87b6933b8349d8368ecdc3519129315b8f6cd7f906e16b85bad1a02ce78fe3ee4834399657e8f534283533c411103fe0ebf8d30d34111c4b303

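These checksums cover the `metadata.gz` and `data.tar.gz` archives packed inside the published `.gem` file. A minimal sketch of verifying one of them locally, assuming you have already downloaded `openai-0.41.0.gem` into the current directory (the local filename and use of RubyGems' `TarReader` are illustrative, not part of the package):

```ruby
require "digest"
require "rubygems/package"

GEM_PATH = "openai-0.41.0.gem" # assumed local download
EXPECTED = "f9f51366eb2e4e2218677f06af68c9e075aa17eced0fbb51696672df9dcb504e" # data.tar.gz SHA256 from checksums.yaml

actual = nil
File.open(GEM_PATH, "rb") do |io|
  # A .gem file is a plain tar archive containing metadata.gz, data.tar.gz, and checksums.yaml.gz.
  Gem::Package::TarReader.new(io).each do |entry|
    actual = Digest::SHA256.hexdigest(entry.read) if entry.full_name == "data.tar.gz"
  end
end

puts(actual == EXPECTED ? "data.tar.gz checksum matches" : "checksum mismatch: #{actual.inspect}")
```
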
data/CHANGELOG.md
CHANGED

@@ -1,5 +1,20 @@
 # Changelog
 
+## 0.41.0 (2025-12-16)
+
+Full Changelog: [v0.40.0...v0.41.0](https://github.com/openai/openai-ruby/compare/v0.40.0...v0.41.0)
+
+### Features
+
+* **api:** gpt-image-1.5 ([8a7fdb3](https://github.com/openai/openai-ruby/commit/8a7fdb3c5228f765a0e18f9bb1f8c9c24dcf037b))
+* **api:** manual updates for java ([3aeb38f](https://github.com/openai/openai-ruby/commit/3aeb38f958498574d2009da300d2d964206955e5))
+
+
+### Bug Fixes
+
+* **api:** manual updates for ruby build ([c142813](https://github.com/openai/openai-ruby/commit/c142813f8948131abe980ef919aade6ed10b7456))
+* calling `break` out of streams should be instantaneous ([7fc53db](https://github.com/openai/openai-ruby/commit/7fc53db685dfa10c64472a337e7093f20a2ea597))
+
 ## 0.40.0 (2025-12-13)
 
 Full Changelog: [v0.39.0...v0.40.0](https://github.com/openai/openai-ruby/compare/v0.39.0...v0.40.0)

data/lib/openai/internal/transport/pooled_net_requester.rb
CHANGED

@@ -153,17 +153,19 @@ module OpenAI
             end
 
             self.class.calibrate_socket_timeout(conn, deadline)
-
-
-
-
-
-
-
-
-
+            ::Kernel.catch(:jump) do
+              conn.request(req) do |rsp|
+                y << [req, rsp]
+                ::Kernel.throw(:jump) if finished
+
+                rsp.read_body do |bytes|
+                  y << bytes.force_encoding(Encoding::BINARY)
+                  ::Kernel.throw(:jump) if finished
+
+                  self.class.calibrate_socket_timeout(conn, deadline)
+                end
+                eof = true
               end
-              eof = true
             end
           end
        ensure

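This is the change behind the "calling `break` out of streams should be instantaneous" fix: once the consumer is `finished`, `::Kernel.throw(:jump)` unwinds out of both the `read_body` and `request` blocks instead of letting `Net::HTTP` drain the rest of the body. A standalone sketch of the same `catch`/`throw` pattern, independent of the gem's internals (the URL and method name below are illustrative):

```ruby
require "net/http"
require "uri"

# Stream a response body, but stop pulling chunks as soon as we have enough.
# `Kernel.catch`/`throw` unwinds out of both the `read_body` and `request`
# blocks in a single jump, so no further chunks are read from the socket.
def stream_first_chunks(url, max_chunks:)
  uri = URI(url)
  chunks = []

  Net::HTTP.start(uri.host, uri.port, use_ssl: uri.scheme == "https") do |http|
    Kernel.catch(:done) do
      http.request(Net::HTTP::Get.new(uri)) do |rsp|
        rsp.read_body do |bytes|
          chunks << bytes
          # Bail out immediately instead of draining the remaining body.
          Kernel.throw(:done) if chunks.size >= max_chunks
        end
      end
    end
  end

  chunks
end

first = stream_first_chunks("https://example.com", max_chunks: 1).first
puts first&.byteslice(0, 80)
```
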
data/lib/openai/models/graders/grader_input_item.rb
ADDED

@@ -0,0 +1,87 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Graders
+      # A single content item: input text, output text, input image, or input audio.
+      module GraderInputItem
+        extend OpenAI::Internal::Type::Union
+
+        # A text input to the model.
+        variant String
+
+        # A text input to the model.
+        variant -> { OpenAI::Responses::ResponseInputText }
+
+        # A text output from the model.
+        variant -> { OpenAI::Graders::GraderInputItem::OutputText }
+
+        # An image input block used within EvalItem content arrays.
+        variant -> { OpenAI::Graders::GraderInputItem::InputImage }
+
+        # An audio input to the model.
+        variant -> { OpenAI::Responses::ResponseInputAudio }
+
+        class OutputText < OpenAI::Internal::Type::BaseModel
+          # @!attribute text
+          #   The text output from the model.
+          #
+          #   @return [String]
+          required :text, String
+
+          # @!attribute type
+          #   The type of the output text. Always `output_text`.
+          #
+          #   @return [Symbol, :output_text]
+          required :type, const: :output_text
+
+          # @!method initialize(text:, type: :output_text)
+          #   Some parameter documentations has been truncated, see
+          #   {OpenAI::Models::Graders::GraderInputItem::OutputText} for more details.
+          #
+          #   A text output from the model.
+          #
+          #   @param text [String] The text output from the model.
+          #
+          #   @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
+        end
+
+        class InputImage < OpenAI::Internal::Type::BaseModel
+          # @!attribute image_url
+          #   The URL of the image input.
+          #
+          #   @return [String]
+          required :image_url, String
+
+          # @!attribute type
+          #   The type of the image input. Always `input_image`.
+          #
+          #   @return [Symbol, :input_image]
+          required :type, const: :input_image
+
+          # @!attribute detail
+          #   The detail level of the image to be sent to the model. One of `high`, `low`, or
+          #   `auto`. Defaults to `auto`.
+          #
+          #   @return [String, nil]
+          optional :detail, String
+
+          # @!method initialize(image_url:, detail: nil, type: :input_image)
+          #   Some parameter documentations has been truncated, see
+          #   {OpenAI::Models::Graders::GraderInputItem::InputImage} for more details.
+          #
+          #   An image input block used within EvalItem content arrays.
+          #
+          #   @param image_url [String] The URL of the image input.
+          #
+          #   @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
+          #
+          #   @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
+        end
+
+        # @!method self.variants
+        #   @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio)]
+      end
+    end
+  end
+end

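`GraderInputItem` is the union that grader input content arrays are typed against (see `GraderInputs` in the next file). A minimal, hypothetical construction of a mixed content array, assuming the keyword arguments documented in the `@!method initialize` blocks above; the texts and URL are made up for illustration:

```ruby
require "openai"

# Each element is one GraderInputItem variant: a bare String, an output_text
# block, or an input_image block.
grader_content = [
  "Grade the assistant's answer for factual accuracy.",
  OpenAI::Graders::GraderInputItem::OutputText.new(text: "The capital of France is Paris."),
  OpenAI::Graders::GraderInputItem::InputImage.new(
    image_url: "https://example.com/reference-chart.png",
    detail: "low"
  )
]

grader_content.each { |item| puts item.inspect }
```
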
data/lib/openai/models/graders/grader_inputs.rb
CHANGED

@@ -3,86 +3,6 @@
 module OpenAI
   module Models
     module Graders
-      # A single content item: input text, output text, input image, or input audio.
-      module GraderInputItem
-        extend OpenAI::Internal::Type::Union
-
-        # A text input to the model.
-        variant String
-
-        # A text input to the model.
-        variant -> { OpenAI::Responses::ResponseInputText }
-
-        # A text output from the model.
-        variant -> { OpenAI::Graders::GraderInputItem::OutputText }
-
-        # An image input block used within EvalItem content arrays.
-        variant -> { OpenAI::Graders::GraderInputItem::InputImage }
-
-        # An audio input to the model.
-        variant -> { OpenAI::Responses::ResponseInputAudio }
-
-        class OutputText < OpenAI::Internal::Type::BaseModel
-          # @!attribute text
-          #   The text output from the model.
-          #
-          #   @return [String]
-          required :text, String
-
-          # @!attribute type
-          #   The type of the output text. Always `output_text`.
-          #
-          #   @return [Symbol, :output_text]
-          required :type, const: :output_text
-
-          # @!method initialize(text:, type: :output_text)
-          #   Some parameter documentations has been truncated, see
-          #   {OpenAI::Models::Graders::GraderInputItem::OutputText} for more details.
-          #
-          #   A text output from the model.
-          #
-          #   @param text [String] The text output from the model.
-          #
-          #   @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
-        end
-
-        class InputImage < OpenAI::Internal::Type::BaseModel
-          # @!attribute image_url
-          #   The URL of the image input.
-          #
-          #   @return [String]
-          required :image_url, String
-
-          # @!attribute type
-          #   The type of the image input. Always `input_image`.
-          #
-          #   @return [Symbol, :input_image]
-          required :type, const: :input_image
-
-          # @!attribute detail
-          #   The detail level of the image to be sent to the model. One of `high`, `low`, or
-          #   `auto`. Defaults to `auto`.
-          #
-          #   @return [String, nil]
-          optional :detail, String
-
-          # @!method initialize(image_url:, detail: nil, type: :input_image)
-          #   Some parameter documentations has been truncated, see
-          #   {OpenAI::Models::Graders::GraderInputItem::InputImage} for more details.
-          #
-          #   An image input block used within EvalItem content arrays.
-          #
-          #   @param image_url [String] The URL of the image input.
-          #
-          #   @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
-          #
-          #   @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
-        end
-
-        # @!method self.variants
-        #   @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::GraderInputItem::OutputText, OpenAI::Models::Graders::GraderInputItem::InputImage, OpenAI::Models::Responses::ResponseInputAudio)]
-      end
-
       # @type [OpenAI::Internal::Type::Converter]
       GraderInputs = OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Graders::GraderInputItem }]
     end

data/lib/openai/models/image.rb
CHANGED

@@ -4,9 +4,9 @@ module OpenAI
   module Models
     class Image < OpenAI::Internal::Type::BaseModel
       # @!attribute b64_json
-      #   The base64-encoded JSON of the generated image.
-      #   and only present if `response_format` is set to `b64_json` for
-      #   `dall-e-3`.
+      #   The base64-encoded JSON of the generated image. Returned by default for the GPT
+      #   image models, and only present if `response_format` is set to `b64_json` for
+      #   `dall-e-2` and `dall-e-3`.
       #
       #   @return [String, nil]
       optional :b64_json, String
@@ -19,8 +19,8 @@ module OpenAI
 
       # @!attribute url
       #   When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
-      #   `response_format` is set to `url` (default value). Unsupported for
-      #
+      #   `response_format` is set to `url` (default value). Unsupported for the GPT image
+      #   models.
       #
       #   @return [String, nil]
       optional :url, String
@@ -31,7 +31,7 @@ module OpenAI
       #
       #   Represents the content or the URL of an image generated by the OpenAI API.
       #
-      #   @param b64_json [String] The base64-encoded JSON of the generated image.
+      #   @param b64_json [String] The base64-encoded JSON of the generated image. Returned by default for the GPT
       #
       #   @param revised_prompt [String] For `dall-e-3` only, the revised prompt that was used to generate the image.
       #

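The doc changes above reflect that the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, `gpt-image-1.5`) always return base64-encoded images, while `dall-e-2`/`dall-e-3` can return URLs. A minimal sketch of handling either field on a generated `Image`; the model name and output path are illustrative, and `OPENAI_API_KEY` is assumed to be set in the environment:

```ruby
require "openai"
require "base64"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

response = client.images.generate(prompt: "a watercolor fox", model: "gpt-image-1.5")
image = response.data&.first

if image&.b64_json
  # GPT image models: a base64 payload is returned by default.
  File.binwrite("fox.png", Base64.decode64(image.b64_json))
elsif image&.url
  # dall-e-2 / dall-e-3 with response_format :url return a short-lived URL instead.
  puts "Download within 60 minutes: #{image.url}"
end
```
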
data/lib/openai/models/image_edit_completed_event.rb
CHANGED

@@ -46,7 +46,8 @@ module OpenAI
       required :type, const: :"image_edit.completed"
 
       # @!attribute usage
-      #   For
+      #   For the GPT image models only, the token usage information for the image
+      #   generation.
       #
       #   @return [OpenAI::Models::ImageEditCompletedEvent::Usage]
       required :usage, -> { OpenAI::ImageEditCompletedEvent::Usage }
@@ -69,7 +70,7 @@ module OpenAI
       #
       #   @param size [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] The size of the edited image.
       #
-      #   @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For
+      #   @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For the GPT image models only, the token usage information for the image generat
       #
       #   @param type [Symbol, :"image_edit.completed"] The type of the event. Always `image_edit.completed`.
 
@@ -161,7 +162,8 @@ module OpenAI
         #   Some parameter documentations has been truncated, see
         #   {OpenAI::Models::ImageEditCompletedEvent::Usage} for more details.
         #
-        #   For
+        #   For the GPT image models only, the token usage information for the image
+        #   generation.
         #
         #   @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
         #

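The terminal `image_edit.completed` event now documents GPT-image token usage. A sketch of reading it off an event you have already received, however the stream is consumed; only `input_tokens` appears in the diff above, so that is the only field read here:

```ruby
# `event` is assumed to be an already-received stream event object.
case event
when OpenAI::Models::ImageEditCompletedEvent
  usage = event.usage
  puts "image_edit.completed: #{usage.input_tokens} input tokens (images and text)"
end
```
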
data/lib/openai/models/image_edit_params.rb
CHANGED

@@ -12,7 +12,8 @@ module OpenAI
       # @!attribute image
       #   The image(s) to edit. Must be a supported image file or an array of images.
       #
-      #   For `gpt-image-1`,
+      #   For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
+      #   `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
       #   50MB. You can provide up to 16 images.
       #
       #   For `dall-e-2`, you can only provide one image, and it should be a square `png`
@@ -23,16 +24,16 @@ module OpenAI
 
       # @!attribute prompt
       #   A text description of the desired image(s). The maximum length is 1000
-      #   characters for `dall-e-2`, and 32000 characters for
+      #   characters for `dall-e-2`, and 32000 characters for the GPT image models.
       #
       #   @return [String]
       required :prompt, String
 
       # @!attribute background
       #   Allows to set transparency for the background of the generated image(s). This
-      #   parameter is only supported for
-      #   `opaque` or `auto` (default value). When `auto` is used, the
-      #   automatically determine the best background for the image.
+      #   parameter is only supported for the GPT image models. Must be one of
+      #   `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+      #   model will automatically determine the best background for the image.
       #
       #   If `transparent`, the output format needs to support transparency, so it should
       #   be set to either `png` (default value) or `webp`.
@@ -59,9 +60,9 @@ module OpenAI
       optional :mask, OpenAI::Internal::Type::FileInput
 
       # @!attribute model
-      #   The model to use for image generation. Only `dall-e-2` and
-      #   supported. Defaults to `dall-e-2` unless a parameter specific to
-      #   is used.
+      #   The model to use for image generation. Only `dall-e-2` and the GPT image models
+      #   are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
+      #   image models is used.
       #
       #   @return [String, Symbol, OpenAI::Models::ImageModel, nil]
       optional :model, union: -> { OpenAI::ImageEditParams::Model }, nil?: true
@@ -74,7 +75,7 @@ module OpenAI
 
       # @!attribute output_compression
       #   The compression level (0-100%) for the generated images. This parameter is only
-      #   supported for
+      #   supported for the GPT image models with the `webp` or `jpeg` output formats, and
       #   defaults to 100.
       #
       #   @return [Integer, nil]
@@ -82,7 +83,7 @@ module OpenAI
 
       # @!attribute output_format
       #   The format in which the generated images are returned. This parameter is only
-      #   supported for
+      #   supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
       #   default value is `png`.
       #
       #   @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil]
@@ -101,8 +102,8 @@ module OpenAI
 
       # @!attribute quality
       #   The quality of the image that will be generated. `high`, `medium` and `low` are
-      #   only supported for
-      #   Defaults to `auto`.
+      #   only supported for the GPT image models. `dall-e-2` only supports `standard`
+      #   quality. Defaults to `auto`.
       #
       #   @return [Symbol, OpenAI::Models::ImageEditParams::Quality, nil]
       optional :quality, enum: -> { OpenAI::ImageEditParams::Quality }, nil?: true
@@ -110,16 +111,16 @@ module OpenAI
       # @!attribute response_format
       #   The format in which the generated images are returned. Must be one of `url` or
       #   `b64_json`. URLs are only valid for 60 minutes after the image has been
-      #   generated. This parameter is only supported for `dall-e-2`, as
-      #
+      #   generated. This parameter is only supported for `dall-e-2`, as the GPT image
+      #   models always return base64-encoded images.
       #
       #   @return [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil]
       optional :response_format, enum: -> { OpenAI::ImageEditParams::ResponseFormat }, nil?: true
 
       # @!attribute size
       #   The size of the generated images. Must be one of `1024x1024`, `1536x1024`
-      #   (landscape), `1024x1536` (portrait), or `auto` (default value) for
-      #
+      #   (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+      #   models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
       #
       #   @return [Symbol, OpenAI::Models::ImageEditParams::Size, nil]
       optional :size, enum: -> { OpenAI::ImageEditParams::Size }, nil?: true
@@ -146,7 +147,7 @@ module OpenAI
       #
       #   @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
       #
-      #   @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and
+      #   @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and the GPT image models
       #
       #   @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
       #
@@ -168,7 +169,8 @@ module OpenAI
 
       # The image(s) to edit. Must be a supported image file or an array of images.
       #
-      # For `gpt-image-1`,
+      # For the GPT image models (`gpt-image-1`, `gpt-image-1-mini`, and
+      # `gpt-image-1.5`), each image should be a `png`, `webp`, or `jpg` file less than
       # 50MB. You can provide up to 16 images.
       #
       # For `dall-e-2`, you can only provide one image, and it should be a square `png`
@@ -188,9 +190,9 @@ module OpenAI
       end
 
       # Allows to set transparency for the background of the generated image(s). This
-      # parameter is only supported for
-      # `opaque` or `auto` (default value). When `auto` is used, the
-      # automatically determine the best background for the image.
+      # parameter is only supported for the GPT image models. Must be one of
+      # `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+      # model will automatically determine the best background for the image.
       #
       # If `transparent`, the output format needs to support transparency, so it should
       # be set to either `png` (default value) or `webp`.
@@ -219,15 +221,15 @@ module OpenAI
         #   @return [Array<Symbol>]
       end
 
-      # The model to use for image generation. Only `dall-e-2` and
-      # supported. Defaults to `dall-e-2` unless a parameter specific to
-      # is used.
+      # The model to use for image generation. Only `dall-e-2` and the GPT image models
+      # are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT
+      # image models is used.
       module Model
         extend OpenAI::Internal::Type::Union
 
         variant String
 
-        # The model to use for image generation. Only `dall-e-2` and
+        # The model to use for image generation. Only `dall-e-2` and the GPT image models are supported. Defaults to `dall-e-2` unless a parameter specific to the GPT image models is used.
         variant enum: -> { OpenAI::ImageModel }
 
         # @!method self.variants
@@ -235,7 +237,7 @@ module OpenAI
       end
 
       # The format in which the generated images are returned. This parameter is only
-      # supported for
+      # supported for the GPT image models. Must be one of `png`, `jpeg`, or `webp`. The
       # default value is `png`.
       module OutputFormat
         extend OpenAI::Internal::Type::Enum
@@ -249,8 +251,8 @@ module OpenAI
       end
 
       # The quality of the image that will be generated. `high`, `medium` and `low` are
-      # only supported for
-      # Defaults to `auto`.
+      # only supported for the GPT image models. `dall-e-2` only supports `standard`
+      # quality. Defaults to `auto`.
       module Quality
         extend OpenAI::Internal::Type::Enum
 
@@ -266,8 +268,8 @@ module OpenAI
 
       # The format in which the generated images are returned. Must be one of `url` or
      # `b64_json`. URLs are only valid for 60 minutes after the image has been
-      # generated. This parameter is only supported for `dall-e-2`, as
-      #
+      # generated. This parameter is only supported for `dall-e-2`, as the GPT image
+      # models always return base64-encoded images.
       module ResponseFormat
         extend OpenAI::Internal::Type::Enum
 
@@ -279,8 +281,8 @@ module OpenAI
       end
 
       # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
-      # (landscape), `1024x1536` (portrait), or `auto` (default value) for
-      #
+      # (landscape), `1024x1536` (portrait), or `auto` (default value) for the GPT image
+      # models, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
       module Size
         extend OpenAI::Internal::Type::Enum
 

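Pulling the updated `ImageEditParams` docs together, here is a sketch of an edit request against a GPT image model. File names and the chosen parameter values are illustrative; per the docs above, the GPT image models accept `png`/`webp`/`jpg` inputs under 50MB and always return base64-encoded images:

```ruby
require "openai"
require "base64"
require "pathname"

client = OpenAI::Client.new

edited = client.images.edit(
  image: Pathname("product-photo.png"),   # Pathname, IO, or OpenAI::FilePart
  prompt: "Place the product on a marble countertop",
  model: "gpt-image-1.5",
  background: :transparent,               # GPT image models only
  output_format: :png,                    # transparency requires png or webp
  quality: :high,
  size: :"1024x1536"
)

b64 = edited.data&.first&.b64_json
File.binwrite("edited.png", Base64.decode64(b64)) if b64
```
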
data/lib/openai/models/image_gen_completed_event.rb
CHANGED

@@ -46,7 +46,8 @@ module OpenAI
       required :type, const: :"image_generation.completed"
 
       # @!attribute usage
-      #   For
+      #   For the GPT image models only, the token usage information for the image
+      #   generation.
       #
       #   @return [OpenAI::Models::ImageGenCompletedEvent::Usage]
       required :usage, -> { OpenAI::ImageGenCompletedEvent::Usage }
@@ -69,7 +70,7 @@ module OpenAI
       #
       #   @param size [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size] The size of the generated image.
       #
-      #   @param usage [OpenAI::Models::ImageGenCompletedEvent::Usage] For
+      #   @param usage [OpenAI::Models::ImageGenCompletedEvent::Usage] For the GPT image models only, the token usage information for the image generat
       #
       #   @param type [Symbol, :"image_generation.completed"] The type of the event. Always `image_generation.completed`.
 
@@ -161,7 +162,8 @@ module OpenAI
         #   Some parameter documentations has been truncated, see
         #   {OpenAI::Models::ImageGenCompletedEvent::Usage} for more details.
         #
-        #   For
+        #   For the GPT image models only, the token usage information for the image
+        #   generation.
         #
         #   @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
         #