openai 0.13.0 → 0.14.0
This diff compares the contents of publicly released versions of this package as published to their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +21 -0
- data/README.md +1 -1
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +34 -10
- data/lib/openai/models/eval_create_params.rb +50 -5
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +50 -5
- data/lib/openai/models/evals/run_cancel_response.rb +48 -5
- data/lib/openai/models/evals/run_create_params.rb +50 -5
- data/lib/openai/models/evals/run_create_response.rb +48 -5
- data/lib/openai/models/evals/run_list_response.rb +48 -5
- data/lib/openai/models/evals/run_retrieve_response.rb +48 -5
- data/lib/openai/models/graders/label_model_grader.rb +48 -5
- data/lib/openai/models/graders/score_model_grader.rb +48 -5
- data/lib/openai/models/image_edit_completed_event.rb +198 -0
- data/lib/openai/models/image_edit_params.rb +36 -1
- data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
- data/lib/openai/models/image_edit_stream_event.rb +21 -0
- data/lib/openai/models/image_gen_completed_event.rb +198 -0
- data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
- data/lib/openai/models/image_gen_stream_event.rb +21 -0
- data/lib/openai/models/image_generate_params.rb +13 -1
- data/lib/openai/models/images_response.rb +3 -0
- data/lib/openai/models/responses/response_output_refusal.rb +2 -2
- data/lib/openai/models/responses/tool.rb +30 -1
- data/lib/openai/models.rb +12 -0
- data/lib/openai/resources/images.rb +140 -2
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/rbi/openai/helpers/structured_output/json_schema_converter.rbi +4 -0
- data/rbi/openai/models/eval_create_params.rbi +76 -7
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +76 -7
- data/rbi/openai/models/evals/run_cancel_response.rbi +70 -5
- data/rbi/openai/models/evals/run_create_params.rbi +76 -7
- data/rbi/openai/models/evals/run_create_response.rbi +70 -5
- data/rbi/openai/models/evals/run_list_response.rbi +70 -5
- data/rbi/openai/models/evals/run_retrieve_response.rbi +70 -5
- data/rbi/openai/models/graders/label_model_grader.rbi +74 -7
- data/rbi/openai/models/graders/score_model_grader.rbi +74 -7
- data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
- data/rbi/openai/models/image_edit_params.rbi +51 -0
- data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
- data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
- data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
- data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
- data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
- data/rbi/openai/models/image_generate_params.rbi +12 -0
- data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
- data/rbi/openai/models/responses/tool.rbi +61 -0
- data/rbi/openai/models.rbi +12 -0
- data/rbi/openai/resources/images.rbi +225 -0
- data/sig/openai/models/eval_create_params.rbs +29 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +29 -0
- data/sig/openai/models/evals/run_cancel_response.rbs +33 -0
- data/sig/openai/models/evals/run_create_params.rbs +33 -0
- data/sig/openai/models/evals/run_create_response.rbs +33 -0
- data/sig/openai/models/evals/run_list_response.rbs +33 -0
- data/sig/openai/models/evals/run_retrieve_response.rbs +33 -0
- data/sig/openai/models/graders/label_model_grader.rbs +29 -0
- data/sig/openai/models/graders/score_model_grader.rbs +29 -0
- data/sig/openai/models/image_edit_completed_event.rbs +150 -0
- data/sig/openai/models/image_edit_params.rbs +21 -0
- data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_edit_stream_event.rbs +12 -0
- data/sig/openai/models/image_gen_completed_event.rbs +150 -0
- data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_gen_stream_event.rbs +12 -0
- data/sig/openai/models/image_generate_params.rbs +5 -0
- data/sig/openai/models/responses/tool.rbs +16 -0
- data/sig/openai/models.rbs +12 -0
- data/sig/openai/resources/images.rbs +38 -0
- metadata +20 -2

```diff
--- a/data/sig/openai/models/graders/score_model_grader.rbs
+++ b/data/sig/openai/models/graders/score_model_grader.rbs
@@ -82,6 +82,8 @@ module OpenAI
             String
           | OpenAI::Responses::ResponseInputText
           | OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText
+          | OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage
+          | ::Array[top]
 
         module Content
           extend OpenAI::Internal::Type::Union
@@ -98,7 +100,34 @@ module OpenAI
             def to_hash: -> { text: String, type: :output_text }
           end
 
+          type input_image =
+            { image_url: String, type: :input_image, detail: String }
+
+          class InputImage < OpenAI::Internal::Type::BaseModel
+            attr_accessor image_url: String
+
+            attr_accessor type: :input_image
+
+            attr_reader detail: String?
+
+            def detail=: (String) -> String
+
+            def initialize: (
+              image_url: String,
+              ?detail: String,
+              ?type: :input_image
+            ) -> void
+
+            def to_hash: -> {
+              image_url: String,
+              type: :input_image,
+              detail: String
+            }
+          end
+
           def self?.variants: -> ::Array[OpenAI::Models::Graders::ScoreModelGrader::Input::content]
+
+          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
         end
 
         type role = :user | :assistant | :system | :developer
```
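
A minimal sketch (not part of the diff) of building a grader message that uses the new `InputImage` content variant. Constructor fields come from the RBS above; the URL is a placeholder and the surrounding eval/grader wiring is omitted.

```ruby
require "openai"

# Hypothetical illustration: mix the existing input-text variant with the new
# InputImage variant in a score-model grader message.
image_part = OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage.new(
  image_url: "https://example.com/sample.png",
  detail: "high" # optional, per `attr_reader detail: String?`
)

text_part = OpenAI::Responses::ResponseInputText.new(
  text: "Score how well the image matches the brief."
)

# Content may now also be an array mixing text and image parts (see the
# `::Array[top]` variant and the AnArrayOfInputTextAndInputImageArray converter).
grader_input = { role: :user, content: [text_part, image_part] }
```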

```diff
--- /dev/null
+++ b/data/sig/openai/models/image_edit_completed_event.rbs
@@ -0,0 +1,150 @@
+module OpenAI
+  module Models
+    type image_edit_completed_event =
+      {
+        :b64_json => String,
+        background: OpenAI::Models::ImageEditCompletedEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+        quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+        size: OpenAI::Models::ImageEditCompletedEvent::size,
+        type: :"image_edit.completed",
+        usage: OpenAI::ImageEditCompletedEvent::Usage
+      }
+
+    class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
+      attr_accessor b64_json: String
+
+      attr_accessor background: OpenAI::Models::ImageEditCompletedEvent::background
+
+      attr_accessor created_at: Integer
+
+      attr_accessor output_format: OpenAI::Models::ImageEditCompletedEvent::output_format
+
+      attr_accessor quality: OpenAI::Models::ImageEditCompletedEvent::quality
+
+      attr_accessor size: OpenAI::Models::ImageEditCompletedEvent::size
+
+      attr_accessor type: :"image_edit.completed"
+
+      attr_accessor usage: OpenAI::ImageEditCompletedEvent::Usage
+
+      def initialize: (
+        b64_json: String,
+        background: OpenAI::Models::ImageEditCompletedEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+        quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+        size: OpenAI::Models::ImageEditCompletedEvent::size,
+        usage: OpenAI::ImageEditCompletedEvent::Usage,
+        ?type: :"image_edit.completed"
+      ) -> void
+
+      def to_hash: -> {
+        :b64_json => String,
+        background: OpenAI::Models::ImageEditCompletedEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+        quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+        size: OpenAI::Models::ImageEditCompletedEvent::size,
+        type: :"image_edit.completed",
+        usage: OpenAI::ImageEditCompletedEvent::Usage
+      }
+
+      type background = :transparent | :opaque | :auto
+
+      module Background
+        extend OpenAI::Internal::Type::Enum
+
+        TRANSPARENT: :transparent
+        OPAQUE: :opaque
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::background]
+      end
+
+      type output_format = :png | :webp | :jpeg
+
+      module OutputFormat
+        extend OpenAI::Internal::Type::Enum
+
+        PNG: :png
+        WEBP: :webp
+        JPEG: :jpeg
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::output_format]
+      end
+
+      type quality = :low | :medium | :high | :auto
+
+      module Quality
+        extend OpenAI::Internal::Type::Enum
+
+        LOW: :low
+        MEDIUM: :medium
+        HIGH: :high
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::quality]
+      end
+
+      type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto
+
+      module Size
+        extend OpenAI::Internal::Type::Enum
+
+        SIZE_1024X1024: :"1024x1024"
+        SIZE_1024X1536: :"1024x1536"
+        SIZE_1536X1024: :"1536x1024"
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::size]
+      end
+
+      type usage =
+        {
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+          output_tokens: Integer,
+          total_tokens: Integer
+        }
+
+      class Usage < OpenAI::Internal::Type::BaseModel
+        attr_accessor input_tokens: Integer
+
+        attr_accessor input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails
+
+        attr_accessor output_tokens: Integer
+
+        attr_accessor total_tokens: Integer
+
+        def initialize: (
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+          output_tokens: Integer,
+          total_tokens: Integer
+        ) -> void
+
+        def to_hash: -> {
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+          output_tokens: Integer,
+          total_tokens: Integer
+        }
+
+        type input_tokens_details =
+          { image_tokens: Integer, text_tokens: Integer }
+
+        class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+          attr_accessor image_tokens: Integer
+
+          attr_accessor text_tokens: Integer
+
+          def initialize: (image_tokens: Integer, text_tokens: Integer) -> void
+
+          def to_hash: -> { image_tokens: Integer, text_tokens: Integer }
+        end
+      end
+    end
+  end
+end
```
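
For reference, a minimal sketch of consuming one of these events after it has been received. It assumes `event` is an `OpenAI::ImageEditCompletedEvent` read off an image-edit stream and uses only the accessors declared in the signature above.

```ruby
require "base64"

# Sketch: persist the final image and report token usage from an
# OpenAI::ImageEditCompletedEvent (assumed to come from an image-edit stream).
def save_edit_result(event)
  path = "edited.#{event.output_format}" # output_format is :png, :webp or :jpeg
  File.binwrite(path, Base64.decode64(event.b64_json))

  usage = event.usage
  details = usage.input_tokens_details
  puts "#{event.size} #{event.quality} image written to #{path}"
  puts "tokens: #{usage.input_tokens} in (#{details.image_tokens} image / #{details.text_tokens} text), " \
       "#{usage.output_tokens} out, #{usage.total_tokens} total"
  path
end
```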

```diff
--- a/data/sig/openai/models/image_edit_params.rbs
+++ b/data/sig/openai/models/image_edit_params.rbs
@@ -5,11 +5,13 @@ module OpenAI
         image: OpenAI::Models::ImageEditParams::image,
         prompt: String,
         background: OpenAI::Models::ImageEditParams::background?,
+        input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
         mask: OpenAI::Internal::file_input,
         model: OpenAI::Models::ImageEditParams::model?,
         n: Integer?,
         output_compression: Integer?,
         output_format: OpenAI::Models::ImageEditParams::output_format?,
+        partial_images: Integer?,
         quality: OpenAI::Models::ImageEditParams::quality?,
         response_format: OpenAI::Models::ImageEditParams::response_format?,
         size: OpenAI::Models::ImageEditParams::size?,
@@ -27,6 +29,8 @@ module OpenAI
 
       attr_accessor background: OpenAI::Models::ImageEditParams::background?
 
+      attr_accessor input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?
+
       attr_reader mask: OpenAI::Internal::file_input?
 
       def mask=: (OpenAI::Internal::file_input) -> OpenAI::Internal::file_input
@@ -39,6 +43,8 @@ module OpenAI
 
       attr_accessor output_format: OpenAI::Models::ImageEditParams::output_format?
 
+      attr_accessor partial_images: Integer?
+
       attr_accessor quality: OpenAI::Models::ImageEditParams::quality?
 
      attr_accessor response_format: OpenAI::Models::ImageEditParams::response_format?
@@ -53,11 +59,13 @@ module OpenAI
         image: OpenAI::Models::ImageEditParams::image,
         prompt: String,
         ?background: OpenAI::Models::ImageEditParams::background?,
+        ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
         ?mask: OpenAI::Internal::file_input,
         ?model: OpenAI::Models::ImageEditParams::model?,
         ?n: Integer?,
         ?output_compression: Integer?,
         ?output_format: OpenAI::Models::ImageEditParams::output_format?,
+        ?partial_images: Integer?,
         ?quality: OpenAI::Models::ImageEditParams::quality?,
         ?response_format: OpenAI::Models::ImageEditParams::response_format?,
         ?size: OpenAI::Models::ImageEditParams::size?,
@@ -69,11 +77,13 @@ module OpenAI
         image: OpenAI::Models::ImageEditParams::image,
         prompt: String,
         background: OpenAI::Models::ImageEditParams::background?,
+        input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
         mask: OpenAI::Internal::file_input,
         model: OpenAI::Models::ImageEditParams::model?,
         n: Integer?,
         output_compression: Integer?,
         output_format: OpenAI::Models::ImageEditParams::output_format?,
+        partial_images: Integer?,
         quality: OpenAI::Models::ImageEditParams::quality?,
         response_format: OpenAI::Models::ImageEditParams::response_format?,
         size: OpenAI::Models::ImageEditParams::size?,
@@ -104,6 +114,17 @@ module OpenAI
         def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::background]
       end
 
+      type input_fidelity = :high | :low
+
+      module InputFidelity
+        extend OpenAI::Internal::Type::Enum
+
+        HIGH: :high
+        LOW: :low
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::input_fidelity]
+      end
+
       type model = String | OpenAI::Models::image_model
 
       module Model
```
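
Both additions are plain keyword arguments on the existing edit call. A minimal sketch with placeholder file and prompt; note that `partial_images` only has an effect on the streaming form of the endpoint.

```ruby
require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

# Sketch of the two parameters added in 0.14.0. `input_fidelity` accepts :high or
# :low per the enum above; `partial_images` sets how many partial frames a
# streaming edit emits. Image path and prompt are placeholders.
response = client.images.edit(
  image: Pathname("product-photo.png"),
  prompt: "Replace the background with a plain white studio backdrop",
  input_fidelity: :high,
  partial_images: 2
)
```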

```diff
--- /dev/null
+++ b/data/sig/openai/models/image_edit_partial_image_event.rbs
@@ -0,0 +1,105 @@
+module OpenAI
+  module Models
+    type image_edit_partial_image_event =
+      {
+        :b64_json => String,
+        background: OpenAI::Models::ImageEditPartialImageEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format,
+        partial_image_index: Integer,
+        quality: OpenAI::Models::ImageEditPartialImageEvent::quality,
+        size: OpenAI::Models::ImageEditPartialImageEvent::size,
+        type: :"image_edit.partial_image"
+      }
+
+    class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel
+      attr_accessor b64_json: String
+
+      attr_accessor background: OpenAI::Models::ImageEditPartialImageEvent::background
+
+      attr_accessor created_at: Integer
+
+      attr_accessor output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format
+
+      attr_accessor partial_image_index: Integer
+
+      attr_accessor quality: OpenAI::Models::ImageEditPartialImageEvent::quality
+
+      attr_accessor size: OpenAI::Models::ImageEditPartialImageEvent::size
+
+      attr_accessor type: :"image_edit.partial_image"
+
+      def initialize: (
+        b64_json: String,
+        background: OpenAI::Models::ImageEditPartialImageEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format,
+        partial_image_index: Integer,
+        quality: OpenAI::Models::ImageEditPartialImageEvent::quality,
+        size: OpenAI::Models::ImageEditPartialImageEvent::size,
+        ?type: :"image_edit.partial_image"
+      ) -> void
+
+      def to_hash: -> {
+        :b64_json => String,
+        background: OpenAI::Models::ImageEditPartialImageEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format,
+        partial_image_index: Integer,
+        quality: OpenAI::Models::ImageEditPartialImageEvent::quality,
+        size: OpenAI::Models::ImageEditPartialImageEvent::size,
+        type: :"image_edit.partial_image"
+      }
+
+      type background = :transparent | :opaque | :auto
+
+      module Background
+        extend OpenAI::Internal::Type::Enum
+
+        TRANSPARENT: :transparent
+        OPAQUE: :opaque
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::background]
+      end
+
+      type output_format = :png | :webp | :jpeg
+
+      module OutputFormat
+        extend OpenAI::Internal::Type::Enum
+
+        PNG: :png
+        WEBP: :webp
+        JPEG: :jpeg
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::output_format]
+      end
+
+      type quality = :low | :medium | :high | :auto
+
+      module Quality
+        extend OpenAI::Internal::Type::Enum
+
+        LOW: :low
+        MEDIUM: :medium
+        HIGH: :high
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::quality]
+      end
+
+      type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto
+
+      module Size
+        extend OpenAI::Internal::Type::Enum
+
+        SIZE_1024X1024: :"1024x1024"
+        SIZE_1024X1536: :"1024x1536"
+        SIZE_1536X1024: :"1536x1024"
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::size]
+      end
+    end
+  end
+end
```

```diff
--- /dev/null
+++ b/data/sig/openai/models/image_edit_stream_event.rbs
@@ -0,0 +1,12 @@
+module OpenAI
+  module Models
+    type image_edit_stream_event =
+      OpenAI::ImageEditPartialImageEvent | OpenAI::ImageEditCompletedEvent
+
+    module ImageEditStreamEvent
+      extend OpenAI::Internal::Type::Union
+
+      def self?.variants: -> ::Array[OpenAI::Models::image_edit_stream_event]
+    end
+  end
+end
```
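
Since the union has only two variants, consuming a stream reduces to a case dispatch. A minimal sketch, assuming `stream` is an enumerable of these events obtained from whichever streaming edit entry point `data/lib/openai/resources/images.rb` adds in this release.

```ruby
require "base64"

# Sketch: write each partial frame as it arrives, then the final image.
# `stream` is assumed to yield OpenAI::Models::ImageEditStreamEvent variants.
def consume_edit_stream(stream)
  stream.each do |event|
    case event
    when OpenAI::ImageEditPartialImageEvent
      File.binwrite("partial-#{event.partial_image_index}.#{event.output_format}",
                    Base64.decode64(event.b64_json))
    when OpenAI::ImageEditCompletedEvent
      File.binwrite("final.#{event.output_format}", Base64.decode64(event.b64_json))
      puts "done: #{event.usage.total_tokens} tokens"
    end
  end
end
```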

```diff
--- /dev/null
+++ b/data/sig/openai/models/image_gen_completed_event.rbs
@@ -0,0 +1,150 @@
+module OpenAI
+  module Models
+    type image_gen_completed_event =
+      {
+        :b64_json => String,
+        background: OpenAI::Models::ImageGenCompletedEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageGenCompletedEvent::output_format,
+        quality: OpenAI::Models::ImageGenCompletedEvent::quality,
+        size: OpenAI::Models::ImageGenCompletedEvent::size,
+        type: :"image_generation.completed",
+        usage: OpenAI::ImageGenCompletedEvent::Usage
+      }
+
+    class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel
+      attr_accessor b64_json: String
+
+      attr_accessor background: OpenAI::Models::ImageGenCompletedEvent::background
+
+      attr_accessor created_at: Integer
+
+      attr_accessor output_format: OpenAI::Models::ImageGenCompletedEvent::output_format
+
+      attr_accessor quality: OpenAI::Models::ImageGenCompletedEvent::quality
+
+      attr_accessor size: OpenAI::Models::ImageGenCompletedEvent::size
+
+      attr_accessor type: :"image_generation.completed"
+
+      attr_accessor usage: OpenAI::ImageGenCompletedEvent::Usage
+
+      def initialize: (
+        b64_json: String,
+        background: OpenAI::Models::ImageGenCompletedEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageGenCompletedEvent::output_format,
+        quality: OpenAI::Models::ImageGenCompletedEvent::quality,
+        size: OpenAI::Models::ImageGenCompletedEvent::size,
+        usage: OpenAI::ImageGenCompletedEvent::Usage,
+        ?type: :"image_generation.completed"
+      ) -> void
+
+      def to_hash: -> {
+        :b64_json => String,
+        background: OpenAI::Models::ImageGenCompletedEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageGenCompletedEvent::output_format,
+        quality: OpenAI::Models::ImageGenCompletedEvent::quality,
+        size: OpenAI::Models::ImageGenCompletedEvent::size,
+        type: :"image_generation.completed",
+        usage: OpenAI::ImageGenCompletedEvent::Usage
+      }
+
+      type background = :transparent | :opaque | :auto
+
+      module Background
+        extend OpenAI::Internal::Type::Enum
+
+        TRANSPARENT: :transparent
+        OPAQUE: :opaque
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::background]
+      end
+
+      type output_format = :png | :webp | :jpeg
+
+      module OutputFormat
+        extend OpenAI::Internal::Type::Enum
+
+        PNG: :png
+        WEBP: :webp
+        JPEG: :jpeg
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::output_format]
+      end
+
+      type quality = :low | :medium | :high | :auto
+
+      module Quality
+        extend OpenAI::Internal::Type::Enum
+
+        LOW: :low
+        MEDIUM: :medium
+        HIGH: :high
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::quality]
+      end
+
+      type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto
+
+      module Size
+        extend OpenAI::Internal::Type::Enum
+
+        SIZE_1024X1024: :"1024x1024"
+        SIZE_1024X1536: :"1024x1536"
+        SIZE_1536X1024: :"1536x1024"
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::size]
+      end
+
+      type usage =
+        {
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails,
+          output_tokens: Integer,
+          total_tokens: Integer
+        }
+
+      class Usage < OpenAI::Internal::Type::BaseModel
+        attr_accessor input_tokens: Integer
+
+        attr_accessor input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails
+
+        attr_accessor output_tokens: Integer
+
+        attr_accessor total_tokens: Integer
+
+        def initialize: (
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails,
+          output_tokens: Integer,
+          total_tokens: Integer
+        ) -> void
+
+        def to_hash: -> {
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails,
+          output_tokens: Integer,
+          total_tokens: Integer
+        }
+
+        type input_tokens_details =
+          { image_tokens: Integer, text_tokens: Integer }
+
+        class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+          attr_accessor image_tokens: Integer
+
+          attr_accessor text_tokens: Integer
+
+          def initialize: (image_tokens: Integer, text_tokens: Integer) -> void
+
+          def to_hash: -> { image_tokens: Integer, text_tokens: Integer }
+        end
+      end
+    end
+  end
+end
```

```diff
--- /dev/null
+++ b/data/sig/openai/models/image_gen_partial_image_event.rbs
@@ -0,0 +1,105 @@
+module OpenAI
+  module Models
+    type image_gen_partial_image_event =
+      {
+        :b64_json => String,
+        background: OpenAI::Models::ImageGenPartialImageEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format,
+        partial_image_index: Integer,
+        quality: OpenAI::Models::ImageGenPartialImageEvent::quality,
+        size: OpenAI::Models::ImageGenPartialImageEvent::size,
+        type: :"image_generation.partial_image"
+      }
+
+    class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel
+      attr_accessor b64_json: String
+
+      attr_accessor background: OpenAI::Models::ImageGenPartialImageEvent::background
+
+      attr_accessor created_at: Integer
+
+      attr_accessor output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format
+
+      attr_accessor partial_image_index: Integer
+
+      attr_accessor quality: OpenAI::Models::ImageGenPartialImageEvent::quality
+
+      attr_accessor size: OpenAI::Models::ImageGenPartialImageEvent::size
+
+      attr_accessor type: :"image_generation.partial_image"
+
+      def initialize: (
+        b64_json: String,
+        background: OpenAI::Models::ImageGenPartialImageEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format,
+        partial_image_index: Integer,
+        quality: OpenAI::Models::ImageGenPartialImageEvent::quality,
+        size: OpenAI::Models::ImageGenPartialImageEvent::size,
+        ?type: :"image_generation.partial_image"
+      ) -> void
+
+      def to_hash: -> {
+        :b64_json => String,
+        background: OpenAI::Models::ImageGenPartialImageEvent::background,
+        created_at: Integer,
+        output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format,
+        partial_image_index: Integer,
+        quality: OpenAI::Models::ImageGenPartialImageEvent::quality,
+        size: OpenAI::Models::ImageGenPartialImageEvent::size,
+        type: :"image_generation.partial_image"
+      }
+
+      type background = :transparent | :opaque | :auto
+
+      module Background
+        extend OpenAI::Internal::Type::Enum
+
+        TRANSPARENT: :transparent
+        OPAQUE: :opaque
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::background]
+      end
+
+      type output_format = :png | :webp | :jpeg
+
+      module OutputFormat
+        extend OpenAI::Internal::Type::Enum
+
+        PNG: :png
+        WEBP: :webp
+        JPEG: :jpeg
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::output_format]
+      end
+
+      type quality = :low | :medium | :high | :auto
+
+      module Quality
+        extend OpenAI::Internal::Type::Enum
+
+        LOW: :low
+        MEDIUM: :medium
+        HIGH: :high
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::quality]
+      end
+
+      type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto
+
+      module Size
+        extend OpenAI::Internal::Type::Enum
+
+        SIZE_1024X1024: :"1024x1024"
+        SIZE_1024X1536: :"1024x1536"
+        SIZE_1536X1024: :"1536x1024"
+        AUTO: :auto
+
+        def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::size]
+      end
+    end
+  end
+end
```

```diff
--- /dev/null
+++ b/data/sig/openai/models/image_gen_stream_event.rbs
@@ -0,0 +1,12 @@
+module OpenAI
+  module Models
+    type image_gen_stream_event =
+      OpenAI::ImageGenPartialImageEvent | OpenAI::ImageGenCompletedEvent
+
+    module ImageGenStreamEvent
+      extend OpenAI::Internal::Type::Union
+
+      def self?.variants: -> ::Array[OpenAI::Models::image_gen_stream_event]
+    end
+  end
+end
```

```diff
--- a/data/sig/openai/models/image_generate_params.rbs
+++ b/data/sig/openai/models/image_generate_params.rbs
@@ -9,6 +9,7 @@ module OpenAI
         n: Integer?,
         output_compression: Integer?,
         output_format: OpenAI::Models::ImageGenerateParams::output_format?,
+        partial_images: Integer?,
         quality: OpenAI::Models::ImageGenerateParams::quality?,
         response_format: OpenAI::Models::ImageGenerateParams::response_format?,
         size: OpenAI::Models::ImageGenerateParams::size?,
@@ -35,6 +36,8 @@ module OpenAI
 
       attr_accessor output_format: OpenAI::Models::ImageGenerateParams::output_format?
 
+      attr_accessor partial_images: Integer?
+
       attr_accessor quality: OpenAI::Models::ImageGenerateParams::quality?
 
       attr_accessor response_format: OpenAI::Models::ImageGenerateParams::response_format?
@@ -55,6 +58,7 @@ module OpenAI
         ?n: Integer?,
         ?output_compression: Integer?,
         ?output_format: OpenAI::Models::ImageGenerateParams::output_format?,
+        ?partial_images: Integer?,
         ?quality: OpenAI::Models::ImageGenerateParams::quality?,
         ?response_format: OpenAI::Models::ImageGenerateParams::response_format?,
         ?size: OpenAI::Models::ImageGenerateParams::size?,
@@ -71,6 +75,7 @@ module OpenAI
         n: Integer?,
         output_compression: Integer?,
         output_format: OpenAI::Models::ImageGenerateParams::output_format?,
+        partial_images: Integer?,
         quality: OpenAI::Models::ImageGenerateParams::quality?,
         response_format: OpenAI::Models::ImageGenerateParams::response_format?,
         size: OpenAI::Models::ImageGenerateParams::size?,
```
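
A minimal sketch of the new parameter on the generation side. Only `partial_images` itself is confirmed by the signature above; the streaming entry point is assumed here to be `generate_stream_raw` (added alongside these events in `resources/images.rb`, but its name is not visible in this excerpt).

```ruby
require "base64"
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

# Assumption: `generate_stream_raw` is the streaming counterpart that emits
# ImageGenPartialImageEvent / ImageGenCompletedEvent; treat the method name as
# illustrative and check the resource diff for the exact API.
stream = client.images.generate_stream_raw(
  model: :"gpt-image-1",
  prompt: "A watercolor fox in the snow",
  partial_images: 3
)

stream.each do |event|
  if event.is_a?(OpenAI::ImageGenPartialImageEvent)
    File.binwrite("fox-partial-#{event.partial_image_index}.png",
                  Base64.decode64(event.b64_json))
  end
end
```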
|