openai 0.13.1 → 0.15.0
This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +26 -0
- data/README.md +3 -3
- data/lib/openai/models/audio/speech_create_params.rb +0 -9
- data/lib/openai/models/chat/chat_completion.rb +2 -2
- data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
- data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
- data/lib/openai/models/chat/completion_create_params.rb +2 -2
- data/lib/openai/models/function_definition.rb +1 -1
- data/lib/openai/models/image_edit_completed_event.rb +198 -0
- data/lib/openai/models/image_edit_params.rb +39 -1
- data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
- data/lib/openai/models/image_edit_stream_event.rb +21 -0
- data/lib/openai/models/image_gen_completed_event.rb +198 -0
- data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
- data/lib/openai/models/image_gen_stream_event.rb +21 -0
- data/lib/openai/models/image_generate_params.rb +16 -1
- data/lib/openai/models/images_response.rb +2 -2
- data/lib/openai/models/responses/response.rb +2 -2
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
- data/lib/openai/models/responses/response_create_params.rb +2 -2
- data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
- data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
- data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
- data/lib/openai/models/responses/response_output_refusal.rb +2 -2
- data/lib/openai/models/responses/response_stream_event.rb +1 -7
- data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
- data/lib/openai/models/responses/response_text_done_event.rb +66 -1
- data/lib/openai/models/responses/tool.rb +30 -1
- data/lib/openai/models.rb +12 -0
- data/lib/openai/resources/images.rb +140 -2
- data/lib/openai/resources/responses.rb +2 -2
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -2
- data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
- data/rbi/openai/models/chat/chat_completion.rbi +3 -3
- data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
- data/rbi/openai/models/chat/completion_create_params.rbi +3 -3
- data/rbi/openai/models/function_definition.rbi +2 -2
- data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
- data/rbi/openai/models/image_edit_params.rbi +57 -0
- data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
- data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
- data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
- data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
- data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
- data/rbi/openai/models/image_generate_params.rbi +18 -0
- data/rbi/openai/models/images_response.rbi +2 -2
- data/rbi/openai/models/responses/response.rbi +3 -3
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
- data/rbi/openai/models/responses/response_create_params.rbi +3 -3
- data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
- data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
- data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
- data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
- data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
- data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
- data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
- data/rbi/openai/models/responses/tool.rbi +61 -0
- data/rbi/openai/models.rbi +12 -0
- data/rbi/openai/resources/chat/completions.rbi +2 -2
- data/rbi/openai/resources/images.rbi +237 -0
- data/rbi/openai/resources/responses.rbi +2 -2
- data/sig/openai/models/audio/speech_create_params.rbs +0 -6
- data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
- data/sig/openai/models/image_edit_completed_event.rbs +150 -0
- data/sig/openai/models/image_edit_params.rbs +21 -0
- data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_edit_stream_event.rbs +12 -0
- data/sig/openai/models/image_gen_completed_event.rbs +150 -0
- data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_gen_stream_event.rbs +12 -0
- data/sig/openai/models/image_generate_params.rbs +5 -0
- data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
- data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
- data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
- data/sig/openai/models/responses/response_stream_event.rbs +0 -2
- data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
- data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
- data/sig/openai/models/responses/tool.rbs +16 -0
- data/sig/openai/models.rbs +12 -0
- data/sig/openai/resources/images.rbs +38 -0
- metadata +20 -8
- data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
- data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
- data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
- data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
- data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
- data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
data/lib/openai/models/image_gen_completed_event.rb

@@ -0,0 +1,198 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel
+      # @!attribute b64_json
+      #   Base64-encoded image data, suitable for rendering as an image.
+      #
+      #   @return [String]
+      required :b64_json, String
+
+      # @!attribute background
+      #   The background setting for the generated image.
+      #
+      #   @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Background]
+      required :background, enum: -> { OpenAI::ImageGenCompletedEvent::Background }
+
+      # @!attribute created_at
+      #   The Unix timestamp when the event was created.
+      #
+      #   @return [Integer]
+      required :created_at, Integer
+
+      # @!attribute output_format
+      #   The output format for the generated image.
+      #
+      #   @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::OutputFormat]
+      required :output_format, enum: -> { OpenAI::ImageGenCompletedEvent::OutputFormat }
+
+      # @!attribute quality
+      #   The quality setting for the generated image.
+      #
+      #   @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Quality]
+      required :quality, enum: -> { OpenAI::ImageGenCompletedEvent::Quality }
+
+      # @!attribute size
+      #   The size of the generated image.
+      #
+      #   @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size]
+      required :size, enum: -> { OpenAI::ImageGenCompletedEvent::Size }
+
+      # @!attribute type
+      #   The type of the event. Always `image_generation.completed`.
+      #
+      #   @return [Symbol, :"image_generation.completed"]
+      required :type, const: :"image_generation.completed"
+
+      # @!attribute usage
+      #   For `gpt-image-1` only, the token usage information for the image generation.
+      #
+      #   @return [OpenAI::Models::ImageGenCompletedEvent::Usage]
+      required :usage, -> { OpenAI::ImageGenCompletedEvent::Usage }
+
+      # @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_generation.completed")
+      #   Some parameter documentations has been truncated, see
+      #   {OpenAI::Models::ImageGenCompletedEvent} for more details.
+      #
+      #   Emitted when image generation has completed and the final image is available.
+      #
+      #   @param b64_json [String] Base64-encoded image data, suitable for rendering as an image.
+      #
+      #   @param background [Symbol, OpenAI::Models::ImageGenCompletedEvent::Background] The background setting for the generated image.
+      #
+      #   @param created_at [Integer] The Unix timestamp when the event was created.
+      #
+      #   @param output_format [Symbol, OpenAI::Models::ImageGenCompletedEvent::OutputFormat] The output format for the generated image.
+      #
+      #   @param quality [Symbol, OpenAI::Models::ImageGenCompletedEvent::Quality] The quality setting for the generated image.
+      #
+      #   @param size [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size] The size of the generated image.
+      #
+      #   @param usage [OpenAI::Models::ImageGenCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation.
+      #
+      #   @param type [Symbol, :"image_generation.completed"] The type of the event. Always `image_generation.completed`.
+
+      # The background setting for the generated image.
+      #
+      # @see OpenAI::Models::ImageGenCompletedEvent#background
+      module Background
+        extend OpenAI::Internal::Type::Enum
+
+        TRANSPARENT = :transparent
+        OPAQUE = :opaque
+        AUTO = :auto
+
+        # @!method self.values
+        #   @return [Array<Symbol>]
+      end
+
+      # The output format for the generated image.
+      #
+      # @see OpenAI::Models::ImageGenCompletedEvent#output_format
+      module OutputFormat
+        extend OpenAI::Internal::Type::Enum
+
+        PNG = :png
+        WEBP = :webp
+        JPEG = :jpeg
+
+        # @!method self.values
+        #   @return [Array<Symbol>]
+      end
+
+      # The quality setting for the generated image.
+      #
+      # @see OpenAI::Models::ImageGenCompletedEvent#quality
+      module Quality
+        extend OpenAI::Internal::Type::Enum
+
+        LOW = :low
+        MEDIUM = :medium
+        HIGH = :high
+        AUTO = :auto
+
+        # @!method self.values
+        #   @return [Array<Symbol>]
+      end
+
+      # The size of the generated image.
+      #
+      # @see OpenAI::Models::ImageGenCompletedEvent#size
+      module Size
+        extend OpenAI::Internal::Type::Enum
+
+        SIZE_1024X1024 = :"1024x1024"
+        SIZE_1024X1536 = :"1024x1536"
+        SIZE_1536X1024 = :"1536x1024"
+        AUTO = :auto
+
+        # @!method self.values
+        #   @return [Array<Symbol>]
+      end
+
+      # @see OpenAI::Models::ImageGenCompletedEvent#usage
+      class Usage < OpenAI::Internal::Type::BaseModel
+        # @!attribute input_tokens
+        #   The number of tokens (images and text) in the input prompt.
+        #
+        #   @return [Integer]
+        required :input_tokens, Integer
+
+        # @!attribute input_tokens_details
+        #   The input tokens detailed information for the image generation.
+        #
+        #   @return [OpenAI::Models::ImageGenCompletedEvent::Usage::InputTokensDetails]
+        required :input_tokens_details, -> { OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails }
+
+        # @!attribute output_tokens
+        #   The number of image tokens in the output image.
+        #
+        #   @return [Integer]
+        required :output_tokens, Integer
+
+        # @!attribute total_tokens
+        #   The total number of tokens (images and text) used for the image generation.
+        #
+        #   @return [Integer]
+        required :total_tokens, Integer
+
+        # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:)
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::ImageGenCompletedEvent::Usage} for more details.
+        #
+        #   For `gpt-image-1` only, the token usage information for the image generation.
+        #
+        #   @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
+        #
+        #   @param input_tokens_details [OpenAI::Models::ImageGenCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation.
+        #
+        #   @param output_tokens [Integer] The number of image tokens in the output image.
+        #
+        #   @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.
+
+        # @see OpenAI::Models::ImageGenCompletedEvent::Usage#input_tokens_details
+        class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+          # @!attribute image_tokens
+          #   The number of image tokens in the input prompt.
+          #
+          #   @return [Integer]
+          required :image_tokens, Integer
+
+          # @!attribute text_tokens
+          #   The number of text tokens in the input prompt.
+          #
+          #   @return [Integer]
+          required :text_tokens, Integer
+
+          # @!method initialize(image_tokens:, text_tokens:)
+          #   The input tokens detailed information for the image generation.
+          #
+          #   @param image_tokens [Integer] The number of image tokens in the input prompt.
+          #
+          #   @param text_tokens [Integer] The number of text tokens in the input prompt.
+        end
+      end
+    end
+  end
+end
data/lib/openai/models/image_gen_partial_image_event.rb

@@ -0,0 +1,135 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel
+      # @!attribute b64_json
+      #   Base64-encoded partial image data, suitable for rendering as an image.
+      #
+      #   @return [String]
+      required :b64_json, String
+
+      # @!attribute background
+      #   The background setting for the requested image.
+      #
+      #   @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Background]
+      required :background, enum: -> { OpenAI::ImageGenPartialImageEvent::Background }
+
+      # @!attribute created_at
+      #   The Unix timestamp when the event was created.
+      #
+      #   @return [Integer]
+      required :created_at, Integer
+
+      # @!attribute output_format
+      #   The output format for the requested image.
+      #
+      #   @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::OutputFormat]
+      required :output_format, enum: -> { OpenAI::ImageGenPartialImageEvent::OutputFormat }
+
+      # @!attribute partial_image_index
+      #   0-based index for the partial image (streaming).
+      #
+      #   @return [Integer]
+      required :partial_image_index, Integer
+
+      # @!attribute quality
+      #   The quality setting for the requested image.
+      #
+      #   @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Quality]
+      required :quality, enum: -> { OpenAI::ImageGenPartialImageEvent::Quality }
+
+      # @!attribute size
+      #   The size of the requested image.
+      #
+      #   @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Size]
+      required :size, enum: -> { OpenAI::ImageGenPartialImageEvent::Size }
+
+      # @!attribute type
+      #   The type of the event. Always `image_generation.partial_image`.
+      #
+      #   @return [Symbol, :"image_generation.partial_image"]
+      required :type, const: :"image_generation.partial_image"
+
+      # @!method initialize(b64_json:, background:, created_at:, output_format:, partial_image_index:, quality:, size:, type: :"image_generation.partial_image")
+      #   Some parameter documentations has been truncated, see
+      #   {OpenAI::Models::ImageGenPartialImageEvent} for more details.
+      #
+      #   Emitted when a partial image is available during image generation streaming.
+      #
+      #   @param b64_json [String] Base64-encoded partial image data, suitable for rendering as an image.
+      #
+      #   @param background [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Background] The background setting for the requested image.
+      #
+      #   @param created_at [Integer] The Unix timestamp when the event was created.
+      #
+      #   @param output_format [Symbol, OpenAI::Models::ImageGenPartialImageEvent::OutputFormat] The output format for the requested image.
+      #
+      #   @param partial_image_index [Integer] 0-based index for the partial image (streaming).
+      #
+      #   @param quality [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Quality] The quality setting for the requested image.
+      #
+      #   @param size [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Size] The size of the requested image.
+      #
+      #   @param type [Symbol, :"image_generation.partial_image"] The type of the event. Always `image_generation.partial_image`.
+
+      # The background setting for the requested image.
+      #
+      # @see OpenAI::Models::ImageGenPartialImageEvent#background
+      module Background
+        extend OpenAI::Internal::Type::Enum
+
+        TRANSPARENT = :transparent
+        OPAQUE = :opaque
+        AUTO = :auto
+
+        # @!method self.values
+        #   @return [Array<Symbol>]
+      end
+
+      # The output format for the requested image.
+      #
+      # @see OpenAI::Models::ImageGenPartialImageEvent#output_format
+      module OutputFormat
+        extend OpenAI::Internal::Type::Enum
+
+        PNG = :png
+        WEBP = :webp
+        JPEG = :jpeg
+
+        # @!method self.values
+        #   @return [Array<Symbol>]
+      end
+
+      # The quality setting for the requested image.
+      #
+      # @see OpenAI::Models::ImageGenPartialImageEvent#quality
+      module Quality
+        extend OpenAI::Internal::Type::Enum
+
+        LOW = :low
+        MEDIUM = :medium
+        HIGH = :high
+        AUTO = :auto
+
+        # @!method self.values
+        #   @return [Array<Symbol>]
+      end
+
+      # The size of the requested image.
+      #
+      # @see OpenAI::Models::ImageGenPartialImageEvent#size
+      module Size
+        extend OpenAI::Internal::Type::Enum
+
+        SIZE_1024X1024 = :"1024x1024"
+        SIZE_1024X1536 = :"1024x1536"
+        SIZE_1536X1024 = :"1536x1024"
+        AUTO = :auto
+
+        # @!method self.values
+        #   @return [Array<Symbol>]
+      end
+    end
+  end
+end
data/lib/openai/models/image_gen_stream_event.rb

@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    # Emitted when a partial image is available during image generation streaming.
+    module ImageGenStreamEvent
+      extend OpenAI::Internal::Type::Union
+
+      discriminator :type
+
+      # Emitted when a partial image is available during image generation streaming.
+      variant :"image_generation.partial_image", -> { OpenAI::ImageGenPartialImageEvent }
+
+      # Emitted when image generation has completed and the final image is available.
+      variant :"image_generation.completed", -> { OpenAI::ImageGenCompletedEvent }
+
+      # @!method self.variants
+      #   @return [Array(OpenAI::Models::ImageGenPartialImageEvent, OpenAI::Models::ImageGenCompletedEvent)]
+    end
+  end
+end
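The three new files above make up the streaming image-generation event model: `ImageGenPartialImageEvent`, `ImageGenCompletedEvent`, and the `ImageGenStreamEvent` union that discriminates on `type`. A minimal consumer sketch follows; it only assumes the fields defined in these files and that stream iteration yields instances of these model classes (how the stream itself is opened is shown after the `image_generate_params.rb` hunks below).

```ruby
require "base64"

# Hypothetical handler for one event conforming to OpenAI::Models::ImageGenStreamEvent.
def handle_image_event(event)
  case event
  when OpenAI::Models::ImageGenPartialImageEvent
    # Partial frames arrive in order; partial_image_index is 0-based.
    File.binwrite(
      "partial_#{event.partial_image_index}.#{event.output_format}",
      Base64.decode64(event.b64_json)
    )
  when OpenAI::Models::ImageGenCompletedEvent
    File.binwrite("final.#{event.output_format}", Base64.decode64(event.b64_json))
    # usage is reported for gpt-image-1 only.
    puts "tokens: #{event.usage.input_tokens} in / #{event.usage.output_tokens} out " \
         "(#{event.usage.total_tokens} total)"
  end
end
```

Decoding `b64_json` is identical for partial frames and the final image; only the event class and its extra fields differ.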
data/lib/openai/models/image_generate_params.rb

@@ -3,6 +3,8 @@
 module OpenAI
   module Models
     # @see OpenAI::Resources::Images#generate
+    #
+    # @see OpenAI::Resources::Images#generate_stream_raw
     class ImageGenerateParams < OpenAI::Internal::Type::BaseModel
       extend OpenAI::Internal::Type::RequestParameters::Converter
       include OpenAI::Internal::Type::RequestParameters

@@ -64,6 +66,17 @@ module OpenAI
       #   @return [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil]
       optional :output_format, enum: -> { OpenAI::ImageGenerateParams::OutputFormat }, nil?: true

+      # @!attribute partial_images
+      #   The number of partial images to generate. This parameter is used for streaming
+      #   responses that return partial images. Value must be between 0 and 3. When set to
+      #   0, the response will be a single image sent in one streaming event.
+      #
+      #   Note that the final image may be sent before the full number of partial images
+      #   are generated if the full image is generated more quickly.
+      #
+      #   @return [Integer, nil]
+      optional :partial_images, Integer, nil?: true
+
       # @!attribute quality
       #   The quality of the image that will be generated.
       #

@@ -111,7 +124,7 @@ module OpenAI
       #   @return [String, nil]
       optional :user, String

-      # @!method initialize(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {})
+      # @!method initialize(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {})
       #   Some parameter documentations has been truncated, see
       #   {OpenAI::Models::ImageGenerateParams} for more details.
       #

@@ -129,6 +142,8 @@ module OpenAI
       #
       #   @param output_format [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is only su
       #
+      #   @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for
+      #
       #   @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] The quality of the image that will be generated.
       #
       #   @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned
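The new `partial_images` parameter (0 to 3) controls how many partial frames are streamed before the final image, and the added `@see OpenAI::Resources::Images#generate_stream_raw` reference points at the streaming entry point. A hedged sketch of a streaming call, assuming `generate_stream_raw` accepts the same keyword arguments as `ImageGenerateParams` and returns an enumerable of `ImageGenStreamEvent` variants (the exact signature is not part of this hunk):

```ruby
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Sketch only: parameter names mirror ImageGenerateParams above.
stream = client.images.generate_stream_raw(
  prompt: "A watercolor lighthouse at dusk",
  model: :"gpt-image-1",
  size: :"1024x1024",
  partial_images: 2 # 0..3; 0 sends the single final image in one streaming event
)

stream.each do |event|
  puts "#{event.type} (#{event.class})"
end
```

Each yielded event can be dispatched with a handler like the one sketched earlier, since the union discriminates on `type`.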
data/lib/openai/models/images_response.rb

@@ -140,7 +140,7 @@ module OpenAI
        required :input_tokens_details, -> { OpenAI::ImagesResponse::Usage::InputTokensDetails }

        # @!attribute output_tokens
-        #   The number of
+        #   The number of output tokens generated by the model.
        #
        #   @return [Integer]
        required :output_tokens, Integer

@@ -158,7 +158,7 @@ module OpenAI
        #
        #   @param input_tokens_details [OpenAI::Models::ImagesResponse::Usage::InputTokensDetails] The input tokens detailed information for the image generation.
        #
-        #   @param output_tokens [Integer] The number of
+        #   @param output_tokens [Integer] The number of output tokens generated by the model.
        #
        #   @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.

data/lib/openai/models/responses/response.rb

@@ -186,7 +186,7 @@ module OpenAI
        # - If set to 'auto', then the request will be processed with the service tier
        #   configured in the Project settings. Unless otherwise configured, the Project
        #   will use 'default'.
-        # - If set to 'default', then the
+        # - If set to 'default', then the request will be processed with the standard
        #   pricing and performance for the selected model.
        # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
        #   'priority', then the request will be processed with the corresponding service

@@ -401,7 +401,7 @@ module OpenAI
        # - If set to 'auto', then the request will be processed with the service tier
        #   configured in the Project settings. Unless otherwise configured, the Project
        #   will use 'default'.
-        # - If set to 'default', then the
+        # - If set to 'default', then the request will be processed with the standard
        #   pricing and performance for the selected model.
        # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
        #   'priority', then the request will be processed with the corresponding service
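The rewritten bullet now states explicitly that `'default'` means standard pricing and performance. For reference, a hedged example of selecting a tier when creating a response; the `service_tier` request parameter mirrors the values documented here, and the client is assumed to be set up as in the earlier sketch:

```ruby
response = client.responses.create(
  model: :"gpt-4.1",
  input: "One-sentence summary of flex processing, please.",
  service_tier: :flex # or :auto, :default, :priority
)

# The response echoes the tier that actually processed the request.
puts response.service_tier
```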
data/lib/openai/models/responses/response_code_interpreter_tool_call.rb

@@ -34,7 +34,8 @@ module OpenAI
                 nil?: true

        # @!attribute status
-        #   The status of the code interpreter tool call.
+        #   The status of the code interpreter tool call. Valid values are `in_progress`,
+        #   `completed`, `incomplete`, `interpreting`, and `failed`.
        #
        #   @return [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status]
        required :status, enum: -> { OpenAI::Responses::ResponseCodeInterpreterToolCall::Status }

@@ -59,7 +60,7 @@ module OpenAI
        #
        #   @param outputs [Array<OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image>, nil] The outputs generated by the code interpreter, such as logs or images.
        #
-        #   @param status [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status] The status of the code interpreter tool call.
+        #   @param status [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status] The status of the code interpreter tool call. Valid values are `in_progress`, `c
        #
        #   @param type [Symbol, :code_interpreter_call] The type of the code interpreter tool call. Always `code_interpreter_call`.

@@ -121,7 +122,8 @@ module OpenAI
          #   @return [Array(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image)]
        end

-        # The status of the code interpreter tool call.
+        # The status of the code interpreter tool call. Valid values are `in_progress`,
+        # `completed`, `incomplete`, `interpreting`, and `failed`.
        #
        # @see OpenAI::Models::Responses::ResponseCodeInterpreterToolCall#status
        module Status
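With the status enum now documented as `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`, a consumer can branch on the full set. A small sketch, assuming `tool_call` is a `ResponseCodeInterpreterToolCall` item taken from a response's output:

```ruby
case tool_call.status
when :in_progress, :interpreting
  puts "code interpreter is still running"
when :completed
  # outputs is nullable, per the `nil?: true` in the hunk above.
  (tool_call.outputs || []).each { |output| puts output.inspect }
when :incomplete, :failed
  warn "code interpreter call ended with status #{tool_call.status}"
end
```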
data/lib/openai/models/responses/response_create_params.rb

@@ -138,7 +138,7 @@ module OpenAI
        # - If set to 'auto', then the request will be processed with the service tier
        #   configured in the Project settings. Unless otherwise configured, the Project
        #   will use 'default'.
-        # - If set to 'default', then the
+        # - If set to 'default', then the request will be processed with the standard
        #   pricing and performance for the selected model.
        # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
        #   'priority', then the request will be processed with the corresponding service

@@ -328,7 +328,7 @@ module OpenAI
        # - If set to 'auto', then the request will be processed with the service tier
        #   configured in the Project settings. Unless otherwise configured, the Project
        #   will use 'default'.
-        # - If set to 'default', then the
+        # - If set to 'default', then the request will be processed with the standard
        #   pricing and performance for the selected model.
        # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
        #   'priority', then the request will be processed with the corresponding service
data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb

@@ -5,10 +5,11 @@ module OpenAI
    module Responses
      class ResponseMcpCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute delta
-        #
+        #   A JSON string containing the partial update to the arguments for the MCP tool
+        #   call.
        #
-        #   @return [
-        required :delta,
+        #   @return [String]
+        required :delta, String

        # @!attribute item_id
        #   The unique identifier of the MCP tool call item being processed.

@@ -35,10 +36,14 @@ module OpenAI
        required :type, const: :"response.mcp_call_arguments.delta"

        # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.mcp_call_arguments.delta")
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent} for more
+        #   details.
+        #
        #   Emitted when there is a delta (partial update) to the arguments of an MCP tool
        #   call.
        #
-        #   @param delta [
+        #   @param delta [String] A JSON string containing the partial update to the arguments for the MCP tool ca
        #
        #   @param item_id [String] The unique identifier of the MCP tool call item being processed.
        #
data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb

@@ -5,10 +5,10 @@ module OpenAI
    module Responses
      class ResponseMcpCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute arguments
-        #
+        #   A JSON string containing the finalized arguments for the MCP tool call.
        #
-        #   @return [
-        required :arguments,
+        #   @return [String]
+        required :arguments, String

        # @!attribute item_id
        #   The unique identifier of the MCP tool call item being processed.

@@ -35,9 +35,12 @@ module OpenAI
        required :type, const: :"response.mcp_call_arguments.done"

        # @!method initialize(arguments:, item_id:, output_index:, sequence_number:, type: :"response.mcp_call_arguments.done")
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent} for more details.
+        #
        #   Emitted when the arguments for an MCP tool call are finalized.
        #
-        #   @param arguments [
+        #   @param arguments [String] A JSON string containing the finalized arguments for the MCP tool call.
        #
        #   @param item_id [String] The unique identifier of the MCP tool call item being processed.
        #
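Since `delta` and `arguments` are now typed as JSON strings, the usual pattern is to buffer the fragments per `item_id` and parse once the `.done` event arrives. A sketch, assuming `stream` is a raw Responses event stream (for example from `Responses#stream_raw`, which is not part of this diff):

```ruby
require "json"

buffers = Hash.new { |hash, key| hash[key] = +"" }

stream.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent
    buffers[event.item_id] << event.delta # partial JSON fragment
  when OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent
    arguments = JSON.parse(event.arguments) # finalized JSON string
    puts "MCP call #{event.item_id} arguments: #{arguments.inspect}"
  end
end
```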
data/lib/openai/models/responses/response_mcp_call_completed_event.rb

@@ -4,6 +4,18 @@ module OpenAI
  module Models
    module Responses
      class ResponseMcpCallCompletedEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute item_id
+        #   The ID of the MCP tool call item that completed.
+        #
+        #   @return [String]
+        required :item_id, String
+
+        # @!attribute output_index
+        #   The index of the output item that completed.
+        #
+        #   @return [Integer]
+        required :output_index, Integer
+
        # @!attribute sequence_number
        #   The sequence number of this event.
        #

@@ -16,9 +28,13 @@ module OpenAI
        #   @return [Symbol, :"response.mcp_call.completed"]
        required :type, const: :"response.mcp_call.completed"

-        # @!method initialize(sequence_number:, type: :"response.mcp_call.completed")
+        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_call.completed")
        #   Emitted when an MCP tool call has completed successfully.
        #
+        #   @param item_id [String] The ID of the MCP tool call item that completed.
+        #
+        #   @param output_index [Integer] The index of the output item that completed.
+        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.mcp_call.completed"] The type of the event. Always 'response.mcp_call.completed'.
data/lib/openai/models/responses/response_mcp_call_failed_event.rb

@@ -4,6 +4,18 @@ module OpenAI
  module Models
    module Responses
      class ResponseMcpCallFailedEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute item_id
+        #   The ID of the MCP tool call item that failed.
+        #
+        #   @return [String]
+        required :item_id, String
+
+        # @!attribute output_index
+        #   The index of the output item that failed.
+        #
+        #   @return [Integer]
+        required :output_index, Integer
+
        # @!attribute sequence_number
        #   The sequence number of this event.
        #

@@ -16,9 +28,13 @@ module OpenAI
        #   @return [Symbol, :"response.mcp_call.failed"]
        required :type, const: :"response.mcp_call.failed"

-        # @!method initialize(sequence_number:, type: :"response.mcp_call.failed")
+        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_call.failed")
        #   Emitted when an MCP tool call has failed.
        #
+        #   @param item_id [String] The ID of the MCP tool call item that failed.
+        #
+        #   @param output_index [Integer] The index of the output item that failed.
+        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.mcp_call.failed"] The type of the event. Always 'response.mcp_call.failed'.
data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb

@@ -4,6 +4,18 @@ module OpenAI
  module Models
    module Responses
      class ResponseMcpListToolsCompletedEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute item_id
+        #   The ID of the MCP tool call item that produced this output.
+        #
+        #   @return [String]
+        required :item_id, String
+
+        # @!attribute output_index
+        #   The index of the output item that was processed.
+        #
+        #   @return [Integer]
+        required :output_index, Integer
+
        # @!attribute sequence_number
        #   The sequence number of this event.
        #

@@ -16,9 +28,13 @@ module OpenAI
        #   @return [Symbol, :"response.mcp_list_tools.completed"]
        required :type, const: :"response.mcp_list_tools.completed"

-        # @!method initialize(sequence_number:, type: :"response.mcp_list_tools.completed")
+        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_list_tools.completed")
        #   Emitted when the list of available MCP tools has been successfully retrieved.
        #
+        #   @param item_id [String] The ID of the MCP tool call item that produced this output.
+        #
+        #   @param output_index [Integer] The index of the output item that was processed.
+        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.mcp_list_tools.completed"] The type of the event. Always 'response.mcp_list_tools.completed'.
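Because the MCP lifecycle events now carry `item_id` and `output_index` in addition to `sequence_number`, they can be attributed to a specific output item rather than only ordered. A sketch continuing the same assumed event stream as above:

```ruby
stream.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseMcpCallCompletedEvent
    puts "[#{event.sequence_number}] MCP call #{event.item_id} (output #{event.output_index}) completed"
  when OpenAI::Models::Responses::ResponseMcpCallFailedEvent
    warn "[#{event.sequence_number}] MCP call #{event.item_id} (output #{event.output_index}) failed"
  when OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent
    puts "[#{event.sequence_number}] MCP tool listing finished for item #{event.item_id}"
  end
end
```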