openai 0.13.1 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +26 -0
  3. data/README.md +3 -3
  4. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  5. data/lib/openai/models/chat/chat_completion.rb +2 -2
  6. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  8. data/lib/openai/models/chat/completion_create_params.rb +2 -2
  9. data/lib/openai/models/function_definition.rb +1 -1
  10. data/lib/openai/models/image_edit_completed_event.rb +198 -0
  11. data/lib/openai/models/image_edit_params.rb +39 -1
  12. data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
  13. data/lib/openai/models/image_edit_stream_event.rb +21 -0
  14. data/lib/openai/models/image_gen_completed_event.rb +198 -0
  15. data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
  16. data/lib/openai/models/image_gen_stream_event.rb +21 -0
  17. data/lib/openai/models/image_generate_params.rb +16 -1
  18. data/lib/openai/models/images_response.rb +2 -2
  19. data/lib/openai/models/responses/response.rb +2 -2
  20. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  21. data/lib/openai/models/responses/response_create_params.rb +2 -2
  22. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  23. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  24. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  25. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  26. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  27. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  28. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  29. data/lib/openai/models/responses/response_output_refusal.rb +2 -2
  30. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  31. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  32. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  33. data/lib/openai/models/responses/tool.rb +30 -1
  34. data/lib/openai/models.rb +12 -0
  35. data/lib/openai/resources/images.rb +140 -2
  36. data/lib/openai/resources/responses.rb +2 -2
  37. data/lib/openai/version.rb +1 -1
  38. data/lib/openai.rb +6 -2
  39. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  40. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  41. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  42. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  43. data/rbi/openai/models/chat/completion_create_params.rbi +3 -3
  44. data/rbi/openai/models/function_definition.rbi +2 -2
  45. data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
  46. data/rbi/openai/models/image_edit_params.rbi +57 -0
  47. data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
  48. data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
  49. data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
  50. data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
  51. data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
  52. data/rbi/openai/models/image_generate_params.rbi +18 -0
  53. data/rbi/openai/models/images_response.rbi +2 -2
  54. data/rbi/openai/models/responses/response.rbi +3 -3
  55. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  56. data/rbi/openai/models/responses/response_create_params.rbi +3 -3
  57. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  58. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  59. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  60. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  61. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  62. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  63. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  64. data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
  65. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  66. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  67. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  68. data/rbi/openai/models/responses/tool.rbi +61 -0
  69. data/rbi/openai/models.rbi +12 -0
  70. data/rbi/openai/resources/chat/completions.rbi +2 -2
  71. data/rbi/openai/resources/images.rbi +237 -0
  72. data/rbi/openai/resources/responses.rbi +2 -2
  73. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  74. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  75. data/sig/openai/models/image_edit_completed_event.rbs +150 -0
  76. data/sig/openai/models/image_edit_params.rbs +21 -0
  77. data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
  78. data/sig/openai/models/image_edit_stream_event.rbs +12 -0
  79. data/sig/openai/models/image_gen_completed_event.rbs +150 -0
  80. data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
  81. data/sig/openai/models/image_gen_stream_event.rbs +12 -0
  82. data/sig/openai/models/image_generate_params.rbs +5 -0
  83. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  84. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  85. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  86. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  87. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  88. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  89. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  90. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  91. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  92. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  93. data/sig/openai/models/responses/tool.rbs +16 -0
  94. data/sig/openai/models.rbs +12 -0
  95. data/sig/openai/resources/images.rbs +38 -0
  96. metadata +20 -8
  97. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  98. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  99. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  100. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  101. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  102. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
@@ -0,0 +1,243 @@
+ # typed: strong
+
+ module OpenAI
+ module Models
+ class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(OpenAI::ImageGenPartialImageEvent, OpenAI::Internal::AnyHash)
+ end
+
+ # Base64-encoded partial image data, suitable for rendering as an image.
+ sig { returns(String) }
+ attr_accessor :b64_json
+
+ # The background setting for the requested image.
+ sig do
+ returns(OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol)
+ end
+ attr_accessor :background
+
+ # The Unix timestamp when the event was created.
+ sig { returns(Integer) }
+ attr_accessor :created_at
+
+ # The output format for the requested image.
+ sig do
+ returns(OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol)
+ end
+ attr_accessor :output_format
+
+ # 0-based index for the partial image (streaming).
+ sig { returns(Integer) }
+ attr_accessor :partial_image_index
+
+ # The quality setting for the requested image.
+ sig { returns(OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) }
+ attr_accessor :quality
+
+ # The size of the requested image.
+ sig { returns(OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol) }
+ attr_accessor :size
+
+ # The type of the event. Always `image_generation.partial_image`.
+ sig { returns(Symbol) }
+ attr_accessor :type
+
+ # Emitted when a partial image is available during image generation streaming.
+ sig do
+ params(
+ b64_json: String,
+ background: OpenAI::ImageGenPartialImageEvent::Background::OrSymbol,
+ created_at: Integer,
+ output_format:
+ OpenAI::ImageGenPartialImageEvent::OutputFormat::OrSymbol,
+ partial_image_index: Integer,
+ quality: OpenAI::ImageGenPartialImageEvent::Quality::OrSymbol,
+ size: OpenAI::ImageGenPartialImageEvent::Size::OrSymbol,
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # Base64-encoded partial image data, suitable for rendering as an image.
+ b64_json:,
+ # The background setting for the requested image.
+ background:,
+ # The Unix timestamp when the event was created.
+ created_at:,
+ # The output format for the requested image.
+ output_format:,
+ # 0-based index for the partial image (streaming).
+ partial_image_index:,
+ # The quality setting for the requested image.
+ quality:,
+ # The size of the requested image.
+ size:,
+ # The type of the event. Always `image_generation.partial_image`.
+ type: :"image_generation.partial_image"
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ b64_json: String,
+ background:
+ OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol,
+ created_at: Integer,
+ output_format:
+ OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol,
+ partial_image_index: Integer,
+ quality: OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol,
+ size: OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol,
+ type: Symbol
+ }
+ )
+ end
+ def to_hash
+ end
+
+ # The background setting for the requested image.
+ module Background
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Background)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ TRANSPARENT =
+ T.let(
+ :transparent,
+ OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol
+ )
+ OPAQUE =
+ T.let(
+ :opaque,
+ OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol
+ )
+ AUTO =
+ T.let(
+ :auto,
+ OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol
+ ]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The output format for the requested image.
+ module OutputFormat
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::ImageGenPartialImageEvent::OutputFormat)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ PNG =
+ T.let(
+ :png,
+ OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol
+ )
+ WEBP =
+ T.let(
+ :webp,
+ OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol
+ )
+ JPEG =
+ T.let(
+ :jpeg,
+ OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol
+ ]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The quality setting for the requested image.
+ module Quality
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Quality)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ LOW =
+ T.let(:low, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol)
+ MEDIUM =
+ T.let(
+ :medium,
+ OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol
+ )
+ HIGH =
+ T.let(:high, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol)
+ AUTO =
+ T.let(:auto, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol)
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The size of the requested image.
+ module Size
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Size)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ SIZE_1024X1024 =
+ T.let(
+ :"1024x1024",
+ OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol
+ )
+ SIZE_1024X1536 =
+ T.let(
+ :"1024x1536",
+ OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol
+ )
+ SIZE_1536X1024 =
+ T.let(
+ :"1536x1024",
+ OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol
+ )
+ AUTO =
+ T.let(:auto, OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol)
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol]
+ )
+ end
+ def self.values
+ end
+ end
+ end
+ end
+ end
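A minimal sketch of handling one of these events, assuming an OpenAI::ImageGenPartialImageEvent instance obtained from a streaming image generation call (the streaming entry point itself is not part of this hunk; only the attributes declared in the RBI above are used):

require "base64"

# `event` is assumed to be an OpenAI::ImageGenPartialImageEvent.
def save_partial_image(event)
  # output_format is a symbol (:png, :webp, or :jpeg), so it doubles as a file extension.
  filename = "partial_#{event.partial_image_index}.#{event.output_format}"
  # b64_json carries base64-encoded partial image bytes.
  File.binwrite(filename, Base64.decode64(event.b64_json))
  filename
end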
@@ -0,0 +1,22 @@
+ # typed: strong
+
+ module OpenAI
+ module Models
+ # Emitted when a partial image is available during image generation streaming.
+ module ImageGenStreamEvent
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::ImageGenPartialImageEvent,
+ OpenAI::ImageGenCompletedEvent
+ )
+ end
+
+ sig { override.returns(T::Array[OpenAI::ImageGenStreamEvent::Variants]) }
+ def self.variants
+ end
+ end
+ end
+ end
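Because ImageGenStreamEvent is a union of the two event classes above, consuming a stream reduces to a type check per event. A hedged sketch, where the enumerable of events is an assumption and the completed event's fields live in image_gen_completed_event.rbi (not shown here):

# `events` is assumed to be an Enumerable of ImageGenStreamEvent variants
# yielded by one of the streaming image methods added in this release.
def handle_image_stream(events)
  events.each do |event|
    case event
    when OpenAI::ImageGenPartialImageEvent
      puts "partial image #{event.partial_image_index} (#{event.output_format})"
    when OpenAI::ImageGenCompletedEvent
      puts "image generation completed"
    end
  end
end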
@@ -60,6 +60,15 @@ module OpenAI
  end
  attr_accessor :output_format
 
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
+ sig { returns(T.nilable(Integer)) }
+ attr_accessor :partial_images
+
  # The quality of the image that will be generated.
  #
  # - `auto` (default value) will automatically select the best quality for the
@@ -116,6 +125,7 @@ module OpenAI
  output_compression: T.nilable(Integer),
  output_format:
  T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
  quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol),
  response_format:
  T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol),
@@ -155,6 +165,13 @@ module OpenAI
  # The format in which the generated images are returned. This parameter is only
  # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
  output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
+ partial_images: nil,
  # The quality of the image that will be generated.
  #
  # - `auto` (default value) will automatically select the best quality for the
@@ -199,6 +216,7 @@ module OpenAI
  output_compression: T.nilable(Integer),
  output_format:
  T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
  quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol),
  response_format:
  T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol),
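For reference, a hedged sketch of passing the new parameter. The client setup and prompt are illustrative; `partial_images` only matters for the streaming entry point added in resources/images.rb, whose method name is not shown in this excerpt:

# Assumes OPENAI_API_KEY is available in the environment.
client = OpenAI::Client.new

client.images.generate(
  model: "gpt-image-1",
  prompt: "a watercolor lighthouse at dusk",
  output_format: :png,
  partial_images: 2 # between 0 and 3; 0 means a single image in one streaming event
)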
@@ -224,7 +224,7 @@ module OpenAI
  end
  attr_writer :input_tokens_details
 
- # The number of image tokens in the output image.
+ # The number of output tokens generated by the model.
  sig { returns(Integer) }
  attr_accessor :output_tokens
 
@@ -247,7 +247,7 @@ module OpenAI
  input_tokens:,
  # The input tokens detailed information for the image generation.
  input_tokens_details:,
- # The number of image tokens in the output image.
+ # The number of output tokens generated by the model.
  output_tokens:,
  # The total number of tokens (images and text) used for the image generation.
  total_tokens:
@@ -180,7 +180,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -427,7 +427,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -666,7 +666,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -40,7 +40,8 @@ module OpenAI
  end
  attr_accessor :outputs
 
- # The status of the code interpreter tool call.
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
  sig do
  returns(
  OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::OrSymbol
@@ -82,7 +83,8 @@ module OpenAI
  # The outputs generated by the code interpreter, such as logs or images. Can be
  # null if no outputs are available.
  outputs:,
- # The status of the code interpreter tool call.
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
  status:,
  # The type of the code interpreter tool call. Always `code_interpreter_call`.
  type: :code_interpreter_call
@@ -200,7 +202,8 @@ module OpenAI
  end
  end
 
- # The status of the code interpreter tool call.
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
  module Status
  extend OpenAI::Internal::Type::Enum
 
@@ -163,7 +163,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -472,7 +472,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -640,7 +640,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -12,8 +12,9 @@ module OpenAI
  )
  end
 
- # The partial update to the arguments for the MCP tool call.
- sig { returns(T.anything) }
+ # A JSON string containing the partial update to the arguments for the MCP tool
+ # call.
+ sig { returns(String) }
  attr_accessor :delta
 
  # The unique identifier of the MCP tool call item being processed.
@@ -36,7 +37,7 @@ module OpenAI
  # call.
  sig do
  params(
- delta: T.anything,
+ delta: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -44,7 +45,8 @@ module OpenAI
  ).returns(T.attached_class)
  end
  def self.new(
- # The partial update to the arguments for the MCP tool call.
+ # A JSON string containing the partial update to the arguments for the MCP tool
+ # call.
  delta:,
  # The unique identifier of the MCP tool call item being processed.
  item_id:,
@@ -60,7 +62,7 @@ module OpenAI
  sig do
  override.returns(
  {
- delta: T.anything,
+ delta: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -12,8 +12,8 @@ module OpenAI
  )
  end
 
- # The finalized arguments for the MCP tool call.
- sig { returns(T.anything) }
+ # A JSON string containing the finalized arguments for the MCP tool call.
+ sig { returns(String) }
  attr_accessor :arguments
 
  # The unique identifier of the MCP tool call item being processed.
@@ -35,7 +35,7 @@ module OpenAI
  # Emitted when the arguments for an MCP tool call are finalized.
  sig do
  params(
- arguments: T.anything,
+ arguments: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -43,7 +43,7 @@ module OpenAI
  ).returns(T.attached_class)
  end
  def self.new(
- # The finalized arguments for the MCP tool call.
+ # A JSON string containing the finalized arguments for the MCP tool call.
  arguments:,
  # The unique identifier of the MCP tool call item being processed.
  item_id:,
@@ -59,7 +59,7 @@ module OpenAI
  sig do
  override.returns(
  {
- arguments: T.anything,
+ arguments: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
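Since `delta` and `arguments` are now plain JSON strings rather than `T.anything`, callers can accumulate deltas per item and parse the finalized string themselves. A minimal sketch using only the fields shown in these hunks:

require "json"

# Per-item buffers for incoming argument fragments.
buffers = Hash.new { |hash, key| hash[key] = +"" }

# On each ResponseMcpCallArgumentsDeltaEvent, append the JSON fragment.
on_delta = ->(event) { buffers[event.item_id] << event.delta }

# On a ResponseMcpCallArgumentsDoneEvent, the full argument string can be parsed.
on_done = ->(event) { JSON.parse(event.arguments) }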
@@ -12,6 +12,14 @@ module OpenAI
  )
  end
 
+ # The ID of the MCP tool call item that completed.
+ sig { returns(String) }
+ attr_accessor :item_id
+
+ # The index of the output item that completed.
+ sig { returns(Integer) }
+ attr_accessor :output_index
+
  # The sequence number of this event.
  sig { returns(Integer) }
  attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI
 
  # Emitted when an MCP tool call has completed successfully.
  sig do
- params(sequence_number: Integer, type: Symbol).returns(
- T.attached_class
- )
+ params(
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: Symbol
+ ).returns(T.attached_class)
  end
  def self.new(
+ # The ID of the MCP tool call item that completed.
+ item_id:,
+ # The index of the output item that completed.
+ output_index:,
  # The sequence number of this event.
  sequence_number:,
  # The type of the event. Always 'response.mcp_call.completed'.
@@ -34,7 +49,16 @@ module OpenAI
  )
  end
 
- sig { override.returns({ sequence_number: Integer, type: Symbol }) }
+ sig do
+ override.returns(
+ {
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: Symbol
+ }
+ )
+ end
  def to_hash
  end
  end
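Note that `item_id` and `output_index` are declared with `attr_accessor` and appear as required keywords in `self.new`, so code constructing these events directly must now supply both. A hedged sketch with illustrative values only:

event = OpenAI::Responses::ResponseMcpCallCompletedEvent.new(
  item_id: "mcp_123", # illustrative item ID
  output_index: 0,
  sequence_number: 7
)
event.to_hash # now includes :item_id and :output_index alongside :sequence_number and :type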
@@ -12,6 +12,14 @@ module OpenAI
  )
  end
 
+ # The ID of the MCP tool call item that failed.
+ sig { returns(String) }
+ attr_accessor :item_id
+
+ # The index of the output item that failed.
+ sig { returns(Integer) }
+ attr_accessor :output_index
+
  # The sequence number of this event.
  sig { returns(Integer) }
  attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI
 
  # Emitted when an MCP tool call has failed.
  sig do
- params(sequence_number: Integer, type: Symbol).returns(
- T.attached_class
- )
+ params(
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: Symbol
+ ).returns(T.attached_class)
  end
  def self.new(
+ # The ID of the MCP tool call item that failed.
+ item_id:,
+ # The index of the output item that failed.
+ output_index:,
  # The sequence number of this event.
  sequence_number:,
  # The type of the event. Always 'response.mcp_call.failed'.
@@ -34,7 +49,16 @@ module OpenAI
  )
  end
 
- sig { override.returns({ sequence_number: Integer, type: Symbol }) }
+ sig do
+ override.returns(
+ {
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: Symbol
+ }
+ )
+ end
  def to_hash
  end
  end
@@ -12,6 +12,14 @@ module OpenAI
  )
  end
 
+ # The ID of the MCP tool call item that produced this output.
+ sig { returns(String) }
+ attr_accessor :item_id
+
+ # The index of the output item that was processed.
+ sig { returns(Integer) }
+ attr_accessor :output_index
+
  # The sequence number of this event.
  sig { returns(Integer) }
  attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI
 
  # Emitted when the list of available MCP tools has been successfully retrieved.
  sig do
- params(sequence_number: Integer, type: Symbol).returns(
- T.attached_class
- )
+ params(
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: Symbol
+ ).returns(T.attached_class)
  end
  def self.new(
+ # The ID of the MCP tool call item that produced this output.
+ item_id:,
+ # The index of the output item that was processed.
+ output_index:,
  # The sequence number of this event.
  sequence_number:,
  # The type of the event. Always 'response.mcp_list_tools.completed'.
@@ -34,7 +49,16 @@ module OpenAI
  )
  end
 
- sig { override.returns({ sequence_number: Integer, type: Symbol }) }
+ sig do
+ override.returns(
+ {
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: Symbol
+ }
+ )
+ end
  def to_hash
  end
  end