openai 0.14.0 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +41 -0
  3. data/README.md +3 -3
  4. data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
  5. data/lib/openai/helpers/structured_output/union_of.rb +11 -1
  6. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion.rb +2 -2
  8. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  9. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  10. data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
  11. data/lib/openai/models/chat/completion_create_params.rb +33 -7
  12. data/lib/openai/models/function_definition.rb +1 -1
  13. data/lib/openai/models/image_edit_params.rb +4 -1
  14. data/lib/openai/models/image_generate_params.rb +4 -1
  15. data/lib/openai/models/images_response.rb +2 -5
  16. data/lib/openai/models/responses/response.rb +52 -6
  17. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  18. data/lib/openai/models/responses/response_create_params.rb +33 -7
  19. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  20. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  21. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  22. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  23. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  24. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  25. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  26. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  27. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  28. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  29. data/lib/openai/resources/chat/completions.rb +12 -4
  30. data/lib/openai/resources/images.rb +6 -6
  31. data/lib/openai/resources/responses.rb +42 -17
  32. data/lib/openai/version.rb +1 -1
  33. data/lib/openai.rb +0 -2
  34. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  35. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  36. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  37. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  38. data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
  39. data/rbi/openai/models/chat/completion_create_params.rbi +47 -9
  40. data/rbi/openai/models/function_definition.rbi +2 -2
  41. data/rbi/openai/models/image_edit_params.rbi +6 -0
  42. data/rbi/openai/models/image_generate_params.rbi +6 -0
  43. data/rbi/openai/models/images_response.rbi +2 -2
  44. data/rbi/openai/models/responses/response.rbi +47 -9
  45. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  46. data/rbi/openai/models/responses/response_create_params.rbi +47 -9
  47. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  48. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  49. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  50. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  51. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  52. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  53. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  54. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  55. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  56. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  57. data/rbi/openai/resources/chat/completions.rbi +36 -8
  58. data/rbi/openai/resources/images.rbi +22 -10
  59. data/rbi/openai/resources/responses.rbi +36 -8
  60. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  61. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  62. data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
  63. data/sig/openai/models/chat/completion_create_params.rbs +14 -0
  64. data/sig/openai/models/responses/response.rbs +14 -0
  65. data/sig/openai/models/responses/response_create_params.rbs +14 -0
  66. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  67. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  68. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  69. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  70. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  71. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  72. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  73. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  74. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  75. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  76. data/sig/openai/resources/chat/completions.rbs +4 -0
  77. data/sig/openai/resources/responses.rbs +4 -0
  78. metadata +2 -8
  79. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  80. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  81. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  82. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  83. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  84. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
@@ -165,6 +165,15 @@ module OpenAI
165
165
  end
166
166
  attr_writer :prompt
167
167
 
168
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
169
+ # hit rates. Replaces the `user` field.
170
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
171
+ sig { returns(T.nilable(String)) }
172
+ attr_reader :prompt_cache_key
173
+
174
+ sig { params(prompt_cache_key: String).void }
175
+ attr_writer :prompt_cache_key
176
+
168
177
  # **o-series models only**
169
178
  #
170
179
  # Configuration options for
@@ -175,12 +184,23 @@ module OpenAI
175
184
  sig { params(reasoning: T.nilable(OpenAI::Reasoning::OrHash)).void }
176
185
  attr_writer :reasoning
177
186
 
187
+ # A stable identifier used to help detect users of your application that may be
188
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
189
+ # identifies each user. We recommend hashing their username or email address, in
190
+ # order to avoid sending us any identifying information.
191
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
192
+ sig { returns(T.nilable(String)) }
193
+ attr_reader :safety_identifier
194
+
195
+ sig { params(safety_identifier: String).void }
196
+ attr_writer :safety_identifier
197
+
178
198
  # Specifies the processing type used for serving the request.
179
199
  #
180
200
  # - If set to 'auto', then the request will be processed with the service tier
181
201
  # configured in the Project settings. Unless otherwise configured, the Project
182
202
  # will use 'default'.
183
- # - If set to 'default', then the requset will be processed with the standard
203
+ # - If set to 'default', then the request will be processed with the standard
184
204
  # pricing and performance for the selected model.
185
205
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
186
206
  # 'priority', then the request will be processed with the corresponding service
@@ -247,9 +267,11 @@ module OpenAI
247
267
  sig { params(usage: OpenAI::Responses::ResponseUsage::OrHash).void }
248
268
  attr_writer :usage
249
269
 
250
- # A stable identifier for your end-users. Used to boost cache hit rates by better
251
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
252
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
270
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
271
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
272
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
273
+ # similar requests and to help OpenAI detect and prevent abuse.
274
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
253
275
  sig { returns(T.nilable(String)) }
254
276
  attr_reader :user
255
277
 
@@ -317,7 +339,9 @@ module OpenAI
317
339
  max_tool_calls: T.nilable(Integer),
318
340
  previous_response_id: T.nilable(String),
319
341
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
342
+ prompt_cache_key: String,
320
343
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
344
+ safety_identifier: String,
321
345
  service_tier:
322
346
  T.nilable(OpenAI::Responses::Response::ServiceTier::OrSymbol),
323
347
  status: OpenAI::Responses::ResponseStatus::OrSymbol,
@@ -417,17 +441,27 @@ module OpenAI
417
441
  # Reference to a prompt template and its variables.
418
442
  # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
419
443
  prompt: nil,
444
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
445
+ # hit rates. Replaces the `user` field.
446
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
447
+ prompt_cache_key: nil,
420
448
  # **o-series models only**
421
449
  #
422
450
  # Configuration options for
423
451
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
424
452
  reasoning: nil,
453
+ # A stable identifier used to help detect users of your application that may be
454
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
455
+ # identifies each user. We recommend hashing their username or email address, in
456
+ # order to avoid sending us any identifying information.
457
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
458
+ safety_identifier: nil,
425
459
  # Specifies the processing type used for serving the request.
426
460
  #
427
461
  # - If set to 'auto', then the request will be processed with the service tier
428
462
  # configured in the Project settings. Unless otherwise configured, the Project
429
463
  # will use 'default'.
430
- # - If set to 'default', then the requset will be processed with the standard
464
+ # - If set to 'default', then the request will be processed with the standard
431
465
  # pricing and performance for the selected model.
432
466
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
433
467
  # 'priority', then the request will be processed with the corresponding service
@@ -463,9 +497,11 @@ module OpenAI
463
497
  # Represents token usage details including input tokens, output tokens, a
464
498
  # breakdown of output tokens, and the total tokens used.
465
499
  usage: nil,
466
- # A stable identifier for your end-users. Used to boost cache hit rates by better
467
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
468
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
500
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
501
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
502
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
503
+ # similar requests and to help OpenAI detect and prevent abuse.
504
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
469
505
  user: nil,
470
506
  # The object type of this resource - always set to `response`.
471
507
  object: :response
@@ -496,7 +532,9 @@ module OpenAI
496
532
  max_tool_calls: T.nilable(Integer),
497
533
  previous_response_id: T.nilable(String),
498
534
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
535
+ prompt_cache_key: String,
499
536
  reasoning: T.nilable(OpenAI::Reasoning),
537
+ safety_identifier: String,
500
538
  service_tier:
501
539
  T.nilable(
502
540
  OpenAI::Responses::Response::ServiceTier::TaggedSymbol
@@ -666,7 +704,7 @@ module OpenAI
666
704
  # - If set to 'auto', then the request will be processed with the service tier
667
705
  # configured in the Project settings. Unless otherwise configured, the Project
668
706
  # will use 'default'.
669
- # - If set to 'default', then the requset will be processed with the standard
707
+ # - If set to 'default', then the request will be processed with the standard
670
708
  # pricing and performance for the selected model.
671
709
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
672
710
  # 'priority', then the request will be processed with the corresponding service
@@ -40,7 +40,8 @@ module OpenAI
40
40
  end
41
41
  attr_accessor :outputs
42
42
 
43
- # The status of the code interpreter tool call.
43
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
44
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
44
45
  sig do
45
46
  returns(
46
47
  OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::OrSymbol
@@ -82,7 +83,8 @@ module OpenAI
82
83
  # The outputs generated by the code interpreter, such as logs or images. Can be
83
84
  # null if no outputs are available.
84
85
  outputs:,
85
- # The status of the code interpreter tool call.
86
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
87
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
86
88
  status:,
87
89
  # The type of the code interpreter tool call. Always `code_interpreter_call`.
88
90
  type: :code_interpreter_call
@@ -200,7 +202,8 @@ module OpenAI
200
202
  end
201
203
  end
202
204
 
203
- # The status of the code interpreter tool call.
205
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
206
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
204
207
  module Status
205
208
  extend OpenAI::Internal::Type::Enum
206
209
 
@@ -148,6 +148,15 @@ module OpenAI
148
148
  end
149
149
  attr_writer :prompt
150
150
 
151
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
152
+ # hit rates. Replaces the `user` field.
153
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
154
+ sig { returns(T.nilable(String)) }
155
+ attr_reader :prompt_cache_key
156
+
157
+ sig { params(prompt_cache_key: String).void }
158
+ attr_writer :prompt_cache_key
159
+
151
160
  # **o-series models only**
152
161
  #
153
162
  # Configuration options for
@@ -158,12 +167,23 @@ module OpenAI
158
167
  sig { params(reasoning: T.nilable(OpenAI::Reasoning::OrHash)).void }
159
168
  attr_writer :reasoning
160
169
 
170
+ # A stable identifier used to help detect users of your application that may be
171
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
172
+ # identifies each user. We recommend hashing their username or email address, in
173
+ # order to avoid sending us any identifying information.
174
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
175
+ sig { returns(T.nilable(String)) }
176
+ attr_reader :safety_identifier
177
+
178
+ sig { params(safety_identifier: String).void }
179
+ attr_writer :safety_identifier
180
+
161
181
  # Specifies the processing type used for serving the request.
162
182
  #
163
183
  # - If set to 'auto', then the request will be processed with the service tier
164
184
  # configured in the Project settings. Unless otherwise configured, the Project
165
185
  # will use 'default'.
166
- # - If set to 'default', then the requset will be processed with the standard
186
+ # - If set to 'default', then the request will be processed with the standard
167
187
  # pricing and performance for the selected model.
168
188
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
169
189
  # 'priority', then the request will be processed with the corresponding service
@@ -326,9 +346,11 @@ module OpenAI
326
346
  end
327
347
  attr_accessor :truncation
328
348
 
329
- # A stable identifier for your end-users. Used to boost cache hit rates by better
330
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
331
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
349
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
350
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
351
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
352
+ # similar requests and to help OpenAI detect and prevent abuse.
353
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
332
354
  sig { returns(T.nilable(String)) }
333
355
  attr_reader :user
334
356
 
@@ -356,7 +378,9 @@ module OpenAI
356
378
  parallel_tool_calls: T.nilable(T::Boolean),
357
379
  previous_response_id: T.nilable(String),
358
380
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
381
+ prompt_cache_key: String,
359
382
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
383
+ safety_identifier: String,
360
384
  service_tier:
361
385
  T.nilable(
362
386
  OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
@@ -462,17 +486,27 @@ module OpenAI
462
486
  # Reference to a prompt template and its variables.
463
487
  # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
464
488
  prompt: nil,
489
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
490
+ # hit rates. Replaces the `user` field.
491
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
492
+ prompt_cache_key: nil,
465
493
  # **o-series models only**
466
494
  #
467
495
  # Configuration options for
468
496
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
469
497
  reasoning: nil,
498
+ # A stable identifier used to help detect users of your application that may be
499
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
500
+ # identifies each user. We recommend hashing their username or email address, in
501
+ # order to avoid sending us any identifying information.
502
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
503
+ safety_identifier: nil,
470
504
  # Specifies the processing type used for serving the request.
471
505
  #
472
506
  # - If set to 'auto', then the request will be processed with the service tier
473
507
  # configured in the Project settings. Unless otherwise configured, the Project
474
508
  # will use 'default'.
475
- # - If set to 'default', then the requset will be processed with the standard
509
+ # - If set to 'default', then the request will be processed with the standard
476
510
  # pricing and performance for the selected model.
477
511
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
478
512
  # 'priority', then the request will be processed with the corresponding service
@@ -534,9 +568,11 @@ module OpenAI
534
568
  # - `disabled` (default): If a model response will exceed the context window size
535
569
  # for a model, the request will fail with a 400 error.
536
570
  truncation: nil,
537
- # A stable identifier for your end-users. Used to boost cache hit rates by better
538
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
539
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
571
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
572
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
573
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
574
+ # similar requests and to help OpenAI detect and prevent abuse.
575
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
540
576
  user: nil,
541
577
  request_options: {}
542
578
  )
@@ -564,7 +600,9 @@ module OpenAI
564
600
  parallel_tool_calls: T.nilable(T::Boolean),
565
601
  previous_response_id: T.nilable(String),
566
602
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
603
+ prompt_cache_key: String,
567
604
  reasoning: T.nilable(OpenAI::Reasoning),
605
+ safety_identifier: String,
568
606
  service_tier:
569
607
  T.nilable(
570
608
  OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
@@ -640,7 +678,7 @@ module OpenAI
640
678
  # - If set to 'auto', then the request will be processed with the service tier
641
679
  # configured in the Project settings. Unless otherwise configured, the Project
642
680
  # will use 'default'.
643
- # - If set to 'default', then the requset will be processed with the standard
681
+ # - If set to 'default', then the request will be processed with the standard
644
682
  # pricing and performance for the selected model.
645
683
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
646
684
  # 'priority', then the request will be processed with the corresponding service
@@ -12,8 +12,9 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
- # The partial update to the arguments for the MCP tool call.
16
- sig { returns(T.anything) }
15
+ # A JSON string containing the partial update to the arguments for the MCP tool
16
+ # call.
17
+ sig { returns(String) }
17
18
  attr_accessor :delta
18
19
 
19
20
  # The unique identifier of the MCP tool call item being processed.
@@ -36,7 +37,7 @@ module OpenAI
36
37
  # call.
37
38
  sig do
38
39
  params(
39
- delta: T.anything,
40
+ delta: String,
40
41
  item_id: String,
41
42
  output_index: Integer,
42
43
  sequence_number: Integer,
@@ -44,7 +45,8 @@ module OpenAI
44
45
  ).returns(T.attached_class)
45
46
  end
46
47
  def self.new(
47
- # The partial update to the arguments for the MCP tool call.
48
+ # A JSON string containing the partial update to the arguments for the MCP tool
49
+ # call.
48
50
  delta:,
49
51
  # The unique identifier of the MCP tool call item being processed.
50
52
  item_id:,
@@ -60,7 +62,7 @@ module OpenAI
60
62
  sig do
61
63
  override.returns(
62
64
  {
63
- delta: T.anything,
65
+ delta: String,
64
66
  item_id: String,
65
67
  output_index: Integer,
66
68
  sequence_number: Integer,
@@ -12,8 +12,8 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
- # The finalized arguments for the MCP tool call.
16
- sig { returns(T.anything) }
15
+ # A JSON string containing the finalized arguments for the MCP tool call.
16
+ sig { returns(String) }
17
17
  attr_accessor :arguments
18
18
 
19
19
  # The unique identifier of the MCP tool call item being processed.
@@ -35,7 +35,7 @@ module OpenAI
35
35
  # Emitted when the arguments for an MCP tool call are finalized.
36
36
  sig do
37
37
  params(
38
- arguments: T.anything,
38
+ arguments: String,
39
39
  item_id: String,
40
40
  output_index: Integer,
41
41
  sequence_number: Integer,
@@ -43,7 +43,7 @@ module OpenAI
43
43
  ).returns(T.attached_class)
44
44
  end
45
45
  def self.new(
46
- # The finalized arguments for the MCP tool call.
46
+ # A JSON string containing the finalized arguments for the MCP tool call.
47
47
  arguments:,
48
48
  # The unique identifier of the MCP tool call item being processed.
49
49
  item_id:,
@@ -59,7 +59,7 @@ module OpenAI
59
59
  sig do
60
60
  override.returns(
61
61
  {
62
- arguments: T.anything,
62
+ arguments: String,
63
63
  item_id: String,
64
64
  output_index: Integer,
65
65
  sequence_number: Integer,
@@ -12,6 +12,14 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
+ # The ID of the MCP tool call item that completed.
16
+ sig { returns(String) }
17
+ attr_accessor :item_id
18
+
19
+ # The index of the output item that completed.
20
+ sig { returns(Integer) }
21
+ attr_accessor :output_index
22
+
15
23
  # The sequence number of this event.
16
24
  sig { returns(Integer) }
17
25
  attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI
22
30
 
23
31
  # Emitted when an MCP tool call has completed successfully.
24
32
  sig do
25
- params(sequence_number: Integer, type: Symbol).returns(
26
- T.attached_class
27
- )
33
+ params(
34
+ item_id: String,
35
+ output_index: Integer,
36
+ sequence_number: Integer,
37
+ type: Symbol
38
+ ).returns(T.attached_class)
28
39
  end
29
40
  def self.new(
41
+ # The ID of the MCP tool call item that completed.
42
+ item_id:,
43
+ # The index of the output item that completed.
44
+ output_index:,
30
45
  # The sequence number of this event.
31
46
  sequence_number:,
32
47
  # The type of the event. Always 'response.mcp_call.completed'.
@@ -34,7 +49,16 @@ module OpenAI
34
49
  )
35
50
  end
36
51
 
37
- sig { override.returns({ sequence_number: Integer, type: Symbol }) }
52
+ sig do
53
+ override.returns(
54
+ {
55
+ item_id: String,
56
+ output_index: Integer,
57
+ sequence_number: Integer,
58
+ type: Symbol
59
+ }
60
+ )
61
+ end
38
62
  def to_hash
39
63
  end
40
64
  end
@@ -12,6 +12,14 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
+ # The ID of the MCP tool call item that failed.
16
+ sig { returns(String) }
17
+ attr_accessor :item_id
18
+
19
+ # The index of the output item that failed.
20
+ sig { returns(Integer) }
21
+ attr_accessor :output_index
22
+
15
23
  # The sequence number of this event.
16
24
  sig { returns(Integer) }
17
25
  attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI
22
30
 
23
31
  # Emitted when an MCP tool call has failed.
24
32
  sig do
25
- params(sequence_number: Integer, type: Symbol).returns(
26
- T.attached_class
27
- )
33
+ params(
34
+ item_id: String,
35
+ output_index: Integer,
36
+ sequence_number: Integer,
37
+ type: Symbol
38
+ ).returns(T.attached_class)
28
39
  end
29
40
  def self.new(
41
+ # The ID of the MCP tool call item that failed.
42
+ item_id:,
43
+ # The index of the output item that failed.
44
+ output_index:,
30
45
  # The sequence number of this event.
31
46
  sequence_number:,
32
47
  # The type of the event. Always 'response.mcp_call.failed'.
@@ -34,7 +49,16 @@ module OpenAI
34
49
  )
35
50
  end
36
51
 
37
- sig { override.returns({ sequence_number: Integer, type: Symbol }) }
52
+ sig do
53
+ override.returns(
54
+ {
55
+ item_id: String,
56
+ output_index: Integer,
57
+ sequence_number: Integer,
58
+ type: Symbol
59
+ }
60
+ )
61
+ end
38
62
  def to_hash
39
63
  end
40
64
  end
@@ -12,6 +12,14 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
+ # The ID of the MCP tool call item that produced this output.
16
+ sig { returns(String) }
17
+ attr_accessor :item_id
18
+
19
+ # The index of the output item that was processed.
20
+ sig { returns(Integer) }
21
+ attr_accessor :output_index
22
+
15
23
  # The sequence number of this event.
16
24
  sig { returns(Integer) }
17
25
  attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI
22
30
 
23
31
  # Emitted when the list of available MCP tools has been successfully retrieved.
24
32
  sig do
25
- params(sequence_number: Integer, type: Symbol).returns(
26
- T.attached_class
27
- )
33
+ params(
34
+ item_id: String,
35
+ output_index: Integer,
36
+ sequence_number: Integer,
37
+ type: Symbol
38
+ ).returns(T.attached_class)
28
39
  end
29
40
  def self.new(
41
+ # The ID of the MCP tool call item that produced this output.
42
+ item_id:,
43
+ # The index of the output item that was processed.
44
+ output_index:,
30
45
  # The sequence number of this event.
31
46
  sequence_number:,
32
47
  # The type of the event. Always 'response.mcp_list_tools.completed'.
@@ -34,7 +49,16 @@ module OpenAI
34
49
  )
35
50
  end
36
51
 
37
- sig { override.returns({ sequence_number: Integer, type: Symbol }) }
52
+ sig do
53
+ override.returns(
54
+ {
55
+ item_id: String,
56
+ output_index: Integer,
57
+ sequence_number: Integer,
58
+ type: Symbol
59
+ }
60
+ )
61
+ end
38
62
  def to_hash
39
63
  end
40
64
  end
@@ -12,6 +12,14 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
+ # The ID of the MCP tool call item that failed.
16
+ sig { returns(String) }
17
+ attr_accessor :item_id
18
+
19
+ # The index of the output item that failed.
20
+ sig { returns(Integer) }
21
+ attr_accessor :output_index
22
+
15
23
  # The sequence number of this event.
16
24
  sig { returns(Integer) }
17
25
  attr_accessor :sequence_number
@@ -22,11 +30,18 @@ module OpenAI
22
30
 
23
31
  # Emitted when the attempt to list available MCP tools has failed.
24
32
  sig do
25
- params(sequence_number: Integer, type: Symbol).returns(
26
- T.attached_class
27
- )
33
+ params(
34
+ item_id: String,
35
+ output_index: Integer,
36
+ sequence_number: Integer,
37
+ type: Symbol
38
+ ).returns(T.attached_class)
28
39
  end
29
40
  def self.new(
41
+ # The ID of the MCP tool call item that failed.
42
+ item_id:,
43
+ # The index of the output item that failed.
44
+ output_index:,
30
45
  # The sequence number of this event.
31
46
  sequence_number:,
32
47
  # The type of the event. Always 'response.mcp_list_tools.failed'.
@@ -34,7 +49,16 @@ module OpenAI
34
49
  )
35
50
  end
36
51
 
37
- sig { override.returns({ sequence_number: Integer, type: Symbol }) }
52
+ sig do
53
+ override.returns(
54
+ {
55
+ item_id: String,
56
+ output_index: Integer,
57
+ sequence_number: Integer,
58
+ type: Symbol
59
+ }
60
+ )
61
+ end
38
62
  def to_hash
39
63
  end
40
64
  end
@@ -12,6 +12,14 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
+ # The ID of the MCP tool call item that is being processed.
16
+ sig { returns(String) }
17
+ attr_accessor :item_id
18
+
19
+ # The index of the output item that is being processed.
20
+ sig { returns(Integer) }
21
+ attr_accessor :output_index
22
+
15
23
  # The sequence number of this event.
16
24
  sig { returns(Integer) }
17
25
  attr_accessor :sequence_number
@@ -23,11 +31,18 @@ module OpenAI
23
31
  # Emitted when the system is in the process of retrieving the list of available
24
32
  # MCP tools.
25
33
  sig do
26
- params(sequence_number: Integer, type: Symbol).returns(
27
- T.attached_class
28
- )
34
+ params(
35
+ item_id: String,
36
+ output_index: Integer,
37
+ sequence_number: Integer,
38
+ type: Symbol
39
+ ).returns(T.attached_class)
29
40
  end
30
41
  def self.new(
42
+ # The ID of the MCP tool call item that is being processed.
43
+ item_id:,
44
+ # The index of the output item that is being processed.
45
+ output_index:,
31
46
  # The sequence number of this event.
32
47
  sequence_number:,
33
48
  # The type of the event. Always 'response.mcp_list_tools.in_progress'.
@@ -35,7 +50,16 @@ module OpenAI
35
50
  )
36
51
  end
37
52
 
38
- sig { override.returns({ sequence_number: Integer, type: Symbol }) }
53
+ sig do
54
+ override.returns(
55
+ {
56
+ item_id: String,
57
+ output_index: Integer,
58
+ sequence_number: Integer,
59
+ type: Symbol
60
+ }
61
+ )
62
+ end
39
63
  def to_hash
40
64
  end
41
65
  end
@@ -59,8 +59,6 @@ module OpenAI
59
59
  OpenAI::Responses::ResponseMcpListToolsInProgressEvent,
60
60
  OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent,
61
61
  OpenAI::Responses::ResponseQueuedEvent,
62
- OpenAI::Responses::ResponseReasoningDeltaEvent,
63
- OpenAI::Responses::ResponseReasoningDoneEvent,
64
62
  OpenAI::Responses::ResponseReasoningSummaryDeltaEvent,
65
63
  OpenAI::Responses::ResponseReasoningSummaryDoneEvent
66
64
  )