openai 0.18.1 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/README.md +1 -1
  4. data/lib/openai/client.rb +4 -0
  5. data/lib/openai/helpers/structured_output/boolean.rb +1 -0
  6. data/lib/openai/internal/conversation_cursor_page.rb +92 -0
  7. data/lib/openai/internal/transport/base_client.rb +1 -4
  8. data/lib/openai/internal/transport/pooled_net_requester.rb +1 -9
  9. data/lib/openai/internal/util.rb +1 -1
  10. data/lib/openai/models/audio/transcription.rb +1 -4
  11. data/lib/openai/models/audio/transcription_create_params.rb +2 -7
  12. data/lib/openai/models/audio/transcription_text_done_event.rb +1 -4
  13. data/lib/openai/models/beta/assistant_create_params.rb +6 -19
  14. data/lib/openai/models/beta/assistant_stream_event.rb +6 -24
  15. data/lib/openai/models/beta/assistant_update_params.rb +1 -4
  16. data/lib/openai/models/beta/message_stream_event.rb +1 -4
  17. data/lib/openai/models/beta/run_step_stream_event.rb +1 -4
  18. data/lib/openai/models/beta/thread_create_and_run_params.rb +10 -32
  19. data/lib/openai/models/beta/thread_create_params.rb +7 -22
  20. data/lib/openai/models/beta/threads/message.rb +3 -10
  21. data/lib/openai/models/beta/threads/message_create_params.rb +2 -7
  22. data/lib/openai/models/beta/threads/run.rb +2 -7
  23. data/lib/openai/models/beta/threads/run_create_params.rb +3 -10
  24. data/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb +1 -3
  25. data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb +5 -17
  26. data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb +1 -3
  27. data/lib/openai/models/beta/threads/runs/file_search_tool_call.rb +4 -12
  28. data/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb +1 -4
  29. data/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb +1 -4
  30. data/lib/openai/models/beta/threads/text.rb +1 -4
  31. data/lib/openai/models/chat/chat_completion_chunk.rb +1 -3
  32. data/lib/openai/models/chat/chat_completion_custom_tool.rb +2 -7
  33. data/lib/openai/models/conversations/computer_screenshot_content.rb +38 -0
  34. data/lib/openai/models/conversations/container_file_citation_body.rb +58 -0
  35. data/lib/openai/models/conversations/conversation.rb +51 -0
  36. data/lib/openai/models/conversations/conversation_create_params.rb +39 -0
  37. data/lib/openai/models/conversations/conversation_delete_params.rb +16 -0
  38. data/lib/openai/models/conversations/conversation_deleted.rb +29 -0
  39. data/lib/openai/models/conversations/conversation_deleted_resource.rb +30 -0
  40. data/lib/openai/models/conversations/conversation_item.rb +568 -0
  41. data/lib/openai/models/conversations/conversation_item_list.rb +55 -0
  42. data/lib/openai/models/conversations/conversation_retrieve_params.rb +16 -0
  43. data/lib/openai/models/conversations/conversation_update_params.rb +31 -0
  44. data/lib/openai/models/conversations/file_citation_body.rb +42 -0
  45. data/lib/openai/models/conversations/input_file_content.rb +42 -0
  46. data/lib/openai/models/conversations/input_image_content.rb +62 -0
  47. data/lib/openai/models/conversations/input_text_content.rb +26 -0
  48. data/lib/openai/models/conversations/item_create_params.rb +37 -0
  49. data/lib/openai/models/conversations/item_delete_params.rb +22 -0
  50. data/lib/openai/models/conversations/item_list_params.rb +84 -0
  51. data/lib/openai/models/conversations/item_retrieve_params.rb +36 -0
  52. data/lib/openai/models/conversations/lob_prob.rb +35 -0
  53. data/lib/openai/models/conversations/message.rb +115 -0
  54. data/lib/openai/models/conversations/output_text_content.rb +57 -0
  55. data/lib/openai/models/conversations/refusal_content.rb +26 -0
  56. data/lib/openai/models/conversations/summary_text_content.rb +23 -0
  57. data/lib/openai/models/conversations/text_content.rb +23 -0
  58. data/lib/openai/models/conversations/top_log_prob.rb +29 -0
  59. data/lib/openai/models/conversations/url_citation_body.rb +50 -0
  60. data/lib/openai/models/eval_create_params.rb +6 -20
  61. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +29 -53
  62. data/lib/openai/models/evals/create_eval_jsonl_run_data_source.rb +1 -3
  63. data/lib/openai/models/evals/run_create_params.rb +18 -54
  64. data/lib/openai/models/fine_tuning/reinforcement_hyperparameters.rb +1 -4
  65. data/lib/openai/models/moderation.rb +5 -15
  66. data/lib/openai/models/responses/input_item_list_params.rb +1 -9
  67. data/lib/openai/models/responses/response.rb +26 -1
  68. data/lib/openai/models/responses/response_computer_tool_call.rb +2 -6
  69. data/lib/openai/models/responses/response_computer_tool_call_output_item.rb +1 -3
  70. data/lib/openai/models/responses/response_conversation_param.rb +20 -0
  71. data/lib/openai/models/responses/response_create_params.rb +34 -1
  72. data/lib/openai/models/responses/response_input_item.rb +2 -7
  73. data/lib/openai/models/responses/response_input_message_item.rb +1 -4
  74. data/lib/openai/models/responses/response_output_item.rb +1 -3
  75. data/lib/openai/models/responses/response_output_message.rb +1 -3
  76. data/lib/openai/models/responses/response_output_text.rb +3 -10
  77. data/lib/openai/models/responses/response_stream_event.rb +4 -16
  78. data/lib/openai/models/responses/response_text_delta_event.rb +1 -3
  79. data/lib/openai/models/responses/response_text_done_event.rb +1 -3
  80. data/lib/openai/models/responses/tool.rb +145 -34
  81. data/lib/openai/models.rb +2 -0
  82. data/lib/openai/resources/conversations/items.rb +141 -0
  83. data/lib/openai/resources/conversations.rb +112 -0
  84. data/lib/openai/resources/responses/input_items.rb +1 -3
  85. data/lib/openai/resources/responses.rb +6 -2
  86. data/lib/openai/version.rb +1 -1
  87. data/lib/openai.rb +31 -0
  88. data/rbi/openai/client.rbi +3 -0
  89. data/rbi/openai/errors.rbi +5 -5
  90. data/rbi/openai/internal/conversation_cursor_page.rbi +25 -0
  91. data/rbi/openai/models/conversations/computer_screenshot_content.rbi +60 -0
  92. data/rbi/openai/models/conversations/container_file_citation_body.rbi +82 -0
  93. data/rbi/openai/models/conversations/conversation.rbi +76 -0
  94. data/rbi/openai/models/conversations/conversation_create_params.rbi +144 -0
  95. data/rbi/openai/models/conversations/conversation_delete_params.rbi +32 -0
  96. data/rbi/openai/models/conversations/conversation_deleted.rbi +40 -0
  97. data/rbi/openai/models/conversations/conversation_deleted_resource.rbi +40 -0
  98. data/rbi/openai/models/conversations/conversation_item.rbi +835 -0
  99. data/rbi/openai/models/conversations/conversation_item_list.rbi +101 -0
  100. data/rbi/openai/models/conversations/conversation_retrieve_params.rbi +32 -0
  101. data/rbi/openai/models/conversations/conversation_update_params.rbi +56 -0
  102. data/rbi/openai/models/conversations/file_citation_body.rbi +61 -0
  103. data/rbi/openai/models/conversations/input_file_content.rbi +72 -0
  104. data/rbi/openai/models/conversations/input_image_content.rbi +113 -0
  105. data/rbi/openai/models/conversations/input_text_content.rbi +38 -0
  106. data/rbi/openai/models/conversations/item_create_params.rbi +150 -0
  107. data/rbi/openai/models/conversations/item_delete_params.rbi +40 -0
  108. data/rbi/openai/models/conversations/item_list_params.rbi +174 -0
  109. data/rbi/openai/models/conversations/item_retrieve_params.rbi +70 -0
  110. data/rbi/openai/models/conversations/lob_prob.rbi +50 -0
  111. data/rbi/openai/models/conversations/message.rbi +196 -0
  112. data/rbi/openai/models/conversations/output_text_content.rbi +110 -0
  113. data/rbi/openai/models/conversations/refusal_content.rbi +38 -0
  114. data/rbi/openai/models/conversations/summary_text_content.rbi +31 -0
  115. data/rbi/openai/models/conversations/text_content.rbi +28 -0
  116. data/rbi/openai/models/conversations/top_log_prob.rbi +41 -0
  117. data/rbi/openai/models/conversations/url_citation_body.rbi +74 -0
  118. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +33 -33
  119. data/rbi/openai/models/responses/input_item_list_params.rbi +0 -11
  120. data/rbi/openai/models/responses/response.rbi +49 -0
  121. data/rbi/openai/models/responses/response_conversation_param.rbi +33 -0
  122. data/rbi/openai/models/responses/response_create_params.rbi +54 -0
  123. data/rbi/openai/models/responses/tool.rbi +243 -31
  124. data/rbi/openai/models.rbi +2 -0
  125. data/rbi/openai/resources/conversations/items.rbi +152 -0
  126. data/rbi/openai/resources/conversations.rbi +110 -0
  127. data/rbi/openai/resources/responses/input_items.rbi +0 -3
  128. data/rbi/openai/resources/responses.rbi +26 -0
  129. data/sig/openai/client.rbs +2 -0
  130. data/sig/openai/internal/conversation_cursor_page.rbs +15 -0
  131. data/sig/openai/models/conversations/computer_screenshot_content.rbs +28 -0
  132. data/sig/openai/models/conversations/container_file_citation_body.rbs +47 -0
  133. data/sig/openai/models/conversations/conversation.rbs +37 -0
  134. data/sig/openai/models/conversations/conversation_create_params.rbs +33 -0
  135. data/sig/openai/models/conversations/conversation_delete_params.rbs +17 -0
  136. data/sig/openai/models/conversations/conversation_deleted.rbs +28 -0
  137. data/sig/openai/models/conversations/conversation_deleted_resource.rbs +28 -0
  138. data/sig/openai/models/conversations/conversation_item.rbs +403 -0
  139. data/sig/openai/models/conversations/conversation_item_list.rbs +44 -0
  140. data/sig/openai/models/conversations/conversation_retrieve_params.rbs +17 -0
  141. data/sig/openai/models/conversations/conversation_update_params.rbs +26 -0
  142. data/sig/openai/models/conversations/file_citation_body.rbs +37 -0
  143. data/sig/openai/models/conversations/input_file_content.rbs +41 -0
  144. data/sig/openai/models/conversations/input_image_content.rbs +49 -0
  145. data/sig/openai/models/conversations/input_text_content.rbs +17 -0
  146. data/sig/openai/models/conversations/item_create_params.rbs +37 -0
  147. data/sig/openai/models/conversations/item_delete_params.rbs +25 -0
  148. data/sig/openai/models/conversations/item_list_params.rbs +66 -0
  149. data/sig/openai/models/conversations/item_retrieve_params.rbs +37 -0
  150. data/sig/openai/models/conversations/lob_prob.rbs +37 -0
  151. data/sig/openai/models/conversations/message.rbs +95 -0
  152. data/sig/openai/models/conversations/output_text_content.rbs +52 -0
  153. data/sig/openai/models/conversations/refusal_content.rbs +17 -0
  154. data/sig/openai/models/conversations/summary_text_content.rbs +17 -0
  155. data/sig/openai/models/conversations/text_content.rbs +17 -0
  156. data/sig/openai/models/conversations/top_log_prob.rbs +28 -0
  157. data/sig/openai/models/conversations/url_citation_body.rbs +42 -0
  158. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +22 -22
  159. data/sig/openai/models/responses/input_item_list_params.rbs +0 -7
  160. data/sig/openai/models/responses/response.rbs +15 -0
  161. data/sig/openai/models/responses/response_conversation_param.rbs +15 -0
  162. data/sig/openai/models/responses/response_create_params.rbs +14 -0
  163. data/sig/openai/models/responses/tool.rbs +83 -18
  164. data/sig/openai/models.rbs +2 -0
  165. data/sig/openai/resources/conversations/items.rbs +38 -0
  166. data/sig/openai/resources/conversations.rbs +31 -0
  167. data/sig/openai/resources/responses/input_items.rbs +0 -1
  168. data/sig/openai/resources/responses.rbs +2 -0
  169. metadata +95 -2
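
The headline addition in this release is a new Conversations API surface: the `OpenAI::Models::Conversations::*` models, the `OpenAI::Resources::Conversations` resource (with a nested items resource), and an internal `ConversationCursorPage` pager. Those hunks are not reproduced below; the following is only a rough usage sketch, with method and parameter names inferred from the file list above rather than shown in this diff:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Create a conversation, then page through its items.
    # Parameter names follow conversation_create_params.rb / item_list_params.rb
    # and are assumptions, not something this diff shows directly.
    conversation = client.conversations.create(
      items: [{type: :message, role: :user, content: "Hello!"}]
    )

    client.conversations.items.list(conversation.id).auto_paging_each do |item|
      puts item.inspect
    end
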
data/lib/openai/models/responses/response.rb CHANGED
@@ -141,6 +141,13 @@ module OpenAI
  # @return [Boolean, nil]
  optional :background, OpenAI::Internal::Type::Boolean, nil?: true

+ # @!attribute conversation
+ # The conversation that this response belongs to. Input items and output items
+ # from this response are automatically added to this conversation.
+ #
+ # @return [OpenAI::Models::Responses::Response::Conversation, nil]
+ optional :conversation, -> { OpenAI::Responses::Response::Conversation }, nil?: true
+
  # @!attribute max_output_tokens
  # An upper bound for the number of tokens that can be generated for a response,
  # including visible output tokens and
@@ -162,6 +169,7 @@ module OpenAI
  # The unique ID of the previous response to the model. Use this to create
  # multi-turn conversations. Learn more about
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ # Cannot be used in conjunction with `conversation`.
  #
  # @return [String, nil]
  optional :previous_response_id, String, nil?: true
@@ -296,7 +304,7 @@ module OpenAI
  texts.join
  end

- # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
+ # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, conversation: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Responses::Response} for more details.
  #
@@ -328,6 +336,8 @@ module OpenAI
  #
  # @param background [Boolean, nil] Whether to run the model response in the background.
  #
+ # @param conversation [OpenAI::Models::Responses::Response::Conversation, nil] The conversation that this response belongs to. Input items and output items fro
+ #
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
  #
  # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
@@ -449,6 +459,21 @@ module OpenAI
  # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
  end

+ # @see OpenAI::Models::Responses::Response#conversation
+ class Conversation < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique ID of the conversation.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!method initialize(id:)
+ # The conversation that this response belongs to. Input items and output items
+ # from this response are automatically added to this conversation.
+ #
+ # @param id [String] The unique ID of the conversation.
+ end
+
  # Specifies the processing type used for serving the request.
  #
  # - If set to 'auto', then the request will be processed with the service tier
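
With the new `conversation` attribute, a `Response` can now report which conversation it belongs to. A minimal read-side sketch (assumes a configured client; the IDs are placeholders):

    # "resp_123" is a hypothetical response ID created against a conversation.
    response = client.responses.retrieve("resp_123")
    puts response.conversation.id if response.conversation  # e.g. "conv_456"
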
data/lib/openai/models/responses/response_computer_tool_call.rb CHANGED
@@ -27,9 +27,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck>]
  required :pending_safety_checks,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck] }

  # @!attribute status
  # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
@@ -207,9 +205,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path>]
  required :path,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path] }

  # @!attribute type
  # Specifies the event type. For a drag action, this property is always set to
data/lib/openai/models/responses/response_computer_tool_call_output_item.rb CHANGED
@@ -34,9 +34,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck>, nil]
  optional :acknowledged_safety_checks,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck] }

  # @!attribute status
  # The status of the message input. One of `in_progress`, `completed`, or
data/lib/openai/models/responses/response_conversation_param.rb ADDED
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+ module Models
+ module Responses
+ class ResponseConversationParam < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique ID of the conversation.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!method initialize(id:)
+ # The conversation that this response belongs to.
+ #
+ # @param id [String] The unique ID of the conversation.
+ end
+ end
+ end
+ end
data/lib/openai/models/responses/response_create_params.rb CHANGED
@@ -17,6 +17,19 @@ module OpenAI
  # @return [Boolean, nil]
  optional :background, OpenAI::Internal::Type::Boolean, nil?: true

+ # @!attribute conversation
+ # The conversation that this response belongs to. Items from this conversation are
+ # prepended to `input_items` for this response request. Input items and output
+ # items from this response are automatically added to this conversation after this
+ # response completes.
+ #
+ # @return [String, OpenAI::Models::Responses::ResponseConversationParam, nil]
+ optional :conversation,
+ union: -> {
+ OpenAI::Responses::ResponseCreateParams::Conversation
+ },
+ nil?: true
+
  # @!attribute include
  # Specify additional output data to include in the model response. Currently
  # supported values are:
@@ -112,6 +125,7 @@ module OpenAI
  # The unique ID of the previous response to the model. Use this to create
  # multi-turn conversations. Learn more about
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ # Cannot be used in conjunction with `conversation`.
  #
  # @return [String, nil]
  optional :previous_response_id, String, nil?: true
@@ -278,12 +292,14 @@ module OpenAI
  # @return [String, nil]
  optional :user, String

- # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @!method initialize(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Responses::ResponseCreateParams} for more details.
  #
  # @param background [Boolean, nil] Whether to run the model response in the background.
  #
+ # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are
+ #
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
  #
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
@@ -334,6 +350,23 @@ module OpenAI
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

+ # The conversation that this response belongs to. Items from this conversation are
+ # prepended to `input_items` for this response request. Input items and output
+ # items from this response are automatically added to this conversation after this
+ # response completes.
+ module Conversation
+ extend OpenAI::Internal::Type::Union
+
+ # The unique ID of the conversation.
+ variant String
+
+ # The conversation that this response belongs to.
+ variant -> { OpenAI::Responses::ResponseConversationParam }
+
+ # @!method self.variants
+ # @return [Array(String, OpenAI::Models::Responses::ResponseConversationParam)]
+ end
+
  # Text, image, or file inputs to the model, used to generate a response.
  #
  # Learn more:
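
On the request side, `ResponseCreateParams` now accepts `conversation:` either as a bare conversation ID string or as a `ResponseConversationParam`, and it cannot be combined with `previous_response_id`. A hedged sketch (model name and IDs are placeholders):

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Pass the conversation as a plain ID string...
    client.responses.create(
      model: "gpt-4.1",
      input: "Summarize the discussion so far.",
      conversation: "conv_456"
    )

    # ...or as the new param object; both forms resolve through the same union.
    client.responses.create(
      model: "gpt-4.1",
      input: "What should we do next?",
      conversation: OpenAI::Responses::ResponseConversationParam.new(id: "conv_456")
    )
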
data/lib/openai/models/responses/response_input_item.rb CHANGED
@@ -95,10 +95,7 @@ module OpenAI
  # types.
  #
  # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
- required :content,
- -> {
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]
- }
+ required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }

  # @!attribute role
  # The role of the message input. One of `user`, `system`, or `developer`.
@@ -605,9 +602,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseInputItem::McpListTools::Tool>]
  required :tools,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::McpListTools::Tool]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::McpListTools::Tool] }

  # @!attribute type
  # The type of the item. Always `mcp_list_tools`.
data/lib/openai/models/responses/response_input_message_item.rb CHANGED
@@ -15,10 +15,7 @@ module OpenAI
  # types.
  #
  # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
- required :content,
- -> {
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]
- }
+ required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }

  # @!attribute role
  # The role of the message input. One of `user`, `system`, or `developer`.
data/lib/openai/models/responses/response_output_item.rb CHANGED
@@ -314,9 +314,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseOutputItem::McpListTools::Tool>]
  required :tools,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool] }

  # @!attribute type
  # The type of the item. Always `mcp_list_tools`.
data/lib/openai/models/responses/response_output_message.rb CHANGED
@@ -15,9 +15,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal>]
  required :content,
- -> {
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content] }

  # @!attribute role
  # The role of the output message. Always `assistant`.
data/lib/openai/models/responses/response_output_text.rb CHANGED
@@ -9,9 +9,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>]
  required :annotations,
- -> {
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation] }

  # @!attribute text
  # The text output from the model.
@@ -34,10 +32,7 @@ module OpenAI
  # @!attribute logprobs
  #
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob>, nil]
- optional :logprobs,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob]
- }
+ optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob] }

  # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text)
  # A text output from the model.
@@ -261,9 +256,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob::TopLogprob>]
  required :top_logprobs,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob] }

  # @!method initialize(token:, bytes:, logprob:, top_logprobs:)
  # The log probability of a token.
data/lib/openai/models/responses/response_stream_event.rb CHANGED
@@ -16,10 +16,7 @@ module OpenAI
  variant :"response.audio.done", -> { OpenAI::Responses::ResponseAudioDoneEvent }

  # Emitted when there is a partial transcript of audio.
- variant :"response.audio.transcript.delta",
- -> {
- OpenAI::Responses::ResponseAudioTranscriptDeltaEvent
- }
+ variant :"response.audio.transcript.delta", -> { OpenAI::Responses::ResponseAudioTranscriptDeltaEvent }

  # Emitted when the full audio transcript is completed.
  variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent }
@@ -157,16 +154,10 @@ module OpenAI
  -> { OpenAI::Responses::ResponseImageGenCallPartialImageEvent }

  # Emitted when there is a delta (partial update) to the arguments of an MCP tool call.
- variant :"response.mcp_call_arguments.delta",
- -> {
- OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent
- }
+ variant :"response.mcp_call_arguments.delta", -> { OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent }

  # Emitted when the arguments for an MCP tool call are finalized.
- variant :"response.mcp_call_arguments.done",
- -> {
- OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent
- }
+ variant :"response.mcp_call_arguments.done", -> { OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent }

  # Emitted when an MCP tool call has completed successfully.
  variant :"response.mcp_call.completed", -> { OpenAI::Responses::ResponseMcpCallCompletedEvent }
@@ -178,10 +169,7 @@ module OpenAI
  variant :"response.mcp_call.in_progress", -> { OpenAI::Responses::ResponseMcpCallInProgressEvent }

  # Emitted when the list of available MCP tools has been successfully retrieved.
- variant :"response.mcp_list_tools.completed",
- -> {
- OpenAI::Responses::ResponseMcpListToolsCompletedEvent
- }
+ variant :"response.mcp_list_tools.completed", -> { OpenAI::Responses::ResponseMcpListToolsCompletedEvent }

  # Emitted when the attempt to list available MCP tools has failed.
  variant :"response.mcp_list_tools.failed", -> { OpenAI::Responses::ResponseMcpListToolsFailedEvent }
data/lib/openai/models/responses/response_text_delta_event.rb CHANGED
@@ -85,9 +85,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>, nil]
  optional :top_logprobs,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob] }

  # @!method initialize(token:, logprob:, top_logprobs: nil)
  # Some parameter documentations has been truncated, see
data/lib/openai/models/responses/response_text_done_event.rb CHANGED
@@ -85,9 +85,7 @@ module OpenAI
  #
  # @return [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>, nil]
  optional :top_logprobs,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob] }

  # @!method initialize(token:, logprob:, top_logprobs: nil)
  # Some parameter documentations has been truncated, see
data/lib/openai/models/responses/tool.rb CHANGED
@@ -47,12 +47,6 @@ module OpenAI
  # @return [String]
  required :server_label, String

- # @!attribute server_url
- # The URL for the MCP server.
- #
- # @return [String]
- required :server_url, String
-
  # @!attribute type
  # The type of the MCP tool. Always `mcp`.
  #
@@ -62,9 +56,37 @@ module OpenAI
  # @!attribute allowed_tools
  # List of allowed tool names or a filter object.
  #
- # @return [Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpAllowedToolsFilter, nil]
+ # @return [Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpToolFilter, nil]
  optional :allowed_tools, union: -> { OpenAI::Responses::Tool::Mcp::AllowedTools }, nil?: true

+ # @!attribute authorization
+ # An OAuth access token that can be used with a remote MCP server, either with a
+ # custom MCP server URL or a service connector. Your application must handle the
+ # OAuth authorization flow and provide the token here.
+ #
+ # @return [String, nil]
+ optional :authorization, String
+
+ # @!attribute connector_id
+ # Identifier for service connectors, like those available in ChatGPT. One of
+ # `server_url` or `connector_id` must be provided. Learn more about service
+ # connectors
+ # [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+ #
+ # Currently supported `connector_id` values are:
+ #
+ # - Dropbox: `connector_dropbox`
+ # - Gmail: `connector_gmail`
+ # - Google Calendar: `connector_googlecalendar`
+ # - Google Drive: `connector_googledrive`
+ # - Microsoft Teams: `connector_microsoftteams`
+ # - Outlook Calendar: `connector_outlookcalendar`
+ # - Outlook Email: `connector_outlookemail`
+ # - SharePoint: `connector_sharepoint`
+ #
+ # @return [Symbol, OpenAI::Models::Responses::Tool::Mcp::ConnectorID, nil]
+ optional :connector_id, enum: -> { OpenAI::Responses::Tool::Mcp::ConnectorID }
+
  # @!attribute headers
  # Optional HTTP headers to send to the MCP server. Use for authentication or other
  # purposes.
@@ -84,7 +106,14 @@ module OpenAI
  # @return [String, nil]
  optional :server_description, String

- # @!method initialize(server_label:, server_url:, allowed_tools: nil, headers: nil, require_approval: nil, server_description: nil, type: :mcp)
+ # @!attribute server_url
+ # The URL for the MCP server. One of `server_url` or `connector_id` must be
+ # provided.
+ #
+ # @return [String, nil]
+ optional :server_url, String
+
+ # @!method initialize(server_label:, allowed_tools: nil, authorization: nil, connector_id: nil, headers: nil, require_approval: nil, server_description: nil, server_url: nil, type: :mcp)
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Responses::Tool::Mcp} for more details.
  #
@@ -94,9 +123,11 @@ module OpenAI
  #
  # @param server_label [String] A label for this MCP server, used to identify it in tool calls.
  #
- # @param server_url [String] The URL for the MCP server.
+ # @param allowed_tools [Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpToolFilter, nil] List of allowed tool names or a filter object.
+ #
+ # @param authorization [String] An OAuth access token that can be used with a remote MCP server, either
  #
- # @param allowed_tools [Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpAllowedToolsFilter, nil] List of allowed tool names or a filter object.
+ # @param connector_id [Symbol, OpenAI::Models::Responses::Tool::Mcp::ConnectorID] Identifier for service connectors, like those available in ChatGPT. One of
  #
  # @param headers [Hash{Symbol=>String}, nil] Optional HTTP headers to send to the MCP server. Use for authentication
  #
@@ -104,6 +135,8 @@ module OpenAI
  #
  # @param server_description [String] Optional description of the MCP server, used to provide more context.
  #
+ # @param server_url [String] The URL for the MCP server. One of `server_url` or `connector_id` must be
+ #
  # @param type [Symbol, :mcp] The type of the MCP tool. Always `mcp`.

  # List of allowed tool names or a filter object.
@@ -116,34 +149,85 @@ module OpenAI
  variant -> { OpenAI::Models::Responses::Tool::Mcp::AllowedTools::StringArray }

  # A filter object to specify which tools are allowed.
- variant -> { OpenAI::Responses::Tool::Mcp::AllowedTools::McpAllowedToolsFilter }
+ variant -> { OpenAI::Responses::Tool::Mcp::AllowedTools::McpToolFilter }
+
+ class McpToolFilter < OpenAI::Internal::Type::BaseModel
+ # @!attribute read_only
+ # Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ # is
+ # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ # it will match this filter.
+ #
+ # @return [Boolean, nil]
+ optional :read_only, OpenAI::Internal::Type::Boolean

- class McpAllowedToolsFilter < OpenAI::Internal::Type::BaseModel
  # @!attribute tool_names
  # List of allowed tool names.
  #
  # @return [Array<String>, nil]
  optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]

- # @!method initialize(tool_names: nil)
+ # @!method initialize(read_only: nil, tool_names: nil)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpToolFilter} for more
+ # details.
+ #
  # A filter object to specify which tools are allowed.
  #
+ # @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an
+ #
  # @param tool_names [Array<String>] List of allowed tool names.
  end

  # @!method self.variants
- # @return [Array(Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpAllowedToolsFilter)]
+ # @return [Array(Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpToolFilter)]

  # @type [OpenAI::Internal::Type::Converter]
  StringArray = OpenAI::Internal::Type::ArrayOf[String]
  end

+ # Identifier for service connectors, like those available in ChatGPT. One of
+ # `server_url` or `connector_id` must be provided. Learn more about service
+ # connectors
+ # [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+ #
+ # Currently supported `connector_id` values are:
+ #
+ # - Dropbox: `connector_dropbox`
+ # - Gmail: `connector_gmail`
+ # - Google Calendar: `connector_googlecalendar`
+ # - Google Drive: `connector_googledrive`
+ # - Microsoft Teams: `connector_microsoftteams`
+ # - Outlook Calendar: `connector_outlookcalendar`
+ # - Outlook Email: `connector_outlookemail`
+ # - SharePoint: `connector_sharepoint`
+ #
+ # @see OpenAI::Models::Responses::Tool::Mcp#connector_id
+ module ConnectorID
+ extend OpenAI::Internal::Type::Enum
+
+ CONNECTOR_DROPBOX = :connector_dropbox
+ CONNECTOR_GMAIL = :connector_gmail
+ CONNECTOR_GOOGLECALENDAR = :connector_googlecalendar
+ CONNECTOR_GOOGLEDRIVE = :connector_googledrive
+ CONNECTOR_MICROSOFTTEAMS = :connector_microsoftteams
+ CONNECTOR_OUTLOOKCALENDAR = :connector_outlookcalendar
+ CONNECTOR_OUTLOOKEMAIL = :connector_outlookemail
+ CONNECTOR_SHAREPOINT = :connector_sharepoint
+
+ # @!method self.values
+ # @return [Array<Symbol>]
+ end
+
  # Specify which of the MCP server's tools require approval.
  #
  # @see OpenAI::Models::Responses::Tool::Mcp#require_approval
  module RequireApproval
  extend OpenAI::Internal::Type::Union

+ # Specify which of the MCP server's tools require approval. Can be
+ # `always`, `never`, or a filter object associated with tools
+ # that require approval.
  variant -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter }

  # Specify a single approval policy for all tools. One of `always` or
@@ -153,58 +237,85 @@ module OpenAI

  class McpToolApprovalFilter < OpenAI::Internal::Type::BaseModel
  # @!attribute always
- # A list of tools that always require approval.
+ # A filter object to specify which tools are allowed.
  #
  # @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always, nil]
- optional :always,
- -> {
- OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always
- }
+ optional :always, -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always }

  # @!attribute never
- # A list of tools that never require approval.
+ # A filter object to specify which tools are allowed.
  #
  # @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never, nil]
- optional :never,
- -> {
- OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never
- }
+ optional :never, -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never }

  # @!method initialize(always: nil, never: nil)
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter}
  # for more details.
  #
- # @param always [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always] A list of tools that always require approval.
+ # Specify which of the MCP server's tools require approval. Can be `always`,
+ # `never`, or a filter object associated with tools that require approval.
+ #
+ # @param always [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always] A filter object to specify which tools are allowed.
  #
- # @param never [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never] A list of tools that never require approval.
+ # @param never [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never] A filter object to specify which tools are allowed.

  # @see OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter#always
  class Always < OpenAI::Internal::Type::BaseModel
+ # @!attribute read_only
+ # Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ # is
+ # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ # it will match this filter.
+ #
+ # @return [Boolean, nil]
+ optional :read_only, OpenAI::Internal::Type::Boolean
+
  # @!attribute tool_names
- # List of tools that require approval.
+ # List of allowed tool names.
  #
  # @return [Array<String>, nil]
  optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]

- # @!method initialize(tool_names: nil)
- # A list of tools that always require approval.
+ # @!method initialize(read_only: nil, tool_names: nil)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always}
+ # for more details.
+ #
+ # A filter object to specify which tools are allowed.
  #
- # @param tool_names [Array<String>] List of tools that require approval.
+ # @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an
+ #
+ # @param tool_names [Array<String>] List of allowed tool names.
  end

  # @see OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter#never
  class Never < OpenAI::Internal::Type::BaseModel
+ # @!attribute read_only
+ # Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ # is
+ # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ # it will match this filter.
+ #
+ # @return [Boolean, nil]
+ optional :read_only, OpenAI::Internal::Type::Boolean
+
  # @!attribute tool_names
- # List of tools that do not require approval.
+ # List of allowed tool names.
  #
  # @return [Array<String>, nil]
  optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]

- # @!method initialize(tool_names: nil)
- # A list of tools that never require approval.
+ # @!method initialize(read_only: nil, tool_names: nil)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never}
+ # for more details.
+ #
+ # A filter object to specify which tools are allowed.
+ #
+ # @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an
  #
- # @param tool_names [Array<String>] List of tools that do not require approval.
+ # @param tool_names [Array<String>] List of allowed tool names.
  end
  end

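For the MCP tool, `server_url` is now optional: a tool can instead target a hosted service connector via `connector_id` (typically alongside an OAuth `authorization` token), and the allowed-tools and approval filters gained a `read_only` flag. One possible configuration, sketched with placeholder values (hash params are coerced into the models shown above):

    client.responses.create(
      model: "gpt-4.1",
      input: "List the files shared with me this week.",
      tools: [
        {
          type: :mcp,
          server_label: "google_drive",
          connector_id: :connector_googledrive,           # instead of server_url
          authorization: ENV["GOOGLE_DRIVE_OAUTH_TOKEN"],  # app-managed OAuth token
          allowed_tools: {read_only: true},                # McpToolFilter with the new flag
          require_approval: {never: {read_only: true}}
        }
      ]
    )
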
data/lib/openai/models.rb CHANGED
@@ -91,6 +91,8 @@ module OpenAI

  Containers = OpenAI::Models::Containers

+ Conversations = OpenAI::Models::Conversations
+
  CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse

  CustomToolInputFormat = OpenAI::Models::CustomToolInputFormat