openai 0.18.1 → 0.20.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (186)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +35 -0
  3. data/README.md +1 -1
  4. data/lib/openai/client.rb +4 -0
  5. data/lib/openai/helpers/structured_output/boolean.rb +1 -0
  6. data/lib/openai/internal/conversation_cursor_page.rb +92 -0
  7. data/lib/openai/internal/transport/base_client.rb +1 -4
  8. data/lib/openai/internal/transport/pooled_net_requester.rb +1 -9
  9. data/lib/openai/internal/util.rb +1 -1
  10. data/lib/openai/models/audio/transcription.rb +1 -4
  11. data/lib/openai/models/audio/transcription_create_params.rb +2 -7
  12. data/lib/openai/models/audio/transcription_text_done_event.rb +1 -4
  13. data/lib/openai/models/beta/assistant_create_params.rb +6 -19
  14. data/lib/openai/models/beta/assistant_stream_event.rb +6 -24
  15. data/lib/openai/models/beta/assistant_update_params.rb +1 -4
  16. data/lib/openai/models/beta/message_stream_event.rb +1 -4
  17. data/lib/openai/models/beta/run_step_stream_event.rb +1 -4
  18. data/lib/openai/models/beta/thread_create_and_run_params.rb +10 -32
  19. data/lib/openai/models/beta/thread_create_params.rb +7 -22
  20. data/lib/openai/models/beta/threads/message.rb +3 -10
  21. data/lib/openai/models/beta/threads/message_create_params.rb +2 -7
  22. data/lib/openai/models/beta/threads/run.rb +2 -7
  23. data/lib/openai/models/beta/threads/run_create_params.rb +3 -10
  24. data/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb +1 -3
  25. data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb +5 -17
  26. data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb +1 -3
  27. data/lib/openai/models/beta/threads/runs/file_search_tool_call.rb +4 -12
  28. data/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb +1 -4
  29. data/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb +1 -4
  30. data/lib/openai/models/beta/threads/text.rb +1 -4
  31. data/lib/openai/models/chat/chat_completion_chunk.rb +1 -3
  32. data/lib/openai/models/chat/chat_completion_custom_tool.rb +2 -7
  33. data/lib/openai/models/chat/chat_completion_message.rb +7 -5
  34. data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +7 -5
  35. data/lib/openai/models/conversations/computer_screenshot_content.rb +38 -0
  36. data/lib/openai/models/conversations/container_file_citation_body.rb +58 -0
  37. data/lib/openai/models/conversations/conversation.rb +51 -0
  38. data/lib/openai/models/conversations/conversation_create_params.rb +39 -0
  39. data/lib/openai/models/conversations/conversation_delete_params.rb +16 -0
  40. data/lib/openai/models/conversations/conversation_deleted.rb +29 -0
  41. data/lib/openai/models/conversations/conversation_deleted_resource.rb +30 -0
  42. data/lib/openai/models/conversations/conversation_item.rb +568 -0
  43. data/lib/openai/models/conversations/conversation_item_list.rb +55 -0
  44. data/lib/openai/models/conversations/conversation_retrieve_params.rb +16 -0
  45. data/lib/openai/models/conversations/conversation_update_params.rb +31 -0
  46. data/lib/openai/models/conversations/file_citation_body.rb +42 -0
  47. data/lib/openai/models/conversations/input_file_content.rb +42 -0
  48. data/lib/openai/models/conversations/input_image_content.rb +62 -0
  49. data/lib/openai/models/conversations/input_text_content.rb +26 -0
  50. data/lib/openai/models/conversations/item_create_params.rb +37 -0
  51. data/lib/openai/models/conversations/item_delete_params.rb +22 -0
  52. data/lib/openai/models/conversations/item_list_params.rb +86 -0
  53. data/lib/openai/models/conversations/item_retrieve_params.rb +36 -0
  54. data/lib/openai/models/conversations/lob_prob.rb +35 -0
  55. data/lib/openai/models/conversations/message.rb +115 -0
  56. data/lib/openai/models/conversations/output_text_content.rb +57 -0
  57. data/lib/openai/models/conversations/refusal_content.rb +26 -0
  58. data/lib/openai/models/conversations/summary_text_content.rb +23 -0
  59. data/lib/openai/models/conversations/text_content.rb +23 -0
  60. data/lib/openai/models/conversations/top_log_prob.rb +29 -0
  61. data/lib/openai/models/conversations/url_citation_body.rb +50 -0
  62. data/lib/openai/models/eval_create_params.rb +6 -20
  63. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +29 -53
  64. data/lib/openai/models/evals/create_eval_jsonl_run_data_source.rb +1 -3
  65. data/lib/openai/models/evals/run_cancel_response.rb +2 -2
  66. data/lib/openai/models/evals/run_create_params.rb +20 -56
  67. data/lib/openai/models/evals/run_create_response.rb +2 -2
  68. data/lib/openai/models/evals/run_list_response.rb +2 -2
  69. data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
  70. data/lib/openai/models/fine_tuning/reinforcement_hyperparameters.rb +1 -4
  71. data/lib/openai/models/moderation.rb +5 -15
  72. data/lib/openai/models/responses/input_item_list_params.rb +1 -9
  73. data/lib/openai/models/responses/response.rb +28 -3
  74. data/lib/openai/models/responses/response_computer_tool_call.rb +2 -6
  75. data/lib/openai/models/responses/response_computer_tool_call_output_item.rb +1 -3
  76. data/lib/openai/models/responses/response_conversation_param.rb +20 -0
  77. data/lib/openai/models/responses/response_create_params.rb +38 -3
  78. data/lib/openai/models/responses/response_function_tool_call.rb +7 -5
  79. data/lib/openai/models/responses/response_function_web_search.rb +35 -1
  80. data/lib/openai/models/responses/response_includable.rb +2 -0
  81. data/lib/openai/models/responses/response_input_item.rb +2 -7
  82. data/lib/openai/models/responses/response_input_message_item.rb +1 -4
  83. data/lib/openai/models/responses/response_output_item.rb +1 -3
  84. data/lib/openai/models/responses/response_output_message.rb +1 -3
  85. data/lib/openai/models/responses/response_output_text.rb +10 -15
  86. data/lib/openai/models/responses/response_stream_event.rb +4 -16
  87. data/lib/openai/models/responses/response_text_delta_event.rb +1 -3
  88. data/lib/openai/models/responses/response_text_done_event.rb +1 -3
  89. data/lib/openai/models/responses/tool.rb +303 -35
  90. data/lib/openai/models.rb +2 -0
  91. data/lib/openai/resources/conversations/items.rb +141 -0
  92. data/lib/openai/resources/conversations.rb +112 -0
  93. data/lib/openai/resources/responses/input_items.rb +1 -3
  94. data/lib/openai/resources/responses.rb +8 -4
  95. data/lib/openai/version.rb +1 -1
  96. data/lib/openai.rb +31 -0
  97. data/rbi/openai/client.rbi +3 -0
  98. data/rbi/openai/errors.rbi +5 -5
  99. data/rbi/openai/internal/conversation_cursor_page.rbi +25 -0
  100. data/rbi/openai/models/conversations/computer_screenshot_content.rbi +60 -0
  101. data/rbi/openai/models/conversations/container_file_citation_body.rbi +82 -0
  102. data/rbi/openai/models/conversations/conversation.rbi +76 -0
  103. data/rbi/openai/models/conversations/conversation_create_params.rbi +144 -0
  104. data/rbi/openai/models/conversations/conversation_delete_params.rbi +32 -0
  105. data/rbi/openai/models/conversations/conversation_deleted.rbi +40 -0
  106. data/rbi/openai/models/conversations/conversation_deleted_resource.rbi +40 -0
  107. data/rbi/openai/models/conversations/conversation_item.rbi +835 -0
  108. data/rbi/openai/models/conversations/conversation_item_list.rbi +101 -0
  109. data/rbi/openai/models/conversations/conversation_retrieve_params.rbi +32 -0
  110. data/rbi/openai/models/conversations/conversation_update_params.rbi +56 -0
  111. data/rbi/openai/models/conversations/file_citation_body.rbi +61 -0
  112. data/rbi/openai/models/conversations/input_file_content.rbi +72 -0
  113. data/rbi/openai/models/conversations/input_image_content.rbi +113 -0
  114. data/rbi/openai/models/conversations/input_text_content.rbi +38 -0
  115. data/rbi/openai/models/conversations/item_create_params.rbi +150 -0
  116. data/rbi/openai/models/conversations/item_delete_params.rbi +40 -0
  117. data/rbi/openai/models/conversations/item_list_params.rbi +178 -0
  118. data/rbi/openai/models/conversations/item_retrieve_params.rbi +70 -0
  119. data/rbi/openai/models/conversations/lob_prob.rbi +50 -0
  120. data/rbi/openai/models/conversations/message.rbi +196 -0
  121. data/rbi/openai/models/conversations/output_text_content.rbi +110 -0
  122. data/rbi/openai/models/conversations/refusal_content.rbi +38 -0
  123. data/rbi/openai/models/conversations/summary_text_content.rbi +31 -0
  124. data/rbi/openai/models/conversations/text_content.rbi +28 -0
  125. data/rbi/openai/models/conversations/top_log_prob.rbi +41 -0
  126. data/rbi/openai/models/conversations/url_citation_body.rbi +74 -0
  127. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +33 -33
  128. data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
  129. data/rbi/openai/models/evals/run_create_params.rbi +4 -0
  130. data/rbi/openai/models/evals/run_create_response.rbi +2 -0
  131. data/rbi/openai/models/evals/run_list_response.rbi +2 -0
  132. data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
  133. data/rbi/openai/models/responses/input_item_list_params.rbi +0 -11
  134. data/rbi/openai/models/responses/response.rbi +50 -0
  135. data/rbi/openai/models/responses/response_conversation_param.rbi +33 -0
  136. data/rbi/openai/models/responses/response_create_params.rbi +62 -0
  137. data/rbi/openai/models/responses/response_function_web_search.rbi +78 -2
  138. data/rbi/openai/models/responses/response_includable.rbi +2 -0
  139. data/rbi/openai/models/responses/tool.rbi +590 -31
  140. data/rbi/openai/models.rbi +2 -0
  141. data/rbi/openai/resources/conversations/items.rbi +154 -0
  142. data/rbi/openai/resources/conversations.rbi +110 -0
  143. data/rbi/openai/resources/responses/input_items.rbi +0 -3
  144. data/rbi/openai/resources/responses.rbi +32 -0
  145. data/sig/openai/client.rbs +2 -0
  146. data/sig/openai/internal/conversation_cursor_page.rbs +15 -0
  147. data/sig/openai/models/conversations/computer_screenshot_content.rbs +28 -0
  148. data/sig/openai/models/conversations/container_file_citation_body.rbs +47 -0
  149. data/sig/openai/models/conversations/conversation.rbs +37 -0
  150. data/sig/openai/models/conversations/conversation_create_params.rbs +33 -0
  151. data/sig/openai/models/conversations/conversation_delete_params.rbs +17 -0
  152. data/sig/openai/models/conversations/conversation_deleted.rbs +28 -0
  153. data/sig/openai/models/conversations/conversation_deleted_resource.rbs +28 -0
  154. data/sig/openai/models/conversations/conversation_item.rbs +403 -0
  155. data/sig/openai/models/conversations/conversation_item_list.rbs +44 -0
  156. data/sig/openai/models/conversations/conversation_retrieve_params.rbs +17 -0
  157. data/sig/openai/models/conversations/conversation_update_params.rbs +26 -0
  158. data/sig/openai/models/conversations/file_citation_body.rbs +37 -0
  159. data/sig/openai/models/conversations/input_file_content.rbs +41 -0
  160. data/sig/openai/models/conversations/input_image_content.rbs +49 -0
  161. data/sig/openai/models/conversations/input_text_content.rbs +17 -0
  162. data/sig/openai/models/conversations/item_create_params.rbs +37 -0
  163. data/sig/openai/models/conversations/item_delete_params.rbs +25 -0
  164. data/sig/openai/models/conversations/item_list_params.rbs +66 -0
  165. data/sig/openai/models/conversations/item_retrieve_params.rbs +37 -0
  166. data/sig/openai/models/conversations/lob_prob.rbs +37 -0
  167. data/sig/openai/models/conversations/message.rbs +95 -0
  168. data/sig/openai/models/conversations/output_text_content.rbs +52 -0
  169. data/sig/openai/models/conversations/refusal_content.rbs +17 -0
  170. data/sig/openai/models/conversations/summary_text_content.rbs +17 -0
  171. data/sig/openai/models/conversations/text_content.rbs +17 -0
  172. data/sig/openai/models/conversations/top_log_prob.rbs +28 -0
  173. data/sig/openai/models/conversations/url_citation_body.rbs +42 -0
  174. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +22 -22
  175. data/sig/openai/models/responses/input_item_list_params.rbs +0 -7
  176. data/sig/openai/models/responses/response.rbs +15 -0
  177. data/sig/openai/models/responses/response_conversation_param.rbs +15 -0
  178. data/sig/openai/models/responses/response_create_params.rbs +14 -0
  179. data/sig/openai/models/responses/response_function_web_search.rbs +34 -3
  180. data/sig/openai/models/responses/tool.rbs +204 -18
  181. data/sig/openai/models.rbs +2 -0
  182. data/sig/openai/resources/conversations/items.rbs +38 -0
  183. data/sig/openai/resources/conversations.rbs +31 -0
  184. data/sig/openai/resources/responses/input_items.rbs +0 -1
  185. data/sig/openai/resources/responses.rbs +2 -0
  186. metadata +95 -2
@@ -176,9 +176,7 @@ module OpenAI
176
176
  #
177
177
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Harassment>]
178
178
  required :harassment,
179
- -> {
180
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Harassment]
181
- }
179
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Harassment] }
182
180
 
183
181
  # @!attribute harassment_threatening
184
182
  # The applied input type(s) for the category 'harassment/threatening'.
@@ -195,9 +193,7 @@ module OpenAI
195
193
  #
196
194
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Hate>]
197
195
  required :hate,
198
- -> {
199
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Hate]
200
- }
196
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Hate] }
201
197
 
202
198
  # @!attribute hate_threatening
203
199
  # The applied input type(s) for the category 'hate/threatening'.
@@ -214,9 +210,7 @@ module OpenAI
214
210
  #
215
211
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Illicit>]
216
212
  required :illicit,
217
- -> {
218
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Illicit]
219
- }
213
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Illicit] }
220
214
 
221
215
  # @!attribute illicit_violent
222
216
  # The applied input type(s) for the category 'illicit/violent'.
@@ -263,9 +257,7 @@ module OpenAI
263
257
  #
264
258
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Sexual>]
265
259
  required :sexual,
266
- -> {
267
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Sexual]
268
- }
260
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Sexual] }
269
261
 
270
262
  # @!attribute sexual_minors
271
263
  # The applied input type(s) for the category 'sexual/minors'.
@@ -282,9 +274,7 @@ module OpenAI
282
274
  #
283
275
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Violence>]
284
276
  required :violence,
285
- -> {
286
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Violence]
287
- }
277
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Violence] }
288
278
 
289
279
  # @!attribute violence_graphic
290
280
  # The applied input type(s) for the category 'violence/graphic'.
@@ -14,12 +14,6 @@ module OpenAI
14
14
  # @return [String, nil]
15
15
  optional :after, String
16
16
 
17
- # @!attribute before
18
- # An item ID to list items before, used in pagination.
19
- #
20
- # @return [String, nil]
21
- optional :before, String
22
-
23
17
  # @!attribute include
24
18
  # Additional fields to include in the response. See the `include` parameter for
25
19
  # Response creation above for more information.
@@ -43,14 +37,12 @@ module OpenAI
43
37
  # @return [Symbol, OpenAI::Models::Responses::InputItemListParams::Order, nil]
44
38
  optional :order, enum: -> { OpenAI::Responses::InputItemListParams::Order }
45
39
 
46
- # @!method initialize(after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {})
40
+ # @!method initialize(after: nil, include: nil, limit: nil, order: nil, request_options: {})
47
41
  # Some parameter documentations has been truncated, see
48
42
  # {OpenAI::Models::Responses::InputItemListParams} for more details.
49
43
  #
50
44
  # @param after [String] An item ID to list items after, used in pagination.
51
45
  #
52
- # @param before [String] An item ID to list items before, used in pagination.
53
- #
54
46
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
55
47
  #
56
48
  # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between
@@ -121,7 +121,7 @@ module OpenAI
121
121
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
122
122
  # You can also use custom tools to call your own code.
123
123
  #
124
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>]
124
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::Tool::WebSearchTool, OpenAI::Models::Responses::WebSearchTool>]
125
125
  required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
126
126
 
127
127
  # @!attribute top_p
@@ -141,6 +141,13 @@ module OpenAI
141
141
  # @return [Boolean, nil]
142
142
  optional :background, OpenAI::Internal::Type::Boolean, nil?: true
143
143
 
144
+ # @!attribute conversation
145
+ # The conversation that this response belongs to. Input items and output items
146
+ # from this response are automatically added to this conversation.
147
+ #
148
+ # @return [OpenAI::Models::Responses::Response::Conversation, nil]
149
+ optional :conversation, -> { OpenAI::Responses::Response::Conversation }, nil?: true
150
+
144
151
  # @!attribute max_output_tokens
145
152
  # An upper bound for the number of tokens that can be generated for a response,
146
153
  # including visible output tokens and
@@ -162,6 +169,7 @@ module OpenAI
162
169
  # The unique ID of the previous response to the model. Use this to create
163
170
  # multi-turn conversations. Learn more about
164
171
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
172
+ # Cannot be used in conjunction with `conversation`.
165
173
  #
166
174
  # @return [String, nil]
167
175
  optional :previous_response_id, String, nil?: true
@@ -296,7 +304,7 @@ module OpenAI
296
304
  texts.join
297
305
  end
298
306
 
299
- # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
307
+ # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, conversation: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
300
308
  # Some parameter documentations has been truncated, see
301
309
  # {OpenAI::Models::Responses::Response} for more details.
302
310
  #
@@ -322,12 +330,14 @@ module OpenAI
322
330
  #
323
331
  # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
324
332
  #
325
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
333
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::Tool::WebSearchTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
326
334
  #
327
335
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
328
336
  #
329
337
  # @param background [Boolean, nil] Whether to run the model response in the background.
330
338
  #
339
+ # @param conversation [OpenAI::Models::Responses::Response::Conversation, nil] The conversation that this response belongs to. Input items and output items fro
340
+ #
331
341
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
332
342
  #
333
343
  # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
@@ -449,6 +459,21 @@ module OpenAI
449
459
  # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
450
460
  end
451
461
 
462
+ # @see OpenAI::Models::Responses::Response#conversation
463
+ class Conversation < OpenAI::Internal::Type::BaseModel
464
+ # @!attribute id
465
+ # The unique ID of the conversation.
466
+ #
467
+ # @return [String]
468
+ required :id, String
469
+
470
+ # @!method initialize(id:)
471
+ # The conversation that this response belongs to. Input items and output items
472
+ # from this response are automatically added to this conversation.
473
+ #
474
+ # @param id [String] The unique ID of the conversation.
475
+ end
476
+
452
477
  # Specifies the processing type used for serving the request.
453
478
  #
454
479
  # - If set to 'auto', then the request will be processed with the service tier
@@ -27,9 +27,7 @@ module OpenAI
27
27
  #
28
28
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck>]
29
29
  required :pending_safety_checks,
30
- -> {
31
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck]
32
- }
30
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck] }
33
31
 
34
32
  # @!attribute status
35
33
  # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
@@ -207,9 +205,7 @@ module OpenAI
207
205
  #
208
206
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path>]
209
207
  required :path,
210
- -> {
211
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path]
212
- }
208
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path] }
213
209
 
214
210
  # @!attribute type
215
211
  # Specifies the event type. For a drag action, this property is always set to
@@ -34,9 +34,7 @@ module OpenAI
34
34
  #
35
35
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck>, nil]
36
36
  optional :acknowledged_safety_checks,
37
- -> {
38
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]
39
- }
37
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck] }
40
38
 
41
39
  # @!attribute status
42
40
  # The status of the message input. One of `in_progress`, `completed`, or
@@ -0,0 +1,20 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class ResponseConversationParam < OpenAI::Internal::Type::BaseModel
7
+ # @!attribute id
8
+ # The unique ID of the conversation.
9
+ #
10
+ # @return [String]
11
+ required :id, String
12
+
13
+ # @!method initialize(id:)
14
+ # The conversation that this response belongs to.
15
+ #
16
+ # @param id [String] The unique ID of the conversation.
17
+ end
18
+ end
19
+ end
20
+ end
@@ -17,10 +17,25 @@ module OpenAI
17
17
  # @return [Boolean, nil]
18
18
  optional :background, OpenAI::Internal::Type::Boolean, nil?: true
19
19
 
20
+ # @!attribute conversation
21
+ # The conversation that this response belongs to. Items from this conversation are
22
+ # prepended to `input_items` for this response request. Input items and output
23
+ # items from this response are automatically added to this conversation after this
24
+ # response completes.
25
+ #
26
+ # @return [String, OpenAI::Models::Responses::ResponseConversationParam, nil]
27
+ optional :conversation,
28
+ union: -> {
29
+ OpenAI::Responses::ResponseCreateParams::Conversation
30
+ },
31
+ nil?: true
32
+
20
33
  # @!attribute include
21
34
  # Specify additional output data to include in the model response. Currently
22
35
  # supported values are:
23
36
  #
37
+ # - `web_search_call.action.sources`: Include the sources of the web search tool
38
+ # call.
24
39
  # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
25
40
  # in code interpreter tool call items.
26
41
  # - `computer_call_output.output.image_url`: Include image urls from the computer
@@ -112,6 +127,7 @@ module OpenAI
112
127
  # The unique ID of the previous response to the model. Use this to create
113
128
  # multi-turn conversations. Learn more about
114
129
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
130
+ # Cannot be used in conjunction with `conversation`.
115
131
  #
116
132
  # @return [String, nil]
117
133
  optional :previous_response_id, String, nil?: true
@@ -234,7 +250,7 @@ module OpenAI
234
250
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
235
251
  # You can also use custom tools to call your own code.
236
252
  #
237
- # @return [Array<OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>, nil]
253
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::Tool::WebSearchTool, OpenAI::Models::Responses::WebSearchTool>, nil]
238
254
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
239
255
 
240
256
  # @!attribute top_logprobs
@@ -278,12 +294,14 @@ module OpenAI
278
294
  # @return [String, nil]
279
295
  optional :user, String
280
296
 
281
- # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
297
+ # @!method initialize(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
282
298
  # Some parameter documentations has been truncated, see
283
299
  # {OpenAI::Models::Responses::ResponseCreateParams} for more details.
284
300
  #
285
301
  # @param background [Boolean, nil] Whether to run the model response in the background.
286
302
  #
303
+ # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are
304
+ #
287
305
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
288
306
  #
289
307
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
@@ -322,7 +340,7 @@ module OpenAI
322
340
  #
323
341
  # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
324
342
  #
325
- # @param tools [Array<OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
343
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::Tool::WebSearchTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
326
344
  #
327
345
  # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
328
346
  #
@@ -334,6 +352,23 @@ module OpenAI
334
352
  #
335
353
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
336
354
 
355
+ # The conversation that this response belongs to. Items from this conversation are
356
+ # prepended to `input_items` for this response request. Input items and output
357
+ # items from this response are automatically added to this conversation after this
358
+ # response completes.
359
+ module Conversation
360
+ extend OpenAI::Internal::Type::Union
361
+
362
+ # The unique ID of the conversation.
363
+ variant String
364
+
365
+ # The conversation that this response belongs to.
366
+ variant -> { OpenAI::Responses::ResponseConversationParam }
367
+
368
+ # @!method self.variants
369
+ # @return [Array(String, OpenAI::Models::Responses::ResponseConversationParam)]
370
+ end
371
+
337
372
  # Text, image, or file inputs to the model, used to generate a response.
338
373
  #
339
374
  # Learn more:
@@ -10,11 +10,13 @@ module OpenAI
10
10
  # @return [String]
11
11
  required :arguments, String
12
12
 
13
- # @!attribute parsed
14
- # The parsed contents of the arguments.
15
- #
16
- # @return [Object, nil]
17
- required :parsed, OpenAI::StructuredOutput::ParsedJson
13
+ response_only do
14
+ # @!attribute parsed
15
+ # The parsed contents of the arguments.
16
+ #
17
+ # @return [Object, nil]
18
+ required :parsed, OpenAI::StructuredOutput::ParsedJson
19
+ end
18
20
 
19
21
  # @!attribute call_id
20
22
  # The unique ID of the function tool call generated by the model.
@@ -76,7 +76,14 @@ module OpenAI
76
76
  # @return [Symbol, :search]
77
77
  required :type, const: :search
78
78
 
79
- # @!method initialize(query:, type: :search)
79
+ # @!attribute sources
80
+ # The sources used in the search.
81
+ #
82
+ # @return [Array<OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search::Source>, nil]
83
+ optional :sources,
84
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::Source] }
85
+
86
+ # @!method initialize(query:, sources: nil, type: :search)
80
87
  # Some parameter documentations has been truncated, see
81
88
  # {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search} for more
82
89
  # details.
@@ -85,7 +92,34 @@ module OpenAI
85
92
  #
86
93
  # @param query [String] The search query.
87
94
  #
95
+ # @param sources [Array<OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search::Source>] The sources used in the search.
96
+ #
88
97
  # @param type [Symbol, :search] The action type.
98
+
99
+ class Source < OpenAI::Internal::Type::BaseModel
100
+ # @!attribute type
101
+ # The type of source. Always `url`.
102
+ #
103
+ # @return [Symbol, :url]
104
+ required :type, const: :url
105
+
106
+ # @!attribute url
107
+ # The URL of the source.
108
+ #
109
+ # @return [String]
110
+ required :url, String
111
+
112
+ # @!method initialize(url:, type: :url)
113
+ # Some parameter documentations has been truncated, see
114
+ # {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search::Source}
115
+ # for more details.
116
+ #
117
+ # A source used in the search.
118
+ #
119
+ # @param url [String] The URL of the source.
120
+ #
121
+ # @param type [Symbol, :url] The type of source. Always `url`.
122
+ end
89
123
  end
90
124
 
91
125
  class OpenPage < OpenAI::Internal::Type::BaseModel
@@ -6,6 +6,8 @@ module OpenAI
6
6
  # Specify additional output data to include in the model response. Currently
7
7
  # supported values are:
8
8
  #
9
+ # - `web_search_call.action.sources`: Include the sources of the web search tool
10
+ # call.
9
11
  # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
10
12
  # in code interpreter tool call items.
11
13
  # - `computer_call_output.output.image_url`: Include image urls from the computer
@@ -95,10 +95,7 @@ module OpenAI
95
95
  # types.
96
96
  #
97
97
  # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
98
- required :content,
99
- -> {
100
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]
101
- }
98
+ required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }
102
99
 
103
100
  # @!attribute role
104
101
  # The role of the message input. One of `user`, `system`, or `developer`.
@@ -605,9 +602,7 @@ module OpenAI
605
602
  #
606
603
  # @return [Array<OpenAI::Models::Responses::ResponseInputItem::McpListTools::Tool>]
607
604
  required :tools,
608
- -> {
609
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::McpListTools::Tool]
610
- }
605
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::McpListTools::Tool] }
611
606
 
612
607
  # @!attribute type
613
608
  # The type of the item. Always `mcp_list_tools`.
@@ -15,10 +15,7 @@ module OpenAI
15
15
  # types.
16
16
  #
17
17
  # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
18
- required :content,
19
- -> {
20
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]
21
- }
18
+ required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }
22
19
 
23
20
  # @!attribute role
24
21
  # The role of the message input. One of `user`, `system`, or `developer`.
@@ -314,9 +314,7 @@ module OpenAI
314
314
  #
315
315
  # @return [Array<OpenAI::Models::Responses::ResponseOutputItem::McpListTools::Tool>]
316
316
  required :tools,
317
- -> {
318
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool]
319
- }
317
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool] }
320
318
 
321
319
  # @!attribute type
322
320
  # The type of the item. Always `mcp_list_tools`.
@@ -15,9 +15,7 @@ module OpenAI
15
15
  #
16
16
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal>]
17
17
  required :content,
18
- -> {
19
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content]
20
- }
18
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content] }
21
19
 
22
20
  # @!attribute role
23
21
  # The role of the output message. Always `assistant`.
@@ -9,9 +9,7 @@ module OpenAI
9
9
  #
10
10
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>]
11
11
  required :annotations,
12
- -> {
13
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation]
14
- }
12
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation] }
15
13
 
16
14
  # @!attribute text
17
15
  # The text output from the model.
@@ -19,11 +17,13 @@ module OpenAI
19
17
  # @return [String]
20
18
  required :text, String
21
19
 
22
- # @!attribute parsed
23
- # The parsed contents of the output, if JSON schema is specified.
24
- #
25
- # @return [Object, nil]
26
- optional :parsed, OpenAI::StructuredOutput::ParsedJson
20
+ response_only do
21
+ # @!attribute parsed
22
+ # The parsed contents of the output, if JSON schema is specified.
23
+ #
24
+ # @return [Object, nil]
25
+ optional :parsed, OpenAI::StructuredOutput::ParsedJson
26
+ end
27
27
 
28
28
  # @!attribute type
29
29
  # The type of the output text. Always `output_text`.
@@ -34,10 +34,7 @@ module OpenAI
34
34
  # @!attribute logprobs
35
35
  #
36
36
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob>, nil]
37
- optional :logprobs,
38
- -> {
39
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob]
40
- }
37
+ optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob] }
41
38
 
42
39
  # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text)
43
40
  # A text output from the model.
@@ -261,9 +258,7 @@ module OpenAI
261
258
  #
262
259
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob::TopLogprob>]
263
260
  required :top_logprobs,
264
- -> {
265
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob]
266
- }
261
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob] }
267
262
 
268
263
  # @!method initialize(token:, bytes:, logprob:, top_logprobs:)
269
264
  # The log probability of a token.
@@ -16,10 +16,7 @@ module OpenAI
16
16
  variant :"response.audio.done", -> { OpenAI::Responses::ResponseAudioDoneEvent }
17
17
 
18
18
  # Emitted when there is a partial transcript of audio.
19
- variant :"response.audio.transcript.delta",
20
- -> {
21
- OpenAI::Responses::ResponseAudioTranscriptDeltaEvent
22
- }
19
+ variant :"response.audio.transcript.delta", -> { OpenAI::Responses::ResponseAudioTranscriptDeltaEvent }
23
20
 
24
21
  # Emitted when the full audio transcript is completed.
25
22
  variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent }
@@ -157,16 +154,10 @@ module OpenAI
157
154
  -> { OpenAI::Responses::ResponseImageGenCallPartialImageEvent }
158
155
 
159
156
  # Emitted when there is a delta (partial update) to the arguments of an MCP tool call.
160
- variant :"response.mcp_call_arguments.delta",
161
- -> {
162
- OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent
163
- }
157
+ variant :"response.mcp_call_arguments.delta", -> { OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent }
164
158
 
165
159
  # Emitted when the arguments for an MCP tool call are finalized.
166
- variant :"response.mcp_call_arguments.done",
167
- -> {
168
- OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent
169
- }
160
+ variant :"response.mcp_call_arguments.done", -> { OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent }
170
161
 
171
162
  # Emitted when an MCP tool call has completed successfully.
172
163
  variant :"response.mcp_call.completed", -> { OpenAI::Responses::ResponseMcpCallCompletedEvent }
@@ -178,10 +169,7 @@ module OpenAI
178
169
  variant :"response.mcp_call.in_progress", -> { OpenAI::Responses::ResponseMcpCallInProgressEvent }
179
170
 
180
171
  # Emitted when the list of available MCP tools has been successfully retrieved.
181
- variant :"response.mcp_list_tools.completed",
182
- -> {
183
- OpenAI::Responses::ResponseMcpListToolsCompletedEvent
184
- }
172
+ variant :"response.mcp_list_tools.completed", -> { OpenAI::Responses::ResponseMcpListToolsCompletedEvent }
185
173
 
186
174
  # Emitted when the attempt to list available MCP tools has failed.
187
175
  variant :"response.mcp_list_tools.failed", -> { OpenAI::Responses::ResponseMcpListToolsFailedEvent }
@@ -85,9 +85,7 @@ module OpenAI
85
85
  #
86
86
  # @return [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>, nil]
87
87
  optional :top_logprobs,
88
- -> {
89
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
90
- }
88
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob] }
91
89
 
92
90
  # @!method initialize(token:, logprob:, top_logprobs: nil)
93
91
  # Some parameter documentations has been truncated, see
@@ -85,9 +85,7 @@ module OpenAI
85
85
  #
86
86
  # @return [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>, nil]
87
87
  optional :top_logprobs,
88
- -> {
89
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
90
- }
88
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob] }
91
89
 
92
90
  # @!method initialize(token:, logprob:, top_logprobs: nil)
93
91
  # Some parameter documentations has been truncated, see