openai 0.17.1 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (209)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +50 -0
  3. data/README.md +1 -1
  4. data/lib/openai/client.rb +4 -0
  5. data/lib/openai/helpers/structured_output/array_of.rb +2 -10
  6. data/lib/openai/helpers/structured_output/base_model.rb +4 -11
  7. data/lib/openai/helpers/structured_output/boolean.rb +1 -0
  8. data/lib/openai/helpers/structured_output/json_schema_converter.rb +19 -3
  9. data/lib/openai/helpers/structured_output/union_of.rb +2 -10
  10. data/lib/openai/internal/conversation_cursor_page.rb +92 -0
  11. data/lib/openai/internal/transport/base_client.rb +1 -4
  12. data/lib/openai/internal/transport/pooled_net_requester.rb +1 -9
  13. data/lib/openai/internal/util.rb +1 -1
  14. data/lib/openai/models/audio/transcription.rb +1 -4
  15. data/lib/openai/models/audio/transcription_create_params.rb +2 -7
  16. data/lib/openai/models/audio/transcription_text_done_event.rb +1 -4
  17. data/lib/openai/models/batch_create_params.rb +38 -1
  18. data/lib/openai/models/beta/assistant_create_params.rb +6 -19
  19. data/lib/openai/models/beta/assistant_stream_event.rb +6 -24
  20. data/lib/openai/models/beta/assistant_update_params.rb +1 -4
  21. data/lib/openai/models/beta/message_stream_event.rb +1 -4
  22. data/lib/openai/models/beta/run_step_stream_event.rb +1 -4
  23. data/lib/openai/models/beta/thread_create_and_run_params.rb +12 -34
  24. data/lib/openai/models/beta/thread_create_params.rb +7 -22
  25. data/lib/openai/models/beta/threads/message.rb +3 -10
  26. data/lib/openai/models/beta/threads/message_create_params.rb +2 -7
  27. data/lib/openai/models/beta/threads/run.rb +4 -9
  28. data/lib/openai/models/beta/threads/run_create_params.rb +5 -12
  29. data/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb +1 -3
  30. data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb +5 -17
  31. data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb +1 -3
  32. data/lib/openai/models/beta/threads/runs/file_search_tool_call.rb +4 -12
  33. data/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb +1 -4
  34. data/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb +1 -4
  35. data/lib/openai/models/beta/threads/text.rb +1 -4
  36. data/lib/openai/models/chat/chat_completion.rb +6 -6
  37. data/lib/openai/models/chat/chat_completion_chunk.rb +7 -9
  38. data/lib/openai/models/chat/chat_completion_custom_tool.rb +2 -7
  39. data/lib/openai/models/chat/completion_create_params.rb +7 -7
  40. data/lib/openai/models/conversations/computer_screenshot_content.rb +38 -0
  41. data/lib/openai/models/conversations/container_file_citation_body.rb +58 -0
  42. data/lib/openai/models/conversations/conversation.rb +51 -0
  43. data/lib/openai/models/conversations/conversation_create_params.rb +39 -0
  44. data/lib/openai/models/conversations/conversation_delete_params.rb +16 -0
  45. data/lib/openai/models/conversations/conversation_deleted.rb +29 -0
  46. data/lib/openai/models/conversations/conversation_deleted_resource.rb +30 -0
  47. data/lib/openai/models/conversations/conversation_item.rb +568 -0
  48. data/lib/openai/models/conversations/conversation_item_list.rb +55 -0
  49. data/lib/openai/models/conversations/conversation_retrieve_params.rb +16 -0
  50. data/lib/openai/models/conversations/conversation_update_params.rb +31 -0
  51. data/lib/openai/models/conversations/file_citation_body.rb +42 -0
  52. data/lib/openai/models/conversations/input_file_content.rb +42 -0
  53. data/lib/openai/models/conversations/input_image_content.rb +62 -0
  54. data/lib/openai/models/conversations/input_text_content.rb +26 -0
  55. data/lib/openai/models/conversations/item_create_params.rb +37 -0
  56. data/lib/openai/models/conversations/item_delete_params.rb +22 -0
  57. data/lib/openai/models/conversations/item_list_params.rb +84 -0
  58. data/lib/openai/models/conversations/item_retrieve_params.rb +36 -0
  59. data/lib/openai/models/conversations/lob_prob.rb +35 -0
  60. data/lib/openai/models/conversations/message.rb +115 -0
  61. data/lib/openai/models/conversations/output_text_content.rb +57 -0
  62. data/lib/openai/models/conversations/refusal_content.rb +26 -0
  63. data/lib/openai/models/conversations/summary_text_content.rb +23 -0
  64. data/lib/openai/models/conversations/text_content.rb +23 -0
  65. data/lib/openai/models/conversations/top_log_prob.rb +29 -0
  66. data/lib/openai/models/conversations/url_citation_body.rb +50 -0
  67. data/lib/openai/models/eval_create_params.rb +6 -20
  68. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +29 -53
  69. data/lib/openai/models/evals/create_eval_jsonl_run_data_source.rb +1 -3
  70. data/lib/openai/models/evals/run_create_params.rb +18 -54
  71. data/lib/openai/models/file_create_params.rb +37 -1
  72. data/lib/openai/models/fine_tuning/reinforcement_hyperparameters.rb +1 -4
  73. data/lib/openai/models/graders/text_similarity_grader.rb +6 -5
  74. data/lib/openai/models/moderation.rb +5 -15
  75. data/lib/openai/models/reasoning.rb +1 -1
  76. data/lib/openai/models/responses/input_item_list_params.rb +1 -9
  77. data/lib/openai/models/responses/response.rb +32 -9
  78. data/lib/openai/models/responses/response_computer_tool_call.rb +2 -6
  79. data/lib/openai/models/responses/response_computer_tool_call_output_item.rb +1 -3
  80. data/lib/openai/models/responses/response_conversation_param.rb +20 -0
  81. data/lib/openai/models/responses/response_create_params.rb +40 -9
  82. data/lib/openai/models/responses/response_input_item.rb +2 -7
  83. data/lib/openai/models/responses/response_input_message_item.rb +1 -4
  84. data/lib/openai/models/responses/response_output_item.rb +1 -3
  85. data/lib/openai/models/responses/response_output_message.rb +1 -3
  86. data/lib/openai/models/responses/response_output_text.rb +3 -10
  87. data/lib/openai/models/responses/response_stream_event.rb +4 -16
  88. data/lib/openai/models/responses/response_text_delta_event.rb +1 -3
  89. data/lib/openai/models/responses/response_text_done_event.rb +1 -3
  90. data/lib/openai/models/responses/tool.rb +145 -34
  91. data/lib/openai/models/upload_create_params.rb +37 -1
  92. data/lib/openai/models.rb +2 -0
  93. data/lib/openai/resources/batches.rb +3 -1
  94. data/lib/openai/resources/conversations/items.rb +141 -0
  95. data/lib/openai/resources/conversations.rb +112 -0
  96. data/lib/openai/resources/files.rb +4 -2
  97. data/lib/openai/resources/responses/input_items.rb +1 -3
  98. data/lib/openai/resources/responses.rb +8 -4
  99. data/lib/openai/resources/uploads.rb +3 -1
  100. data/lib/openai/version.rb +1 -1
  101. data/lib/openai.rb +31 -0
  102. data/rbi/openai/client.rbi +3 -0
  103. data/rbi/openai/errors.rbi +5 -5
  104. data/rbi/openai/helpers/structured_output/array_of.rbi +0 -3
  105. data/rbi/openai/helpers/structured_output/json_schema_converter.rbi +10 -0
  106. data/rbi/openai/internal/conversation_cursor_page.rbi +25 -0
  107. data/rbi/openai/models/batch_create_params.rbi +60 -0
  108. data/rbi/openai/models/beta/thread_create_and_run_params.rbi +3 -3
  109. data/rbi/openai/models/beta/threads/run.rbi +3 -3
  110. data/rbi/openai/models/beta/threads/run_create_params.rbi +3 -3
  111. data/rbi/openai/models/chat/chat_completion.rbi +6 -9
  112. data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -9
  113. data/rbi/openai/models/chat/completion_create_params.rbi +8 -11
  114. data/rbi/openai/models/conversations/computer_screenshot_content.rbi +60 -0
  115. data/rbi/openai/models/conversations/container_file_citation_body.rbi +82 -0
  116. data/rbi/openai/models/conversations/conversation.rbi +76 -0
  117. data/rbi/openai/models/conversations/conversation_create_params.rbi +144 -0
  118. data/rbi/openai/models/conversations/conversation_delete_params.rbi +32 -0
  119. data/rbi/openai/models/conversations/conversation_deleted.rbi +40 -0
  120. data/rbi/openai/models/conversations/conversation_deleted_resource.rbi +40 -0
  121. data/rbi/openai/models/conversations/conversation_item.rbi +835 -0
  122. data/rbi/openai/models/conversations/conversation_item_list.rbi +101 -0
  123. data/rbi/openai/models/conversations/conversation_retrieve_params.rbi +32 -0
  124. data/rbi/openai/models/conversations/conversation_update_params.rbi +56 -0
  125. data/rbi/openai/models/conversations/file_citation_body.rbi +61 -0
  126. data/rbi/openai/models/conversations/input_file_content.rbi +72 -0
  127. data/rbi/openai/models/conversations/input_image_content.rbi +113 -0
  128. data/rbi/openai/models/conversations/input_text_content.rbi +38 -0
  129. data/rbi/openai/models/conversations/item_create_params.rbi +150 -0
  130. data/rbi/openai/models/conversations/item_delete_params.rbi +40 -0
  131. data/rbi/openai/models/conversations/item_list_params.rbi +174 -0
  132. data/rbi/openai/models/conversations/item_retrieve_params.rbi +70 -0
  133. data/rbi/openai/models/conversations/lob_prob.rbi +50 -0
  134. data/rbi/openai/models/conversations/message.rbi +196 -0
  135. data/rbi/openai/models/conversations/output_text_content.rbi +110 -0
  136. data/rbi/openai/models/conversations/refusal_content.rbi +38 -0
  137. data/rbi/openai/models/conversations/summary_text_content.rbi +31 -0
  138. data/rbi/openai/models/conversations/text_content.rbi +28 -0
  139. data/rbi/openai/models/conversations/top_log_prob.rbi +41 -0
  140. data/rbi/openai/models/conversations/url_citation_body.rbi +74 -0
  141. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +33 -33
  142. data/rbi/openai/models/file_create_params.rbi +56 -0
  143. data/rbi/openai/models/graders/text_similarity_grader.rbi +11 -6
  144. data/rbi/openai/models/reasoning.rbi +1 -1
  145. data/rbi/openai/models/responses/input_item_list_params.rbi +0 -11
  146. data/rbi/openai/models/responses/response.rbi +57 -11
  147. data/rbi/openai/models/responses/response_conversation_param.rbi +33 -0
  148. data/rbi/openai/models/responses/response_create_params.rbi +62 -11
  149. data/rbi/openai/models/responses/tool.rbi +243 -31
  150. data/rbi/openai/models/upload_create_params.rbi +56 -0
  151. data/rbi/openai/models.rbi +2 -0
  152. data/rbi/openai/resources/batches.rbi +5 -0
  153. data/rbi/openai/resources/beta/threads/runs.rbi +2 -2
  154. data/rbi/openai/resources/beta/threads.rbi +2 -2
  155. data/rbi/openai/resources/chat/completions.rbi +6 -8
  156. data/rbi/openai/resources/conversations/items.rbi +152 -0
  157. data/rbi/openai/resources/conversations.rbi +110 -0
  158. data/rbi/openai/resources/files.rbi +5 -1
  159. data/rbi/openai/resources/responses/input_items.rbi +0 -3
  160. data/rbi/openai/resources/responses.rbi +32 -8
  161. data/rbi/openai/resources/uploads.rbi +4 -0
  162. data/sig/openai/client.rbs +2 -0
  163. data/sig/openai/internal/conversation_cursor_page.rbs +15 -0
  164. data/sig/openai/models/batch_create_params.rbs +22 -1
  165. data/sig/openai/models/conversations/computer_screenshot_content.rbs +28 -0
  166. data/sig/openai/models/conversations/container_file_citation_body.rbs +47 -0
  167. data/sig/openai/models/conversations/conversation.rbs +37 -0
  168. data/sig/openai/models/conversations/conversation_create_params.rbs +33 -0
  169. data/sig/openai/models/conversations/conversation_delete_params.rbs +17 -0
  170. data/sig/openai/models/conversations/conversation_deleted.rbs +28 -0
  171. data/sig/openai/models/conversations/conversation_deleted_resource.rbs +28 -0
  172. data/sig/openai/models/conversations/conversation_item.rbs +403 -0
  173. data/sig/openai/models/conversations/conversation_item_list.rbs +44 -0
  174. data/sig/openai/models/conversations/conversation_retrieve_params.rbs +17 -0
  175. data/sig/openai/models/conversations/conversation_update_params.rbs +26 -0
  176. data/sig/openai/models/conversations/file_citation_body.rbs +37 -0
  177. data/sig/openai/models/conversations/input_file_content.rbs +41 -0
  178. data/sig/openai/models/conversations/input_image_content.rbs +49 -0
  179. data/sig/openai/models/conversations/input_text_content.rbs +17 -0
  180. data/sig/openai/models/conversations/item_create_params.rbs +37 -0
  181. data/sig/openai/models/conversations/item_delete_params.rbs +25 -0
  182. data/sig/openai/models/conversations/item_list_params.rbs +66 -0
  183. data/sig/openai/models/conversations/item_retrieve_params.rbs +37 -0
  184. data/sig/openai/models/conversations/lob_prob.rbs +37 -0
  185. data/sig/openai/models/conversations/message.rbs +95 -0
  186. data/sig/openai/models/conversations/output_text_content.rbs +52 -0
  187. data/sig/openai/models/conversations/refusal_content.rbs +17 -0
  188. data/sig/openai/models/conversations/summary_text_content.rbs +17 -0
  189. data/sig/openai/models/conversations/text_content.rbs +17 -0
  190. data/sig/openai/models/conversations/top_log_prob.rbs +28 -0
  191. data/sig/openai/models/conversations/url_citation_body.rbs +42 -0
  192. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +22 -22
  193. data/sig/openai/models/file_create_params.rbs +22 -1
  194. data/sig/openai/models/graders/text_similarity_grader.rbs +3 -1
  195. data/sig/openai/models/responses/input_item_list_params.rbs +0 -7
  196. data/sig/openai/models/responses/response.rbs +15 -0
  197. data/sig/openai/models/responses/response_conversation_param.rbs +15 -0
  198. data/sig/openai/models/responses/response_create_params.rbs +14 -0
  199. data/sig/openai/models/responses/tool.rbs +83 -18
  200. data/sig/openai/models/upload_create_params.rbs +22 -1
  201. data/sig/openai/models.rbs +2 -0
  202. data/sig/openai/resources/batches.rbs +1 -0
  203. data/sig/openai/resources/conversations/items.rbs +38 -0
  204. data/sig/openai/resources/conversations.rbs +31 -0
  205. data/sig/openai/resources/files.rbs +1 -0
  206. data/sig/openai/resources/responses/input_items.rbs +0 -1
  207. data/sig/openai/resources/responses.rbs +2 -0
  208. data/sig/openai/resources/uploads.rbs +1 -0
  209. metadata +95 -2
@@ -176,9 +176,7 @@ module OpenAI
176
176
  #
177
177
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Harassment>]
178
178
  required :harassment,
179
- -> {
180
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Harassment]
181
- }
179
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Harassment] }
182
180
 
183
181
  # @!attribute harassment_threatening
184
182
  # The applied input type(s) for the category 'harassment/threatening'.
@@ -195,9 +193,7 @@ module OpenAI
195
193
  #
196
194
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Hate>]
197
195
  required :hate,
198
- -> {
199
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Hate]
200
- }
196
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Hate] }
201
197
 
202
198
  # @!attribute hate_threatening
203
199
  # The applied input type(s) for the category 'hate/threatening'.
@@ -214,9 +210,7 @@ module OpenAI
214
210
  #
215
211
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Illicit>]
216
212
  required :illicit,
217
- -> {
218
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Illicit]
219
- }
213
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Illicit] }
220
214
 
221
215
  # @!attribute illicit_violent
222
216
  # The applied input type(s) for the category 'illicit/violent'.
@@ -263,9 +257,7 @@ module OpenAI
263
257
  #
264
258
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Sexual>]
265
259
  required :sexual,
266
- -> {
267
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Sexual]
268
- }
260
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Sexual] }
269
261
 
270
262
  # @!attribute sexual_minors
271
263
  # The applied input type(s) for the category 'sexual/minors'.
@@ -282,9 +274,7 @@ module OpenAI
282
274
  #
283
275
  # @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Violence>]
284
276
  required :violence,
285
- -> {
286
- OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Violence]
287
- }
277
+ -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Violence] }
288
278
 
289
279
  # @!attribute violence_graphic
290
280
  # The applied input type(s) for the category 'violence/graphic'.
@@ -37,7 +37,7 @@ module OpenAI
37
37
  # Some parameter documentations has been truncated, see
38
38
  # {OpenAI::Models::Reasoning} for more details.
39
39
  #
40
- # **o-series models only**
40
+ # **gpt-5 and o-series models only**
41
41
  #
42
42
  # Configuration options for
43
43
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -14,12 +14,6 @@ module OpenAI
14
14
  # @return [String, nil]
15
15
  optional :after, String
16
16
 
17
- # @!attribute before
18
- # An item ID to list items before, used in pagination.
19
- #
20
- # @return [String, nil]
21
- optional :before, String
22
-
23
17
  # @!attribute include
24
18
  # Additional fields to include in the response. See the `include` parameter for
25
19
  # Response creation above for more information.
@@ -43,14 +37,12 @@ module OpenAI
43
37
  # @return [Symbol, OpenAI::Models::Responses::InputItemListParams::Order, nil]
44
38
  optional :order, enum: -> { OpenAI::Responses::InputItemListParams::Order }
45
39
 
46
- # @!method initialize(after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {})
40
+ # @!method initialize(after: nil, include: nil, limit: nil, order: nil, request_options: {})
47
41
  # Some parameter documentations has been truncated, see
48
42
  # {OpenAI::Models::Responses::InputItemListParams} for more details.
49
43
  #
50
44
  # @param after [String] An item ID to list items after, used in pagination.
51
45
  #
52
- # @param before [String] An item ID to list items before, used in pagination.
53
- #
54
46
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
55
47
  #
56
48
  # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between
@@ -141,6 +141,13 @@ module OpenAI
141
141
  # @return [Boolean, nil]
142
142
  optional :background, OpenAI::Internal::Type::Boolean, nil?: true
143
143
 
144
+ # @!attribute conversation
145
+ # The conversation that this response belongs to. Input items and output items
146
+ # from this response are automatically added to this conversation.
147
+ #
148
+ # @return [OpenAI::Models::Responses::Response::Conversation, nil]
149
+ optional :conversation, -> { OpenAI::Responses::Response::Conversation }, nil?: true
150
+
144
151
  # @!attribute max_output_tokens
145
152
  # An upper bound for the number of tokens that can be generated for a response,
146
153
  # including visible output tokens and
@@ -162,6 +169,7 @@ module OpenAI
162
169
  # The unique ID of the previous response to the model. Use this to create
163
170
  # multi-turn conversations. Learn more about
164
171
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
172
+ # Cannot be used in conjunction with `conversation`.
165
173
  #
166
174
  # @return [String, nil]
167
175
  optional :previous_response_id, String, nil?: true
@@ -182,7 +190,7 @@ module OpenAI
182
190
  optional :prompt_cache_key, String
183
191
 
184
192
  # @!attribute reasoning
185
- # **o-series models only**
193
+ # **gpt-5 and o-series models only**
186
194
  #
187
195
  # Configuration options for
188
196
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -209,9 +217,8 @@ module OpenAI
209
217
  # - If set to 'default', then the request will be processed with the standard
210
218
  # pricing and performance for the selected model.
211
219
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
212
- # 'priority', then the request will be processed with the corresponding service
213
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
214
- # Priority processing.
220
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
221
+ # will be processed with the corresponding service tier.
215
222
  # - When not set, the default behavior is 'auto'.
216
223
  #
217
224
  # When the `service_tier` parameter is set, the response body will include the
@@ -297,7 +304,7 @@ module OpenAI
297
304
  texts.join
298
305
  end
299
306
 
300
- # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
307
+ # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, conversation: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
301
308
  # Some parameter documentations has been truncated, see
302
309
  # {OpenAI::Models::Responses::Response} for more details.
303
310
  #
@@ -329,6 +336,8 @@ module OpenAI
329
336
  #
330
337
  # @param background [Boolean, nil] Whether to run the model response in the background.
331
338
  #
339
+ # @param conversation [OpenAI::Models::Responses::Response::Conversation, nil] The conversation that this response belongs to. Input items and output items fro
340
+ #
332
341
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
333
342
  #
334
343
  # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
@@ -339,7 +348,7 @@ module OpenAI
339
348
  #
340
349
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
341
350
  #
342
- # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
351
+ # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
343
352
  #
344
353
  # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
345
354
  #
@@ -450,6 +459,21 @@ module OpenAI
450
459
  # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
451
460
  end
452
461
 
462
+ # @see OpenAI::Models::Responses::Response#conversation
463
+ class Conversation < OpenAI::Internal::Type::BaseModel
464
+ # @!attribute id
465
+ # The unique ID of the conversation.
466
+ #
467
+ # @return [String]
468
+ required :id, String
469
+
470
+ # @!method initialize(id:)
471
+ # The conversation that this response belongs to. Input items and output items
472
+ # from this response are automatically added to this conversation.
473
+ #
474
+ # @param id [String] The unique ID of the conversation.
475
+ end
476
+
453
477
  # Specifies the processing type used for serving the request.
454
478
  #
455
479
  # - If set to 'auto', then the request will be processed with the service tier
@@ -458,9 +482,8 @@ module OpenAI
458
482
  # - If set to 'default', then the request will be processed with the standard
459
483
  # pricing and performance for the selected model.
460
484
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
461
- # 'priority', then the request will be processed with the corresponding service
462
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
463
- # Priority processing.
485
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
486
+ # will be processed with the corresponding service tier.
464
487
  # - When not set, the default behavior is 'auto'.
465
488
  #
466
489
  # When the `service_tier` parameter is set, the response body will include the
@@ -27,9 +27,7 @@ module OpenAI
27
27
  #
28
28
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck>]
29
29
  required :pending_safety_checks,
30
- -> {
31
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck]
32
- }
30
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck] }
33
31
 
34
32
  # @!attribute status
35
33
  # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
@@ -207,9 +205,7 @@ module OpenAI
207
205
  #
208
206
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path>]
209
207
  required :path,
210
- -> {
211
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path]
212
- }
208
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path] }
213
209
 
214
210
  # @!attribute type
215
211
  # Specifies the event type. For a drag action, this property is always set to
@@ -34,9 +34,7 @@ module OpenAI
34
34
  #
35
35
  # @return [Array<OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck>, nil]
36
36
  optional :acknowledged_safety_checks,
37
- -> {
38
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]
39
- }
37
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck] }
40
38
 
41
39
  # @!attribute status
42
40
  # The status of the message input. One of `in_progress`, `completed`, or
@@ -0,0 +1,20 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class ResponseConversationParam < OpenAI::Internal::Type::BaseModel
7
+ # @!attribute id
8
+ # The unique ID of the conversation.
9
+ #
10
+ # @return [String]
11
+ required :id, String
12
+
13
+ # @!method initialize(id:)
14
+ # The conversation that this response belongs to.
15
+ #
16
+ # @param id [String] The unique ID of the conversation.
17
+ end
18
+ end
19
+ end
20
+ end
@@ -17,6 +17,19 @@ module OpenAI
17
17
  # @return [Boolean, nil]
18
18
  optional :background, OpenAI::Internal::Type::Boolean, nil?: true
19
19
 
20
+ # @!attribute conversation
21
+ # The conversation that this response belongs to. Items from this conversation are
22
+ # prepended to `input_items` for this response request. Input items and output
23
+ # items from this response are automatically added to this conversation after this
24
+ # response completes.
25
+ #
26
+ # @return [String, OpenAI::Models::Responses::ResponseConversationParam, nil]
27
+ optional :conversation,
28
+ union: -> {
29
+ OpenAI::Responses::ResponseCreateParams::Conversation
30
+ },
31
+ nil?: true
32
+
20
33
  # @!attribute include
21
34
  # Specify additional output data to include in the model response. Currently
22
35
  # supported values are:
@@ -112,6 +125,7 @@ module OpenAI
112
125
  # The unique ID of the previous response to the model. Use this to create
113
126
  # multi-turn conversations. Learn more about
114
127
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
128
+ # Cannot be used in conjunction with `conversation`.
115
129
  #
116
130
  # @return [String, nil]
117
131
  optional :previous_response_id, String, nil?: true
@@ -132,7 +146,7 @@ module OpenAI
132
146
  optional :prompt_cache_key, String
133
147
 
134
148
  # @!attribute reasoning
135
- # **o-series models only**
149
+ # **gpt-5 and o-series models only**
136
150
  #
137
151
  # Configuration options for
138
152
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -159,9 +173,8 @@ module OpenAI
159
173
  # - If set to 'default', then the request will be processed with the standard
160
174
  # pricing and performance for the selected model.
161
175
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
162
- # 'priority', then the request will be processed with the corresponding service
163
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
164
- # Priority processing.
176
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
177
+ # will be processed with the corresponding service tier.
165
178
  # - When not set, the default behavior is 'auto'.
166
179
  #
167
180
  # When the `service_tier` parameter is set, the response body will include the
@@ -279,12 +292,14 @@ module OpenAI
279
292
  # @return [String, nil]
280
293
  optional :user, String
281
294
 
282
- # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
295
+ # @!method initialize(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
283
296
  # Some parameter documentations has been truncated, see
284
297
  # {OpenAI::Models::Responses::ResponseCreateParams} for more details.
285
298
  #
286
299
  # @param background [Boolean, nil] Whether to run the model response in the background.
287
300
  #
301
+ # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are
302
+ #
288
303
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
289
304
  #
290
305
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
@@ -307,7 +322,7 @@ module OpenAI
307
322
  #
308
323
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
309
324
  #
310
- # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
325
+ # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
311
326
  #
312
327
  # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
313
328
  #
@@ -335,6 +350,23 @@ module OpenAI
335
350
  #
336
351
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
337
352
 
353
+ # The conversation that this response belongs to. Items from this conversation are
354
+ # prepended to `input_items` for this response request. Input items and output
355
+ # items from this response are automatically added to this conversation after this
356
+ # response completes.
357
+ module Conversation
358
+ extend OpenAI::Internal::Type::Union
359
+
360
+ # The unique ID of the conversation.
361
+ variant String
362
+
363
+ # The conversation that this response belongs to.
364
+ variant -> { OpenAI::Responses::ResponseConversationParam }
365
+
366
+ # @!method self.variants
367
+ # @return [Array(String, OpenAI::Models::Responses::ResponseConversationParam)]
368
+ end
369
+
338
370
  # Text, image, or file inputs to the model, used to generate a response.
339
371
  #
340
372
  # Learn more:
@@ -367,9 +399,8 @@ module OpenAI
367
399
  # - If set to 'default', then the request will be processed with the standard
368
400
  # pricing and performance for the selected model.
369
401
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
370
- # 'priority', then the request will be processed with the corresponding service
371
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
372
- # Priority processing.
402
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
403
+ # will be processed with the corresponding service tier.
373
404
  # - When not set, the default behavior is 'auto'.
374
405
  #
375
406
  # When the `service_tier` parameter is set, the response body will include the
@@ -95,10 +95,7 @@ module OpenAI
95
95
  # types.
96
96
  #
97
97
  # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
98
- required :content,
99
- -> {
100
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]
101
- }
98
+ required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }
102
99
 
103
100
  # @!attribute role
104
101
  # The role of the message input. One of `user`, `system`, or `developer`.
@@ -605,9 +602,7 @@ module OpenAI
605
602
  #
606
603
  # @return [Array<OpenAI::Models::Responses::ResponseInputItem::McpListTools::Tool>]
607
604
  required :tools,
608
- -> {
609
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::McpListTools::Tool]
610
- }
605
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::McpListTools::Tool] }
611
606
 
612
607
  # @!attribute type
613
608
  # The type of the item. Always `mcp_list_tools`.
@@ -15,10 +15,7 @@ module OpenAI
15
15
  # types.
16
16
  #
17
17
  # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
18
- required :content,
19
- -> {
20
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]
21
- }
18
+ required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }
22
19
 
23
20
  # @!attribute role
24
21
  # The role of the message input. One of `user`, `system`, or `developer`.
@@ -314,9 +314,7 @@ module OpenAI
314
314
  #
315
315
  # @return [Array<OpenAI::Models::Responses::ResponseOutputItem::McpListTools::Tool>]
316
316
  required :tools,
317
- -> {
318
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool]
319
- }
317
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool] }
320
318
 
321
319
  # @!attribute type
322
320
  # The type of the item. Always `mcp_list_tools`.
@@ -15,9 +15,7 @@ module OpenAI
15
15
  #
16
16
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal>]
17
17
  required :content,
18
- -> {
19
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content]
20
- }
18
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content] }
21
19
 
22
20
  # @!attribute role
23
21
  # The role of the output message. Always `assistant`.
@@ -9,9 +9,7 @@ module OpenAI
9
9
  #
10
10
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>]
11
11
  required :annotations,
12
- -> {
13
- OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation]
14
- }
12
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation] }
15
13
 
16
14
  # @!attribute text
17
15
  # The text output from the model.
@@ -34,10 +32,7 @@ module OpenAI
34
32
  # @!attribute logprobs
35
33
  #
36
34
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob>, nil]
37
- optional :logprobs,
38
- -> {
39
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob]
40
- }
35
+ optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob] }
41
36
 
42
37
  # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text)
43
38
  # A text output from the model.
@@ -261,9 +256,7 @@ module OpenAI
261
256
  #
262
257
  # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob::TopLogprob>]
263
258
  required :top_logprobs,
264
- -> {
265
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob]
266
- }
259
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob] }
267
260
 
268
261
  # @!method initialize(token:, bytes:, logprob:, top_logprobs:)
269
262
  # The log probability of a token.
@@ -16,10 +16,7 @@ module OpenAI
16
16
  variant :"response.audio.done", -> { OpenAI::Responses::ResponseAudioDoneEvent }
17
17
 
18
18
  # Emitted when there is a partial transcript of audio.
19
- variant :"response.audio.transcript.delta",
20
- -> {
21
- OpenAI::Responses::ResponseAudioTranscriptDeltaEvent
22
- }
19
+ variant :"response.audio.transcript.delta", -> { OpenAI::Responses::ResponseAudioTranscriptDeltaEvent }
23
20
 
24
21
  # Emitted when the full audio transcript is completed.
25
22
  variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent }
@@ -157,16 +154,10 @@ module OpenAI
157
154
  -> { OpenAI::Responses::ResponseImageGenCallPartialImageEvent }
158
155
 
159
156
  # Emitted when there is a delta (partial update) to the arguments of an MCP tool call.
160
- variant :"response.mcp_call_arguments.delta",
161
- -> {
162
- OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent
163
- }
157
+ variant :"response.mcp_call_arguments.delta", -> { OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent }
164
158
 
165
159
  # Emitted when the arguments for an MCP tool call are finalized.
166
- variant :"response.mcp_call_arguments.done",
167
- -> {
168
- OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent
169
- }
160
+ variant :"response.mcp_call_arguments.done", -> { OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent }
170
161
 
171
162
  # Emitted when an MCP tool call has completed successfully.
172
163
  variant :"response.mcp_call.completed", -> { OpenAI::Responses::ResponseMcpCallCompletedEvent }
@@ -178,10 +169,7 @@ module OpenAI
178
169
  variant :"response.mcp_call.in_progress", -> { OpenAI::Responses::ResponseMcpCallInProgressEvent }
179
170
 
180
171
  # Emitted when the list of available MCP tools has been successfully retrieved.
181
- variant :"response.mcp_list_tools.completed",
182
- -> {
183
- OpenAI::Responses::ResponseMcpListToolsCompletedEvent
184
- }
172
+ variant :"response.mcp_list_tools.completed", -> { OpenAI::Responses::ResponseMcpListToolsCompletedEvent }
185
173
 
186
174
  # Emitted when the attempt to list available MCP tools has failed.
187
175
  variant :"response.mcp_list_tools.failed", -> { OpenAI::Responses::ResponseMcpListToolsFailedEvent }
@@ -85,9 +85,7 @@ module OpenAI
85
85
  #
86
86
  # @return [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>, nil]
87
87
  optional :top_logprobs,
88
- -> {
89
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
90
- }
88
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob] }
91
89
 
92
90
  # @!method initialize(token:, logprob:, top_logprobs: nil)
93
91
  # Some parameter documentations has been truncated, see
@@ -85,9 +85,7 @@ module OpenAI
85
85
  #
86
86
  # @return [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>, nil]
87
87
  optional :top_logprobs,
88
- -> {
89
- OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
90
- }
88
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob] }
91
89
 
92
90
  # @!method initialize(token:, logprob:, top_logprobs: nil)
93
91
  # Some parameter documentations has been truncated, see