openai 0.17.1 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (209)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +50 -0
  3. data/README.md +1 -1
  4. data/lib/openai/client.rb +4 -0
  5. data/lib/openai/helpers/structured_output/array_of.rb +2 -10
  6. data/lib/openai/helpers/structured_output/base_model.rb +4 -11
  7. data/lib/openai/helpers/structured_output/boolean.rb +1 -0
  8. data/lib/openai/helpers/structured_output/json_schema_converter.rb +19 -3
  9. data/lib/openai/helpers/structured_output/union_of.rb +2 -10
  10. data/lib/openai/internal/conversation_cursor_page.rb +92 -0
  11. data/lib/openai/internal/transport/base_client.rb +1 -4
  12. data/lib/openai/internal/transport/pooled_net_requester.rb +1 -9
  13. data/lib/openai/internal/util.rb +1 -1
  14. data/lib/openai/models/audio/transcription.rb +1 -4
  15. data/lib/openai/models/audio/transcription_create_params.rb +2 -7
  16. data/lib/openai/models/audio/transcription_text_done_event.rb +1 -4
  17. data/lib/openai/models/batch_create_params.rb +38 -1
  18. data/lib/openai/models/beta/assistant_create_params.rb +6 -19
  19. data/lib/openai/models/beta/assistant_stream_event.rb +6 -24
  20. data/lib/openai/models/beta/assistant_update_params.rb +1 -4
  21. data/lib/openai/models/beta/message_stream_event.rb +1 -4
  22. data/lib/openai/models/beta/run_step_stream_event.rb +1 -4
  23. data/lib/openai/models/beta/thread_create_and_run_params.rb +12 -34
  24. data/lib/openai/models/beta/thread_create_params.rb +7 -22
  25. data/lib/openai/models/beta/threads/message.rb +3 -10
  26. data/lib/openai/models/beta/threads/message_create_params.rb +2 -7
  27. data/lib/openai/models/beta/threads/run.rb +4 -9
  28. data/lib/openai/models/beta/threads/run_create_params.rb +5 -12
  29. data/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb +1 -3
  30. data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb +5 -17
  31. data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb +1 -3
  32. data/lib/openai/models/beta/threads/runs/file_search_tool_call.rb +4 -12
  33. data/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb +1 -4
  34. data/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb +1 -4
  35. data/lib/openai/models/beta/threads/text.rb +1 -4
  36. data/lib/openai/models/chat/chat_completion.rb +6 -6
  37. data/lib/openai/models/chat/chat_completion_chunk.rb +7 -9
  38. data/lib/openai/models/chat/chat_completion_custom_tool.rb +2 -7
  39. data/lib/openai/models/chat/completion_create_params.rb +7 -7
  40. data/lib/openai/models/conversations/computer_screenshot_content.rb +38 -0
  41. data/lib/openai/models/conversations/container_file_citation_body.rb +58 -0
  42. data/lib/openai/models/conversations/conversation.rb +51 -0
  43. data/lib/openai/models/conversations/conversation_create_params.rb +39 -0
  44. data/lib/openai/models/conversations/conversation_delete_params.rb +16 -0
  45. data/lib/openai/models/conversations/conversation_deleted.rb +29 -0
  46. data/lib/openai/models/conversations/conversation_deleted_resource.rb +30 -0
  47. data/lib/openai/models/conversations/conversation_item.rb +568 -0
  48. data/lib/openai/models/conversations/conversation_item_list.rb +55 -0
  49. data/lib/openai/models/conversations/conversation_retrieve_params.rb +16 -0
  50. data/lib/openai/models/conversations/conversation_update_params.rb +31 -0
  51. data/lib/openai/models/conversations/file_citation_body.rb +42 -0
  52. data/lib/openai/models/conversations/input_file_content.rb +42 -0
  53. data/lib/openai/models/conversations/input_image_content.rb +62 -0
  54. data/lib/openai/models/conversations/input_text_content.rb +26 -0
  55. data/lib/openai/models/conversations/item_create_params.rb +37 -0
  56. data/lib/openai/models/conversations/item_delete_params.rb +22 -0
  57. data/lib/openai/models/conversations/item_list_params.rb +84 -0
  58. data/lib/openai/models/conversations/item_retrieve_params.rb +36 -0
  59. data/lib/openai/models/conversations/lob_prob.rb +35 -0
  60. data/lib/openai/models/conversations/message.rb +115 -0
  61. data/lib/openai/models/conversations/output_text_content.rb +57 -0
  62. data/lib/openai/models/conversations/refusal_content.rb +26 -0
  63. data/lib/openai/models/conversations/summary_text_content.rb +23 -0
  64. data/lib/openai/models/conversations/text_content.rb +23 -0
  65. data/lib/openai/models/conversations/top_log_prob.rb +29 -0
  66. data/lib/openai/models/conversations/url_citation_body.rb +50 -0
  67. data/lib/openai/models/eval_create_params.rb +6 -20
  68. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +29 -53
  69. data/lib/openai/models/evals/create_eval_jsonl_run_data_source.rb +1 -3
  70. data/lib/openai/models/evals/run_create_params.rb +18 -54
  71. data/lib/openai/models/file_create_params.rb +37 -1
  72. data/lib/openai/models/fine_tuning/reinforcement_hyperparameters.rb +1 -4
  73. data/lib/openai/models/graders/text_similarity_grader.rb +6 -5
  74. data/lib/openai/models/moderation.rb +5 -15
  75. data/lib/openai/models/reasoning.rb +1 -1
  76. data/lib/openai/models/responses/input_item_list_params.rb +1 -9
  77. data/lib/openai/models/responses/response.rb +32 -9
  78. data/lib/openai/models/responses/response_computer_tool_call.rb +2 -6
  79. data/lib/openai/models/responses/response_computer_tool_call_output_item.rb +1 -3
  80. data/lib/openai/models/responses/response_conversation_param.rb +20 -0
  81. data/lib/openai/models/responses/response_create_params.rb +40 -9
  82. data/lib/openai/models/responses/response_input_item.rb +2 -7
  83. data/lib/openai/models/responses/response_input_message_item.rb +1 -4
  84. data/lib/openai/models/responses/response_output_item.rb +1 -3
  85. data/lib/openai/models/responses/response_output_message.rb +1 -3
  86. data/lib/openai/models/responses/response_output_text.rb +3 -10
  87. data/lib/openai/models/responses/response_stream_event.rb +4 -16
  88. data/lib/openai/models/responses/response_text_delta_event.rb +1 -3
  89. data/lib/openai/models/responses/response_text_done_event.rb +1 -3
  90. data/lib/openai/models/responses/tool.rb +145 -34
  91. data/lib/openai/models/upload_create_params.rb +37 -1
  92. data/lib/openai/models.rb +2 -0
  93. data/lib/openai/resources/batches.rb +3 -1
  94. data/lib/openai/resources/conversations/items.rb +141 -0
  95. data/lib/openai/resources/conversations.rb +112 -0
  96. data/lib/openai/resources/files.rb +4 -2
  97. data/lib/openai/resources/responses/input_items.rb +1 -3
  98. data/lib/openai/resources/responses.rb +8 -4
  99. data/lib/openai/resources/uploads.rb +3 -1
  100. data/lib/openai/version.rb +1 -1
  101. data/lib/openai.rb +31 -0
  102. data/rbi/openai/client.rbi +3 -0
  103. data/rbi/openai/errors.rbi +5 -5
  104. data/rbi/openai/helpers/structured_output/array_of.rbi +0 -3
  105. data/rbi/openai/helpers/structured_output/json_schema_converter.rbi +10 -0
  106. data/rbi/openai/internal/conversation_cursor_page.rbi +25 -0
  107. data/rbi/openai/models/batch_create_params.rbi +60 -0
  108. data/rbi/openai/models/beta/thread_create_and_run_params.rbi +3 -3
  109. data/rbi/openai/models/beta/threads/run.rbi +3 -3
  110. data/rbi/openai/models/beta/threads/run_create_params.rbi +3 -3
  111. data/rbi/openai/models/chat/chat_completion.rbi +6 -9
  112. data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -9
  113. data/rbi/openai/models/chat/completion_create_params.rbi +8 -11
  114. data/rbi/openai/models/conversations/computer_screenshot_content.rbi +60 -0
  115. data/rbi/openai/models/conversations/container_file_citation_body.rbi +82 -0
  116. data/rbi/openai/models/conversations/conversation.rbi +76 -0
  117. data/rbi/openai/models/conversations/conversation_create_params.rbi +144 -0
  118. data/rbi/openai/models/conversations/conversation_delete_params.rbi +32 -0
  119. data/rbi/openai/models/conversations/conversation_deleted.rbi +40 -0
  120. data/rbi/openai/models/conversations/conversation_deleted_resource.rbi +40 -0
  121. data/rbi/openai/models/conversations/conversation_item.rbi +835 -0
  122. data/rbi/openai/models/conversations/conversation_item_list.rbi +101 -0
  123. data/rbi/openai/models/conversations/conversation_retrieve_params.rbi +32 -0
  124. data/rbi/openai/models/conversations/conversation_update_params.rbi +56 -0
  125. data/rbi/openai/models/conversations/file_citation_body.rbi +61 -0
  126. data/rbi/openai/models/conversations/input_file_content.rbi +72 -0
  127. data/rbi/openai/models/conversations/input_image_content.rbi +113 -0
  128. data/rbi/openai/models/conversations/input_text_content.rbi +38 -0
  129. data/rbi/openai/models/conversations/item_create_params.rbi +150 -0
  130. data/rbi/openai/models/conversations/item_delete_params.rbi +40 -0
  131. data/rbi/openai/models/conversations/item_list_params.rbi +174 -0
  132. data/rbi/openai/models/conversations/item_retrieve_params.rbi +70 -0
  133. data/rbi/openai/models/conversations/lob_prob.rbi +50 -0
  134. data/rbi/openai/models/conversations/message.rbi +196 -0
  135. data/rbi/openai/models/conversations/output_text_content.rbi +110 -0
  136. data/rbi/openai/models/conversations/refusal_content.rbi +38 -0
  137. data/rbi/openai/models/conversations/summary_text_content.rbi +31 -0
  138. data/rbi/openai/models/conversations/text_content.rbi +28 -0
  139. data/rbi/openai/models/conversations/top_log_prob.rbi +41 -0
  140. data/rbi/openai/models/conversations/url_citation_body.rbi +74 -0
  141. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +33 -33
  142. data/rbi/openai/models/file_create_params.rbi +56 -0
  143. data/rbi/openai/models/graders/text_similarity_grader.rbi +11 -6
  144. data/rbi/openai/models/reasoning.rbi +1 -1
  145. data/rbi/openai/models/responses/input_item_list_params.rbi +0 -11
  146. data/rbi/openai/models/responses/response.rbi +57 -11
  147. data/rbi/openai/models/responses/response_conversation_param.rbi +33 -0
  148. data/rbi/openai/models/responses/response_create_params.rbi +62 -11
  149. data/rbi/openai/models/responses/tool.rbi +243 -31
  150. data/rbi/openai/models/upload_create_params.rbi +56 -0
  151. data/rbi/openai/models.rbi +2 -0
  152. data/rbi/openai/resources/batches.rbi +5 -0
  153. data/rbi/openai/resources/beta/threads/runs.rbi +2 -2
  154. data/rbi/openai/resources/beta/threads.rbi +2 -2
  155. data/rbi/openai/resources/chat/completions.rbi +6 -8
  156. data/rbi/openai/resources/conversations/items.rbi +152 -0
  157. data/rbi/openai/resources/conversations.rbi +110 -0
  158. data/rbi/openai/resources/files.rbi +5 -1
  159. data/rbi/openai/resources/responses/input_items.rbi +0 -3
  160. data/rbi/openai/resources/responses.rbi +32 -8
  161. data/rbi/openai/resources/uploads.rbi +4 -0
  162. data/sig/openai/client.rbs +2 -0
  163. data/sig/openai/internal/conversation_cursor_page.rbs +15 -0
  164. data/sig/openai/models/batch_create_params.rbs +22 -1
  165. data/sig/openai/models/conversations/computer_screenshot_content.rbs +28 -0
  166. data/sig/openai/models/conversations/container_file_citation_body.rbs +47 -0
  167. data/sig/openai/models/conversations/conversation.rbs +37 -0
  168. data/sig/openai/models/conversations/conversation_create_params.rbs +33 -0
  169. data/sig/openai/models/conversations/conversation_delete_params.rbs +17 -0
  170. data/sig/openai/models/conversations/conversation_deleted.rbs +28 -0
  171. data/sig/openai/models/conversations/conversation_deleted_resource.rbs +28 -0
  172. data/sig/openai/models/conversations/conversation_item.rbs +403 -0
  173. data/sig/openai/models/conversations/conversation_item_list.rbs +44 -0
  174. data/sig/openai/models/conversations/conversation_retrieve_params.rbs +17 -0
  175. data/sig/openai/models/conversations/conversation_update_params.rbs +26 -0
  176. data/sig/openai/models/conversations/file_citation_body.rbs +37 -0
  177. data/sig/openai/models/conversations/input_file_content.rbs +41 -0
  178. data/sig/openai/models/conversations/input_image_content.rbs +49 -0
  179. data/sig/openai/models/conversations/input_text_content.rbs +17 -0
  180. data/sig/openai/models/conversations/item_create_params.rbs +37 -0
  181. data/sig/openai/models/conversations/item_delete_params.rbs +25 -0
  182. data/sig/openai/models/conversations/item_list_params.rbs +66 -0
  183. data/sig/openai/models/conversations/item_retrieve_params.rbs +37 -0
  184. data/sig/openai/models/conversations/lob_prob.rbs +37 -0
  185. data/sig/openai/models/conversations/message.rbs +95 -0
  186. data/sig/openai/models/conversations/output_text_content.rbs +52 -0
  187. data/sig/openai/models/conversations/refusal_content.rbs +17 -0
  188. data/sig/openai/models/conversations/summary_text_content.rbs +17 -0
  189. data/sig/openai/models/conversations/text_content.rbs +17 -0
  190. data/sig/openai/models/conversations/top_log_prob.rbs +28 -0
  191. data/sig/openai/models/conversations/url_citation_body.rbs +42 -0
  192. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +22 -22
  193. data/sig/openai/models/file_create_params.rbs +22 -1
  194. data/sig/openai/models/graders/text_similarity_grader.rbs +3 -1
  195. data/sig/openai/models/responses/input_item_list_params.rbs +0 -7
  196. data/sig/openai/models/responses/response.rbs +15 -0
  197. data/sig/openai/models/responses/response_conversation_param.rbs +15 -0
  198. data/sig/openai/models/responses/response_create_params.rbs +14 -0
  199. data/sig/openai/models/responses/tool.rbs +83 -18
  200. data/sig/openai/models/upload_create_params.rbs +22 -1
  201. data/sig/openai/models.rbs +2 -0
  202. data/sig/openai/resources/batches.rbs +1 -0
  203. data/sig/openai/resources/conversations/items.rbs +38 -0
  204. data/sig/openai/resources/conversations.rbs +31 -0
  205. data/sig/openai/resources/files.rbs +1 -0
  206. data/sig/openai/resources/responses/input_items.rbs +0 -1
  207. data/sig/openai/resources/responses.rbs +2 -0
  208. data/sig/openai/resources/uploads.rbs +1 -0
  209. metadata +95 -2
@@ -9,14 +9,12 @@ module OpenAI
9
9
  #
10
10
  # Returns a list of input items for a given response.
11
11
  #
12
- # @overload list(response_id, after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {})
12
+ # @overload list(response_id, after: nil, include: nil, limit: nil, order: nil, request_options: {})
13
13
  #
14
14
  # @param response_id [String] The ID of the response to retrieve input items for.
15
15
  #
16
16
  # @param after [String] An item ID to list items after, used in pagination.
17
17
  #
18
- # @param before [String] An item ID to list items before, used in pagination.
19
- #
20
18
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
21
19
  #
22
20
  # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between
@@ -23,10 +23,12 @@ module OpenAI
23
23
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
24
24
  # your own data as input for the model's response.
25
25
  #
26
- # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
26
+ # @overload create(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
27
27
  #
28
28
  # @param background [Boolean, nil] Whether to run the model response in the background.
29
29
  #
30
+ # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are
31
+ #
30
32
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
31
33
  #
32
34
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
@@ -49,7 +51,7 @@ module OpenAI
49
51
  #
50
52
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
51
53
  #
52
- # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
54
+ # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
53
55
  #
54
56
  # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
55
57
  #
@@ -120,10 +122,12 @@ module OpenAI
120
122
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
121
123
  # your own data as input for the model's response.
122
124
  #
123
- # @overload stream(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
125
+ # @overload stream(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
124
126
  #
125
127
  # @param background [Boolean, nil] Whether to run the model response in the background.
126
128
  #
129
+ # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are
130
+ #
127
131
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
128
132
  #
129
133
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
@@ -264,7 +268,7 @@ module OpenAI
264
268
  #
265
269
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
266
270
  #
267
- # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
271
+ # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
268
272
  #
269
273
  # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
270
274
  #
@@ -29,7 +29,7 @@ module OpenAI
29
29
  # the documentation on
30
30
  # [creating a File](https://platform.openai.com/docs/api-reference/files/create).
31
31
  #
32
- # @overload create(bytes:, filename:, mime_type:, purpose:, request_options: {})
32
+ # @overload create(bytes:, filename:, mime_type:, purpose:, expires_after: nil, request_options: {})
33
33
  #
34
34
  # @param bytes [Integer] The number of bytes in the file you are uploading.
35
35
  #
@@ -39,6 +39,8 @@ module OpenAI
39
39
  #
40
40
  # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file.
41
41
  #
42
+ # @param expires_after [OpenAI::Models::UploadCreateParams::ExpiresAfter] The expiration policy for a file. By default, files with `purpose=batch` expire
43
+ #
42
44
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
43
45
  #
44
46
  # @return [OpenAI::Models::Upload]
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.17.1"
4
+ VERSION = "0.19.0"
5
5
  end
data/lib/openai.rb CHANGED
@@ -48,6 +48,7 @@ require_relative "openai/internal/transport/base_client"
48
48
  require_relative "openai/internal/transport/pooled_net_requester"
49
49
  require_relative "openai/client"
50
50
  require_relative "openai/internal/stream"
51
+ require_relative "openai/internal/conversation_cursor_page"
51
52
  require_relative "openai/internal/cursor_page"
52
53
  require_relative "openai/internal/page"
53
54
  require_relative "openai/helpers/structured_output/json_schema_converter"
@@ -246,6 +247,33 @@ require_relative "openai/models/containers/file_list_response"
246
247
  require_relative "openai/models/containers/file_retrieve_params"
247
248
  require_relative "openai/models/containers/file_retrieve_response"
248
249
  require_relative "openai/models/containers/files/content_retrieve_params"
250
+ require_relative "openai/models/conversations/computer_screenshot_content"
251
+ require_relative "openai/models/conversations/container_file_citation_body"
252
+ require_relative "openai/models/conversations/conversation"
253
+ require_relative "openai/models/conversations/conversation_create_params"
254
+ require_relative "openai/models/conversations/conversation_deleted"
255
+ require_relative "openai/models/conversations/conversation_deleted_resource"
256
+ require_relative "openai/models/conversations/conversation_delete_params"
257
+ require_relative "openai/models/conversations/conversation_item"
258
+ require_relative "openai/models/conversations/conversation_item_list"
259
+ require_relative "openai/models/conversations/conversation_retrieve_params"
260
+ require_relative "openai/models/conversations/conversation_update_params"
261
+ require_relative "openai/models/conversations/file_citation_body"
262
+ require_relative "openai/models/conversations/input_file_content"
263
+ require_relative "openai/models/conversations/input_image_content"
264
+ require_relative "openai/models/conversations/input_text_content"
265
+ require_relative "openai/models/conversations/item_create_params"
266
+ require_relative "openai/models/conversations/item_delete_params"
267
+ require_relative "openai/models/conversations/item_list_params"
268
+ require_relative "openai/models/conversations/item_retrieve_params"
269
+ require_relative "openai/models/conversations/lob_prob"
270
+ require_relative "openai/models/conversations/message"
271
+ require_relative "openai/models/conversations/output_text_content"
272
+ require_relative "openai/models/conversations/refusal_content"
273
+ require_relative "openai/models/conversations/summary_text_content"
274
+ require_relative "openai/models/conversations/text_content"
275
+ require_relative "openai/models/conversations/top_log_prob"
276
+ require_relative "openai/models/conversations/url_citation_body"
249
277
  require_relative "openai/models/create_embedding_response"
250
278
  require_relative "openai/models/custom_tool_input_format"
251
279
  require_relative "openai/models/embedding"
@@ -383,6 +411,7 @@ require_relative "openai/models/responses/response_computer_tool_call_output_scr
383
411
  require_relative "openai/models/responses/response_content"
384
412
  require_relative "openai/models/responses/response_content_part_added_event"
385
413
  require_relative "openai/models/responses/response_content_part_done_event"
414
+ require_relative "openai/models/responses/response_conversation_param"
386
415
  require_relative "openai/models/responses/response_created_event"
387
416
  require_relative "openai/models/responses/response_create_params"
388
417
  require_relative "openai/models/responses/response_custom_tool_call"
@@ -535,6 +564,8 @@ require_relative "openai/resources/completions"
535
564
  require_relative "openai/resources/containers"
536
565
  require_relative "openai/resources/containers/files"
537
566
  require_relative "openai/resources/containers/files/content"
567
+ require_relative "openai/resources/conversations"
568
+ require_relative "openai/resources/conversations/items"
538
569
  require_relative "openai/resources/embeddings"
539
570
  require_relative "openai/resources/evals"
540
571
  require_relative "openai/resources/evals/runs"
@@ -67,6 +67,9 @@ module OpenAI
67
67
  sig { returns(OpenAI::Resources::Responses) }
68
68
  attr_reader :responses
69
69
 
70
+ sig { returns(OpenAI::Resources::Conversations) }
71
+ attr_reader :conversations
72
+
70
73
  sig { returns(OpenAI::Resources::Evals) }
71
74
  attr_reader :evals
72
75
 
@@ -68,19 +68,19 @@ module OpenAI
68
68
  end
69
69
 
70
70
  class APIConnectionError < OpenAI::Errors::APIError
71
- sig { void }
71
+ sig { returns(NilClass) }
72
72
  attr_accessor :status
73
73
 
74
- sig { void }
74
+ sig { returns(NilClass) }
75
75
  attr_accessor :body
76
76
 
77
- sig { void }
77
+ sig { returns(NilClass) }
78
78
  attr_accessor :code
79
79
 
80
- sig { void }
80
+ sig { returns(NilClass) }
81
81
  attr_accessor :param
82
82
 
83
- sig { void }
83
+ sig { returns(NilClass) }
84
84
  attr_accessor :type
85
85
 
86
86
  # @api private
@@ -7,9 +7,6 @@ module OpenAI
7
7
  include OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
8
8
 
9
9
  Elem = type_member(:out)
10
-
11
- sig { returns(String) }
12
- attr_reader :description
13
10
  end
14
11
  end
15
12
  end
@@ -46,6 +46,16 @@ module OpenAI
46
46
  def to_nilable(schema)
47
47
  end
48
48
 
49
+ # @api private
50
+ sig do
51
+ params(
52
+ schema: OpenAI::Helpers::StructuredOutput::JsonSchema,
53
+ meta: OpenAI::Internal::AnyHash
54
+ ).void
55
+ end
56
+ def assoc_meta!(schema, meta:)
57
+ end
58
+
49
59
  # @api private
50
60
  sig do
51
61
  params(
@@ -0,0 +1,25 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Internal
5
+ class ConversationCursorPage
6
+ include OpenAI::Internal::Type::BasePage
7
+
8
+ Elem = type_member
9
+
10
+ sig { returns(T.nilable(T::Array[Elem])) }
11
+ attr_accessor :data
12
+
13
+ sig { returns(T::Boolean) }
14
+ attr_accessor :has_more
15
+
16
+ sig { returns(String) }
17
+ attr_accessor :last_id
18
+
19
+ # @api private
20
+ sig { returns(String) }
21
+ def inspect
22
+ end
23
+ end
24
+ end
25
+ end
@@ -44,6 +44,19 @@ module OpenAI
44
44
  sig { returns(T.nilable(T::Hash[Symbol, String])) }
45
45
  attr_accessor :metadata
46
46
 
47
+ # The expiration policy for the output and/or error file that are generated for a
48
+ # batch.
49
+ sig { returns(T.nilable(OpenAI::BatchCreateParams::OutputExpiresAfter)) }
50
+ attr_reader :output_expires_after
51
+
52
+ sig do
53
+ params(
54
+ output_expires_after:
55
+ OpenAI::BatchCreateParams::OutputExpiresAfter::OrHash
56
+ ).void
57
+ end
58
+ attr_writer :output_expires_after
59
+
47
60
  sig do
48
61
  params(
49
62
  completion_window:
@@ -51,6 +64,8 @@ module OpenAI
51
64
  endpoint: OpenAI::BatchCreateParams::Endpoint::OrSymbol,
52
65
  input_file_id: String,
53
66
  metadata: T.nilable(T::Hash[Symbol, String]),
67
+ output_expires_after:
68
+ OpenAI::BatchCreateParams::OutputExpiresAfter::OrHash,
54
69
  request_options: OpenAI::RequestOptions::OrHash
55
70
  ).returns(T.attached_class)
56
71
  end
@@ -80,6 +95,9 @@ module OpenAI
80
95
  # Keys are strings with a maximum length of 64 characters. Values are strings with
81
96
  # a maximum length of 512 characters.
82
97
  metadata: nil,
98
+ # The expiration policy for the output and/or error file that are generated for a
99
+ # batch.
100
+ output_expires_after: nil,
83
101
  request_options: {}
84
102
  )
85
103
  end
@@ -92,6 +110,7 @@ module OpenAI
92
110
  endpoint: OpenAI::BatchCreateParams::Endpoint::OrSymbol,
93
111
  input_file_id: String,
94
112
  metadata: T.nilable(T::Hash[Symbol, String]),
113
+ output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter,
95
114
  request_options: OpenAI::RequestOptions
96
115
  }
97
116
  )
@@ -165,6 +184,47 @@ module OpenAI
165
184
  def self.values
166
185
  end
167
186
  end
187
+
188
+ class OutputExpiresAfter < OpenAI::Internal::Type::BaseModel
189
+ OrHash =
190
+ T.type_alias do
191
+ T.any(
192
+ OpenAI::BatchCreateParams::OutputExpiresAfter,
193
+ OpenAI::Internal::AnyHash
194
+ )
195
+ end
196
+
197
+ # Anchor timestamp after which the expiration policy applies. Supported anchors:
198
+ # `created_at`. Note that the anchor is the file creation time, not the time the
199
+ # batch is created.
200
+ sig { returns(Symbol) }
201
+ attr_accessor :anchor
202
+
203
+ # The number of seconds after the anchor time that the file will expire. Must be
204
+ # between 3600 (1 hour) and 2592000 (30 days).
205
+ sig { returns(Integer) }
206
+ attr_accessor :seconds
207
+
208
+ # The expiration policy for the output and/or error file that are generated for a
209
+ # batch.
210
+ sig do
211
+ params(seconds: Integer, anchor: Symbol).returns(T.attached_class)
212
+ end
213
+ def self.new(
214
+ # The number of seconds after the anchor time that the file will expire. Must be
215
+ # between 3600 (1 hour) and 2592000 (30 days).
216
+ seconds:,
217
+ # Anchor timestamp after which the expiration policy applies. Supported anchors:
218
+ # `created_at`. Note that the anchor is the file creation time, not the time the
219
+ # batch is created.
220
+ anchor: :created_at
221
+ )
222
+ end
223
+
224
+ sig { override.returns({ anchor: Symbol, seconds: Integer }) }
225
+ def to_hash
226
+ end
227
+ end
168
228
  end
169
229
  end
170
230
  end
@@ -187,7 +187,7 @@ module OpenAI
187
187
  attr_accessor :top_p
188
188
 
189
189
  # Controls for how a thread will be truncated prior to the run. Use this to
190
- # control the intial context window of the run.
190
+ # control the initial context window of the run.
191
191
  sig do
192
192
  returns(
193
193
  T.nilable(
@@ -343,7 +343,7 @@ module OpenAI
343
343
  # We generally recommend altering this or temperature but not both.
344
344
  top_p: nil,
345
345
  # Controls for how a thread will be truncated prior to the run. Use this to
346
- # control the intial context window of the run.
346
+ # control the initial context window of the run.
347
347
  truncation_strategy: nil,
348
348
  request_options: {}
349
349
  )
@@ -1459,7 +1459,7 @@ module OpenAI
1459
1459
  attr_accessor :last_messages
1460
1460
 
1461
1461
  # Controls for how a thread will be truncated prior to the run. Use this to
1462
- # control the intial context window of the run.
1462
+ # control the initial context window of the run.
1463
1463
  sig do
1464
1464
  params(
1465
1465
  type:
@@ -184,7 +184,7 @@ module OpenAI
184
184
  attr_accessor :tools
185
185
 
186
186
  # Controls for how a thread will be truncated prior to the run. Use this to
187
- # control the intial context window of the run.
187
+ # control the initial context window of the run.
188
188
  sig do
189
189
  returns(T.nilable(OpenAI::Beta::Threads::Run::TruncationStrategy))
190
190
  end
@@ -375,7 +375,7 @@ module OpenAI
375
375
  # this run.
376
376
  tools:,
377
377
  # Controls for how a thread will be truncated prior to the run. Use this to
378
- # control the intial context window of the run.
378
+ # control the initial context window of the run.
379
379
  truncation_strategy:,
380
380
  # Usage statistics related to the run. This value will be `null` if the run is not
381
381
  # in a terminal state (i.e. `in_progress`, `queued`, etc.).
@@ -740,7 +740,7 @@ module OpenAI
740
740
  attr_accessor :last_messages
741
741
 
742
742
  # Controls for how a thread will be truncated prior to the run. Use this to
743
- # control the intial context window of the run.
743
+ # control the initial context window of the run.
744
744
  sig do
745
745
  params(
746
746
  type:
@@ -204,7 +204,7 @@ module OpenAI
204
204
  attr_accessor :top_p
205
205
 
206
206
  # Controls for how a thread will be truncated prior to the run. Use this to
207
- # control the intial context window of the run.
207
+ # control the initial context window of the run.
208
208
  sig do
209
209
  returns(
210
210
  T.nilable(
@@ -378,7 +378,7 @@ module OpenAI
378
378
  # We generally recommend altering this or temperature but not both.
379
379
  top_p: nil,
380
380
  # Controls for how a thread will be truncated prior to the run. Use this to
381
- # control the intial context window of the run.
381
+ # control the initial context window of the run.
382
382
  truncation_strategy: nil,
383
383
  request_options: {}
384
384
  )
@@ -803,7 +803,7 @@ module OpenAI
803
803
  attr_accessor :last_messages
804
804
 
805
805
  # Controls for how a thread will be truncated prior to the run. Use this to
806
- # control the intial context window of the run.
806
+ # control the initial context window of the run.
807
807
  sig do
808
808
  params(
809
809
  type:
@@ -40,9 +40,8 @@ module OpenAI
40
40
  # - If set to 'default', then the request will be processed with the standard
41
41
  # pricing and performance for the selected model.
42
42
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
43
- # 'priority', then the request will be processed with the corresponding service
44
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
45
- # Priority processing.
43
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
44
+ # will be processed with the corresponding service tier.
46
45
  # - When not set, the default behavior is 'auto'.
47
46
  #
48
47
  # When the `service_tier` parameter is set, the response body will include the
@@ -106,9 +105,8 @@ module OpenAI
106
105
  # - If set to 'default', then the request will be processed with the standard
107
106
  # pricing and performance for the selected model.
108
107
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
109
- # 'priority', then the request will be processed with the corresponding service
110
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
111
- # Priority processing.
108
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
109
+ # will be processed with the corresponding service tier.
112
110
  # - When not set, the default behavior is 'auto'.
113
111
  #
114
112
  # When the `service_tier` parameter is set, the response body will include the
@@ -371,9 +369,8 @@ module OpenAI
371
369
  # - If set to 'default', then the request will be processed with the standard
372
370
  # pricing and performance for the selected model.
373
371
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
374
- # 'priority', then the request will be processed with the corresponding service
375
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
376
- # Priority processing.
372
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
373
+ # will be processed with the corresponding service tier.
377
374
  # - When not set, the default behavior is 'auto'.
378
375
  #
379
376
  # When the `service_tier` parameter is set, the response body will include the
@@ -42,9 +42,8 @@ module OpenAI
42
42
  # - If set to 'default', then the request will be processed with the standard
43
43
  # pricing and performance for the selected model.
44
44
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
45
- # 'priority', then the request will be processed with the corresponding service
46
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
47
- # Priority processing.
45
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
46
+ # will be processed with the corresponding service tier.
48
47
  # - When not set, the default behavior is 'auto'.
49
48
  #
50
49
  # When the `service_tier` parameter is set, the response body will include the
@@ -121,9 +120,8 @@ module OpenAI
121
120
  # - If set to 'default', then the request will be processed with the standard
122
121
  # pricing and performance for the selected model.
123
122
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
124
- # 'priority', then the request will be processed with the corresponding service
125
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
126
- # Priority processing.
123
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
124
+ # will be processed with the corresponding service tier.
127
125
  # - When not set, the default behavior is 'auto'.
128
126
  #
129
127
  # When the `service_tier` parameter is set, the response body will include the
@@ -791,9 +789,8 @@ module OpenAI
791
789
  # - If set to 'default', then the request will be processed with the standard
792
790
  # pricing and performance for the selected model.
793
791
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
794
- # 'priority', then the request will be processed with the corresponding service
795
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
796
- # Priority processing.
792
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
793
+ # will be processed with the corresponding service tier.
797
794
  # - When not set, the default behavior is 'auto'.
798
795
  #
799
796
  # When the `service_tier` parameter is set, the response body will include the
@@ -297,9 +297,8 @@ module OpenAI
297
297
  # - If set to 'default', then the request will be processed with the standard
298
298
  # pricing and performance for the selected model.
299
299
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
300
- # 'priority', then the request will be processed with the corresponding service
301
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
302
- # Priority processing.
300
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
301
+ # will be processed with the corresponding service tier.
303
302
  # - When not set, the default behavior is 'auto'.
304
303
  #
305
304
  # When the `service_tier` parameter is set, the response body will include the
@@ -330,7 +329,7 @@ module OpenAI
330
329
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
331
330
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
332
331
  #
333
- # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
332
+ # Supports text and image inputs. Note: image inputs over 8MB will be dropped.
334
333
  sig { returns(T.nilable(T::Boolean)) }
335
334
  attr_accessor :store
336
335
 
@@ -700,9 +699,8 @@ module OpenAI
700
699
  # - If set to 'default', then the request will be processed with the standard
701
700
  # pricing and performance for the selected model.
702
701
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
703
- # 'priority', then the request will be processed with the corresponding service
704
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
705
- # Priority processing.
702
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
703
+ # will be processed with the corresponding service tier.
706
704
  # - When not set, the default behavior is 'auto'.
707
705
  #
708
706
  # When the `service_tier` parameter is set, the response body will include the
@@ -719,7 +717,7 @@ module OpenAI
719
717
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
720
718
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
721
719
  #
722
- # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
720
+ # Supports text and image inputs. Note: image inputs over 8MB will be dropped.
723
721
  store: nil,
724
722
  # Options for streaming response. Only set this when you set `stream: true`.
725
723
  stream_options: nil,
@@ -1100,9 +1098,8 @@ module OpenAI
1100
1098
  # - If set to 'default', then the request will be processed with the standard
1101
1099
  # pricing and performance for the selected model.
1102
1100
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
1103
- # 'priority', then the request will be processed with the corresponding service
1104
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
1105
- # Priority processing.
1101
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
1102
+ # will be processed with the corresponding service tier.
1106
1103
  # - When not set, the default behavior is 'auto'.
1107
1104
  #
1108
1105
  # When the `service_tier` parameter is set, the response body will include the
@@ -0,0 +1,60 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Conversations
6
+ class ComputerScreenshotContent < OpenAI::Internal::Type::BaseModel
7
+ OrHash =
8
+ T.type_alias do
9
+ T.any(
10
+ OpenAI::Conversations::ComputerScreenshotContent,
11
+ OpenAI::Internal::AnyHash
12
+ )
13
+ end
14
+
15
+ # The identifier of an uploaded file that contains the screenshot.
16
+ sig { returns(T.nilable(String)) }
17
+ attr_accessor :file_id
18
+
19
+ # The URL of the screenshot image.
20
+ sig { returns(T.nilable(String)) }
21
+ attr_accessor :image_url
22
+
23
+ # Specifies the event type. For a computer screenshot, this property is always set
24
+ # to `computer_screenshot`.
25
+ sig { returns(Symbol) }
26
+ attr_accessor :type
27
+
28
+ sig do
29
+ params(
30
+ file_id: T.nilable(String),
31
+ image_url: T.nilable(String),
32
+ type: Symbol
33
+ ).returns(T.attached_class)
34
+ end
35
+ def self.new(
36
+ # The identifier of an uploaded file that contains the screenshot.
37
+ file_id:,
38
+ # The URL of the screenshot image.
39
+ image_url:,
40
+ # Specifies the event type. For a computer screenshot, this property is always set
41
+ # to `computer_screenshot`.
42
+ type: :computer_screenshot
43
+ )
44
+ end
45
+
46
+ sig do
47
+ override.returns(
48
+ {
49
+ file_id: T.nilable(String),
50
+ image_url: T.nilable(String),
51
+ type: Symbol
52
+ }
53
+ )
54
+ end
55
+ def to_hash
56
+ end
57
+ end
58
+ end
59
+ end
60
+ end