openai 0.15.0 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (174)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +47 -0
  3. data/README.md +14 -20
  4. data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
  5. data/lib/openai/helpers/structured_output/union_of.rb +11 -1
  6. data/lib/openai/internal/transport/base_client.rb +1 -1
  7. data/lib/openai/internal/type/enum.rb +6 -6
  8. data/lib/openai/internal/type/union.rb +13 -17
  9. data/lib/openai/models/beta/assistant_create_params.rb +4 -5
  10. data/lib/openai/models/beta/assistant_update_params.rb +22 -5
  11. data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
  12. data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
  13. data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
  14. data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
  15. data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
  16. data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
  17. data/lib/openai/models/chat/chat_completion_message.rb +3 -5
  18. data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
  19. data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
  20. data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
  21. data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
  22. data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
  23. data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
  24. data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
  25. data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
  26. data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
  27. data/lib/openai/models/chat/completion_create_params.rb +65 -16
  28. data/lib/openai/models/chat_model.rb +7 -0
  29. data/lib/openai/models/custom_tool_input_format.rb +76 -0
  30. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
  31. data/lib/openai/models/evals/run_cancel_response.rb +2 -2
  32. data/lib/openai/models/evals/run_create_params.rb +2 -2
  33. data/lib/openai/models/evals/run_create_response.rb +2 -2
  34. data/lib/openai/models/evals/run_list_response.rb +2 -2
  35. data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
  36. data/lib/openai/models/reasoning.rb +4 -5
  37. data/lib/openai/models/reasoning_effort.rb +4 -4
  38. data/lib/openai/models/response_format_text_grammar.rb +27 -0
  39. data/lib/openai/models/response_format_text_python.rb +20 -0
  40. data/lib/openai/models/responses/custom_tool.rb +48 -0
  41. data/lib/openai/models/responses/response.rb +70 -16
  42. data/lib/openai/models/responses/response_create_params.rb +78 -14
  43. data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
  44. data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
  45. data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
  46. data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
  47. data/lib/openai/models/responses/response_input_item.rb +7 -1
  48. data/lib/openai/models/responses/response_output_item.rb +4 -1
  49. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  50. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  51. data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
  52. data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
  53. data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
  54. data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
  55. data/lib/openai/models/responses/response_stream_event.rb +13 -11
  56. data/lib/openai/models/responses/response_text_config.rb +27 -1
  57. data/lib/openai/models/responses/tool.rb +5 -1
  58. data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
  59. data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
  60. data/lib/openai/models/vector_store_search_params.rb +6 -1
  61. data/lib/openai/models.rb +6 -0
  62. data/lib/openai/resources/beta/assistants.rb +2 -2
  63. data/lib/openai/resources/beta/threads/runs.rb +2 -2
  64. data/lib/openai/resources/chat/completions.rb +26 -12
  65. data/lib/openai/resources/responses.rb +77 -36
  66. data/lib/openai/version.rb +1 -1
  67. data/lib/openai.rb +19 -2
  68. data/rbi/openai/internal/transport/base_client.rbi +1 -1
  69. data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
  70. data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
  71. data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
  72. data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
  73. data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
  74. data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
  75. data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
  76. data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
  77. data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
  78. data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
  79. data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
  80. data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
  81. data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
  82. data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
  83. data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
  84. data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
  85. data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
  86. data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
  87. data/rbi/openai/models/chat/completion_create_params.rbi +150 -31
  88. data/rbi/openai/models/chat_model.rbi +11 -0
  89. data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
  90. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
  91. data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
  92. data/rbi/openai/models/evals/run_create_params.rbi +4 -0
  93. data/rbi/openai/models/evals/run_create_response.rbi +2 -0
  94. data/rbi/openai/models/evals/run_list_response.rbi +2 -0
  95. data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
  96. data/rbi/openai/models/reasoning.rbi +6 -8
  97. data/rbi/openai/models/reasoning_effort.rbi +4 -4
  98. data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
  99. data/rbi/openai/models/response_format_text_python.rbi +30 -0
  100. data/rbi/openai/models/responses/custom_tool.rbi +96 -0
  101. data/rbi/openai/models/responses/response.rbi +59 -11
  102. data/rbi/openai/models/responses/response_create_params.rbi +138 -13
  103. data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
  104. data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
  105. data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
  106. data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
  107. data/rbi/openai/models/responses/response_input_item.rbi +2 -0
  108. data/rbi/openai/models/responses/response_output_item.rbi +2 -1
  109. data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
  110. data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
  111. data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
  112. data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
  113. data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
  114. data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
  115. data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
  116. data/rbi/openai/models/responses/response_text_config.rbi +64 -1
  117. data/rbi/openai/models/responses/tool.rbi +1 -0
  118. data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
  119. data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
  120. data/rbi/openai/models/vector_store_search_params.rbi +12 -1
  121. data/rbi/openai/models.rbi +6 -0
  122. data/rbi/openai/resources/beta/assistants.rbi +6 -8
  123. data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
  124. data/rbi/openai/resources/chat/completions.rbi +78 -25
  125. data/rbi/openai/resources/responses.rbi +249 -47
  126. data/sig/openai/internal/transport/base_client.rbs +1 -1
  127. data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
  128. data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
  129. data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
  130. data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
  131. data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
  132. data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
  133. data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
  134. data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
  135. data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
  136. data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
  137. data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
  138. data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
  139. data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
  140. data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
  141. data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
  142. data/sig/openai/models/chat/completion_create_params.rbs +37 -6
  143. data/sig/openai/models/chat_model.rbs +15 -1
  144. data/sig/openai/models/custom_tool_input_format.rbs +61 -0
  145. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
  146. data/sig/openai/models/reasoning_effort.rbs +2 -1
  147. data/sig/openai/models/response_format_text_grammar.rbs +15 -0
  148. data/sig/openai/models/response_format_text_python.rbs +13 -0
  149. data/sig/openai/models/responses/custom_tool.rbs +43 -0
  150. data/sig/openai/models/responses/response.rbs +16 -0
  151. data/sig/openai/models/responses/response_create_params.rbs +33 -0
  152. data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
  153. data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
  154. data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
  155. data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
  156. data/sig/openai/models/responses/response_input_item.rbs +2 -0
  157. data/sig/openai/models/responses/response_output_item.rbs +1 -0
  158. data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
  159. data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
  160. data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
  161. data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
  162. data/sig/openai/models/responses/response_stream_event.rbs +4 -2
  163. data/sig/openai/models/responses/response_text_config.rbs +22 -3
  164. data/sig/openai/models/responses/tool.rbs +1 -0
  165. data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
  166. data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
  167. data/sig/openai/models/vector_store_search_params.rbs +2 -1
  168. data/sig/openai/models.rbs +6 -0
  169. data/sig/openai/resources/chat/completions.rbs +8 -2
  170. data/sig/openai/resources/responses.rbs +36 -0
  171. metadata +59 -8
  172. data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
  173. data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
  174. data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: e15e098317bf9151fffc0d83be9fd3ead36872a58ea53628f2d0dd3028735c78
- data.tar.gz: 04a779ac9f0b4418138bf4a7216e01109e48d608d76c33fc83f76e9de82d574e
+ metadata.gz: 6413e169f6a035774a4322aa42bbce1b9374344c1f68c5d452be6b0dffcd9756
+ data.tar.gz: 1ef928f4030023fd8a2076b08c3077f8ee7b9d95eef046eebbfc72004d46d327
  SHA512:
- metadata.gz: d3673d18e0d3cfcd0db2ddc4c9c45bc6da8dd86371a38d85f7ad181f45780e6a17b48b9478e621c47a17c1708a2ec775ae8b61c0f6eb39c6b9d6686e09edfb65
- data.tar.gz: a7fc556be0b4ba6ea16e6a2ba37bb0e9f62c2f7e39344a372b02b4c54b752e5718e15bfea4ffd8c277dd8db4f6d19530dc29e1ee5aee3fb2fc9a253269944b8f
+ metadata.gz: cf5a3da338b678f1680b605d0cd5a383ca95d601706422e5bedd5131e7ee7d91f01941b7f913368cec2e007fc684309e64e449e52e724a3c7a764c993c3d9e53
+ data.tar.gz: ebb89edd3814ee447bcfababcb05662d26e7380fb024c943cb5fa30196fcc6c49aea64c820de4fe8256f2acdf20562544e41a410084d1e34c774cf179329dc47
data/CHANGELOG.md CHANGED
@@ -1,5 +1,52 @@
  # Changelog

+ ## 0.17.0 (2025-08-08)
+
+ Full Changelog: [v0.16.0...v0.17.0](https://github.com/openai/openai-ruby/compare/v0.16.0...v0.17.0)
+
+ ### Features
+
+ * **api:** adds GPT-5 and new API features: platform.openai.com/docs/guides/gpt-5 ([068a381](https://github.com/openai/openai-ruby/commit/068a381a17dd2d60865e67fcd17fa84d919f3f5c))
+ * **api:** manual updates ([1d79621](https://github.com/openai/openai-ruby/commit/1d79621120fbccc8dd41f5af6df5a9b1a9018e73))
+
+
+ ### Bug Fixes
+
+ * **client:** dont try to parse if content is missing ([#770](https://github.com/openai/openai-ruby/issues/770)) ([7f8f2d3](https://github.com/openai/openai-ruby/commit/7f8f2d32863fafc39ee4a884937673a2ad9be358))
+ * **client:** fix verbosity parameter location in Responses ([a6b7ae8](https://github.com/openai/openai-ruby/commit/a6b7ae8b568c2214d4883fad44c9cf2e8a7d53e2))
+ * **internal:** fix rbi error ([803f20b](https://github.com/openai/openai-ruby/commit/803f20ba0c3751d28175dca99853783f0d851645))
+ * **respones:** undo accidently deleted fields ([#177](https://github.com/openai/openai-ruby/issues/177)) ([90a7c3a](https://github.com/openai/openai-ruby/commit/90a7c3ac8d22cc90b8ecaa3b091598ea3bc73029))
+ * **responses:** remove incorrect verbosity param ([127e2d1](https://github.com/openai/openai-ruby/commit/127e2d1b96b72307178446f0aa8acc1d3ad31367))
+
+
+ ### Chores
+
+ * **internal:** increase visibility of internal helper method ([eddbcda](https://github.com/openai/openai-ruby/commit/eddbcda189ac0a864fc3dadc5dd3578d730c491f))
+ * update @stainless-api/prism-cli to v5.15.0 ([aaa7d89](https://github.com/openai/openai-ruby/commit/aaa7d895a3dba31f32cf5f4373a49d1571667fc6))
+
+ ## 0.16.0 (2025-07-30)
+
+ Full Changelog: [v0.15.0...v0.16.0](https://github.com/openai/openai-ruby/compare/v0.15.0...v0.16.0)
+
+ ### Features
+
+ * add output_text method for non-streaming responses ([#757](https://github.com/openai/openai-ruby/issues/757)) ([50cf119](https://github.com/openai/openai-ruby/commit/50cf119106f9e16d9ac6a9898028b6d563a6f809))
+ * **api:** manual updates ([e9fa8a0](https://github.com/openai/openai-ruby/commit/e9fa8a08d6ecebdd06212eaf6b9103082b7d67aa))
+
+
+ ### Bug Fixes
+
+ * **internal:** ensure sorbet test always runs serially ([0601061](https://github.com/openai/openai-ruby/commit/0601061047525d16cc2afac64e5a4de0dd9de2e5))
+ * provide parsed outputs for resumed streams ([#756](https://github.com/openai/openai-ruby/issues/756)) ([82254f9](https://github.com/openai/openai-ruby/commit/82254f980ccc0affa2555a81b0d8ed5aa0290835))
+ * union definition re-using ([#760](https://github.com/openai/openai-ruby/issues/760)) ([3046c28](https://github.com/openai/openai-ruby/commit/3046c28935ca925c2f399f0350937d04eab54c0a))
+
+
+ ### Chores
+
+ * extract reused JSON schema references even in unions ([#761](https://github.com/openai/openai-ruby/issues/761)) ([e17d3bf](https://github.com/openai/openai-ruby/commit/e17d3bf1fdf241f7a78ed72a39ddecabeb5877c8))
+ * **internal:** refactor variable name ([#762](https://github.com/openai/openai-ruby/issues/762)) ([7e15b07](https://github.com/openai/openai-ruby/commit/7e15b0745dcbd3bf7fc4c1899d9d76e0a9ab1e48))
+ * update contribute.md ([b4a0297](https://github.com/openai/openai-ruby/commit/b4a029775bb52d5db2f3fac235595f37b6746a61))
+
  ## 0.15.0 (2025-07-21)

  Full Changelog: [v0.14.0...v0.15.0](https://github.com/openai/openai-ruby/compare/v0.14.0...v0.15.0)
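The `output_text` helper called out in the 0.16.0 notes above is the main quality-of-life addition for non-streaming use. A minimal sketch of how it would be used (the model name and prompt are illustrative, and the helper's aggregation behaviour is assumed from the changelog entry rather than verified against the implementation):

```ruby
# Hedged sketch: create a non-streaming response and read its combined text.
# `output_text` is assumed (per the 0.16.0 entry above) to concatenate the
# text parts of the response's output items.
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = openai.responses.create(
  input: "Write a haiku about OpenAI.",
  model: :"gpt-5"
)

puts(response.output_text)
```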
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
  <!-- x-release-please-start-version -->

  ```ruby
- gem "openai", "~> 0.15.0"
+ gem "openai", "~> 0.17.0"
  ```

  <!-- x-release-please-end -->
@@ -30,10 +30,7 @@ openai = OpenAI::Client.new(
  api_key: ENV["OPENAI_API_KEY"] # This is the default and can be omitted
  )

- chat_completion = openai.chat.completions.create(
- messages: [{role: "user", content: "Say this is a test"}],
- model: :"gpt-4.1"
- )
+ chat_completion = openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5")

  puts(chat_completion)
  ```
@@ -45,7 +42,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
  ```ruby
  stream = openai.responses.stream(
  input: "Write a haiku about OpenAI.",
- model: :"gpt-4.1"
+ model: :"gpt-5"
  )

  stream.each do |event|
@@ -343,7 +340,7 @@ openai = OpenAI::Client.new(
  # Or, configure per-request:
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
- model: :"gpt-4.1",
+ model: :"gpt-5",
  request_options: {max_retries: 5}
  )
  ```
@@ -361,7 +358,7 @@ openai = OpenAI::Client.new(
  # Or, configure per-request:
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
- model: :"gpt-4.1",
+ model: :"gpt-5",
  request_options: {timeout: 5}
  )
  ```
@@ -396,7 +393,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete
  chat_completion =
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
- model: :"gpt-4.1",
+ model: :"gpt-5",
  request_options: {
  extra_query: {my_query_parameter: value},
  extra_body: {my_body_parameter: value},
@@ -444,7 +441,7 @@ You can provide typesafe request parameters like so:
  ```ruby
  openai.chat.completions.create(
  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
- model: :"gpt-4.1"
+ model: :"gpt-5"
  )
  ```

@@ -452,15 +449,12 @@ Or, equivalently:

  ```ruby
  # Hashes work, but are not typesafe:
- openai.chat.completions.create(
- messages: [{role: "user", content: "Say this is a test"}],
- model: :"gpt-4.1"
- )
+ openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5")

  # You can also splat a full Params class:
  params = OpenAI::Chat::CompletionCreateParams.new(
  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
- model: :"gpt-4.1"
+ model: :"gpt-5"
  )
  openai.chat.completions.create(**params)
  ```
@@ -470,11 +464,11 @@ openai.chat.completions.create(**params)
  Since this library does not depend on `sorbet-runtime`, it cannot provide [`T::Enum`](https://sorbet.org/docs/tenum) instances. Instead, we provide "tagged symbols" instead, which is always a primitive at runtime:

  ```ruby
- # :low
- puts(OpenAI::ReasoningEffort::LOW)
+ # :minimal
+ puts(OpenAI::ReasoningEffort::MINIMAL)

  # Revealed type: `T.all(OpenAI::ReasoningEffort, Symbol)`
- T.reveal_type(OpenAI::ReasoningEffort::LOW)
+ T.reveal_type(OpenAI::ReasoningEffort::MINIMAL)
  ```

  Enum parameters have a "relaxed" type, so you can either pass in enum constants or their literal value:
@@ -482,13 +476,13 @@ Enum parameters have a "relaxed" type, so you can either pass in enum constants
  ```ruby
  # Using the enum constants preserves the tagged type information:
  openai.chat.completions.create(
- reasoning_effort: OpenAI::ReasoningEffort::LOW,
+ reasoning_effort: OpenAI::ReasoningEffort::MINIMAL,
  # …
  )

  # Literal values are also permissible:
  openai.chat.completions.create(
- reasoning_effort: :low,
+ reasoning_effort: :minimal,
  # …
  )
  ```
data/lib/openai/helpers/structured_output/json_schema_converter.rb CHANGED
@@ -6,15 +6,9 @@ module OpenAI
  # To customize the JSON schema conversion for a type, implement the `JsonSchemaConverter` interface.
  module JsonSchemaConverter
  # @api private
- POINTER = Object.new.tap do
+ POINTERS = Object.new.tap do
  _1.define_singleton_method(:inspect) do
- "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::POINTER>"
- end
- end.freeze
- # @api private
- COUNTER = Object.new.tap do
- _1.define_singleton_method(:inspect) do
- "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::COUNTER>"
+ "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::POINTERS>"
  end
  end.freeze
  # @api private
@@ -81,14 +75,15 @@ module OpenAI
  def cache_def!(state, type:, &blk)
  defs, path = state.fetch_values(:defs, :path)
  if (stored = defs[type])
- stored[OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER] += 1
- stored.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTER)
+ pointers = stored.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+ pointers.first.except(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF).tap do
+ pointers << _1
+ end
  else
  ref_path = String.new
  ref = {"$ref": ref_path}
  stored = {
- OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTER => ref,
- OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER => 1
+ OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS => [ref]
  }
  defs.store(type, stored)
  schema = blk.call
@@ -112,17 +107,21 @@ module OpenAI
  )
  reused_defs = {}
  defs.each_value do |acc|
- ref = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTER)
- if (no_ref = ref.delete(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF))
- acc[OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER] -= 1
+ sch = acc.except(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+ pointers = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+
+ no_refs, refs = pointers.partition do
+ _1.delete(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF)
  end
- cnt = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER)

- sch = acc.except(
- OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTER,
- OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER
- )
- cnt > 1 && !no_ref ? reused_defs.store(ref.fetch(:$ref), sch) : ref.replace(sch)
+ case refs
+ in [ref]
+ ref.replace(sch)
+ in [_, ref, *]
+ reused_defs.store(ref.fetch(:$ref), sch)
+ else
+ end
+ no_refs.each { _1.replace(sch) }
  end

  xformed = reused_defs.transform_keys { _1.delete_prefix("#/$defs/") }
data/lib/openai/helpers/structured_output/union_of.rb CHANGED
@@ -36,7 +36,17 @@ module OpenAI
  mergeable_keys.each_key { mergeable_keys[_1] += 1 if schema.keys == _1 }
  end
  mergeable = mergeable_keys.any? { _1.last == schemas.length }
- mergeable ? OpenAI::Internal::Util.deep_merge(*schemas, concat: true) : {anyOf: schemas}
+ if mergeable
+ OpenAI::Internal::Util.deep_merge(*schemas, concat: true)
+ else
+ {
+ anyOf: schemas.each do
+ if _1.key?(:$ref)
+ _1.update(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF => true)
+ end
+ end
+ }
+ end
  end
  end

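Taken together, the POINTERS and NO_REF changes above decide when a schema that appears more than once is hoisted into `$defs` versus inlined, including when the repeat occurs inside a union; the 0.16.0 notes ("union definition re-using", "extract reused JSON schema references even in unions") are the cases this addresses. A hedged sketch of the kind of model that exercises this path, using the structured output helpers this gem exposes (`OpenAI::BaseModel`, `OpenAI::ArrayOf`, `OpenAI::UnionOf`); the class and field names are illustrative:

```ruby
# Hedged sketch: `Step` is referenced twice, once directly and once inside a
# union, so the converter must choose between a shared "$ref" into $defs and
# inlining the schema at each site (what the NO_REF marker above tracks).
class Step < OpenAI::BaseModel
  required :explanation, String
  required :output, String
end

class MathResponse < OpenAI::BaseModel
  required :steps, OpenAI::ArrayOf[Step]
  required :final_answer, OpenAI::UnionOf[Step, String]
end
```

Tracking every pointer to a cached definition, instead of a single pointer plus a counter, lets the converter inline singleton references and emit a shared `$def` only when a schema really is reused.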
data/lib/openai/internal/transport/base_client.rb CHANGED
@@ -365,7 +365,7 @@ module OpenAI
  #
  # @raise [OpenAI::Errors::APIError]
  # @return [Array(Integer, Net::HTTPResponse, Enumerable<String>)]
- private def send_request(request, redirect_count:, retry_count:, send_retry_header:)
+ def send_request(request, redirect_count:, retry_count:, send_retry_header:)
  url, headers, max_retries, timeout = request.fetch_values(:url, :headers, :max_retries, :timeout)
  input = {**request.except(:timeout), deadline: OpenAI::Internal::Util.monotonic_secs + timeout}

data/lib/openai/internal/type/enum.rb CHANGED
@@ -19,11 +19,11 @@ module OpenAI
  # @example
  # # `chat_model` is a `OpenAI::ChatModel`
  # case chat_model
- # when OpenAI::ChatModel::GPT_4_1
+ # when OpenAI::ChatModel::GPT_5
  # # ...
- # when OpenAI::ChatModel::GPT_4_1_MINI
+ # when OpenAI::ChatModel::GPT_5_MINI
  # # ...
- # when OpenAI::ChatModel::GPT_4_1_NANO
+ # when OpenAI::ChatModel::GPT_5_NANO
  # # ...
  # else
  # puts(chat_model)
@@ -31,11 +31,11 @@ module OpenAI
  #
  # @example
  # case chat_model
- # in :"gpt-4.1"
+ # in :"gpt-5"
  # # ...
- # in :"gpt-4.1-mini"
+ # in :"gpt-5-mini"
  # # ...
- # in :"gpt-4.1-nano"
+ # in :"gpt-5-nano"
  # # ...
  # else
  # puts(chat_model)
data/lib/openai/internal/type/union.rb CHANGED
@@ -6,28 +6,24 @@ module OpenAI
  # @api private
  #
  # @example
- # # `chat_completion_content_part` is a `OpenAI::Chat::ChatCompletionContentPart`
- # case chat_completion_content_part
- # when OpenAI::Chat::ChatCompletionContentPartText
- # puts(chat_completion_content_part.text)
- # when OpenAI::Chat::ChatCompletionContentPartImage
- # puts(chat_completion_content_part.image_url)
- # when OpenAI::Chat::ChatCompletionContentPartInputAudio
- # puts(chat_completion_content_part.input_audio)
+ # # `custom_tool_input_format` is a `OpenAI::CustomToolInputFormat`
+ # case custom_tool_input_format
+ # when OpenAI::CustomToolInputFormat::Text
+ # puts(custom_tool_input_format.type)
+ # when OpenAI::CustomToolInputFormat::Grammar
+ # puts(custom_tool_input_format.definition)
  # else
- # puts(chat_completion_content_part)
+ # puts(custom_tool_input_format)
  # end
  #
  # @example
- # case chat_completion_content_part
- # in {type: :text, text: text}
- # puts(text)
- # in {type: :image_url, image_url: image_url}
- # puts(image_url)
- # in {type: :input_audio, input_audio: input_audio}
- # puts(input_audio)
+ # case custom_tool_input_format
+ # in {type: :text}
+ # # ...
+ # in {type: :grammar, definition: definition, syntax: syntax}
+ # puts(definition)
  # else
- # puts(chat_completion_content_part)
+ # puts(custom_tool_input_format)
  # end
  module Union
  include OpenAI::Internal::Type::Converter
data/lib/openai/models/beta/assistant_create_params.rb CHANGED
@@ -49,12 +49,11 @@ module OpenAI
  optional :name, String, nil?: true

  # @!attribute reasoning_effort
- # **o-series models only**
- #
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -133,7 +132,7 @@ module OpenAI
  #
  # @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
  #
data/lib/openai/models/beta/assistant_update_params.rb CHANGED
@@ -49,12 +49,11 @@ module OpenAI
  optional :name, String, nil?: true

  # @!attribute reasoning_effort
- # **o-series models only**
- #
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -133,7 +132,7 @@ module OpenAI
  #
  # @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
  #
@@ -157,6 +156,18 @@ module OpenAI

  variant String

+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5 }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_MINI }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_NANO }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_2025_08_07 }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_MINI_2025_08_07 }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_NANO_2025_08_07 }
+
  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_MINI }
@@ -238,6 +249,12 @@ module OpenAI

  # @!group

+ GPT_5 = :"gpt-5"
+ GPT_5_MINI = :"gpt-5-mini"
+ GPT_5_NANO = :"gpt-5-nano"
+ GPT_5_2025_08_07 = :"gpt-5-2025-08-07"
+ GPT_5_MINI_2025_08_07 = :"gpt-5-mini-2025-08-07"
+ GPT_5_NANO_2025_08_07 = :"gpt-5-nano-2025-08-07"
  GPT_4_1 = :"gpt-4.1"
  GPT_4_1_MINI = :"gpt-4.1-mini"
  GPT_4_1_NANO = :"gpt-4.1-nano"
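For the beta Assistants surface, these constants mean an existing assistant can be moved onto a GPT-5 model in place. A hedged sketch (the assistant id is a placeholder, and the widened `reasoning_effort` enum from the hunk above is used alongside it):

```ruby
# Hedged sketch: point an existing assistant at gpt-5 and request the new
# minimal reasoning effort; "asst_123" is a placeholder id.
openai.beta.assistants.update(
  "asst_123",
  model: :"gpt-5",
  reasoning_effort: :minimal
)
```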
data/lib/openai/models/beta/threads/run_create_params.rb CHANGED
@@ -107,12 +107,11 @@ module OpenAI
  optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean

  # @!attribute reasoning_effort
- # **o-series models only**
- #
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -216,7 +215,7 @@ module OpenAI
  #
  # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
  #
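The same widened `reasoning_effort` enum applies when creating a thread run. A hedged sketch (ids are placeholders, and the positional-id-plus-keywords call shape is assumed from the resource style used elsewhere in the gem rather than taken from this diff):

```ruby
# Hedged sketch: create a run with the new minimal reasoning effort.
openai.beta.threads.runs.create(
  "thread_123",
  assistant_id: "asst_123",
  reasoning_effort: :minimal
)
```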
data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb CHANGED
@@ -0,0 +1,33 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+ module Models
+ module Chat
+ class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel
+ # @!attribute allowed_tools
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # @return [OpenAI::Models::Chat::ChatCompletionAllowedTools]
+ required :allowed_tools, -> { OpenAI::Chat::ChatCompletionAllowedTools }
+
+ # @!attribute type
+ # Allowed tool configuration type. Always `allowed_tools`.
+ #
+ # @return [Symbol, :allowed_tools]
+ required :type, const: :allowed_tools
+
+ # @!method initialize(allowed_tools:, type: :allowed_tools)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Chat::ChatCompletionAllowedToolChoice} for more details.
+ #
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # @param allowed_tools [OpenAI::Models::Chat::ChatCompletionAllowedTools] Constrains the tools available to the model to a pre-defined set.
+ #
+ # @param type [Symbol, :allowed_tools] Allowed tool configuration type. Always `allowed_tools`.
+ end
+ end
+
+ ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice
+ end
+ end
data/lib/openai/models/chat/chat_completion_allowed_tools.rb CHANGED
@@ -0,0 +1,64 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+ module Models
+ module Chat
+ class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel
+ # @!attribute mode
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # `auto` allows the model to pick from among the allowed tools and generate a
+ # message.
+ #
+ # `required` requires the model to call one or more of the allowed tools.
+ #
+ # @return [Symbol, OpenAI::Models::Chat::ChatCompletionAllowedTools::Mode]
+ required :mode, enum: -> { OpenAI::Chat::ChatCompletionAllowedTools::Mode }
+
+ # @!attribute tools
+ # A list of tool definitions that the model should be allowed to call.
+ #
+ # For the Chat Completions API, the list of tool definitions might look like:
+ #
+ # ```json
+ # [
+ # { "type": "function", "function": { "name": "get_weather" } },
+ # { "type": "function", "function": { "name": "get_time" } }
+ # ]
+ # ```
+ #
+ # @return [Array<Hash{Symbol=>Object}>]
+ required :tools,
+ OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
+
+ # @!method initialize(mode:, tools:)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Chat::ChatCompletionAllowedTools} for more details.
+ #
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # @param mode [Symbol, OpenAI::Models::Chat::ChatCompletionAllowedTools::Mode] Constrains the tools available to the model to a pre-defined set.
+ #
+ # @param tools [Array<Hash{Symbol=>Object}>] A list of tool definitions that the model should be allowed to call.
+
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # `auto` allows the model to pick from among the allowed tools and generate a
+ # message.
+ #
+ # `required` requires the model to call one or more of the allowed tools.
+ #
+ # @see OpenAI::Models::Chat::ChatCompletionAllowedTools#mode
+ module Mode
+ extend OpenAI::Internal::Type::Enum
+
+ AUTO = :auto
+ REQUIRED = :required
+
+ # @!method self.values
+ # @return [Array<Symbol>]
+ end
+ end
+ end
+ end
+ end
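On the request side, these two models back the new `allowed_tools` variant of `tool_choice` in chat completions. A hedged sketch of the request shape, mirroring the `mode`/`tools` layout documented in the model above (the tool definitions themselves are placeholders assumed to be declared elsewhere):

```ruby
# Hedged sketch: declare several tools but constrain this turn to a subset,
# letting the model choose freely among the allowed ones (mode: :auto).
openai.chat.completions.create(
  model: :"gpt-5",
  messages: [{role: "user", content: "What's the weather in Paris right now?"}],
  tools: [get_weather_tool, get_time_tool], # placeholders defined elsewhere
  tool_choice: {
    type: :allowed_tools,
    allowed_tools: {
      mode: :auto,
      tools: [{type: "function", function: {name: "get_weather"}}]
    }
  }
)
```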
data/lib/openai/models/chat/chat_completion_assistant_message_param.rb CHANGED
@@ -55,11 +55,9 @@ module OpenAI
  # @!attribute tool_calls
  # The tool calls generated by the model, such as function calls.
  #
- # @return [Array<OpenAI::Models::Chat::ChatCompletionMessageToolCall>, nil]
+ # @return [Array<OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall, OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall>, nil]
  optional :tool_calls,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionMessageToolCall]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionMessageToolCall] }

  # @!method initialize(audio: nil, content: nil, function_call: nil, name: nil, refusal: nil, tool_calls: nil, role: :assistant)
  # Some parameter documentations has been truncated, see
@@ -77,7 +75,7 @@ module OpenAI
  #
  # @param refusal [String, nil] The refusal message by the assistant.
  #
- # @param tool_calls [Array<OpenAI::Models::Chat::ChatCompletionMessageToolCall>] The tool calls generated by the model, such as function calls.
+ # @param tool_calls [Array<OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall, OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall>] The tool calls generated by the model, such as function calls.
  #
  # @param role [Symbol, :assistant] The role of the messages author, in this case `assistant`.
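Since `tool_calls` is now a union of function and custom tool calls, code that replays an assistant message or dispatches tool results should branch on the concrete class rather than assuming a function call. A hedged sketch (the accessor names on the custom variant are assumed from the corresponding API shapes, not taken from this diff):

```ruby
# Hedged sketch: handle both members of the tool_calls union.
message.tool_calls&.each do |tool_call|
  case tool_call
  when OpenAI::Chat::ChatCompletionMessageFunctionToolCall
    # Classic function call: name plus JSON-encoded arguments.
    puts(tool_call.function.name)
    puts(tool_call.function.arguments)
  when OpenAI::Chat::ChatCompletionMessageCustomToolCall
    # Custom tool call: free-form input string (accessors assumed).
    puts(tool_call.custom.name)
    puts(tool_call.custom.input)
  end
end
```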