openai 0.16.0 → 0.17.1

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (177)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +33 -0
  3. data/README.md +14 -20
  4. data/lib/openai/internal/transport/base_client.rb +1 -1
  5. data/lib/openai/internal/type/array_of.rb +1 -0
  6. data/lib/openai/internal/type/base_model.rb +3 -1
  7. data/lib/openai/internal/type/converter.rb +27 -0
  8. data/lib/openai/internal/type/enum.rb +6 -6
  9. data/lib/openai/internal/type/hash_of.rb +1 -0
  10. data/lib/openai/internal/type/union.rb +22 -24
  11. data/lib/openai/models/beta/assistant_create_params.rb +4 -5
  12. data/lib/openai/models/beta/assistant_update_params.rb +22 -5
  13. data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
  14. data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
  15. data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
  16. data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
  17. data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
  18. data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
  19. data/lib/openai/models/chat/chat_completion_message.rb +3 -5
  20. data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
  21. data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
  22. data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
  23. data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
  24. data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
  25. data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
  26. data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
  27. data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
  28. data/lib/openai/models/chat/completion_create_params.rb +35 -12
  29. data/lib/openai/models/chat_model.rb +7 -0
  30. data/lib/openai/models/custom_tool_input_format.rb +76 -0
  31. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
  32. data/lib/openai/models/evals/run_cancel_response.rb +2 -2
  33. data/lib/openai/models/evals/run_create_params.rb +2 -2
  34. data/lib/openai/models/evals/run_create_response.rb +2 -2
  35. data/lib/openai/models/evals/run_list_response.rb +2 -2
  36. data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
  37. data/lib/openai/models/reasoning.rb +4 -5
  38. data/lib/openai/models/reasoning_effort.rb +4 -4
  39. data/lib/openai/models/response_format_text_grammar.rb +27 -0
  40. data/lib/openai/models/response_format_text_python.rb +20 -0
  41. data/lib/openai/models/responses/custom_tool.rb +48 -0
  42. data/lib/openai/models/responses/response.rb +20 -12
  43. data/lib/openai/models/responses/response_create_params.rb +48 -10
  44. data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
  45. data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
  46. data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
  47. data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
  48. data/lib/openai/models/responses/response_input_item.rb +7 -1
  49. data/lib/openai/models/responses/response_output_item.rb +4 -1
  50. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  51. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  52. data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
  53. data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
  54. data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
  55. data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
  56. data/lib/openai/models/responses/response_stream_event.rb +13 -11
  57. data/lib/openai/models/responses/response_text_config.rb +27 -1
  58. data/lib/openai/models/responses/tool.rb +5 -1
  59. data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
  60. data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
  61. data/lib/openai/models/vector_store_search_params.rb +6 -1
  62. data/lib/openai/models.rb +6 -0
  63. data/lib/openai/resources/beta/assistants.rb +2 -2
  64. data/lib/openai/resources/beta/threads/runs.rb +2 -2
  65. data/lib/openai/resources/chat/completions.rb +16 -10
  66. data/lib/openai/resources/responses.rb +38 -22
  67. data/lib/openai/version.rb +1 -1
  68. data/lib/openai.rb +19 -2
  69. data/rbi/openai/internal/transport/base_client.rbi +1 -1
  70. data/rbi/openai/internal/type/converter.rbi +46 -0
  71. data/rbi/openai/internal/type/union.rbi +7 -2
  72. data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
  73. data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
  74. data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
  75. data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
  76. data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
  77. data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
  78. data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
  79. data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
  80. data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
  81. data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
  82. data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
  83. data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
  84. data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
  85. data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
  86. data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
  87. data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
  88. data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
  89. data/rbi/openai/models/chat/completion_create_params.rbi +106 -25
  90. data/rbi/openai/models/chat_model.rbi +11 -0
  91. data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
  92. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
  93. data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
  94. data/rbi/openai/models/evals/run_create_params.rbi +4 -0
  95. data/rbi/openai/models/evals/run_create_response.rbi +2 -0
  96. data/rbi/openai/models/evals/run_list_response.rbi +2 -0
  97. data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
  98. data/rbi/openai/models/reasoning.rbi +6 -8
  99. data/rbi/openai/models/reasoning_effort.rbi +4 -4
  100. data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
  101. data/rbi/openai/models/response_format_text_python.rbi +30 -0
  102. data/rbi/openai/models/responses/custom_tool.rbi +96 -0
  103. data/rbi/openai/models/responses/response.rbi +15 -5
  104. data/rbi/openai/models/responses/response_create_params.rbi +94 -7
  105. data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
  106. data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
  107. data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
  108. data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
  109. data/rbi/openai/models/responses/response_input_item.rbi +2 -0
  110. data/rbi/openai/models/responses/response_output_item.rbi +2 -1
  111. data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
  112. data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
  113. data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
  114. data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
  115. data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
  116. data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
  117. data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
  118. data/rbi/openai/models/responses/response_text_config.rbi +64 -1
  119. data/rbi/openai/models/responses/tool.rbi +1 -0
  120. data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
  121. data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
  122. data/rbi/openai/models/vector_store_search_params.rbi +12 -1
  123. data/rbi/openai/models.rbi +6 -0
  124. data/rbi/openai/resources/beta/assistants.rbi +6 -8
  125. data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
  126. data/rbi/openai/resources/chat/completions.rbi +44 -19
  127. data/rbi/openai/resources/responses.rbi +215 -41
  128. data/sig/openai/internal/transport/base_client.rbs +1 -1
  129. data/sig/openai/internal/type/converter.rbs +17 -0
  130. data/sig/openai/internal/type/union.rbs +2 -2
  131. data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
  132. data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
  133. data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
  134. data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
  135. data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
  136. data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
  137. data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
  138. data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
  139. data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
  140. data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
  141. data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
  142. data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
  143. data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
  144. data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
  145. data/sig/openai/models/chat/completion_create_params.rbs +23 -6
  146. data/sig/openai/models/chat_model.rbs +15 -1
  147. data/sig/openai/models/custom_tool_input_format.rbs +61 -0
  148. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
  149. data/sig/openai/models/reasoning_effort.rbs +2 -1
  150. data/sig/openai/models/response_format_text_grammar.rbs +15 -0
  151. data/sig/openai/models/response_format_text_python.rbs +13 -0
  152. data/sig/openai/models/responses/custom_tool.rbs +43 -0
  153. data/sig/openai/models/responses/response.rbs +2 -0
  154. data/sig/openai/models/responses/response_create_params.rbs +19 -0
  155. data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
  156. data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
  157. data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
  158. data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
  159. data/sig/openai/models/responses/response_input_item.rbs +2 -0
  160. data/sig/openai/models/responses/response_output_item.rbs +1 -0
  161. data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
  162. data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
  163. data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
  164. data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
  165. data/sig/openai/models/responses/response_stream_event.rbs +4 -2
  166. data/sig/openai/models/responses/response_text_config.rbs +22 -3
  167. data/sig/openai/models/responses/tool.rbs +1 -0
  168. data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
  169. data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
  170. data/sig/openai/models/vector_store_search_params.rbs +2 -1
  171. data/sig/openai/models.rbs +6 -0
  172. data/sig/openai/resources/chat/completions.rbs +4 -2
  173. data/sig/openai/resources/responses.rbs +32 -0
  174. metadata +59 -8
  175. data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
  176. data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
  177. data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 78e829792412e459c6e623de5db5f553d7d1325fa28300ae94c62103d511c946
- data.tar.gz: e7711af3f619a26ac688445353ee83b8ca250cad3feb84358f0a556650662e0b
+ metadata.gz: f9090d21fb7dcdd5e12c607bbe02aa7c1e7336e02148fec0465b21cc43bb9d45
+ data.tar.gz: 61662c62cde77fefb5bec16ce51127efd60b4f71542d2bcf2c48f0225ecb29f9
  SHA512:
- metadata.gz: 3a6d15ee5239db7f9f8fe2494169ce279c26435bc7b83a5b8ecb2dfde47da5f6636f6db49a29f429b7fca35b58d90136d06f7de4e49677ee37fb94b899f518da
- data.tar.gz: 1f6659074cc6e2317b47e0677a928765d9d8b73e898f7c2ee0eb435dc1fa0728df18f64859a6a732ec3beaaa1db16a5e96176eb1bdda174ac9368cccd0a243a2
+ metadata.gz: '08c00969160cc09128778848c99704501b192ca61ed7a845f5d9ead7a59eedd7762b5c0e3e941b4b016b2baa935200d59e8d480a55b82dbd5ebeeb58d94660a4'
+ data.tar.gz: 3386e4858cfe7ce20c46b10ac51f2cfa403f963bf0ef33e1eedb70f8eeb14076f539729700f442f65b6535bea30b8a9638e8c43bc79e6a7af56a8efaaf26f907
data/CHANGELOG.md CHANGED
@@ -1,5 +1,38 @@
  # Changelog
 
+ ## 0.17.1 (2025-08-09)
+
+ Full Changelog: [v0.17.0...v0.17.1](https://github.com/openai/openai-ruby/compare/v0.17.0...v0.17.1)
+
+ ### Chores
+
+ * collect metadata from type DSL ([d63cb9e](https://github.com/openai/openai-ruby/commit/d63cb9eb8efc60d43bd17c96bb6dc1e3b4254b26))
+ * **internal:** update comment in script ([a08be47](https://github.com/openai/openai-ruby/commit/a08be4787dfc910a7c9cc06bc72f9c40b40250a4))
+
+ ## 0.17.0 (2025-08-08)
+
+ Full Changelog: [v0.16.0...v0.17.0](https://github.com/openai/openai-ruby/compare/v0.16.0...v0.17.0)
+
+ ### Features
+
+ * **api:** adds GPT-5 and new API features: platform.openai.com/docs/guides/gpt-5 ([068a381](https://github.com/openai/openai-ruby/commit/068a381a17dd2d60865e67fcd17fa84d919f3f5c))
+ * **api:** manual updates ([1d79621](https://github.com/openai/openai-ruby/commit/1d79621120fbccc8dd41f5af6df5a9b1a9018e73))
+
+
+ ### Bug Fixes
+
+ * **client:** dont try to parse if content is missing ([#770](https://github.com/openai/openai-ruby/issues/770)) ([7f8f2d3](https://github.com/openai/openai-ruby/commit/7f8f2d32863fafc39ee4a884937673a2ad9be358))
+ * **client:** fix verbosity parameter location in Responses ([a6b7ae8](https://github.com/openai/openai-ruby/commit/a6b7ae8b568c2214d4883fad44c9cf2e8a7d53e2))
+ * **internal:** fix rbi error ([803f20b](https://github.com/openai/openai-ruby/commit/803f20ba0c3751d28175dca99853783f0d851645))
+ * **respones:** undo accidently deleted fields ([#177](https://github.com/openai/openai-ruby/issues/177)) ([90a7c3a](https://github.com/openai/openai-ruby/commit/90a7c3ac8d22cc90b8ecaa3b091598ea3bc73029))
+ * **responses:** remove incorrect verbosity param ([127e2d1](https://github.com/openai/openai-ruby/commit/127e2d1b96b72307178446f0aa8acc1d3ad31367))
+
+
+ ### Chores
+
+ * **internal:** increase visibility of internal helper method ([eddbcda](https://github.com/openai/openai-ruby/commit/eddbcda189ac0a864fc3dadc5dd3578d730c491f))
+ * update @stainless-api/prism-cli to v5.15.0 ([aaa7d89](https://github.com/openai/openai-ruby/commit/aaa7d895a3dba31f32cf5f4373a49d1571667fc6))
+
  ## 0.16.0 (2025-07-30)
 
  Full Changelog: [v0.15.0...v0.16.0](https://github.com/openai/openai-ruby/compare/v0.15.0...v0.16.0)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
  <!-- x-release-please-start-version -->
 
  ```ruby
- gem "openai", "~> 0.16.0"
+ gem "openai", "~> 0.17.1"
  ```
 
  <!-- x-release-please-end -->
@@ -30,10 +30,7 @@ openai = OpenAI::Client.new(
  api_key: ENV["OPENAI_API_KEY"] # This is the default and can be omitted
  )
 
- chat_completion = openai.chat.completions.create(
- messages: [{role: "user", content: "Say this is a test"}],
- model: :"gpt-4.1"
- )
+ chat_completion = openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5")
 
  puts(chat_completion)
  ```
@@ -45,7 +42,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
  ```ruby
  stream = openai.responses.stream(
  input: "Write a haiku about OpenAI.",
- model: :"gpt-4.1"
+ model: :"gpt-5"
  )
 
  stream.each do |event|
@@ -343,7 +340,7 @@ openai = OpenAI::Client.new(
  # Or, configure per-request:
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
- model: :"gpt-4.1",
+ model: :"gpt-5",
  request_options: {max_retries: 5}
  )
  ```
@@ -361,7 +358,7 @@ openai = OpenAI::Client.new(
  # Or, configure per-request:
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
- model: :"gpt-4.1",
+ model: :"gpt-5",
  request_options: {timeout: 5}
  )
  ```
@@ -396,7 +393,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete
  chat_completion =
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
- model: :"gpt-4.1",
+ model: :"gpt-5",
  request_options: {
  extra_query: {my_query_parameter: value},
  extra_body: {my_body_parameter: value},
@@ -444,7 +441,7 @@ You can provide typesafe request parameters like so:
  ```ruby
  openai.chat.completions.create(
  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
- model: :"gpt-4.1"
+ model: :"gpt-5"
  )
  ```
 
@@ -452,15 +449,12 @@ Or, equivalently:
 
  ```ruby
  # Hashes work, but are not typesafe:
- openai.chat.completions.create(
- messages: [{role: "user", content: "Say this is a test"}],
- model: :"gpt-4.1"
- )
+ openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5")
 
  # You can also splat a full Params class:
  params = OpenAI::Chat::CompletionCreateParams.new(
  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
- model: :"gpt-4.1"
+ model: :"gpt-5"
  )
  openai.chat.completions.create(**params)
  ```
@@ -470,11 +464,11 @@ openai.chat.completions.create(**params)
  Since this library does not depend on `sorbet-runtime`, it cannot provide [`T::Enum`](https://sorbet.org/docs/tenum) instances. Instead, we provide "tagged symbols" instead, which is always a primitive at runtime:
 
  ```ruby
- # :low
- puts(OpenAI::ReasoningEffort::LOW)
+ # :minimal
+ puts(OpenAI::ReasoningEffort::MINIMAL)
 
  # Revealed type: `T.all(OpenAI::ReasoningEffort, Symbol)`
- T.reveal_type(OpenAI::ReasoningEffort::LOW)
+ T.reveal_type(OpenAI::ReasoningEffort::MINIMAL)
  ```
 
  Enum parameters have a "relaxed" type, so you can either pass in enum constants or their literal value:
@@ -482,13 +476,13 @@ Enum parameters have a "relaxed" type, so you can either pass in enum constants
  ```ruby
  # Using the enum constants preserves the tagged type information:
  openai.chat.completions.create(
- reasoning_effort: OpenAI::ReasoningEffort::LOW,
+ reasoning_effort: OpenAI::ReasoningEffort::MINIMAL,
  # …
  )
 
  # Literal values are also permissible:
  openai.chat.completions.create(
- reasoning_effort: :low,
+ reasoning_effort: :minimal,
  # …
  )
  ```
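The README hunks above switch the example model to `gpt-5` and the example reasoning effort to `:minimal`. Combining the two, a request using both new values might look like the following sketch; it reuses the `openai` client from the earlier README snippet and is assembled from the examples shown, not copied from the README itself:

```ruby
# Sketch combining the gpt-5 model and the new :minimal reasoning effort
# from the README hunks above.
chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: :"gpt-5",
  reasoning_effort: OpenAI::ReasoningEffort::MINIMAL # or simply :minimal
)
puts(chat_completion)
```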
data/lib/openai/internal/transport/base_client.rb CHANGED
@@ -365,7 +365,7 @@ module OpenAI
  #
  # @raise [OpenAI::Errors::APIError]
  # @return [Array(Integer, Net::HTTPResponse, Enumerable<String>)]
- private def send_request(request, redirect_count:, retry_count:, send_retry_header:)
+ def send_request(request, redirect_count:, retry_count:, send_retry_header:)
  url, headers, max_retries, timeout = request.fetch_values(:url, :headers, :max_retries, :timeout)
  input = {**request.except(:timeout), deadline: OpenAI::Internal::Util.monotonic_secs + timeout}
 
data/lib/openai/internal/type/array_of.rb CHANGED
@@ -148,6 +148,7 @@ module OpenAI
  # @option spec [Boolean] :"nil?"
  def initialize(type_info, spec = {})
  @item_type_fn = OpenAI::Internal::Type::Converter.type_info(type_info || spec)
+ @meta = OpenAI::Internal::Type::Converter.meta_info(type_info, spec)
  @nilable = spec.fetch(:nil?, false)
  end
 
data/lib/openai/internal/type/base_model.rb CHANGED
@@ -52,6 +52,7 @@ module OpenAI
  #
  # @option spec [Boolean] :"nil?"
  private def add_field(name_sym, required:, type_info:, spec:)
+ meta = OpenAI::Internal::Type::Converter.meta_info(type_info, spec)
  type_fn, info =
  case type_info
  in Proc | OpenAI::Internal::Type::Converter | Class
@@ -74,7 +75,8 @@ module OpenAI
  required: required,
  nilable: nilable,
  const: const,
- type_fn: type_fn
+ type_fn: type_fn,
+ meta: meta
  }
 
  define_method(setter) do |value|
data/lib/openai/internal/type/converter.rb CHANGED
@@ -98,6 +98,33 @@ module OpenAI
  end
  end
 
+ # @api private
+ #
+ # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] .
+ #
+ # @option type_info [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
+ #
+ # @option type_info [Proc] :enum
+ #
+ # @option type_info [Proc] :union
+ #
+ # @option type_info [Boolean] :"nil?"
+ #
+ # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] .
+ #
+ # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
+ #
+ # @option spec [Proc] :enum
+ #
+ # @option spec [Proc] :union
+ #
+ # @option spec [Boolean] :"nil?"
+ #
+ # @return [Hash{Symbol=>Object}]
+ def meta_info(type_info, spec)
+ [spec, type_info].grep(Hash).first.to_h.except(:const, :enum, :union, :nil?)
+ end
+
  # @api private
  #
  # @param translate_names [Boolean]
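For readers skimming the diff, the new `meta_info` helper simply picks the first `Hash` among `spec` and `type_info` and strips the reserved DSL keys. A minimal standalone sketch of that behavior follows; the method body is copied from the hunk above, while the `:doc` key is a hypothetical example of extra metadata, not something the gem defines:

```ruby
# Standalone illustration of the extraction logic shown in the hunk above.
def meta_info(type_info, spec)
  [spec, type_info].grep(Hash).first.to_h.except(:const, :enum, :union, :nil?)
end

meta_info(String, {nil?: true, doc: "free-form note"})
# => {doc: "free-form note"}  -- reserved keys (:const, :enum, :union, :nil?) are dropped

meta_info(nil, {})
# => {}                       -- no metadata supplied
```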
data/lib/openai/internal/type/enum.rb CHANGED
@@ -19,11 +19,11 @@ module OpenAI
  # @example
  # # `chat_model` is a `OpenAI::ChatModel`
  # case chat_model
- # when OpenAI::ChatModel::GPT_4_1
+ # when OpenAI::ChatModel::GPT_5
  # # ...
- # when OpenAI::ChatModel::GPT_4_1_MINI
+ # when OpenAI::ChatModel::GPT_5_MINI
  # # ...
- # when OpenAI::ChatModel::GPT_4_1_NANO
+ # when OpenAI::ChatModel::GPT_5_NANO
  # # ...
  # else
  # puts(chat_model)
@@ -31,11 +31,11 @@ module OpenAI
  #
  # @example
  # case chat_model
- # in :"gpt-4.1"
+ # in :"gpt-5"
  # # ...
- # in :"gpt-4.1-mini"
+ # in :"gpt-5-mini"
  # # ...
- # in :"gpt-4.1-nano"
+ # in :"gpt-5-nano"
  # # ...
  # else
  # puts(chat_model)
data/lib/openai/internal/type/hash_of.rb CHANGED
@@ -168,6 +168,7 @@ module OpenAI
  # @option spec [Boolean] :"nil?"
  def initialize(type_info, spec = {})
  @item_type_fn = OpenAI::Internal::Type::Converter.type_info(type_info || spec)
+ @meta = OpenAI::Internal::Type::Converter.meta_info(type_info, spec)
  @nilable = spec.fetch(:nil?, false)
  end
 
data/lib/openai/internal/type/union.rb CHANGED
@@ -6,28 +6,24 @@ module OpenAI
  # @api private
  #
  # @example
- # # `chat_completion_content_part` is a `OpenAI::Chat::ChatCompletionContentPart`
- # case chat_completion_content_part
- # when OpenAI::Chat::ChatCompletionContentPartText
- # puts(chat_completion_content_part.text)
- # when OpenAI::Chat::ChatCompletionContentPartImage
- # puts(chat_completion_content_part.image_url)
- # when OpenAI::Chat::ChatCompletionContentPartInputAudio
- # puts(chat_completion_content_part.input_audio)
+ # # `custom_tool_input_format` is a `OpenAI::CustomToolInputFormat`
+ # case custom_tool_input_format
+ # when OpenAI::CustomToolInputFormat::Text
+ # puts(custom_tool_input_format.type)
+ # when OpenAI::CustomToolInputFormat::Grammar
+ # puts(custom_tool_input_format.definition)
  # else
- # puts(chat_completion_content_part)
+ # puts(custom_tool_input_format)
  # end
  #
  # @example
- # case chat_completion_content_part
- # in {type: :text, text: text}
- # puts(text)
- # in {type: :image_url, image_url: image_url}
- # puts(image_url)
- # in {type: :input_audio, input_audio: input_audio}
- # puts(input_audio)
+ # case custom_tool_input_format
+ # in {type: :text}
+ # # ...
+ # in {type: :grammar, definition: definition, syntax: syntax}
+ # puts(definition)
  # else
- # puts(chat_completion_content_part)
+ # puts(custom_tool_input_format)
  # end
  module Union
  include OpenAI::Internal::Type::Converter
@@ -37,20 +33,20 @@ module OpenAI
  #
  # All of the specified variant info for this union.
  #
- # @return [Array<Array(Symbol, Proc)>]
+ # @return [Array<Array(Symbol, Proc, Hash{Symbol=>Object})>]
  private def known_variants = (@known_variants ||= [])
 
  # @api private
  #
- # @return [Array<Array(Symbol, Object)>]
+ # @return [Array<Array(Symbol, Object, Hash{Symbol=>Object})>]
  protected def derefed_variants
- known_variants.map { |key, variant_fn| [key, variant_fn.call] }
+ known_variants.map { |key, variant_fn, meta| [key, variant_fn.call, meta] }
  end
 
  # All of the specified variants for this union.
  #
  # @return [Array<Object>]
- def variants = derefed_variants.map(&:last)
+ def variants = derefed_variants.map { _2 }
 
  # @api private
  #
@@ -76,12 +72,13 @@ module OpenAI
  #
  # @option spec [Boolean] :"nil?"
  private def variant(key, spec = nil)
+ meta = OpenAI::Internal::Type::Converter.meta_info(nil, spec)
  variant_info =
  case key
  in Symbol
- [key, OpenAI::Internal::Type::Converter.type_info(spec)]
+ [key, OpenAI::Internal::Type::Converter.type_info(spec), meta]
  in Proc | OpenAI::Internal::Type::Converter | Class | Hash
- [nil, OpenAI::Internal::Type::Converter.type_info(key)]
+ [nil, OpenAI::Internal::Type::Converter.type_info(key), meta]
  end
 
  known_variants << variant_info
@@ -104,7 +101,8 @@ module OpenAI
  return nil if key == OpenAI::Internal::OMIT
 
  key = key.to_sym if key.is_a?(String)
- known_variants.find { |k,| k == key }&.last&.call
+ _, found = known_variants.find { |k,| k == key }
+ found&.call
  else
  nil
  end
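One subtle point in the union hunks above: each `known_variants` entry now carries a third `meta` element, so `variants` switches from `.map(&:last)` (which would now return the metadata hash) to the numbered block parameter `_2`, which still picks out the resolved variant. A plain-Ruby sketch of that idiom, with made-up data and independent of the gem:

```ruby
# Why `.map(&:last)` had to change once a third element was appended to each tuple.
tuples = [[:text, "TextVariant", {}], [:grammar, "GrammarVariant", {}]]

tuples.map(&:last) # => [{}, {}]  -- grabs the newly added meta element
tuples.map { _2 }  # => ["TextVariant", "GrammarVariant"]  -- second element, as intended
```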
data/lib/openai/models/beta/assistant_create_params.rb CHANGED
@@ -49,12 +49,11 @@ module OpenAI
  optional :name, String, nil?: true
 
  # @!attribute reasoning_effort
- # **o-series models only**
- #
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -133,7 +132,7 @@ module OpenAI
  #
  # @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
  #
data/lib/openai/models/beta/assistant_update_params.rb CHANGED
@@ -49,12 +49,11 @@ module OpenAI
  optional :name, String, nil?: true
 
  # @!attribute reasoning_effort
- # **o-series models only**
- #
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -133,7 +132,7 @@ module OpenAI
  #
  # @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
  #
@@ -157,6 +156,18 @@ module OpenAI
 
  variant String
 
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5 }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_MINI }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_NANO }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_2025_08_07 }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_MINI_2025_08_07 }
+
+ variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_NANO_2025_08_07 }
+
  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1 }
  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_MINI }
  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_NANO }
@@ -238,6 +249,12 @@ module OpenAI
 
  # @!group
 
+ GPT_5 = :"gpt-5"
+ GPT_5_MINI = :"gpt-5-mini"
+ GPT_5_NANO = :"gpt-5-nano"
+ GPT_5_2025_08_07 = :"gpt-5-2025-08-07"
+ GPT_5_MINI_2025_08_07 = :"gpt-5-mini-2025-08-07"
+ GPT_5_NANO_2025_08_07 = :"gpt-5-nano-2025-08-07"
  GPT_4_1 = :"gpt-4.1"
  GPT_4_1_MINI = :"gpt-4.1-mini"
  GPT_4_1_NANO = :"gpt-4.1-nano"
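A hedged sketch of what the new constants enable: updating an existing assistant to a GPT-5 model. The placeholder assistant ID and the positional `update` signature are assumptions based on the gem's usual resource conventions, not something shown in this diff:

```ruby
# Hedged sketch; "asst_123" is a placeholder ID and the update signature is
# assumed from the gem's resource conventions.
openai.beta.assistants.update(
  "asst_123",
  model: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5 # => :"gpt-5"
)
```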
data/lib/openai/models/beta/threads/run_create_params.rb CHANGED
@@ -107,12 +107,11 @@ module OpenAI
  optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean
 
  # @!attribute reasoning_effort
- # **o-series models only**
- #
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -216,7 +215,7 @@ module OpenAI
  #
  # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
  #
data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb ADDED
@@ -0,0 +1,33 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+ module Models
+ module Chat
+ class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel
+ # @!attribute allowed_tools
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # @return [OpenAI::Models::Chat::ChatCompletionAllowedTools]
+ required :allowed_tools, -> { OpenAI::Chat::ChatCompletionAllowedTools }
+
+ # @!attribute type
+ # Allowed tool configuration type. Always `allowed_tools`.
+ #
+ # @return [Symbol, :allowed_tools]
+ required :type, const: :allowed_tools
+
+ # @!method initialize(allowed_tools:, type: :allowed_tools)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Chat::ChatCompletionAllowedToolChoice} for more details.
+ #
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # @param allowed_tools [OpenAI::Models::Chat::ChatCompletionAllowedTools] Constrains the tools available to the model to a pre-defined set.
+ #
+ # @param type [Symbol, :allowed_tools] Allowed tool configuration type. Always `allowed_tools`.
+ end
+ end
+
+ ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice
+ end
+ end
data/lib/openai/models/chat/chat_completion_allowed_tools.rb ADDED
@@ -0,0 +1,64 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+ module Models
+ module Chat
+ class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel
+ # @!attribute mode
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # `auto` allows the model to pick from among the allowed tools and generate a
+ # message.
+ #
+ # `required` requires the model to call one or more of the allowed tools.
+ #
+ # @return [Symbol, OpenAI::Models::Chat::ChatCompletionAllowedTools::Mode]
+ required :mode, enum: -> { OpenAI::Chat::ChatCompletionAllowedTools::Mode }
+
+ # @!attribute tools
+ # A list of tool definitions that the model should be allowed to call.
+ #
+ # For the Chat Completions API, the list of tool definitions might look like:
+ #
+ # ```json
+ # [
+ # { "type": "function", "function": { "name": "get_weather" } },
+ # { "type": "function", "function": { "name": "get_time" } }
+ # ]
+ # ```
+ #
+ # @return [Array<Hash{Symbol=>Object}>]
+ required :tools,
+ OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
+
+ # @!method initialize(mode:, tools:)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Chat::ChatCompletionAllowedTools} for more details.
+ #
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # @param mode [Symbol, OpenAI::Models::Chat::ChatCompletionAllowedTools::Mode] Constrains the tools available to the model to a pre-defined set.
+ #
+ # @param tools [Array<Hash{Symbol=>Object}>] A list of tool definitions that the model should be allowed to call.
+
+ # Constrains the tools available to the model to a pre-defined set.
+ #
+ # `auto` allows the model to pick from among the allowed tools and generate a
+ # message.
+ #
+ # `required` requires the model to call one or more of the allowed tools.
+ #
+ # @see OpenAI::Models::Chat::ChatCompletionAllowedTools#mode
+ module Mode
+ extend OpenAI::Internal::Type::Enum
+
+ AUTO = :auto
+ REQUIRED = :required
+
+ # @!method self.values
+ # @return [Array<Symbol>]
+ end
+ end
+ end
+ end
+ end
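Taken together, the two new chat models above describe an "allowed tools" constraint. A hedged sketch of how they might be constructed follows; the tool names are placeholders, and passing the result as `tool_choice:` is an assumption based on the `chat_completion_tool_choice_option.rb` entry in the file list, not something shown in these hunks:

```ruby
# Hedged sketch; tool names and the tool_choice wiring are assumptions.
allowed = OpenAI::Chat::ChatCompletionAllowedTools.new(
  mode: :auto, # or :required
  tools: [
    {type: "function", function: {name: "get_weather"}},
    {type: "function", function: {name: "get_time"}}
  ]
)

openai.chat.completions.create(
  messages: [{role: "user", content: "What's the weather?"}],
  model: :"gpt-5",
  tool_choice: OpenAI::Chat::ChatCompletionAllowedToolChoice.new(allowed_tools: allowed)
)
```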
data/lib/openai/models/chat/chat_completion_assistant_message_param.rb CHANGED
@@ -55,11 +55,9 @@ module OpenAI
  # @!attribute tool_calls
  # The tool calls generated by the model, such as function calls.
  #
- # @return [Array<OpenAI::Models::Chat::ChatCompletionMessageToolCall>, nil]
+ # @return [Array<OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall, OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall>, nil]
  optional :tool_calls,
- -> {
- OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionMessageToolCall]
- }
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionMessageToolCall] }
 
  # @!method initialize(audio: nil, content: nil, function_call: nil, name: nil, refusal: nil, tool_calls: nil, role: :assistant)
  # Some parameter documentations has been truncated, see
@@ -77,7 +75,7 @@ module OpenAI
  #
  # @param refusal [String, nil] The refusal message by the assistant.
  #
- # @param tool_calls [Array<OpenAI::Models::Chat::ChatCompletionMessageToolCall>] The tool calls generated by the model, such as function calls.
+ # @param tool_calls [Array<OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall, OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall>] The tool calls generated by the model, such as function calls.
  #
  # @param role [Symbol, :assistant] The role of the messages author, in this case `assistant`.