openai 0.16.0 → 0.17.0

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (169)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/README.md +14 -20
  4. data/lib/openai/internal/transport/base_client.rb +1 -1
  5. data/lib/openai/internal/type/enum.rb +6 -6
  6. data/lib/openai/internal/type/union.rb +13 -17
  7. data/lib/openai/models/beta/assistant_create_params.rb +4 -5
  8. data/lib/openai/models/beta/assistant_update_params.rb +22 -5
  9. data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
  10. data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
  11. data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
  12. data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
  13. data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
  14. data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
  15. data/lib/openai/models/chat/chat_completion_message.rb +3 -5
  16. data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
  17. data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
  18. data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
  19. data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
  20. data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
  21. data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
  22. data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
  23. data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
  24. data/lib/openai/models/chat/completion_create_params.rb +35 -12
  25. data/lib/openai/models/chat_model.rb +7 -0
  26. data/lib/openai/models/custom_tool_input_format.rb +76 -0
  27. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
  28. data/lib/openai/models/evals/run_cancel_response.rb +2 -2
  29. data/lib/openai/models/evals/run_create_params.rb +2 -2
  30. data/lib/openai/models/evals/run_create_response.rb +2 -2
  31. data/lib/openai/models/evals/run_list_response.rb +2 -2
  32. data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
  33. data/lib/openai/models/reasoning.rb +4 -5
  34. data/lib/openai/models/reasoning_effort.rb +4 -4
  35. data/lib/openai/models/response_format_text_grammar.rb +27 -0
  36. data/lib/openai/models/response_format_text_python.rb +20 -0
  37. data/lib/openai/models/responses/custom_tool.rb +48 -0
  38. data/lib/openai/models/responses/response.rb +20 -12
  39. data/lib/openai/models/responses/response_create_params.rb +48 -10
  40. data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
  41. data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
  42. data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
  43. data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
  44. data/lib/openai/models/responses/response_input_item.rb +7 -1
  45. data/lib/openai/models/responses/response_output_item.rb +4 -1
  46. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  47. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  48. data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
  49. data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
  50. data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
  51. data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
  52. data/lib/openai/models/responses/response_stream_event.rb +13 -11
  53. data/lib/openai/models/responses/response_text_config.rb +27 -1
  54. data/lib/openai/models/responses/tool.rb +5 -1
  55. data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
  56. data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
  57. data/lib/openai/models/vector_store_search_params.rb +6 -1
  58. data/lib/openai/models.rb +6 -0
  59. data/lib/openai/resources/beta/assistants.rb +2 -2
  60. data/lib/openai/resources/beta/threads/runs.rb +2 -2
  61. data/lib/openai/resources/chat/completions.rb +16 -10
  62. data/lib/openai/resources/responses.rb +38 -22
  63. data/lib/openai/version.rb +1 -1
  64. data/lib/openai.rb +19 -2
  65. data/rbi/openai/internal/transport/base_client.rbi +1 -1
  66. data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
  67. data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
  68. data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
  69. data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
  70. data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
  71. data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
  72. data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
  73. data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
  74. data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
  75. data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
  76. data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
  77. data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
  78. data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
  79. data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
  80. data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
  81. data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
  82. data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
  83. data/rbi/openai/models/chat/completion_create_params.rbi +106 -25
  84. data/rbi/openai/models/chat_model.rbi +11 -0
  85. data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
  86. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
  87. data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
  88. data/rbi/openai/models/evals/run_create_params.rbi +4 -0
  89. data/rbi/openai/models/evals/run_create_response.rbi +2 -0
  90. data/rbi/openai/models/evals/run_list_response.rbi +2 -0
  91. data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
  92. data/rbi/openai/models/reasoning.rbi +6 -8
  93. data/rbi/openai/models/reasoning_effort.rbi +4 -4
  94. data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
  95. data/rbi/openai/models/response_format_text_python.rbi +30 -0
  96. data/rbi/openai/models/responses/custom_tool.rbi +96 -0
  97. data/rbi/openai/models/responses/response.rbi +15 -5
  98. data/rbi/openai/models/responses/response_create_params.rbi +94 -7
  99. data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
  100. data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
  101. data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
  102. data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
  103. data/rbi/openai/models/responses/response_input_item.rbi +2 -0
  104. data/rbi/openai/models/responses/response_output_item.rbi +2 -1
  105. data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
  106. data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
  107. data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
  108. data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
  109. data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
  110. data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
  111. data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
  112. data/rbi/openai/models/responses/response_text_config.rbi +64 -1
  113. data/rbi/openai/models/responses/tool.rbi +1 -0
  114. data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
  115. data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
  116. data/rbi/openai/models/vector_store_search_params.rbi +12 -1
  117. data/rbi/openai/models.rbi +6 -0
  118. data/rbi/openai/resources/beta/assistants.rbi +6 -8
  119. data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
  120. data/rbi/openai/resources/chat/completions.rbi +44 -19
  121. data/rbi/openai/resources/responses.rbi +215 -41
  122. data/sig/openai/internal/transport/base_client.rbs +1 -1
  123. data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
  124. data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
  125. data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
  126. data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
  127. data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
  128. data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
  129. data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
  130. data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
  131. data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
  132. data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
  133. data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
  134. data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
  135. data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
  136. data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
  137. data/sig/openai/models/chat/completion_create_params.rbs +23 -6
  138. data/sig/openai/models/chat_model.rbs +15 -1
  139. data/sig/openai/models/custom_tool_input_format.rbs +61 -0
  140. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
  141. data/sig/openai/models/reasoning_effort.rbs +2 -1
  142. data/sig/openai/models/response_format_text_grammar.rbs +15 -0
  143. data/sig/openai/models/response_format_text_python.rbs +13 -0
  144. data/sig/openai/models/responses/custom_tool.rbs +43 -0
  145. data/sig/openai/models/responses/response.rbs +2 -0
  146. data/sig/openai/models/responses/response_create_params.rbs +19 -0
  147. data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
  148. data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
  149. data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
  150. data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
  151. data/sig/openai/models/responses/response_input_item.rbs +2 -0
  152. data/sig/openai/models/responses/response_output_item.rbs +1 -0
  153. data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
  154. data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
  155. data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
  156. data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
  157. data/sig/openai/models/responses/response_stream_event.rbs +4 -2
  158. data/sig/openai/models/responses/response_text_config.rbs +22 -3
  159. data/sig/openai/models/responses/tool.rbs +1 -0
  160. data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
  161. data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
  162. data/sig/openai/models/vector_store_search_params.rbs +2 -1
  163. data/sig/openai/models.rbs +6 -0
  164. data/sig/openai/resources/chat/completions.rbs +4 -2
  165. data/sig/openai/resources/responses.rbs +32 -0
  166. metadata +59 -8
  167. data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
  168. data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
  169. data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
data/rbi/openai/models/vector_store_search_params.rbi
@@ -157,6 +157,7 @@ module OpenAI
           )
         end

+        # Enable re-ranking; set to `none` to disable, which can help reduce latency.
         sig do
           returns(
             T.nilable(
@@ -188,7 +189,11 @@ module OpenAI
             score_threshold: Float
           ).returns(T.attached_class)
         end
-        def self.new(ranker: nil, score_threshold: nil)
+        def self.new(
+          # Enable re-ranking; set to `none` to disable, which can help reduce latency.
+          ranker: nil,
+          score_threshold: nil
+        )
         end

         sig do
@@ -203,6 +208,7 @@ module OpenAI
         def to_hash
         end

+        # Enable re-ranking; set to `none` to disable, which can help reduce latency.
         module Ranker
           extend OpenAI::Internal::Type::Enum

@@ -215,6 +221,11 @@ module OpenAI
           end
           OrSymbol = T.type_alias { T.any(Symbol, String) }

+          NONE =
+            T.let(
+              :none,
+              OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::TaggedSymbol
+            )
           AUTO =
             T.let(
               :auto,
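
The `none` ranker added above lets callers skip the re-ranking pass during vector store search. A minimal sketch of how it might be used, assuming the gem's usual client entry point and that `vector_stores.search` coerces a `ranking_options` hash into `OpenAI::VectorStoreSearchParams::RankingOptions`; the store ID and query are placeholders:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Search without re-ranking: `ranker: :none` is the value added in 0.17.0,
# which the new doc comment notes can help reduce latency.
results = client.vector_stores.search(
  "vs_123",                    # placeholder vector store ID
  query: "refund policy",
  ranking_options: {
    ranker: :none,
    score_threshold: 0.0
  }
)

results.each { |hit| puts hit.score }
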
data/rbi/openai/models.rbi
@@ -55,6 +55,8 @@ module OpenAI

   CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse

+  CustomToolInputFormat = OpenAI::Models::CustomToolInputFormat
+
   Embedding = OpenAI::Models::Embedding

   EmbeddingCreateParams = OpenAI::Models::EmbeddingCreateParams
@@ -172,6 +174,10 @@ module OpenAI

   ResponseFormatText = OpenAI::Models::ResponseFormatText

+  ResponseFormatTextGrammar = OpenAI::Models::ResponseFormatTextGrammar
+
+  ResponseFormatTextPython = OpenAI::Models::ResponseFormatTextPython
+
   Responses = OpenAI::Models::Responses

   ResponsesModel = OpenAI::Models::ResponsesModel
data/rbi/openai/resources/beta/assistants.rbi
@@ -60,12 +60,11 @@ module OpenAI
         metadata: nil,
         # The name of the assistant. The maximum length is 256 characters.
         name: nil,
-        # **o-series models only**
-        #
         # Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-        # result in faster responses and fewer tokens used on reasoning in a response.
+        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+        # effort can result in faster responses and fewer tokens used on reasoning in a
+        # response.
         reasoning_effort: nil,
         # Specifies the format that the model must output. Compatible with
         # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -188,12 +187,11 @@ module OpenAI
         model: nil,
         # The name of the assistant. The maximum length is 256 characters.
         name: nil,
-        # **o-series models only**
-        #
         # Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-        # result in faster responses and fewer tokens used on reasoning in a response.
+        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+        # effort can result in faster responses and fewer tokens used on reasoning in a
+        # response.
         reasoning_effort: nil,
         # Specifies the format that the model must output. Compatible with
         # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
data/rbi/openai/resources/beta/threads/runs.rbi
@@ -123,12 +123,11 @@ module OpenAI
         # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
         # during tool use.
         parallel_tool_calls: nil,
-        # Body param: **o-series models only**
-        #
-        # Constrains effort on reasoning for
+        # Body param: Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-        # result in faster responses and fewer tokens used on reasoning in a response.
+        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+        # effort can result in faster responses and fewer tokens used on reasoning in a
+        # response.
         reasoning_effort: nil,
         # Body param: Specifies the format that the model must output. Compatible with
         # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -303,12 +302,11 @@ module OpenAI
         # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
         # during tool use.
         parallel_tool_calls: nil,
-        # Body param: **o-series models only**
-        #
-        # Constrains effort on reasoning for
+        # Body param: Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-        # result in faster responses and fewer tokens used on reasoning in a response.
+        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+        # effort can result in faster responses and fewer tokens used on reasoning in a
+        # response.
         reasoning_effort: nil,
         # Body param: Specifies the format that the model must output. Compatible with
         # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
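
The two files above (`resources/beta/assistants.rbi` and `resources/beta/threads/runs.rbi`) carry the same documentation change: `minimal` joins `low`, `medium`, and `high` as an accepted `reasoning_effort` value, and the "o-series models only" caveat is gone. A hedged sketch of passing the new value when updating an assistant, assuming the standard resource call shape; the assistant ID is a placeholder:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# `reasoning_effort: :minimal` is the newly documented value; the existing
# `:low`, `:medium`, and `:high` symbols continue to work.
client.beta.assistants.update(
  "asst_123",                  # placeholder assistant ID
  reasoning_effort: :minimal
)
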
data/rbi/openai/resources/chat/completions.rbi
@@ -89,18 +89,25 @@ module OpenAI
            tool_choice:
              T.any(
                OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol,
-               OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash
+               OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash,
+               OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash,
+               OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash
              ),
            tools:
              T::Array[
                T.any(
-                 OpenAI::Chat::ChatCompletionTool::OrHash,
-                 OpenAI::StructuredOutput::JsonSchemaConverter
+                 OpenAI::StructuredOutput::JsonSchemaConverter,
+                 OpenAI::Chat::ChatCompletionFunctionTool::OrHash,
+                 OpenAI::Chat::ChatCompletionCustomTool::OrHash
                )
              ],
            top_logprobs: T.nilable(Integer),
            top_p: T.nilable(Float),
            user: String,
+           verbosity:
+             T.nilable(
+               OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol
+             ),
            web_search_options:
              OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash,
            stream: T.noreturn,
@@ -210,12 +217,11 @@ module OpenAI
         # hit rates. Replaces the `user` field.
         # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
         prompt_cache_key: nil,
-        # **o-series models only**
-        #
         # Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-        # result in faster responses and fewer tokens used on reasoning in a response.
+        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+        # effort can result in faster responses and fewer tokens used on reasoning in a
+        # response.
         reasoning_effort: nil,
         # An object specifying the format that the model must output.
         #
@@ -286,9 +292,9 @@ module OpenAI
         # `none` is the default when no tools are present. `auto` is the default if tools
         # are present.
         tool_choice: nil,
-        # A list of tools the model may call. Currently, only functions are supported as a
-        # tool. Use this to provide a list of functions the model may generate JSON inputs
-        # for. A max of 128 functions are supported.
+        # A list of tools the model may call. You can provide either
+        # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+        # or [function tools](https://platform.openai.com/docs/guides/function-calling).
         tools: nil,
         # An integer between 0 and 20 specifying the number of most likely tokens to
         # return at each token position, each with an associated log probability.
@@ -306,6 +312,10 @@ module OpenAI
         # similar requests and to help OpenAI detect and prevent abuse.
         # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         user: nil,
+        # Constrains the verbosity of the model's response. Lower values will result in
+        # more concise responses, while higher values will result in more verbose
+        # responses. Currently supported values are `low`, `medium`, and `high`.
+        verbosity: nil,
         # This tool searches the web for relevant results to use in a response. Learn more
         # about the
         # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
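
The hunks above widen the chat completion request signature: `tool_choice` now also accepts allowed-tool and custom named-tool choices, `tools` takes both function and custom tools, and a nullable `verbosity` enum is added. A hedged sketch of a call exercising the two new scalar options, assuming the gem's standard entry point; the model name is illustrative only:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

completion = client.chat.completions.create(
  model: "gpt-5",              # illustrative model name
  messages: [{role: "user", content: "Summarize RFC 2119 in two sentences."}],
  reasoning_effort: :minimal,  # `minimal` is newly documented in this release
  verbosity: :low              # new nullable enum: low | medium | high
)

puts completion.choices.first.message.content

Because tools and message tool calls are now unions of function and custom variants, code that assumed every tool call exposes `function` may need a type check. The custom branch below is an assumption based on the new `ChatCompletionMessageCustomToolCall` model; verify field names against the generated RBI files:

# Dispatch on the tool-call variant; `completion` is the object created above.
message = completion.choices.first.message

Array(message.tool_calls).each do |tool_call|
  case tool_call.type
  when :function
    # Same shape as before the split into ChatCompletionMessageFunctionToolCall.
    puts "#{tool_call.function.name}(#{tool_call.function.arguments})"
  when :custom
    # Assumed minimal handling for ChatCompletionMessageCustomToolCall.
    puts "custom tool call #{tool_call.id}"
  end
end
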
data/rbi/openai/resources/chat/completions.rbi (continued)
@@ -398,12 +408,24 @@ module OpenAI
            tool_choice:
              T.any(
                OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol,
-               OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash
+               OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash,
+               OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash,
+               OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash
              ),
-           tools: T::Array[OpenAI::Chat::ChatCompletionTool::OrHash],
+           tools:
+             T::Array[
+               T.any(
+                 OpenAI::Chat::ChatCompletionFunctionTool::OrHash,
+                 OpenAI::Chat::ChatCompletionCustomTool::OrHash
+               )
+             ],
            top_logprobs: T.nilable(Integer),
            top_p: T.nilable(Float),
            user: String,
+           verbosity:
+             T.nilable(
+               OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol
+             ),
            web_search_options:
              OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash,
            stream: T.noreturn,
@@ -513,12 +535,11 @@ module OpenAI
         # hit rates. Replaces the `user` field.
         # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
         prompt_cache_key: nil,
-        # **o-series models only**
-        #
         # Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-        # result in faster responses and fewer tokens used on reasoning in a response.
+        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+        # effort can result in faster responses and fewer tokens used on reasoning in a
+        # response.
         reasoning_effort: nil,
         # An object specifying the format that the model must output.
         #
@@ -589,9 +610,9 @@ module OpenAI
         # `none` is the default when no tools are present. `auto` is the default if tools
         # are present.
         tool_choice: nil,
-        # A list of tools the model may call. Currently, only functions are supported as a
-        # tool. Use this to provide a list of functions the model may generate JSON inputs
-        # for. A max of 128 functions are supported.
+        # A list of tools the model may call. You can provide either
+        # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+        # or [function tools](https://platform.openai.com/docs/guides/function-calling).
         tools: nil,
         # An integer between 0 and 20 specifying the number of most likely tokens to
         # return at each token position, each with an associated log probability.
@@ -609,6 +630,10 @@ module OpenAI
         # similar requests and to help OpenAI detect and prevent abuse.
         # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         user: nil,
+        # Constrains the verbosity of the model's response. Lower values will result in
+        # more concise responses, while higher values will result in more verbose
+        # responses. Currently supported values are `low`, `medium`, and `high`.
+        verbosity: nil,
         # This tool searches the web for relevant results to use in a response. Learn more
         # about the
         # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
  # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).