openai 0.15.0 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (174)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +47 -0
  3. data/README.md +14 -20
  4. data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
  5. data/lib/openai/helpers/structured_output/union_of.rb +11 -1
  6. data/lib/openai/internal/transport/base_client.rb +1 -1
  7. data/lib/openai/internal/type/enum.rb +6 -6
  8. data/lib/openai/internal/type/union.rb +13 -17
  9. data/lib/openai/models/beta/assistant_create_params.rb +4 -5
  10. data/lib/openai/models/beta/assistant_update_params.rb +22 -5
  11. data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
  12. data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
  13. data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
  14. data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
  15. data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
  16. data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
  17. data/lib/openai/models/chat/chat_completion_message.rb +3 -5
  18. data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
  19. data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
  20. data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
  21. data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
  22. data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
  23. data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
  24. data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
  25. data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
  26. data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
  27. data/lib/openai/models/chat/completion_create_params.rb +65 -16
  28. data/lib/openai/models/chat_model.rb +7 -0
  29. data/lib/openai/models/custom_tool_input_format.rb +76 -0
  30. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
  31. data/lib/openai/models/evals/run_cancel_response.rb +2 -2
  32. data/lib/openai/models/evals/run_create_params.rb +2 -2
  33. data/lib/openai/models/evals/run_create_response.rb +2 -2
  34. data/lib/openai/models/evals/run_list_response.rb +2 -2
  35. data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
  36. data/lib/openai/models/reasoning.rb +4 -5
  37. data/lib/openai/models/reasoning_effort.rb +4 -4
  38. data/lib/openai/models/response_format_text_grammar.rb +27 -0
  39. data/lib/openai/models/response_format_text_python.rb +20 -0
  40. data/lib/openai/models/responses/custom_tool.rb +48 -0
  41. data/lib/openai/models/responses/response.rb +70 -16
  42. data/lib/openai/models/responses/response_create_params.rb +78 -14
  43. data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
  44. data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
  45. data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
  46. data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
  47. data/lib/openai/models/responses/response_input_item.rb +7 -1
  48. data/lib/openai/models/responses/response_output_item.rb +4 -1
  49. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  50. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  51. data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
  52. data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
  53. data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
  54. data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
  55. data/lib/openai/models/responses/response_stream_event.rb +13 -11
  56. data/lib/openai/models/responses/response_text_config.rb +27 -1
  57. data/lib/openai/models/responses/tool.rb +5 -1
  58. data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
  59. data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
  60. data/lib/openai/models/vector_store_search_params.rb +6 -1
  61. data/lib/openai/models.rb +6 -0
  62. data/lib/openai/resources/beta/assistants.rb +2 -2
  63. data/lib/openai/resources/beta/threads/runs.rb +2 -2
  64. data/lib/openai/resources/chat/completions.rb +26 -12
  65. data/lib/openai/resources/responses.rb +77 -36
  66. data/lib/openai/version.rb +1 -1
  67. data/lib/openai.rb +19 -2
  68. data/rbi/openai/internal/transport/base_client.rbi +1 -1
  69. data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
  70. data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
  71. data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
  72. data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
  73. data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
  74. data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
  75. data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
  76. data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
  77. data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
  78. data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
  79. data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
  80. data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
  81. data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
  82. data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
  83. data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
  84. data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
  85. data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
  86. data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
  87. data/rbi/openai/models/chat/completion_create_params.rbi +150 -31
  88. data/rbi/openai/models/chat_model.rbi +11 -0
  89. data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
  90. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
  91. data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
  92. data/rbi/openai/models/evals/run_create_params.rbi +4 -0
  93. data/rbi/openai/models/evals/run_create_response.rbi +2 -0
  94. data/rbi/openai/models/evals/run_list_response.rbi +2 -0
  95. data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
  96. data/rbi/openai/models/reasoning.rbi +6 -8
  97. data/rbi/openai/models/reasoning_effort.rbi +4 -4
  98. data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
  99. data/rbi/openai/models/response_format_text_python.rbi +30 -0
  100. data/rbi/openai/models/responses/custom_tool.rbi +96 -0
  101. data/rbi/openai/models/responses/response.rbi +59 -11
  102. data/rbi/openai/models/responses/response_create_params.rbi +138 -13
  103. data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
  104. data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
  105. data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
  106. data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
  107. data/rbi/openai/models/responses/response_input_item.rbi +2 -0
  108. data/rbi/openai/models/responses/response_output_item.rbi +2 -1
  109. data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
  110. data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
  111. data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
  112. data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
  113. data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
  114. data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
  115. data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
  116. data/rbi/openai/models/responses/response_text_config.rbi +64 -1
  117. data/rbi/openai/models/responses/tool.rbi +1 -0
  118. data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
  119. data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
  120. data/rbi/openai/models/vector_store_search_params.rbi +12 -1
  121. data/rbi/openai/models.rbi +6 -0
  122. data/rbi/openai/resources/beta/assistants.rbi +6 -8
  123. data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
  124. data/rbi/openai/resources/chat/completions.rbi +78 -25
  125. data/rbi/openai/resources/responses.rbi +249 -47
  126. data/sig/openai/internal/transport/base_client.rbs +1 -1
  127. data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
  128. data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
  129. data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
  130. data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
  131. data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
  132. data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
  133. data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
  134. data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
  135. data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
  136. data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
  137. data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
  138. data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
  139. data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
  140. data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
  141. data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
  142. data/sig/openai/models/chat/completion_create_params.rbs +37 -6
  143. data/sig/openai/models/chat_model.rbs +15 -1
  144. data/sig/openai/models/custom_tool_input_format.rbs +61 -0
  145. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
  146. data/sig/openai/models/reasoning_effort.rbs +2 -1
  147. data/sig/openai/models/response_format_text_grammar.rbs +15 -0
  148. data/sig/openai/models/response_format_text_python.rbs +13 -0
  149. data/sig/openai/models/responses/custom_tool.rbs +43 -0
  150. data/sig/openai/models/responses/response.rbs +16 -0
  151. data/sig/openai/models/responses/response_create_params.rbs +33 -0
  152. data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
  153. data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
  154. data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
  155. data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
  156. data/sig/openai/models/responses/response_input_item.rbs +2 -0
  157. data/sig/openai/models/responses/response_output_item.rbs +1 -0
  158. data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
  159. data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
  160. data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
  161. data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
  162. data/sig/openai/models/responses/response_stream_event.rbs +4 -2
  163. data/sig/openai/models/responses/response_text_config.rbs +22 -3
  164. data/sig/openai/models/responses/tool.rbs +1 -0
  165. data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
  166. data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
  167. data/sig/openai/models/vector_store_search_params.rbs +2 -1
  168. data/sig/openai/models.rbs +6 -0
  169. data/sig/openai/resources/chat/completions.rbs +8 -2
  170. data/sig/openai/resources/responses.rbs +36 -0
  171. metadata +59 -8
  172. data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
  173. data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
  174. data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
@@ -182,13 +182,20 @@ module OpenAI
182
182
  # @return [Float, nil]
183
183
  optional :presence_penalty, Float, nil?: true
184
184
 
185
- # @!attribute reasoning_effort
186
- # **o-series models only**
185
+ # @!attribute prompt_cache_key
186
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
187
+ # hit rates. Replaces the `user` field.
188
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
187
189
  #
190
+ # @return [String, nil]
191
+ optional :prompt_cache_key, String
192
+
193
+ # @!attribute reasoning_effort
188
194
  # Constrains effort on reasoning for
189
195
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
190
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
191
- # result in faster responses and fewer tokens used on reasoning in a response.
196
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
197
+ # effort can result in faster responses and fewer tokens used on reasoning in a
198
+ # response.
192
199
  #
193
200
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
194
201
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -208,6 +215,16 @@ module OpenAI
208
215
  # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject, nil]
209
216
  optional :response_format, union: -> { OpenAI::Chat::CompletionCreateParams::ResponseFormat }
210
217
 
218
+ # @!attribute safety_identifier
219
+ # A stable identifier used to help detect users of your application that may be
220
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
221
+ # identifies each user. We recommend hashing their username or email address, in
222
+ # order to avoid sending us any identifying information.
223
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
224
+ #
225
+ # @return [String, nil]
226
+ optional :safety_identifier, String
227
+
211
228
  # @!attribute seed
212
229
  # This feature is in Beta. If specified, our system will make a best effort to
213
230
  # sample deterministically, such that repeated requests with the same `seed` and
@@ -285,13 +302,13 @@ module OpenAI
285
302
  # `none` is the default when no tools are present. `auto` is the default if tools
286
303
  # are present.
287
304
  #
288
- # @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, nil]
305
+ # @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom, nil]
289
306
  optional :tool_choice, union: -> { OpenAI::Chat::ChatCompletionToolChoiceOption }
290
307
 
291
308
  # @!attribute tools
292
- # A list of tools the model may call. Currently, only functions are supported as a
293
- # tool. Use this to provide a list of functions the model may generate JSON inputs
294
- # for. A max of 128 functions are supported.
309
+ # A list of tools the model may call. You can provide either
310
+ # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
311
+ # or [function tools](https://platform.openai.com/docs/guides/function-calling).
295
312
  #
296
313
  # @return [Array<OpenAI::Models::Chat::ChatCompletionTool, OpenAI::StructuredOutput::JsonSchemaConverter>, nil]
297
314
  optional :tools,
@@ -320,13 +337,25 @@ module OpenAI
320
337
  optional :top_p, Float, nil?: true
321
338
 
322
339
  # @!attribute user
323
- # A stable identifier for your end-users. Used to boost cache hit rates by better
324
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
325
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
340
+ # @deprecated
341
+ #
342
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
343
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
344
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
345
+ # similar requests and to help OpenAI detect and prevent abuse.
346
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
326
347
  #
327
348
  # @return [String, nil]
328
349
  optional :user, String
329
350
 
351
+ # @!attribute verbosity
352
+ # Constrains the verbosity of the model's response. Lower values will result in
353
+ # more concise responses, while higher values will result in more verbose
354
+ # responses. Currently supported values are `low`, `medium`, and `high`.
355
+ #
356
+ # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil]
357
+ optional :verbosity, enum: -> { OpenAI::Chat::CompletionCreateParams::Verbosity }, nil?: true
358
+
330
359
  # @!attribute web_search_options
331
360
  # This tool searches the web for relevant results to use in a response. Learn more
332
361
  # about the
@@ -335,7 +364,7 @@ module OpenAI
335
364
  # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
336
365
  optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }
337
366
 
338
- # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, reasoning_effort: nil, response_format: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
367
+ # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
339
368
  # Some parameter documentations has been truncated, see
340
369
  # {OpenAI::Models::Chat::CompletionCreateParams} for more details.
341
370
  #
@@ -371,10 +400,14 @@ module OpenAI
371
400
  #
372
401
  # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
373
402
  #
374
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
403
+ # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
404
+ #
405
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
375
406
  #
376
407
  # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
377
408
  #
409
+ # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
410
+ #
378
411
  # @param seed [Integer, nil] This feature is in Beta.
379
412
  #
380
413
  # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
@@ -387,15 +420,17 @@ module OpenAI
387
420
  #
388
421
  # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
389
422
  #
390
- # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] Controls which (if any) tool is called by the model.
423
+ # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model.
391
424
  #
392
- # @param tools [Array<OpenAI::Models::Chat::ChatCompletionTool, OpenAI::StructuredOutput::JsonSchemaConverter>] A list of tools the model may call. Currently, only functions are supported as a
425
+ # @param tools [Array<OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Chat::ChatCompletionFunctionTool, OpenAI::Models::Chat::ChatCompletionCustomTool>] A list of tools the model may call. You can provide either
393
426
  #
394
427
  # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
395
428
  #
396
429
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
397
430
  #
398
- # @param user [String] A stable identifier for your end-users.
431
+ # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
432
+ #
433
+ # @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
399
434
  #
400
435
  # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
401
436
  #
@@ -596,6 +631,20 @@ module OpenAI
596
631
  StringArray = OpenAI::Internal::Type::ArrayOf[String]
597
632
  end
598
633
 
634
+ # Constrains the verbosity of the model's response. Lower values will result in
635
+ # more concise responses, while higher values will result in more verbose
636
+ # responses. Currently supported values are `low`, `medium`, and `high`.
637
+ module Verbosity
638
+ extend OpenAI::Internal::Type::Enum
639
+
640
+ LOW = :low
641
+ MEDIUM = :medium
642
+ HIGH = :high
643
+
644
+ # @!method self.values
645
+ # @return [Array<Symbol>]
646
+ end
647
+
599
648
  class WebSearchOptions < OpenAI::Internal::Type::BaseModel
600
649
  # @!attribute search_context_size
601
650
  # High level guidance for the amount of context window space to use for the
@@ -5,6 +5,13 @@ module OpenAI
5
5
  module ChatModel
6
6
  extend OpenAI::Internal::Type::Enum
7
7
 
8
+ GPT_5 = :"gpt-5"
9
+ GPT_5_MINI = :"gpt-5-mini"
10
+ GPT_5_NANO = :"gpt-5-nano"
11
+ GPT_5_2025_08_07 = :"gpt-5-2025-08-07"
12
+ GPT_5_MINI_2025_08_07 = :"gpt-5-mini-2025-08-07"
13
+ GPT_5_NANO_2025_08_07 = :"gpt-5-nano-2025-08-07"
14
+ GPT_5_CHAT_LATEST = :"gpt-5-chat-latest"
8
15
  GPT_4_1 = :"gpt-4.1"
9
16
  GPT_4_1_MINI = :"gpt-4.1-mini"
10
17
  GPT_4_1_NANO = :"gpt-4.1-nano"
@@ -0,0 +1,76 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ # The input format for the custom tool. Default is unconstrained text.
6
+ module CustomToolInputFormat
7
+ extend OpenAI::Internal::Type::Union
8
+
9
+ discriminator :type
10
+
11
+ # Unconstrained free-form text.
12
+ variant :text, -> { OpenAI::CustomToolInputFormat::Text }
13
+
14
+ # A grammar defined by the user.
15
+ variant :grammar, -> { OpenAI::CustomToolInputFormat::Grammar }
16
+
17
+ class Text < OpenAI::Internal::Type::BaseModel
18
+ # @!attribute type
19
+ # Unconstrained text format. Always `text`.
20
+ #
21
+ # @return [Symbol, :text]
22
+ required :type, const: :text
23
+
24
+ # @!method initialize(type: :text)
25
+ # Unconstrained free-form text.
26
+ #
27
+ # @param type [Symbol, :text] Unconstrained text format. Always `text`.
28
+ end
29
+
30
+ class Grammar < OpenAI::Internal::Type::BaseModel
31
+ # @!attribute definition
32
+ # The grammar definition.
33
+ #
34
+ # @return [String]
35
+ required :definition, String
36
+
37
+ # @!attribute syntax
38
+ # The syntax of the grammar definition. One of `lark` or `regex`.
39
+ #
40
+ # @return [Symbol, OpenAI::Models::CustomToolInputFormat::Grammar::Syntax]
41
+ required :syntax, enum: -> { OpenAI::CustomToolInputFormat::Grammar::Syntax }
42
+
43
+ # @!attribute type
44
+ # Grammar format. Always `grammar`.
45
+ #
46
+ # @return [Symbol, :grammar]
47
+ required :type, const: :grammar
48
+
49
+ # @!method initialize(definition:, syntax:, type: :grammar)
50
+ # A grammar defined by the user.
51
+ #
52
+ # @param definition [String] The grammar definition.
53
+ #
54
+ # @param syntax [Symbol, OpenAI::Models::CustomToolInputFormat::Grammar::Syntax] The syntax of the grammar definition. One of `lark` or `regex`.
55
+ #
56
+ # @param type [Symbol, :grammar] Grammar format. Always `grammar`.
57
+
58
+ # The syntax of the grammar definition. One of `lark` or `regex`.
59
+ #
60
+ # @see OpenAI::Models::CustomToolInputFormat::Grammar#syntax
61
+ module Syntax
62
+ extend OpenAI::Internal::Type::Enum
63
+
64
+ LARK = :lark
65
+ REGEX = :regex
66
+
67
+ # @!method self.values
68
+ # @return [Array<Symbol>]
69
+ end
70
+ end
71
+
72
+ # @!method self.variants
73
+ # @return [Array(OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar)]
74
+ end
75
+ end
76
+ end
@@ -512,8 +512,8 @@ module OpenAI
512
512
  # tool. Use this to provide a list of functions the model may generate JSON inputs
513
513
  # for. A max of 128 functions are supported.
514
514
  #
515
- # @return [Array<OpenAI::Models::Chat::ChatCompletionTool>, nil]
516
- optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTool] }
515
+ # @return [Array<OpenAI::Models::Chat::ChatCompletionFunctionTool>, nil]
516
+ optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionFunctionTool] }
517
517
 
518
518
  # @!attribute top_p
519
519
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
@@ -534,7 +534,7 @@ module OpenAI
534
534
  #
535
535
  # @param temperature [Float] A higher temperature increases randomness in the outputs.
536
536
  #
537
- # @param tools [Array<OpenAI::Models::Chat::ChatCompletionTool>] A list of tools the model may call. Currently, only functions are supported as a
537
+ # @param tools [Array<OpenAI::Models::Chat::ChatCompletionFunctionTool>] A list of tools the model may call. Currently, only functions are supported as a
538
538
  #
539
539
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
540
540
 
@@ -686,7 +686,7 @@ module OpenAI
686
686
  # the model to call your own code. Learn more about
687
687
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
688
688
  #
689
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
689
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>, nil]
690
690
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
691
691
 
692
692
  # @!attribute top_p
@@ -708,7 +708,7 @@ module OpenAI
708
708
  #
709
709
  # @param text [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
710
710
  #
711
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
711
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
712
712
  #
713
713
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
714
714
 
@@ -650,7 +650,7 @@ module OpenAI
650
650
  # the model to call your own code. Learn more about
651
651
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
652
652
  #
653
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
653
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>, nil]
654
654
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
655
655
 
656
656
  # @!attribute top_p
@@ -672,7 +672,7 @@ module OpenAI
672
672
  #
673
673
  # @param text [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
674
674
  #
675
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
675
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
676
676
  #
677
677
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
678
678
 
@@ -686,7 +686,7 @@ module OpenAI
686
686
  # the model to call your own code. Learn more about
687
687
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
688
688
  #
689
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
689
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>, nil]
690
690
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
691
691
 
692
692
  # @!attribute top_p
@@ -708,7 +708,7 @@ module OpenAI
708
708
  #
709
709
  # @param text [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
710
710
  #
711
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
711
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
712
712
  #
713
713
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
714
714
 
@@ -685,7 +685,7 @@ module OpenAI
685
685
  # the model to call your own code. Learn more about
686
686
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
687
687
  #
688
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
688
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>, nil]
689
689
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
690
690
 
691
691
  # @!attribute top_p
@@ -707,7 +707,7 @@ module OpenAI
707
707
  #
708
708
  # @param text [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
709
709
  #
710
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
710
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
711
711
  #
712
712
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
713
713
 
@@ -690,7 +690,7 @@ module OpenAI
690
690
  # the model to call your own code. Learn more about
691
691
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
692
692
  #
693
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
693
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>, nil]
694
694
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
695
695
 
696
696
  # @!attribute top_p
@@ -712,7 +712,7 @@ module OpenAI
712
712
  #
713
713
  # @param text [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
714
714
  #
715
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
715
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
716
716
  #
717
717
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
718
718
 
@@ -4,12 +4,11 @@ module OpenAI
4
4
  module Models
5
5
  class Reasoning < OpenAI::Internal::Type::BaseModel
6
6
  # @!attribute effort
7
- # **o-series models only**
8
- #
9
7
  # Constrains effort on reasoning for
10
8
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
11
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
12
- # result in faster responses and fewer tokens used on reasoning in a response.
9
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
10
+ # effort can result in faster responses and fewer tokens used on reasoning in a
11
+ # response.
13
12
  #
14
13
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
15
14
  optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -43,7 +42,7 @@ module OpenAI
43
42
  # Configuration options for
44
43
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
45
44
  #
46
- # @param effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
45
+ # @param effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
47
46
  #
48
47
  # @param generate_summary [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil] **Deprecated:** use `summary` instead.
49
48
  #
@@ -2,15 +2,15 @@
2
2
 
3
3
  module OpenAI
4
4
  module Models
5
- # **o-series models only**
6
- #
7
5
  # Constrains effort on reasoning for
8
6
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
9
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
10
- # result in faster responses and fewer tokens used on reasoning in a response.
7
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
8
+ # effort can result in faster responses and fewer tokens used on reasoning in a
9
+ # response.
11
10
  module ReasoningEffort
12
11
  extend OpenAI::Internal::Type::Enum
13
12
 
13
+ MINIMAL = :minimal
14
14
  LOW = :low
15
15
  MEDIUM = :medium
16
16
  HIGH = :high
@@ -0,0 +1,27 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ class ResponseFormatTextGrammar < OpenAI::Internal::Type::BaseModel
6
+ # @!attribute grammar
7
+ # The custom grammar for the model to follow.
8
+ #
9
+ # @return [String]
10
+ required :grammar, String
11
+
12
+ # @!attribute type
13
+ # The type of response format being defined. Always `grammar`.
14
+ #
15
+ # @return [Symbol, :grammar]
16
+ required :type, const: :grammar
17
+
18
+ # @!method initialize(grammar:, type: :grammar)
19
+ # A custom grammar for the model to follow when generating text. Learn more in the
20
+ # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars).
21
+ #
22
+ # @param grammar [String] The custom grammar for the model to follow.
23
+ #
24
+ # @param type [Symbol, :grammar] The type of response format being defined. Always `grammar`.
25
+ end
26
+ end
27
+ end
@@ -0,0 +1,20 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ class ResponseFormatTextPython < OpenAI::Internal::Type::BaseModel
6
+ # @!attribute type
7
+ # The type of response format being defined. Always `python`.
8
+ #
9
+ # @return [Symbol, :python]
10
+ required :type, const: :python
11
+
12
+ # @!method initialize(type: :python)
13
+ # Configure the model to generate valid Python code. See the
14
+ # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars)
15
+ # for more details.
16
+ #
17
+ # @param type [Symbol, :python] The type of response format being defined. Always `python`.
18
+ end
19
+ end
20
+ end
@@ -0,0 +1,48 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class CustomTool < OpenAI::Internal::Type::BaseModel
7
+ # @!attribute name
8
+ # The name of the custom tool, used to identify it in tool calls.
9
+ #
10
+ # @return [String]
11
+ required :name, String
12
+
13
+ # @!attribute type
14
+ # The type of the custom tool. Always `custom`.
15
+ #
16
+ # @return [Symbol, :custom]
17
+ required :type, const: :custom
18
+
19
+ # @!attribute description
20
+ # Optional description of the custom tool, used to provide more context.
21
+ #
22
+ # @return [String, nil]
23
+ optional :description, String
24
+
25
+ # @!attribute format_
26
+ # The input format for the custom tool. Default is unconstrained text.
27
+ #
28
+ # @return [OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar, nil]
29
+ optional :format_, union: -> { OpenAI::CustomToolInputFormat }, api_name: :format
30
+
31
+ # @!method initialize(name:, description: nil, format_: nil, type: :custom)
32
+ # Some parameter documentations has been truncated, see
33
+ # {OpenAI::Models::Responses::CustomTool} for more details.
34
+ #
35
+ # A custom tool that processes input using a specified format. Learn more about
36
+ # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools).
37
+ #
38
+ # @param name [String] The name of the custom tool, used to identify it in tool calls.
39
+ #
40
+ # @param description [String] Optional description of the custom tool, used to provide more context.
41
+ #
42
+ # @param format_ [OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar] The input format for the custom tool. Default is unconstrained text.
43
+ #
44
+ # @param type [Symbol, :custom] The type of the custom tool. Always `custom`.
45
+ end
46
+ end
47
+ end
48
+ end