openai 0.10.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +3 -0
  6. data/lib/openai/models/all_models.rb +4 -0
  7. data/lib/openai/models/chat/chat_completion.rb +32 -31
  8. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  9. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  10. data/lib/openai/models/images_response.rb +92 -1
  11. data/lib/openai/models/responses/response.rb +59 -35
  12. data/lib/openai/models/responses/response_create_params.rb +64 -39
  13. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  14. data/lib/openai/models/responses/response_includable.rb +8 -6
  15. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  16. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  17. data/lib/openai/models/responses_model.rb +4 -0
  18. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  19. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  20. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  21. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  22. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  23. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  24. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  25. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  26. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  27. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  28. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  29. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  30. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  31. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  32. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  33. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  34. data/lib/openai/models.rb +2 -0
  35. data/lib/openai/resources/chat/completions.rb +2 -2
  36. data/lib/openai/resources/responses.rb +14 -6
  37. data/lib/openai/resources/webhooks.rb +124 -0
  38. data/lib/openai/version.rb +1 -1
  39. data/lib/openai.rb +18 -0
  40. data/rbi/openai/client.rbi +3 -0
  41. data/rbi/openai/models/all_models.rbi +20 -0
  42. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  43. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  44. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  45. data/rbi/openai/models/images_response.rbi +146 -0
  46. data/rbi/openai/models/responses/response.rbi +75 -44
  47. data/rbi/openai/models/responses/response_create_params.rbi +91 -55
  48. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  49. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  50. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  51. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  52. data/rbi/openai/models/responses_model.rbi +20 -0
  53. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  54. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  55. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  56. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  57. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  58. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  59. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  60. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  61. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  62. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  63. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  64. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  65. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  66. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  67. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  68. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  69. data/rbi/openai/models.rbi +2 -0
  70. data/rbi/openai/resources/chat/completions.rbi +34 -30
  71. data/rbi/openai/resources/responses.rbi +62 -38
  72. data/rbi/openai/resources/webhooks.rbi +68 -0
  73. data/sig/openai/client.rbs +2 -0
  74. data/sig/openai/models/all_models.rbs +8 -0
  75. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  76. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  77. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  78. data/sig/openai/models/images_response.rbs +83 -0
  79. data/sig/openai/models/responses/response.rbs +13 -1
  80. data/sig/openai/models/responses/response_create_params.rbs +13 -1
  81. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  82. data/sig/openai/models/responses/response_includable.rbs +7 -5
  83. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  84. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  85. data/sig/openai/models/responses_model.rbs +8 -0
  86. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  87. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  88. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  89. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  90. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  91. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  92. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  93. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  94. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  95. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  96. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  97. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  98. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  99. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  100. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  101. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  102. data/sig/openai/models.rbs +2 -0
  103. data/sig/openai/resources/responses.rbs +4 -0
  104. data/sig/openai/resources/webhooks.rbs +33 -0
  105. metadata +56 -2
@@ -100,7 +100,7 @@ module OpenAI
  # response. See the `tools` parameter to see how to specify which tools the model
  # can call.
  #
- # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction]
+ # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp]
  required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice }

  # @!attribute tools
@@ -147,6 +147,15 @@ module OpenAI
  # @return [Integer, nil]
  optional :max_output_tokens, Integer, nil?: true

+ # @!attribute max_tool_calls
+ # The maximum number of total calls to built-in tools that can be processed in a
+ # response. This maximum number applies across all built-in tool calls, not per
+ # individual tool. Any further attempts to call a tool by the model will be
+ # ignored.
+ #
+ # @return [Integer, nil]
+ optional :max_tool_calls, Integer, nil?: true
+
  # @!attribute previous_response_id
  # The unique ID of the previous response to the model. Use this to create
  # multi-turn conversations. Learn more about
@@ -172,23 +181,23 @@ module OpenAI
  optional :reasoning, -> { OpenAI::Reasoning }, nil?: true

  # @!attribute service_tier
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @return [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil]
  optional :service_tier, enum: -> { OpenAI::Responses::Response::ServiceTier }, nil?: true
@@ -210,6 +219,13 @@ module OpenAI
  # @return [OpenAI::Models::Responses::ResponseTextConfig, nil]
  optional :text, -> { OpenAI::Responses::ResponseTextConfig }

+ # @!attribute top_logprobs
+ # An integer between 0 and 20 specifying the number of most likely tokens to
+ # return at each token position, each with an associated log probability.
+ #
+ # @return [Integer, nil]
+ optional :top_logprobs, Integer, nil?: true
+
  # @!attribute truncation
  # The truncation strategy to use for the model response.
  #
@@ -237,7 +253,7 @@ module OpenAI
  # @return [String, nil]
  optional :user, String

- # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
+ # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Responses::Response} for more details.
  #
@@ -261,7 +277,7 @@ module OpenAI
  #
  # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
  #
- # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+ # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
  #
  # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
  #
@@ -271,18 +287,22 @@ module OpenAI
  #
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
  #
+ # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+ #
  # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
  #
  # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
  #
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
  #
- # @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
  #
  # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
  #
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+ #
  # @param truncation [Symbol, OpenAI::Models::Responses::Response::Truncation, nil] The truncation strategy to use for the model response.
  #
  # @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
@@ -369,27 +389,30 @@ module OpenAI
  # Use this option to force the model to call a specific function.
  variant -> { OpenAI::Responses::ToolChoiceFunction }

+ # Use this option to force the model to call a specific tool on a remote MCP server.
+ variant -> { OpenAI::Responses::ToolChoiceMcp }
+
  # @!method self.variants
- # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction)]
+ # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @see OpenAI::Models::Responses::Response#service_tier
  module ServiceTier
@@ -399,6 +422,7 @@ module OpenAI
  DEFAULT = :default
  FLEX = :flex
  SCALE = :scale
+ PRIORITY = :priority

  # @!method self.values
  # @return [Array<Symbol>]
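Taken together, these hunks add three new fields to the Response model (`max_tool_calls`, `top_logprobs`, and the `:priority` service tier) and register `ToolChoiceMcp` as a fourth `tool_choice` variant. A minimal sketch of reading the new attributes, assuming a configured client and an illustrative model name (neither is part of this diff):

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    response = client.responses.create(
      model: "gpt-4.1", # assumed model name
      input: "What is the capital of France?"
    )

    # New in 0.11.0: these attributes are nil unless the corresponding
    # request parameters were set.
    p response.max_tool_calls
    p response.top_logprobs
    # service_tier may now come back as :priority alongside the existing tiers.
    p response.service_tier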
@@ -21,18 +21,19 @@ module OpenAI
  # Specify additional output data to include in the model response. Currently
  # supported values are:
  #
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ # in code interpreter tool call items.
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
+ # call output.
  # - `file_search_call.results`: Include the search results of the file search tool
  # call.
  # - `message.input_image.image_url`: Include image urls from the input message.
- # - `computer_call_output.output.image_url`: Include image urls from the computer
- # call output.
+ # - `message.output_text.logprobs`: Include logprobs with assistant messages.
  # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
  # tokens in reasoning item outputs. This enables reasoning items to be used in
  # multi-turn conversations when using the Responses API statelessly (like when
  # the `store` parameter is set to `false`, or when an organization is enrolled
  # in the zero data retention program).
- # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
- # in code interpreter tool call items.
  #
  # @return [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil]
  optional :include,
@@ -71,6 +72,15 @@ module OpenAI
  # @return [Integer, nil]
  optional :max_output_tokens, Integer, nil?: true

+ # @!attribute max_tool_calls
+ # The maximum number of total calls to built-in tools that can be processed in a
+ # response. This maximum number applies across all built-in tool calls, not per
+ # individual tool. Any further attempts to call a tool by the model will be
+ # ignored.
+ #
+ # @return [Integer, nil]
+ optional :max_tool_calls, Integer, nil?: true
+
  # @!attribute metadata
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
@@ -123,23 +133,23 @@ module OpenAI
  optional :reasoning, -> { OpenAI::Reasoning }, nil?: true

  # @!attribute service_tier
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil]
  optional :service_tier, enum: -> { OpenAI::Responses::ResponseCreateParams::ServiceTier }, nil?: true
@@ -180,7 +190,7 @@ module OpenAI
  # response. See the `tools` parameter to see how to specify which tools the model
  # can call.
  #
- # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, nil]
+ # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, nil]
  optional :tool_choice, union: -> { OpenAI::Responses::ResponseCreateParams::ToolChoice }

  # @!attribute tools
@@ -202,6 +212,13 @@ module OpenAI
  # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }

+ # @!attribute top_logprobs
+ # An integer between 0 and 20 specifying the number of most likely tokens to
+ # return at each token position, each with an associated log probability.
+ #
+ # @return [Integer, nil]
+ optional :top_logprobs, Integer, nil?: true
+
  # @!attribute top_p
  # An alternative to sampling with temperature, called nucleus sampling, where the
  # model considers the results of the tokens with top_p probability mass. So 0.1
@@ -232,7 +249,7 @@ module OpenAI
  # @return [String, nil]
  optional :user, String

- # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Responses::ResponseCreateParams} for more details.
  #
@@ -246,6 +263,8 @@ module OpenAI
  #
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
  #
+ # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+ #
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
@@ -258,7 +277,7 @@ module OpenAI
  #
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
  #
- # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
  #
@@ -266,10 +285,12 @@ module OpenAI
  #
  # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
  #
- # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+ # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
  #
  # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
  #
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+ #
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
  #
  # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
@@ -302,23 +323,23 @@ module OpenAI
  # @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  module ServiceTier
  extend OpenAI::Internal::Type::Enum

@@ -326,6 +347,7 @@ module OpenAI
  DEFAULT = :default
  FLEX = :flex
  SCALE = :scale
+ PRIORITY = :priority

  # @!method self.values
  # @return [Array<Symbol>]
@@ -354,8 +376,11 @@ module OpenAI
  # Use this option to force the model to call a specific function.
  variant -> { OpenAI::Responses::ToolChoiceFunction }

+ # Use this option to force the model to call a specific tool on a remote MCP server.
+ variant -> { OpenAI::Responses::ToolChoiceMcp }
+
  # @!method self.variants
- # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction)]
+ # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
  end

  # The truncation strategy to use for the model response.
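The same knobs appear here on the request side. A hedged sketch of a create call that exercises them, reusing the `client` from the sketch above (the model name and the web search tool payload are illustrative assumptions):

    response = client.responses.create(
      model: "gpt-4.1",                     # assumed
      input: "Summarize today's Ruby news.",
      max_tool_calls: 3,                    # cap across all built-in tool calls
      top_logprobs: 5,                      # 0..20 log probabilities per token
      service_tier: :priority,              # new tier in this release
      tools: [{type: "web_search_preview"}] # a built-in tool for the cap to apply to
    )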
@@ -10,6 +10,13 @@ module OpenAI
  # @return [String]
  required :id, String

+ # @!attribute action
+ # An object describing the specific action taken in this web search call. Includes
+ # details on how the model used the web (search, open_page, find).
+ #
+ # @return [OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find]
+ required :action, union: -> { OpenAI::Responses::ResponseFunctionWebSearch::Action }
+
  # @!attribute status
  # The status of the web search tool call.
  #
@@ -22,7 +29,7 @@ module OpenAI
  # @return [Symbol, :web_search_call]
  required :type, const: :web_search_call

- # @!method initialize(id:, status:, type: :web_search_call)
+ # @!method initialize(id:, action:, status:, type: :web_search_call)
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Responses::ResponseFunctionWebSearch} for more details.
  #
@@ -32,10 +39,117 @@ module OpenAI
  #
  # @param id [String] The unique ID of the web search tool call.
  #
+ # @param action [OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find] An object describing the specific action taken in this web search call.
+ #
  # @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch::Status] The status of the web search tool call.
  #
  # @param type [Symbol, :web_search_call] The type of the web search tool call. Always `web_search_call`.

+ # An object describing the specific action taken in this web search call. Includes
+ # details on how the model used the web (search, open_page, find).
+ #
+ # @see OpenAI::Models::Responses::ResponseFunctionWebSearch#action
+ module Action
+ extend OpenAI::Internal::Type::Union
+
+ discriminator :type
+
+ # Action type "search" - Performs a web search query.
+ variant :search, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::Search }
+
+ # Action type "open_page" - Opens a specific URL from search results.
+ variant :open_page, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage }
+
+ # Action type "find": Searches for a pattern within a loaded page.
+ variant :find, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::Find }
+
+ class Search < OpenAI::Internal::Type::BaseModel
+ # @!attribute query
+ # The search query.
+ #
+ # @return [String]
+ required :query, String
+
+ # @!attribute type
+ # The action type.
+ #
+ # @return [Symbol, :search]
+ required :type, const: :search
+
+ # @!method initialize(query:, type: :search)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search} for more
+ # details.
+ #
+ # Action type "search" - Performs a web search query.
+ #
+ # @param query [String] The search query.
+ #
+ # @param type [Symbol, :search] The action type.
+ end
+
+ class OpenPage < OpenAI::Internal::Type::BaseModel
+ # @!attribute type
+ # The action type.
+ #
+ # @return [Symbol, :open_page]
+ required :type, const: :open_page
+
+ # @!attribute url
+ # The URL opened by the model.
+ #
+ # @return [String]
+ required :url, String
+
+ # @!method initialize(url:, type: :open_page)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage} for
+ # more details.
+ #
+ # Action type "open_page" - Opens a specific URL from search results.
+ #
+ # @param url [String] The URL opened by the model.
+ #
+ # @param type [Symbol, :open_page] The action type.
+ end
+
+ class Find < OpenAI::Internal::Type::BaseModel
+ # @!attribute pattern
+ # The pattern or text to search for within the page.
+ #
+ # @return [String]
+ required :pattern, String
+
+ # @!attribute type
+ # The action type.
+ #
+ # @return [Symbol, :find]
+ required :type, const: :find
+
+ # @!attribute url
+ # The URL of the page searched for the pattern.
+ #
+ # @return [String]
+ required :url, String
+
+ # @!method initialize(pattern:, url:, type: :find)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find} for more
+ # details.
+ #
+ # Action type "find": Searches for a pattern within a loaded page.
+ #
+ # @param pattern [String] The pattern or text to search for within the page.
+ #
+ # @param url [String] The URL of the page searched for the pattern.
+ #
+ # @param type [Symbol, :find] The action type.
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find)]
+ end
+
  # The status of the web search tool call.
  #
  # @see OpenAI::Models::Responses::ResponseFunctionWebSearch#status
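Because `action` is a required field backed by a discriminated union, code that walks `web_search_call` output items can branch on the concrete action class. A sketch, assuming `response` came from a web-search-enabled request like the one above:

    response.output.each do |item|
      next unless item.type == :web_search_call

      case item.action
      when OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search
        puts "searched for: #{item.action.query}"
      when OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage
        puts "opened: #{item.action.url}"
      when OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find
        puts "looked for #{item.action.pattern.inspect} on #{item.action.url}"
      end
    end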
@@ -6,26 +6,28 @@ module OpenAI
  # Specify additional output data to include in the model response. Currently
  # supported values are:
  #
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ # in code interpreter tool call items.
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
+ # call output.
  # - `file_search_call.results`: Include the search results of the file search tool
  # call.
  # - `message.input_image.image_url`: Include image urls from the input message.
- # - `computer_call_output.output.image_url`: Include image urls from the computer
- # call output.
+ # - `message.output_text.logprobs`: Include logprobs with assistant messages.
  # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
  # tokens in reasoning item outputs. This enables reasoning items to be used in
  # multi-turn conversations when using the Responses API statelessly (like when
  # the `store` parameter is set to `false`, or when an organization is enrolled
  # in the zero data retention program).
- # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
- # in code interpreter tool call items.
  module ResponseIncludable
  extend OpenAI::Internal::Type::Enum

+ CODE_INTERPRETER_CALL_OUTPUTS = :"code_interpreter_call.outputs"
+ COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url"
  FILE_SEARCH_CALL_RESULTS = :"file_search_call.results"
  MESSAGE_INPUT_IMAGE_IMAGE_URL = :"message.input_image.image_url"
- COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url"
+ MESSAGE_OUTPUT_TEXT_LOGPROBS = :"message.output_text.logprobs"
  REASONING_ENCRYPTED_CONTENT = :"reasoning.encrypted_content"
- CODE_INTERPRETER_CALL_OUTPUTS = :"code_interpreter_call.outputs"

  # @!method self.values
  # @return [Array<Symbol>]
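The new `message.output_text.logprobs` value pairs with the `top_logprobs` parameter: one asks the model for the data, the other includes it in the response. A sketch (model name assumed; the symbol literal matches the enum value above):

    response = client.responses.create(
      model: "gpt-4.1",
      input: "Hello!",
      top_logprobs: 3,                           # ask for per-token logprobs
      include: [:"message.output_text.logprobs"] # and include them in the output
    )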
@@ -0,0 +1,40 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+ module Models
+ module Responses
+ class ToolChoiceMcp < OpenAI::Internal::Type::BaseModel
+ # @!attribute server_label
+ # The label of the MCP server to use.
+ #
+ # @return [String]
+ required :server_label, String
+
+ # @!attribute type
+ # For MCP tools, the type is always `mcp`.
+ #
+ # @return [Symbol, :mcp]
+ required :type, const: :mcp
+
+ # @!attribute name
+ # The name of the tool to call on the server.
+ #
+ # @return [String, nil]
+ optional :name, String, nil?: true
+
+ # @!method initialize(server_label:, name: nil, type: :mcp)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ToolChoiceMcp} for more details.
+ #
+ # Use this option to force the model to call a specific tool on a remote MCP
+ # server.
+ #
+ # @param server_label [String] The label of the MCP server to use.
+ #
+ # @param name [String, nil] The name of the tool to call on the server.
+ #
+ # @param type [Symbol, :mcp] For MCP tools, the type is always `mcp`.
+ end
+ end
+ end
+ end
14
14
  # - `web_search_preview`
15
15
  # - `computer_use_preview`
16
16
  # - `code_interpreter`
17
- # - `mcp`
18
17
  # - `image_generation`
19
18
  #
20
19
  # @return [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type]
@@ -38,7 +37,6 @@ module OpenAI
38
37
  # - `web_search_preview`
39
38
  # - `computer_use_preview`
40
39
  # - `code_interpreter`
41
- # - `mcp`
42
40
  # - `image_generation`
43
41
  #
44
42
  # @see OpenAI::Models::Responses::ToolChoiceTypes#type
@@ -51,7 +49,6 @@ module OpenAI
51
49
  WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11
52
50
  IMAGE_GENERATION = :image_generation
53
51
  CODE_INTERPRETER = :code_interpreter
54
- MCP = :mcp
55
52
 
56
53
  # @!method self.values
57
54
  # @return [Array<Symbol>]
@@ -18,6 +18,10 @@ module OpenAI
  O1_PRO_2025_03_19 = :"o1-pro-2025-03-19"
  O3_PRO = :"o3-pro"
  O3_PRO_2025_06_10 = :"o3-pro-2025-06-10"
+ O3_DEEP_RESEARCH = :"o3-deep-research"
+ O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26"
+ O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research"
+ O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
  COMPUTER_USE_PREVIEW = :"computer-use-preview"
  COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"
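Finally, the four new deep-research model constants can be passed wherever a responses model is accepted. A sketch (whether a given account has access to these models is outside the scope of this diff):

    response = client.responses.create(
      model: :"o3-deep-research",
      input: "Compile a short research summary on Ractors in Ruby.",
      tools: [{type: "web_search_preview"}] # deep research runs typically pair with built-in tools
    )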