openai 0.10.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +20 -0
- data/README.md +79 -1
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +3 -0
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_create_params.rb +64 -39
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/chat/completions.rb +2 -2
- data/lib/openai/resources/responses.rb +14 -6
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +18 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_create_params.rbi +91 -55
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/responses.rbi +62 -38
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_create_params.rbs +13 -1
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/responses.rbs +4 -0
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +56 -2
data/rbi/openai/resources/responses.rbi
CHANGED
```diff
@@ -29,6 +29,7 @@ module OpenAI
           input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
           instructions: T.nilable(String),
           max_output_tokens: T.nilable(Integer),
+          max_tool_calls: T.nilable(Integer),
           metadata: T.nilable(T::Hash[Symbol, String]),
           model:
             T.any(
@@ -55,7 +56,8 @@ module OpenAI
             T.any(
               OpenAI::Responses::ToolChoiceOptions::OrSymbol,
               OpenAI::Responses::ToolChoiceTypes::OrHash,
-              OpenAI::Responses::ToolChoiceFunction::OrHash
+              OpenAI::Responses::ToolChoiceFunction::OrHash,
+              OpenAI::Responses::ToolChoiceMcp::OrHash
             ),
           tools:
             T::Array[
@@ -70,6 +72,7 @@ module OpenAI
                 OpenAI::Responses::WebSearchTool::OrHash
               )
             ],
+          top_logprobs: T.nilable(Integer),
           top_p: T.nilable(Float),
           truncation:
             T.nilable(
@@ -87,18 +90,19 @@ module OpenAI
        # Specify additional output data to include in the model response. Currently
        # supported values are:
        #
+       # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+       #   in code interpreter tool call items.
+       # - `computer_call_output.output.image_url`: Include image urls from the computer
+       #   call output.
        # - `file_search_call.results`: Include the search results of the file search tool
        #   call.
        # - `message.input_image.image_url`: Include image urls from the input message.
-       # - `computer_call_output.output.image_url`: Include image urls from the computer
-       #   call output.
+       # - `message.output_text.logprobs`: Include logprobs with assistant messages.
        # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
        #   tokens in reasoning item outputs. This enables reasoning items to be used in
        #   multi-turn conversations when using the Responses API statelessly (like when
        #   the `store` parameter is set to `false`, or when an organization is enrolled
        #   in the zero data retention program).
-       # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
-       #   in code interpreter tool call items.
        include: nil,
        # Text, image, or file inputs to the model, used to generate a response.
        #
@@ -120,6 +124,11 @@ module OpenAI
        #   including visible output tokens and
        #   [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
        max_output_tokens: nil,
+       # The maximum number of total calls to built-in tools that can be processed in a
+       #   response. This maximum number applies across all built-in tool calls, not per
+       #   individual tool. Any further attempts to call a tool by the model will be
+       #   ignored.
+       max_tool_calls: nil,
        # Set of 16 key-value pairs that can be attached to an object. This can be useful
        # for storing additional information about the object in a structured format, and
        # querying for objects via API or the dashboard.
@@ -147,23 +156,23 @@ module OpenAI
        # Configuration options for
        # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
        reasoning: nil,
-       # Specifies the latency tier to use for processing the request. This parameter is
-       #   relevant for customers subscribed to the scale tier service:
+       # Specifies the processing type used for serving the request.
        #
-       # - If set to 'auto', and the Project is Scale tier enabled, the system will
-       #   utilize scale tier credits until they are exhausted.
-       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-       #   be processed using the default service tier with a lower uptime SLA and no
-       #   latency guarantee.
-       # - If set to 'default', the request will be processed using the default service
-       #   tier with a lower uptime SLA and no latency guarantee.
-       # - If set to 'flex', the request will be processed with the Flex Processing
-       #   service tier.
-       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+       # - If set to 'auto', then the request will be processed with the service tier
+       #   configured in the Project settings. Unless otherwise configured, the Project
+       #   will use 'default'.
+       # - If set to 'default', then the request will be processed with the standard
+       #   pricing and performance for the selected model.
+       # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+       #   'priority', then the request will be processed with the corresponding service
+       #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+       #   Priority processing.
        # - When not set, the default behavior is 'auto'.
        #
-       # When this parameter is set, the response body will include the `service_tier`
-       #   utilized.
+       # When the `service_tier` parameter is set, the response body will include the
+       #   `service_tier` value based on the processing mode actually used to serve the
+       #   request. This response value may be different from the value set in the
+       #   parameter.
        service_tier: nil,
        # Whether to store the generated model response for later retrieval via API.
        store: nil,
@@ -197,6 +206,9 @@ module OpenAI
        #   the model to call your own code. Learn more about
        #   [function calling](https://platform.openai.com/docs/guides/function-calling).
        tools: nil,
+       # An integer between 0 and 20 specifying the number of most likely tokens to
+       #   return at each token position, each with an associated log probability.
+       top_logprobs: nil,
        # An alternative to sampling with temperature, called nucleus sampling, where the
        #   model considers the results of the tokens with top_p probability mass. So 0.1
        #   means only the tokens comprising the top 10% probability mass are considered.
@@ -245,6 +257,7 @@ module OpenAI
           input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
           instructions: T.nilable(String),
           max_output_tokens: T.nilable(Integer),
+          max_tool_calls: T.nilable(Integer),
           metadata: T.nilable(T::Hash[Symbol, String]),
           model:
             T.any(
@@ -267,7 +280,8 @@ module OpenAI
             T.any(
               OpenAI::Responses::ToolChoiceOptions::OrSymbol,
               OpenAI::Responses::ToolChoiceTypes::OrHash,
-              OpenAI::Responses::ToolChoiceFunction::OrHash
+              OpenAI::Responses::ToolChoiceFunction::OrHash,
+              OpenAI::Responses::ToolChoiceMcp::OrHash
             ),
           tools:
             T::Array[
@@ -282,6 +296,7 @@ module OpenAI
                 OpenAI::Responses::WebSearchTool::OrHash
               )
             ],
+          top_logprobs: T.nilable(Integer),
           top_p: T.nilable(Float),
           truncation:
             T.nilable(
@@ -303,18 +318,19 @@ module OpenAI
        # Specify additional output data to include in the model response. Currently
        # supported values are:
        #
+       # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+       #   in code interpreter tool call items.
+       # - `computer_call_output.output.image_url`: Include image urls from the computer
+       #   call output.
        # - `file_search_call.results`: Include the search results of the file search tool
        #   call.
        # - `message.input_image.image_url`: Include image urls from the input message.
-       # - `computer_call_output.output.image_url`: Include image urls from the computer
-       #   call output.
+       # - `message.output_text.logprobs`: Include logprobs with assistant messages.
        # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
        #   tokens in reasoning item outputs. This enables reasoning items to be used in
        #   multi-turn conversations when using the Responses API statelessly (like when
        #   the `store` parameter is set to `false`, or when an organization is enrolled
        #   in the zero data retention program).
-       # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
-       #   in code interpreter tool call items.
        include: nil,
        # Text, image, or file inputs to the model, used to generate a response.
        #
@@ -336,6 +352,11 @@ module OpenAI
        #   including visible output tokens and
        #   [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
        max_output_tokens: nil,
+       # The maximum number of total calls to built-in tools that can be processed in a
+       #   response. This maximum number applies across all built-in tool calls, not per
+       #   individual tool. Any further attempts to call a tool by the model will be
+       #   ignored.
+       max_tool_calls: nil,
        # Set of 16 key-value pairs that can be attached to an object. This can be useful
        # for storing additional information about the object in a structured format, and
        # querying for objects via API or the dashboard.
@@ -363,23 +384,23 @@ module OpenAI
        # Configuration options for
        # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
        reasoning: nil,
-       # Specifies the latency tier to use for processing the request. This parameter is
-       #   relevant for customers subscribed to the scale tier service:
+       # Specifies the processing type used for serving the request.
        #
-       # - If set to 'auto', and the Project is Scale tier enabled, the system will
-       #   utilize scale tier credits until they are exhausted.
-       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-       #   be processed using the default service tier with a lower uptime SLA and no
-       #   latency guarantee.
-       # - If set to 'default', the request will be processed using the default service
-       #   tier with a lower uptime SLA and no latency guarantee.
-       # - If set to 'flex', the request will be processed with the Flex Processing
-       #   service tier.
-       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+       # - If set to 'auto', then the request will be processed with the service tier
+       #   configured in the Project settings. Unless otherwise configured, the Project
+       #   will use 'default'.
+       # - If set to 'default', then the request will be processed with the standard
+       #   pricing and performance for the selected model.
+       # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+       #   'priority', then the request will be processed with the corresponding service
+       #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+       #   Priority processing.
        # - When not set, the default behavior is 'auto'.
        #
-       # When this parameter is set, the response body will include the `service_tier`
-       #   utilized.
+       # When the `service_tier` parameter is set, the response body will include the
+       #   `service_tier` value based on the processing mode actually used to serve the
+       #   request. This response value may be different from the value set in the
+       #   parameter.
        service_tier: nil,
        # Whether to store the generated model response for later retrieval via API.
        store: nil,
@@ -413,6 +434,9 @@ module OpenAI
        #   the model to call your own code. Learn more about
        #   [function calling](https://platform.openai.com/docs/guides/function-calling).
        tools: nil,
+       # An integer between 0 and 20 specifying the number of most likely tokens to
+       #   return at each token position, each with an associated log probability.
+       top_logprobs: nil,
        # An alternative to sampling with temperature, called nucleus sampling, where the
        #   model considers the results of the tokens with top_p probability mass. So 0.1
        #   means only the tokens comprising the top 10% probability mass are considered.
```
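Taken together, these `responses.rbi` hunks add three request-side knobs — `max_tool_calls`, `top_logprobs`, and an MCP variant of `tool_choice` — alongside the rewritten `service_tier` docs. A minimal sketch of how they might be passed from application code; the parameter names come from the diff above, while the model, prompt, and client setup are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-4o",                            # illustrative model choice
  input: "Summarize today's AI news in three bullets.",
  max_tool_calls: 3,                          # cap across all built-in tool calls
  top_logprobs: 5,                            # 0..20 most likely tokens per position
  include: ["message.output_text.logprobs"],  # new includable in this release
  service_tier: :auto                         # :auto | :default | :flex | :scale | :priority
)

# The response echoes the service tier actually used, which may differ
# from the one requested.
puts response.service_tier
```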
data/rbi/openai/resources/webhooks.rbi
ADDED
```diff
@@ -0,0 +1,68 @@
+# typed: strong
+
+module OpenAI
+  module Resources
+    class Webhooks
+      # Validates that the given payload was sent by OpenAI and parses the payload.
+      sig do
+        params(
+          payload: String,
+          headers: T.nilable(T::Hash[T.any(String, Symbol), String]),
+          webhook_secret: T.nilable(String)
+        ).returns(
+          T.any(
+            OpenAI::Webhooks::BatchCancelledWebhookEvent,
+            OpenAI::Webhooks::BatchCompletedWebhookEvent,
+            OpenAI::Webhooks::BatchExpiredWebhookEvent,
+            OpenAI::Webhooks::BatchFailedWebhookEvent,
+            OpenAI::Webhooks::EvalRunCanceledWebhookEvent,
+            OpenAI::Webhooks::EvalRunFailedWebhookEvent,
+            OpenAI::Webhooks::EvalRunSucceededWebhookEvent,
+            OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent,
+            OpenAI::Webhooks::FineTuningJobFailedWebhookEvent,
+            OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent,
+            OpenAI::Webhooks::ResponseCancelledWebhookEvent,
+            OpenAI::Webhooks::ResponseCompletedWebhookEvent,
+            OpenAI::Webhooks::ResponseFailedWebhookEvent,
+            OpenAI::Webhooks::ResponseIncompleteWebhookEvent
+          )
+        )
+      end
+      def unwrap(
+        # The raw webhook payload as a string
+        payload,
+        # The webhook headers
+        headers = {},
+        # The webhook secret (optional, will use ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
+        webhook_secret = nil
+      )
+      end
+
+      # Validates whether or not the webhook payload was sent by OpenAI.
+      sig do
+        params(
+          payload: String,
+          headers: T::Hash[T.any(String, Symbol), String],
+          webhook_secret: T.nilable(String),
+          tolerance: Integer
+        ).void
+      end
+      def verify_signature(
+        # The webhook payload as a string
+        payload,
+        # The webhook headers
+        headers,
+        # The webhook secret (optional, will use ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
+        webhook_secret = nil,
+        # Maximum age of the webhook in seconds (default: 300 = 5 minutes)
+        tolerance = 300
+      )
+      end
+
+      # @api private
+      sig { params(client: OpenAI::Client).returns(T.attached_class) }
+      def self.new(client:)
+      end
+    end
+  end
+end
```
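Since `Webhooks#unwrap` and `Webhooks#verify_signature` are new in this release, a short usage sketch may help. The method names, argument order, and the `ENV["OPENAI_WEBHOOK_SECRET"]` fallback come from the signatures above; the handler wiring and the `event.data.id` access are assumptions based on the standard webhook event shape:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# `payload` is the raw request body as a String; `headers` are the incoming
# HTTP headers. `unwrap` verifies the signature (raising on failure) and
# returns one of the typed webhook event models listed in the union above.
def handle_webhook(client, payload, headers)
  event = client.webhooks.unwrap(payload, headers)

  case event
  when OpenAI::Webhooks::ResponseCompletedWebhookEvent
    puts "response #{event.data.id} completed"
  when OpenAI::Webhooks::FineTuningJobFailedWebhookEvent
    puts "fine-tuning job #{event.data.id} failed"
  else
    puts "unhandled event: #{event.type}"
  end
end

# To check authenticity without parsing, call `verify_signature` directly;
# per the sig it is void and raises when the payload is forged or older
# than `tolerance` seconds:
# client.webhooks.verify_signature(payload, headers, nil, 600)
```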
data/sig/openai/models/all_models.rbs
CHANGED
```diff
@@ -13,6 +13,10 @@ module OpenAI
         | :"o1-pro-2025-03-19"
         | :"o3-pro"
         | :"o3-pro-2025-06-10"
+        | :"o3-deep-research"
+        | :"o3-deep-research-2025-06-26"
+        | :"o4-mini-deep-research"
+        | :"o4-mini-deep-research-2025-06-26"
         | :"computer-use-preview"
         | :"computer-use-preview-2025-03-11"

@@ -23,6 +27,10 @@ module OpenAI
        O1_PRO_2025_03_19: :"o1-pro-2025-03-19"
        O3_PRO: :"o3-pro"
        O3_PRO_2025_06_10: :"o3-pro-2025-06-10"
+       O3_DEEP_RESEARCH: :"o3-deep-research"
+       O3_DEEP_RESEARCH_2025_06_26: :"o3-deep-research-2025-06-26"
+       O4_MINI_DEEP_RESEARCH: :"o4-mini-deep-research"
+       O4_MINI_DEEP_RESEARCH_2025_06_26: :"o4-mini-deep-research-2025-06-26"
        COMPUTER_USE_PREVIEW: :"computer-use-preview"
        COMPUTER_USE_PREVIEW_2025_03_11: :"computer-use-preview-2025-03-11"

```
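The four deep-research IDs become valid anywhere the `all_models` union is accepted, e.g. as the `model` for a Responses call. A small sketch reusing the client pattern from the earlier examples (the input text is illustrative):

```ruby
client = OpenAI::Client.new  # reads OPENAI_API_KEY from the environment

# The symbol literal matches the union member added above.
response = client.responses.create(
  model: :"o3-deep-research",
  input: "Survey recent work on sparse attention; cite sources."
)
```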
data/sig/openai/models/chat/chat_completion.rbs
CHANGED
```diff
@@ -127,7 +127,7 @@ module OpenAI
        end
      end

-     type service_tier = :auto | :default | :flex | :scale
+     type service_tier = :auto | :default | :flex | :scale | :priority

      module ServiceTier
        extend OpenAI::Internal::Type::Enum
@@ -136,6 +136,7 @@ module OpenAI
        DEFAULT: :default
        FLEX: :flex
        SCALE: :scale
+       PRIORITY: :priority

        def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::service_tier]
      end
```

data/sig/openai/models/chat/chat_completion_chunk.rbs
CHANGED
```diff
@@ -272,7 +272,7 @@ module OpenAI
        end
      end

-     type service_tier = :auto | :default | :flex | :scale
+     type service_tier = :auto | :default | :flex | :scale | :priority

      module ServiceTier
        extend OpenAI::Internal::Type::Enum
@@ -281,6 +281,7 @@ module OpenAI
        DEFAULT: :default
        FLEX: :flex
        SCALE: :scale
+       PRIORITY: :priority

        def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier]
      end
```

data/sig/openai/models/chat/completion_create_params.rbs
CHANGED
```diff
@@ -280,7 +280,7 @@ module OpenAI
        def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::response_format]
      end

-     type service_tier = :auto | :default | :flex | :scale
+     type service_tier = :auto | :default | :flex | :scale | :priority

      module ServiceTier
        extend OpenAI::Internal::Type::Enum
@@ -289,6 +289,7 @@ module OpenAI
        DEFAULT: :default
        FLEX: :flex
        SCALE: :scale
+       PRIORITY: :priority

        def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::service_tier]
      end
```
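All three chat signature files gain `:priority` the same way, so the new tier can be requested per call and read back off the completion. A hedged sketch (model and message are illustrative; availability of Priority processing is gated as described in the doc comments above):

```ruby
client = OpenAI::Client.new  # reads OPENAI_API_KEY from the environment

chat = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: :user, content: "Hello!"}],
  service_tier: :priority
)

# The completion reports the tier actually used, which may differ from
# the requested one.
puts chat.service_tier
```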
data/sig/openai/models/images_response.rbs
CHANGED
```diff
@@ -3,17 +3,45 @@ module OpenAI
    type images_response =
      {
        created: Integer,
+       background: OpenAI::Models::ImagesResponse::background,
        data: ::Array[OpenAI::Image],
+       output_format: OpenAI::Models::ImagesResponse::output_format,
+       quality: OpenAI::Models::ImagesResponse::quality,
+       size: OpenAI::Models::ImagesResponse::size,
        usage: OpenAI::ImagesResponse::Usage
      }

    class ImagesResponse < OpenAI::Internal::Type::BaseModel
      attr_accessor created: Integer

+     attr_reader background: OpenAI::Models::ImagesResponse::background?
+
+     def background=: (
+       OpenAI::Models::ImagesResponse::background
+     ) -> OpenAI::Models::ImagesResponse::background
+
      attr_reader data: ::Array[OpenAI::Image]?

      def data=: (::Array[OpenAI::Image]) -> ::Array[OpenAI::Image]

+     attr_reader output_format: OpenAI::Models::ImagesResponse::output_format?
+
+     def output_format=: (
+       OpenAI::Models::ImagesResponse::output_format
+     ) -> OpenAI::Models::ImagesResponse::output_format
+
+     attr_reader quality: OpenAI::Models::ImagesResponse::quality?
+
+     def quality=: (
+       OpenAI::Models::ImagesResponse::quality
+     ) -> OpenAI::Models::ImagesResponse::quality
+
+     attr_reader size: OpenAI::Models::ImagesResponse::size?
+
+     def size=: (
+       OpenAI::Models::ImagesResponse::size
+     ) -> OpenAI::Models::ImagesResponse::size
+
      attr_reader usage: OpenAI::ImagesResponse::Usage?

      def usage=: (
@@ -22,16 +50,71 @@ module OpenAI

      def initialize: (
        created: Integer,
+       ?background: OpenAI::Models::ImagesResponse::background,
        ?data: ::Array[OpenAI::Image],
+       ?output_format: OpenAI::Models::ImagesResponse::output_format,
+       ?quality: OpenAI::Models::ImagesResponse::quality,
+       ?size: OpenAI::Models::ImagesResponse::size,
        ?usage: OpenAI::ImagesResponse::Usage
      ) -> void

      def to_hash: -> {
        created: Integer,
+       background: OpenAI::Models::ImagesResponse::background,
        data: ::Array[OpenAI::Image],
+       output_format: OpenAI::Models::ImagesResponse::output_format,
+       quality: OpenAI::Models::ImagesResponse::quality,
+       size: OpenAI::Models::ImagesResponse::size,
        usage: OpenAI::ImagesResponse::Usage
      }

+     type background = :transparent | :opaque
+
+     module Background
+       extend OpenAI::Internal::Type::Enum
+
+       TRANSPARENT: :transparent
+       OPAQUE: :opaque
+
+       def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::background]
+     end
+
+     type output_format = :png | :webp | :jpeg
+
+     module OutputFormat
+       extend OpenAI::Internal::Type::Enum
+
+       PNG: :png
+       WEBP: :webp
+       JPEG: :jpeg
+
+       def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::output_format]
+     end
+
+     type quality = :low | :medium | :high
+
+     module Quality
+       extend OpenAI::Internal::Type::Enum
+
+       LOW: :low
+       MEDIUM: :medium
+       HIGH: :high
+
+       def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::quality]
+     end
+
+     type size = :"1024x1024" | :"1024x1536" | :"1536x1024"
+
+     module Size
+       extend OpenAI::Internal::Type::Enum
+
+       SIZE_1024X1024: :"1024x1024"
+       SIZE_1024X1536: :"1024x1536"
+       SIZE_1536X1024: :"1536x1024"
+
+       def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::size]
+     end
+
      type usage =
        {
          input_tokens: Integer,
```
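With these accessors, the rendering settings are echoed back on the response itself. A sketch assuming an image generation call (the prompt is illustrative; `gpt-image-1` is the model these enum values correspond to in the API docs):

```ruby
client = OpenAI::Client.new  # reads OPENAI_API_KEY from the environment

images = client.images.generate(
  model: "gpt-image-1",
  prompt: "A watercolor lighthouse at dusk"
)

# New in 0.11.0: the response reports how the image was rendered.
puts images.background     # :transparent or :opaque
puts images.output_format  # :png, :webp, or :jpeg
puts images.quality        # :low, :medium, or :high
puts images.size           # :"1024x1024", :"1024x1536", or :"1536x1024"
```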
data/sig/openai/models/responses/response.rbs
CHANGED
```diff
@@ -19,12 +19,14 @@ module OpenAI
        top_p: Float?,
        background: bool?,
        max_output_tokens: Integer?,
+       max_tool_calls: Integer?,
        previous_response_id: String?,
        prompt: OpenAI::Responses::ResponsePrompt?,
        reasoning: OpenAI::Reasoning?,
        service_tier: OpenAI::Models::Responses::Response::service_tier?,
        status: OpenAI::Models::Responses::response_status,
        text: OpenAI::Responses::ResponseTextConfig,
+       top_logprobs: Integer?,
        truncation: OpenAI::Models::Responses::Response::truncation?,
        usage: OpenAI::Responses::ResponseUsage,
        user: String
@@ -63,6 +65,8 @@ module OpenAI

      attr_accessor max_output_tokens: Integer?

+     attr_accessor max_tool_calls: Integer?
+
      attr_accessor previous_response_id: String?

      attr_accessor prompt: OpenAI::Responses::ResponsePrompt?
@@ -83,6 +87,8 @@ module OpenAI
        OpenAI::Responses::ResponseTextConfig
      ) -> OpenAI::Responses::ResponseTextConfig

+     attr_accessor top_logprobs: Integer?
+
      attr_accessor truncation: OpenAI::Models::Responses::Response::truncation?

      attr_reader usage: OpenAI::Responses::ResponseUsage?
@@ -111,12 +117,14 @@ module OpenAI
        top_p: Float?,
        ?background: bool?,
        ?max_output_tokens: Integer?,
+       ?max_tool_calls: Integer?,
        ?previous_response_id: String?,
        ?prompt: OpenAI::Responses::ResponsePrompt?,
        ?reasoning: OpenAI::Reasoning?,
        ?service_tier: OpenAI::Models::Responses::Response::service_tier?,
        ?status: OpenAI::Models::Responses::response_status,
        ?text: OpenAI::Responses::ResponseTextConfig,
+       ?top_logprobs: Integer?,
        ?truncation: OpenAI::Models::Responses::Response::truncation?,
        ?usage: OpenAI::Responses::ResponseUsage,
        ?user: String,
@@ -140,12 +148,14 @@ module OpenAI
        top_p: Float?,
        background: bool?,
        max_output_tokens: Integer?,
+       max_tool_calls: Integer?,
        previous_response_id: String?,
        prompt: OpenAI::Responses::ResponsePrompt?,
        reasoning: OpenAI::Reasoning?,
        service_tier: OpenAI::Models::Responses::Response::service_tier?,
        status: OpenAI::Models::Responses::response_status,
        text: OpenAI::Responses::ResponseTextConfig,
+       top_logprobs: Integer?,
        truncation: OpenAI::Models::Responses::Response::truncation?,
        usage: OpenAI::Responses::ResponseUsage,
        user: String
@@ -198,6 +208,7 @@ module OpenAI
        OpenAI::Models::Responses::tool_choice_options
        | OpenAI::Responses::ToolChoiceTypes
        | OpenAI::Responses::ToolChoiceFunction
+       | OpenAI::Responses::ToolChoiceMcp

      module ToolChoice
        extend OpenAI::Internal::Type::Union
@@ -205,7 +216,7 @@ module OpenAI
        def self?.variants: -> ::Array[OpenAI::Models::Responses::Response::tool_choice]
      end

-     type service_tier = :auto | :default | :flex | :scale
+     type service_tier = :auto | :default | :flex | :scale | :priority

      module ServiceTier
        extend OpenAI::Internal::Type::Enum
@@ -214,6 +225,7 @@ module OpenAI
        DEFAULT: :default
        FLEX: :flex
        SCALE: :scale
+       PRIORITY: :priority

        def self?.values: -> ::Array[OpenAI::Models::Responses::Response::service_tier]
      end
```
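On the `Response` model these are plain accessors, so a retrieved response exposes whatever was requested. A small sketch (the response id is hypothetical):

```ruby
client = OpenAI::Client.new  # reads OPENAI_API_KEY from the environment

resp = client.responses.retrieve("resp_123")  # illustrative id

puts resp.max_tool_calls  # e.g. 3, or nil when no cap was set
puts resp.top_logprobs    # e.g. 5, or nil
puts resp.service_tier    # may now be :priority
```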
data/sig/openai/models/responses/response_create_params.rbs
CHANGED
```diff
@@ -8,6 +8,7 @@ module OpenAI
        input: OpenAI::Models::Responses::ResponseCreateParams::input,
        instructions: String?,
        max_output_tokens: Integer?,
+       max_tool_calls: Integer?,
        metadata: OpenAI::Models::metadata?,
        model: OpenAI::Models::responses_model,
        parallel_tool_calls: bool?,
@@ -20,6 +21,7 @@ module OpenAI
        text: OpenAI::Responses::ResponseTextConfig,
        tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
        tools: ::Array[OpenAI::Models::Responses::tool],
+       top_logprobs: Integer?,
        top_p: Float?,
        truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
        user: String
@@ -44,6 +46,8 @@ module OpenAI

      attr_accessor max_output_tokens: Integer?

+     attr_accessor max_tool_calls: Integer?
+
      attr_accessor metadata: OpenAI::Models::metadata?

      attr_reader model: OpenAI::Models::responses_model?
@@ -84,6 +88,8 @@ module OpenAI
        ::Array[OpenAI::Models::Responses::tool]
      ) -> ::Array[OpenAI::Models::Responses::tool]

+     attr_accessor top_logprobs: Integer?
+
      attr_accessor top_p: Float?

      attr_accessor truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?
@@ -98,6 +104,7 @@ module OpenAI
        ?input: OpenAI::Models::Responses::ResponseCreateParams::input,
        ?instructions: String?,
        ?max_output_tokens: Integer?,
+       ?max_tool_calls: Integer?,
        ?metadata: OpenAI::Models::metadata?,
        ?model: OpenAI::Models::responses_model,
        ?parallel_tool_calls: bool?,
@@ -110,6 +117,7 @@ module OpenAI
        ?text: OpenAI::Responses::ResponseTextConfig,
        ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
        ?tools: ::Array[OpenAI::Models::Responses::tool],
+       ?top_logprobs: Integer?,
        ?top_p: Float?,
        ?truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
        ?user: String,
@@ -122,6 +130,7 @@ module OpenAI
        input: OpenAI::Models::Responses::ResponseCreateParams::input,
        instructions: String?,
        max_output_tokens: Integer?,
+       max_tool_calls: Integer?,
        metadata: OpenAI::Models::metadata?,
        model: OpenAI::Models::responses_model,
        parallel_tool_calls: bool?,
@@ -134,6 +143,7 @@ module OpenAI
        text: OpenAI::Responses::ResponseTextConfig,
        tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
        tools: ::Array[OpenAI::Models::Responses::tool],
+       top_logprobs: Integer?,
        top_p: Float?,
        truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
        user: String,
@@ -148,7 +158,7 @@ module OpenAI
        def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::input]
      end

-     type service_tier = :auto | :default | :flex | :scale
+     type service_tier = :auto | :default | :flex | :scale | :priority

      module ServiceTier
        extend OpenAI::Internal::Type::Enum
@@ -157,6 +167,7 @@ module OpenAI
        DEFAULT: :default
        FLEX: :flex
        SCALE: :scale
+       PRIORITY: :priority

        def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::service_tier]
      end
@@ -165,6 +176,7 @@ module OpenAI
        OpenAI::Models::Responses::tool_choice_options
        | OpenAI::Responses::ToolChoiceTypes
        | OpenAI::Responses::ToolChoiceFunction
+       | OpenAI::Responses::ToolChoiceMcp

      module ToolChoice
        extend OpenAI::Internal::Type::Union
```
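Finally, `ToolChoiceMcp` joining the `tool_choice` union means a specific MCP server can be forced rather than merely offered. The field names below follow the MCP tool shape in the OpenAI API reference; treat the exact field set as an assumption to verify against `tool_choice_mcp.rbs` in this release:

```ruby
client = OpenAI::Client.new  # reads OPENAI_API_KEY from the environment

response = client.responses.create(
  model: "gpt-4o",  # illustrative
  input: "List my open pull requests.",
  tools: [
    {
      type: :mcp,
      server_label: "github",                 # illustrative MCP server
      server_url: "https://example.com/mcp",  # illustrative URL
      require_approval: :never
    }
  ],
  # Force a call to the labelled MCP server instead of a free-form answer.
  tool_choice: {type: :mcp, server_label: "github"}
)

puts response.status
```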