openai 0.10.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +3 -0
  6. data/lib/openai/models/all_models.rb +4 -0
  7. data/lib/openai/models/chat/chat_completion.rb +32 -31
  8. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  9. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  10. data/lib/openai/models/images_response.rb +92 -1
  11. data/lib/openai/models/responses/response.rb +59 -35
  12. data/lib/openai/models/responses/response_create_params.rb +64 -39
  13. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  14. data/lib/openai/models/responses/response_includable.rb +8 -6
  15. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  16. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  17. data/lib/openai/models/responses_model.rb +4 -0
  18. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  19. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  20. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  21. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  22. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  23. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  24. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  25. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  26. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  27. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  28. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  29. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  30. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  31. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  32. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  33. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  34. data/lib/openai/models.rb +2 -0
  35. data/lib/openai/resources/chat/completions.rb +2 -2
  36. data/lib/openai/resources/responses.rb +14 -6
  37. data/lib/openai/resources/webhooks.rb +124 -0
  38. data/lib/openai/version.rb +1 -1
  39. data/lib/openai.rb +18 -0
  40. data/rbi/openai/client.rbi +3 -0
  41. data/rbi/openai/models/all_models.rbi +20 -0
  42. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  43. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  44. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  45. data/rbi/openai/models/images_response.rbi +146 -0
  46. data/rbi/openai/models/responses/response.rbi +75 -44
  47. data/rbi/openai/models/responses/response_create_params.rbi +91 -55
  48. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  49. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  50. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  51. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  52. data/rbi/openai/models/responses_model.rbi +20 -0
  53. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  54. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  55. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  56. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  57. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  58. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  59. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  60. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  61. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  62. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  63. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  64. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  65. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  66. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  67. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  68. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  69. data/rbi/openai/models.rbi +2 -0
  70. data/rbi/openai/resources/chat/completions.rbi +34 -30
  71. data/rbi/openai/resources/responses.rbi +62 -38
  72. data/rbi/openai/resources/webhooks.rbi +68 -0
  73. data/sig/openai/client.rbs +2 -0
  74. data/sig/openai/models/all_models.rbs +8 -0
  75. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  76. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  77. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  78. data/sig/openai/models/images_response.rbs +83 -0
  79. data/sig/openai/models/responses/response.rbs +13 -1
  80. data/sig/openai/models/responses/response_create_params.rbs +13 -1
  81. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  82. data/sig/openai/models/responses/response_includable.rbs +7 -5
  83. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  84. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  85. data/sig/openai/models/responses_model.rbs +8 -0
  86. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  87. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  88. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  89. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  90. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  91. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  92. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  93. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  94. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  95. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  96. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  97. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  98. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  99. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  100. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  101. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  102. data/sig/openai/models.rbs +2 -0
  103. data/sig/openai/resources/responses.rbs +4 -0
  104. data/sig/openai/resources/webhooks.rbs +33 -0
  105. metadata +56 -2
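
Picking up the new webhooks resource, model constants, and request parameters is a one-line dependency bump. A minimal Gemfile sketch (the pessimistic constraint style is illustrative):

```ruby
# Gemfile
gem "openai", "~> 0.11.0" # pulls in webhooks, deep-research models, and new params
```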
data/lib/openai/resources/responses.rb CHANGED
@@ -23,7 +23,7 @@ module OpenAI
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
  # your own data as input for the model's response.
  #
- # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
  #
  # @param background [Boolean, nil] Whether to run the model response in the background.
  #
@@ -35,6 +35,8 @@ module OpenAI
  #
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
  #
+ # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+ #
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
@@ -47,7 +49,7 @@ module OpenAI
  #
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
  #
- # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
  #
@@ -55,10 +57,12 @@ module OpenAI
  #
  # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
  #
- # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+ # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
  #
  # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
  #
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+ #
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
  #
  # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
@@ -183,7 +187,7 @@ module OpenAI
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
  # your own data as input for the model's response.
  #
- # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
  #
  # @param background [Boolean, nil] Whether to run the model response in the background.
  #
@@ -195,6 +199,8 @@ module OpenAI
  #
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
  #
+ # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+ #
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
@@ -207,7 +213,7 @@ module OpenAI
  #
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
  #
- # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
  #
@@ -215,10 +221,12 @@ module OpenAI
  #
  # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
  #
- # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+ # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
  #
  # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
  #
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+ #
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
  #
  # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
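
The two new Responses parameters are ordinary keywords on `create` and `stream_raw`. A minimal sketch, assuming a configured client; the model name, prompt, and REST-shape tool hash are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-4o",
  input: "Summarize this week's Ruby news.",
  tools: [{type: "web_search_preview"}], # hash is coerced by the SDK
  max_tool_calls: 3, # cap the total number of built-in tool calls for this response
  top_logprobs: 5    # include the 5 most likely tokens at each output position
)
```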
data/lib/openai/resources/webhooks.rb ADDED
@@ -0,0 +1,124 @@
+ # frozen_string_literal: true
+
+ require "openssl"
+ require "base64"
+
+ module OpenAI
+   module Resources
+     class Webhooks
+       # Validates that the given payload was sent by OpenAI and parses the payload.
+       #
+       # @param payload [String] The raw webhook payload as a string
+       # @param headers [Hash] The webhook headers
+       # @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
+       #
+       # @return [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent, OpenAI::Models::Webhooks::BatchFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent, OpenAI::Models::Webhooks::ResponseCreatedWebhookEvent, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent]
+       #
+       # @raise [ArgumentError] if signature verification fails
+       def unwrap(
+         payload,
+         headers = {},
+         webhook_secret = @client.webhook_secret || ENV["OPENAI_WEBHOOK_SECRET"]
+       )
+         verify_signature(payload, headers, webhook_secret)
+
+         parsed = JSON.parse(payload, symbolize_names: true)
+         OpenAI::Internal::Type::Converter.coerce(OpenAI::Models::Webhooks::UnwrapWebhookEvent, parsed)
+       end
+
+       # Validates whether or not the webhook payload was sent by OpenAI.
+       #
+       # @param payload [String] The webhook payload as a string
+       # @param headers [Hash] The webhook headers
+       # @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
+       # @param tolerance [Integer] Maximum age of the webhook in seconds (default: 300 = 5 minutes)
+       #
+       # @raise [ArgumentError] if the signature is invalid
+       def verify_signature(
+         payload,
+         headers,
+         webhook_secret = @client.webhook_secret || ENV["OPENAI_WEBHOOK_SECRET"],
+         tolerance = 300
+       )
+         if webhook_secret.nil?
+           raise ArgumentError,
+                 "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, " \
+                 "or passed to this function"
+         end
+
+         # Extract required headers
+         signature_header = headers["webhook-signature"] || headers[:webhook_signature]
+         timestamp_header = headers["webhook-timestamp"] || headers[:webhook_timestamp]
+         webhook_id = headers["webhook-id"] || headers[:webhook_id]
+
+         if signature_header.nil?
+           raise ArgumentError, "Missing required webhook-signature header"
+         end
+
+         if timestamp_header.nil?
+           raise ArgumentError, "Missing required webhook-timestamp header"
+         end
+
+         if webhook_id.nil?
+           raise ArgumentError, "Missing required webhook-id header"
+         end
+
+         # Validate timestamp to prevent replay attacks
+         begin
+           timestamp_seconds = timestamp_header.to_i
+         rescue ArgumentError
+           raise ArgumentError, "Invalid webhook timestamp format"
+         end
+
+         now = Time.now.to_i
+
+         if now - timestamp_seconds > tolerance
+           raise OpenAI::Errors::InvalidWebhookSignatureError, "Webhook timestamp is too old"
+         end
+
+         if timestamp_seconds > now + tolerance
+           raise OpenAI::Errors::InvalidWebhookSignatureError, "Webhook timestamp is too new"
+         end
+
+         # Extract signatures from v1,<base64> format
+         # The signature header can have multiple values, separated by spaces.
+         # Each value is in the format v1,<base64>. We should accept if any match.
+         signatures = signature_header.split.map do |part|
+           if part.start_with?("v1,")
+             part[3..]
+           else
+             part
+           end
+         end
+
+         # Decode the secret if it starts with whsec_
+         decoded_secret = if webhook_secret.start_with?("whsec_")
+           Base64.decode64(webhook_secret[6..])
+         else
+           webhook_secret
+         end
+
+         # Create the signed payload: {webhook_id}.{timestamp}.{payload}
+         signed_payload = "#{webhook_id}.#{timestamp_header}.#{payload}"
+
+         # Compute HMAC-SHA256 signature
+         expected_signature = Base64.encode64(
+           OpenSSL::HMAC.digest("sha256", decoded_secret, signed_payload)
+         ).strip
+
+         # Accept if any signature matches using timing-safe comparison
+         return if signatures.any? { |signature| OpenSSL.secure_compare(expected_signature, signature) }
+
+         raise OpenAI::Errors::InvalidWebhookSignatureError,
+               "The given webhook signature does not match the expected signature"
+       end
+
+       # @api private
+       #
+       # @param client [OpenAI::Client]
+       def initialize(client:)
+         @client = client
+       end
+     end
+   end
+ end
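
A hedged usage sketch for the new resource: the Sinatra app, route, and event dispatch below are illustrative and not part of the gem; the header names and the rescued error mirror the implementation above, and the `webhook_secret:` client option is an assumption inferred from the `@client.webhook_secret` reference:

```ruby
require "openai"
require "sinatra"

client = OpenAI::Client.new(
  api_key: ENV["OPENAI_API_KEY"],
  webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"] # assumed constructor option
)

post "/openai/webhooks" do
  payload = request.body.read
  headers = {
    "webhook-id" => request.env["HTTP_WEBHOOK_ID"],
    "webhook-timestamp" => request.env["HTTP_WEBHOOK_TIMESTAMP"],
    "webhook-signature" => request.env["HTTP_WEBHOOK_SIGNATURE"]
  }

  begin
    event = client.webhooks.unwrap(payload, headers) # verifies, then parses
    case event.type # dispatch branch is illustrative
    when :"response.completed"
      # ... handle the completed response ...
    end
    status 200
  rescue OpenAI::Errors::InvalidWebhookSignatureError
    status 400 # reject payloads that fail signature or timestamp checks
  end
end
```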
data/lib/openai/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module OpenAI
-   VERSION = "0.10.0"
+   VERSION = "0.11.0"
  end
data/lib/openai.rb CHANGED
@@ -441,6 +441,7 @@ require_relative "openai/models/responses/response_web_search_call_in_progress_e
  require_relative "openai/models/responses/response_web_search_call_searching_event"
  require_relative "openai/models/responses/tool"
  require_relative "openai/models/responses/tool_choice_function"
+ require_relative "openai/models/responses/tool_choice_mcp"
  require_relative "openai/models/responses/tool_choice_options"
  require_relative "openai/models/responses/tool_choice_types"
  require_relative "openai/models/responses/web_search_tool"
@@ -477,6 +478,22 @@ require_relative "openai/models/vector_stores/vector_store_file_deleted"
  require_relative "openai/models/vector_store_search_params"
  require_relative "openai/models/vector_store_search_response"
  require_relative "openai/models/vector_store_update_params"
+ require_relative "openai/models/webhooks/batch_cancelled_webhook_event"
+ require_relative "openai/models/webhooks/batch_completed_webhook_event"
+ require_relative "openai/models/webhooks/batch_expired_webhook_event"
+ require_relative "openai/models/webhooks/batch_failed_webhook_event"
+ require_relative "openai/models/webhooks/eval_run_canceled_webhook_event"
+ require_relative "openai/models/webhooks/eval_run_failed_webhook_event"
+ require_relative "openai/models/webhooks/eval_run_succeeded_webhook_event"
+ require_relative "openai/models/webhooks/fine_tuning_job_cancelled_webhook_event"
+ require_relative "openai/models/webhooks/fine_tuning_job_failed_webhook_event"
+ require_relative "openai/models/webhooks/fine_tuning_job_succeeded_webhook_event"
+ require_relative "openai/models/webhooks/response_cancelled_webhook_event"
+ require_relative "openai/models/webhooks/response_completed_webhook_event"
+ require_relative "openai/models/webhooks/response_failed_webhook_event"
+ require_relative "openai/models/webhooks/response_incomplete_webhook_event"
+ require_relative "openai/models/webhooks/unwrap_webhook_event"
+ require_relative "openai/models/webhooks/webhook_unwrap_params"
  require_relative "openai/models"
  require_relative "openai/resources/audio"
  require_relative "openai/resources/audio/speech"
@@ -521,3 +538,4 @@ require_relative "openai/resources/uploads/parts"
  require_relative "openai/resources/vector_stores"
  require_relative "openai/resources/vector_stores/file_batches"
  require_relative "openai/resources/vector_stores/files"
+ require_relative "openai/resources/webhooks"
data/rbi/openai/client.rbi CHANGED
@@ -52,6 +52,9 @@ module OpenAI
  sig { returns(OpenAI::Resources::VectorStores) }
  attr_reader :vector_stores

+ sig { returns(OpenAI::Resources::Webhooks) }
+ attr_reader :webhooks
+
  sig { returns(OpenAI::Resources::Beta) }
  attr_reader :beta

data/rbi/openai/models/all_models.rbi CHANGED
@@ -35,6 +35,26 @@ module OpenAI
  :"o3-pro-2025-06-10",
  OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
  )
+ O3_DEEP_RESEARCH =
+   T.let(
+     :"o3-deep-research",
+     OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
+   )
+ O3_DEEP_RESEARCH_2025_06_26 =
+   T.let(
+     :"o3-deep-research-2025-06-26",
+     OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
+   )
+ O4_MINI_DEEP_RESEARCH =
+   T.let(
+     :"o4-mini-deep-research",
+     OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
+   )
+ O4_MINI_DEEP_RESEARCH_2025_06_26 =
+   T.let(
+     :"o4-mini-deep-research-2025-06-26",
+     OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
+   )
  COMPUTER_USE_PREVIEW =
    T.let(
      :"computer-use-preview",
data/rbi/openai/models/chat/chat_completion.rbi CHANGED
@@ -32,23 +32,23 @@ module OpenAI
  sig { returns(Symbol) }
  attr_accessor :object

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  sig do
    returns(
      T.nilable(OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol)
@@ -98,23 +98,23 @@ module OpenAI
  created:,
  # The model used for the chat completion.
  model:,
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  service_tier: nil,
  # This fingerprint represents the backend configuration that the model runs with.
  #
@@ -363,23 +363,23 @@ module OpenAI
  end
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  module ServiceTier
  extend OpenAI::Internal::Type::Enum

@@ -409,6 +409,11 @@ module OpenAI
  :scale,
  OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol
  )
+ PRIORITY =
+   T.let(
+     :priority,
+     OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol
+   )

  sig do
    override.returns(
data/rbi/openai/models/chat/chat_completion_chunk.rbi CHANGED
@@ -34,23 +34,23 @@ module OpenAI
  sig { returns(Symbol) }
  attr_accessor :object

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  sig do
    returns(
      T.nilable(
@@ -113,23 +113,23 @@ module OpenAI
  created:,
  # The model to generate the completion.
  model:,
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  service_tier: nil,
  # This fingerprint represents the backend configuration that the model runs with.
  # Can be used in conjunction with the `seed` request parameter to understand when
@@ -783,23 +783,23 @@ module OpenAI
  end
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  module ServiceTier
  extend OpenAI::Internal::Type::Enum

@@ -829,6 +829,11 @@ module OpenAI
  :scale,
  OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
  )
+ PRIORITY =
+   T.let(
+     :priority,
+     OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
+   )

  sig do
    override.returns(
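
With the added `PRIORITY` member, `service_tier` can request Priority processing anywhere the enum is accepted, and the response reports the tier actually used. A minimal sketch with an illustrative model and message:

```ruby
completion = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: "user", content: "Hello!"}],
  service_tier: :priority # requires access to Priority processing
)

puts completion.service_tier # tier actually used; may differ from the request
```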