openai 0.10.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +3 -0
  6. data/lib/openai/models/all_models.rb +4 -0
  7. data/lib/openai/models/chat/chat_completion.rb +32 -31
  8. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  9. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  10. data/lib/openai/models/images_response.rb +92 -1
  11. data/lib/openai/models/responses/response.rb +59 -35
  12. data/lib/openai/models/responses/response_create_params.rb +64 -39
  13. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  14. data/lib/openai/models/responses/response_includable.rb +8 -6
  15. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  16. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  17. data/lib/openai/models/responses_model.rb +4 -0
  18. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  19. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  20. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  21. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  22. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  23. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  24. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  25. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  26. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  27. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  28. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  29. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  30. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  31. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  32. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  33. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  34. data/lib/openai/models.rb +2 -0
  35. data/lib/openai/resources/chat/completions.rb +2 -2
  36. data/lib/openai/resources/responses.rb +14 -6
  37. data/lib/openai/resources/webhooks.rb +124 -0
  38. data/lib/openai/version.rb +1 -1
  39. data/lib/openai.rb +18 -0
  40. data/rbi/openai/client.rbi +3 -0
  41. data/rbi/openai/models/all_models.rbi +20 -0
  42. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  43. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  44. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  45. data/rbi/openai/models/images_response.rbi +146 -0
  46. data/rbi/openai/models/responses/response.rbi +75 -44
  47. data/rbi/openai/models/responses/response_create_params.rbi +91 -55
  48. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  49. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  50. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  51. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  52. data/rbi/openai/models/responses_model.rbi +20 -0
  53. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  54. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  55. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  56. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  57. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  58. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  59. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  60. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  61. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  62. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  63. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  64. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  65. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  66. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  67. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  68. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  69. data/rbi/openai/models.rbi +2 -0
  70. data/rbi/openai/resources/chat/completions.rbi +34 -30
  71. data/rbi/openai/resources/responses.rbi +62 -38
  72. data/rbi/openai/resources/webhooks.rbi +68 -0
  73. data/sig/openai/client.rbs +2 -0
  74. data/sig/openai/models/all_models.rbs +8 -0
  75. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  76. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  77. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  78. data/sig/openai/models/images_response.rbs +83 -0
  79. data/sig/openai/models/responses/response.rbs +13 -1
  80. data/sig/openai/models/responses/response_create_params.rbs +13 -1
  81. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  82. data/sig/openai/models/responses/response_includable.rbs +7 -5
  83. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  84. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  85. data/sig/openai/models/responses_model.rbs +8 -0
  86. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  87. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  88. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  89. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  90. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  91. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  92. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  93. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  94. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  95. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  96. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  97. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  98. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  99. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  100. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  101. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  102. data/sig/openai/models.rbs +2 -0
  103. data/sig/openai/resources/responses.rbs +4 -0
  104. data/sig/openai/resources/webhooks.rbs +33 -0
  105. metadata +56 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: d8ca71d5671f3a20cde2fda0eec819c5305ebd9e60aabcc55abf471f1de63b53
- data.tar.gz: 1de944c9a0ee804f9d15567fec8038368458132183468c81ae42609c036e05ac
+ metadata.gz: 252a9ce9833b0a9f66be94b76081f34716e348ebca732e63c3e16c105ed42ea1
+ data.tar.gz: d4d8b36822ee74af77508ec8e48b472d72d619333e468055695158e242c49760
  SHA512:
- metadata.gz: 822226e8ebc9b077c242ae0366ec3f5e4b0b413054cbc3e4d9e971347c79ccb9b64006a6af5cc5a88b25472103be1a5c9a2b3c9a5e25396951f454f9f15f2dc4
- data.tar.gz: 6ed344972114e5c0f098d3e5619dc1548ac61557e7b3b23384d558e2f9ccbe1ee1dd00ee0ffa0f77b17027554c1c46d33bc6eb552695615bf0fd289cf0fb4dc1
+ metadata.gz: f27c40e40df727c8da570a4f6b1e20f72a87baa710efd3957bb0fbbcc23eb1dd78d03b1bcc3c3cf5684d4db4b062c392c1c3d970093c8e0fac89ef85db053db3
+ data.tar.gz: 21fe44d1605c6196d7321dcb7efa7d951a2523ba71859eab1bf242ff3314afe0cce25201c21b141f9f3902b684cb577ba51c520d12602d1279eccb770769e3a4
data/CHANGELOG.md CHANGED
@@ -1,5 +1,25 @@
  # Changelog

+ ## 0.11.0 (2025-06-26)
+
+ Full Changelog: [v0.10.0...v0.11.0](https://github.com/openai/openai-ruby/compare/v0.10.0...v0.11.0)
+
+ ### Features
+
+ * **api:** webhook and deep research support ([6228400](https://github.com/openai/openai-ruby/commit/6228400e19aadefc5f87e24b3c104fc0b44d3cee))
+
+
+ ### Bug Fixes
+
+ * **ci:** release-doctor — report correct token name ([c12c991](https://github.com/openai/openai-ruby/commit/c12c9911beaeb8b1c72d7c5cc5f14dcb9cd5452e))
+
+
+ ### Chores
+
+ * **api:** remove unsupported property ([1073c3a](https://github.com/openai/openai-ruby/commit/1073c3a6059f2d1e1ef92937326699e0240503e5))
+ * **client:** throw specific errors ([0cf937e](https://github.com/openai/openai-ruby/commit/0cf937ea8abebc05e52a419e19e275a45b5da646))
+ * **docs:** update README to include links to docs on Webhooks ([2d8f23e](https://github.com/openai/openai-ruby/commit/2d8f23ecb245c88f3f082f93eb906af857d64c7d))
+
  ## 0.10.0 (2025-06-23)

  Full Changelog: [v0.9.0...v0.10.0](https://github.com/openai/openai-ruby/compare/v0.9.0...v0.10.0)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
  <!-- x-release-please-start-version -->

  ```ruby
- gem "openai", "~> 0.10.0"
+ gem "openai", "~> 0.11.0"
  ```

  <!-- x-release-please-end -->
@@ -112,6 +112,84 @@ puts(edited.data.first)

  Note that you can also pass a raw `IO` descriptor, but this disables retries, as the library can't be sure if the descriptor is a file or pipe (which cannot be rewound).

+ ## Webhook Verification
+
+ Verifying webhook signatures is _optional but encouraged_.
+
+ For more information about webhooks, see [the API docs](https://platform.openai.com/docs/guides/webhooks).
+
+ ### Parsing webhook payloads
+
+ For most use cases, you will likely want to verify the webhook and parse the payload at the same time. To achieve this, we provide the method `client.webhooks.unwrap`, which parses a webhook request and verifies that it was sent by OpenAI. This method will raise an error if the signature is invalid.
+
+ Note that the `body` parameter must be the raw JSON string sent from the server (do not parse it first). The `unwrap` method will parse this JSON for you into an event object after verifying the webhook was sent from OpenAI.
+
+ ```ruby
+ require 'sinatra'
+ require 'openai'
+
+ # Set up the client with webhook secret from environment variable
+ client = OpenAI::Client.new(webhook_secret: ENV['OPENAI_WEBHOOK_SECRET'])
+
+ post '/webhook' do
+   request_body = request.body.read
+
+   begin
+     event = client.webhooks.unwrap(request_body, request.env)
+
+     case event.type
+     when 'response.completed'
+       puts "Response completed: #{event.data}"
+     when 'response.failed'
+       puts "Response failed: #{event.data}"
+     else
+       puts "Unhandled event type: #{event.type}"
+     end
+
+     status 200
+     'ok'
+   rescue StandardError => e
+     puts "Invalid signature: #{e}"
+     status 400
+     'Invalid signature'
+   end
+ end
+ ```
+
+ ### Verifying webhook payloads directly
+
+ In some cases, you may want to verify the webhook separately from parsing the payload. If you prefer to handle these steps separately, we provide the method `client.webhooks.verify_signature` to _only verify_ the signature of a webhook request. Like `unwrap`, this method will raise an error if the signature is invalid.
+
+ Note that the `body` parameter must be the raw JSON string sent from the server (do not parse it first). You will then need to parse the body after verifying the signature.
+
+ ```ruby
+ require 'sinatra'
+ require 'json'
+ require 'openai'
+
+ # Set up the client with webhook secret from environment variable
+ client = OpenAI::Client.new(webhook_secret: ENV['OPENAI_WEBHOOK_SECRET'])
+
+ post '/webhook' do
+   request_body = request.body.read
+
+   begin
+     client.webhooks.verify_signature(request_body, request.env)
+
+     # Parse the body after verification
+     event = JSON.parse(request_body)
+     puts "Verified event: #{event}"
+
+     status 200
+     'ok'
+   rescue StandardError => e
+     puts "Invalid signature: #{e}"
+     status 400
+     'Invalid signature'
+   end
+ end
+ ```
+
  ### [Structured outputs](https://platform.openai.com/docs/guides/structured-outputs) and function calling

  This SDK ships with helpers in `OpenAI::BaseModel`, `OpenAI::ArrayOf`, `OpenAI::EnumOf`, and `OpenAI::UnionOf` to help you define the supported JSON schemas used in making structured outputs and function calling requests.
data/lib/openai/client.rb CHANGED
@@ -24,6 +24,9 @@ module OpenAI
  # @return [String, nil]
  attr_reader :project

+ # @return [String, nil]
+ attr_reader :webhook_secret
+
  # @return [OpenAI::Resources::Completions]
  attr_reader :completions

@@ -57,6 +60,9 @@
  # @return [OpenAI::Resources::VectorStores]
  attr_reader :vector_stores

+ # @return [OpenAI::Resources::Webhooks]
+ attr_reader :webhooks
+
  # @return [OpenAI::Resources::Beta]
  attr_reader :beta

@@ -92,6 +98,8 @@
  #
  # @param project [String, nil] Defaults to `ENV["OPENAI_PROJECT_ID"]`
  #
+ # @param webhook_secret [String, nil] Defaults to `ENV["OPENAI_WEBHOOK_SECRET"]`
+ #
  # @param base_url [String, nil] Override the default base URL for the API, e.g.,
  #   `"https://api.example.com/v2/"`. Defaults to `ENV["OPENAI_BASE_URL"]`
  #
@@ -106,6 +114,7 @@
  api_key: ENV["OPENAI_API_KEY"],
  organization: ENV["OPENAI_ORG_ID"],
  project: ENV["OPENAI_PROJECT_ID"],
+ webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"],
  base_url: ENV["OPENAI_BASE_URL"],
  max_retries: self.class::DEFAULT_MAX_RETRIES,
  timeout: self.class::DEFAULT_TIMEOUT_IN_SECONDS,
@@ -124,6 +133,7 @@
  }

  @api_key = api_key.to_s
+ @webhook_secret = webhook_secret&.to_s

  super(
    base_url: base_url,
@@ -145,6 +155,7 @@
  @fine_tuning = OpenAI::Resources::FineTuning.new(client: self)
  @graders = OpenAI::Resources::Graders.new(client: self)
  @vector_stores = OpenAI::Resources::VectorStores.new(client: self)
+ @webhooks = OpenAI::Resources::Webhooks.new(client: self)
  @beta = OpenAI::Resources::Beta.new(client: self)
  @batches = OpenAI::Resources::Batches.new(client: self)
  @uploads = OpenAI::Resources::Uploads.new(client: self)
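The new `webhook_secret` client option wires directly into the `client.webhooks` resource registered above. A minimal sketch of constructing a client with it; the placeholder secret value is illustrative, and your real signing secret comes from the OpenAI dashboard:

```ruby
require "openai"

# webhook_secret falls back to ENV["OPENAI_WEBHOOK_SECRET"] when omitted,
# mirroring the constructor default shown in the diff above.
client = OpenAI::Client.new(
  api_key: ENV["OPENAI_API_KEY"],
  webhook_secret: "whsec_..." # placeholder; use your real signing secret
)

client.webhooks # => the new OpenAI::Resources::Webhooks resource
```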
data/lib/openai/errors.rb CHANGED
@@ -8,6 +8,9 @@ module OpenAI
  # @return [StandardError, nil]
  end

+ class InvalidWebhookSignatureError < OpenAI::Errors::Error
+ end
+
  class ConversionError < OpenAI::Errors::Error
  # @return [StandardError, nil]
  def cause = @cause.nil? ? super : @cause
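With the new `OpenAI::Errors::InvalidWebhookSignatureError` class, webhook handlers can rescue signature failures specifically instead of catching `StandardError` as the README examples above do. A hedged sketch, assuming (per the 'client: throw specific errors' changelog entry) that `unwrap` raises this class on a bad signature:

```ruby
begin
  event = client.webhooks.unwrap(request_body, request.env)
rescue OpenAI::Errors::InvalidWebhookSignatureError => e
  # Catches signature mismatches only; other client errors propagate unchanged.
  warn "Webhook signature verification failed: #{e.message}"
end
```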
data/lib/openai/models/all_models.rb CHANGED
@@ -18,6 +18,10 @@ module OpenAI
  O1_PRO_2025_03_19 = :"o1-pro-2025-03-19"
  O3_PRO = :"o3-pro"
  O3_PRO_2025_06_10 = :"o3-pro-2025-06-10"
+ O3_DEEP_RESEARCH = :"o3-deep-research"
+ O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26"
+ O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research"
+ O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
  COMPUTER_USE_PREVIEW = :"computer-use-preview"
  COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"

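The four new deep research model constants are registered for the Responses API (see also the matching additions in `responses_model.rb` above). A hedged sketch of selecting one by name; the `input` string is illustrative, and per the API docs deep research requests need a data source such as web search, so the tool hash here follows the Responses API schema and may need adjusting for your account:

```ruby
# Model name comes straight from the new constants above; access to deep
# research models is account-dependent.
response = client.responses.create(
  model: "o3-deep-research",
  input: "Summarize recent public benchmarks for small language models.",
  tools: [{type: :web_search_preview}]
)
puts(response.id)
```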
data/lib/openai/models/chat/chat_completion.rb CHANGED
@@ -39,23 +39,23 @@ module OpenAI
  required :object, const: :"chat.completion"

  # @!attribute service_tier
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @return [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil]
  optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletion::ServiceTier }, nil?: true
@@ -90,7 +90,7 @@
  #
  # @param model [String] The model used for the chat completion.
  #
- # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
  #
@@ -188,23 +188,23 @@
  end
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @see OpenAI::Models::Chat::ChatCompletion#service_tier
  module ServiceTier
@@ -214,6 +214,7 @@
  DEFAULT = :default
  FLEX = :flex
  SCALE = :scale
+ PRIORITY = :priority

  # @!method self.values
  # @return [Array<Symbol>]
data/lib/openai/models/chat/chat_completion_chunk.rb CHANGED
@@ -38,23 +38,23 @@
  required :object, const: :"chat.completion.chunk"

  # @!attribute service_tier
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil]
  optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletionChunk::ServiceTier }, nil?: true
@@ -95,7 +95,7 @@
  #
  # @param model [String] The model to generate the completion.
  #
- # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
  #
@@ -371,23 +371,23 @@
  end
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @see OpenAI::Models::Chat::ChatCompletionChunk#service_tier
  module ServiceTier
@@ -397,6 +397,7 @@
  DEFAULT = :default
  FLEX = :flex
  SCALE = :scale
+ PRIORITY = :priority

  # @!method self.values
  # @return [Array<Symbol>]
data/lib/openai/models/chat/completion_create_params.rb CHANGED
@@ -219,23 +219,23 @@
  optional :seed, Integer, nil?: true

  # @!attribute service_tier
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil]
  optional :service_tier, enum: -> { OpenAI::Chat::CompletionCreateParams::ServiceTier }, nil?: true
@@ -254,6 +254,8 @@
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
  #
+ # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+ #
  # @return [Boolean, nil]
  optional :store, OpenAI::Internal::Type::Boolean, nil?: true

@@ -375,7 +377,7 @@
  #
  # @param seed [Integer, nil] This feature is in Beta.
  #
- # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
  #
@@ -546,23 +548,23 @@
  # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)]
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  module ServiceTier
  extend OpenAI::Internal::Type::Enum

@@ -570,6 +572,7 @@
  DEFAULT = :default
  FLEX = :flex
  SCALE = :scale
+ PRIORITY = :priority

  # @!method self.values
  # @return [Array<Symbol>]
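The practical upshot of the enum additions above: `:priority` is now a valid `service_tier` when creating a chat completion, and the response echoes the tier actually used. A minimal sketch; the model choice is illustrative, and priority processing availability depends on your account:

```ruby
completion = client.chat.completions.create(
  model: "gpt-4o", # illustrative; any chat model
  messages: [{role: "user", content: "Say hello."}],
  service_tier: :priority
)

# Per the updated docs, the returned tier reflects the processing mode
# actually used and may differ from the requested value.
puts(completion.service_tier)
```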
data/lib/openai/models/images_response.rb CHANGED
@@ -10,19 +10,45 @@ module OpenAI
  # @return [Integer]
  required :created, Integer

+ # @!attribute background
+ # The background parameter used for the image generation. Either `transparent` or
+ # `opaque`.
+ #
+ # @return [Symbol, OpenAI::Models::ImagesResponse::Background, nil]
+ optional :background, enum: -> { OpenAI::ImagesResponse::Background }
+
  # @!attribute data
  # The list of generated images.
  #
  # @return [Array<OpenAI::Models::Image>, nil]
  optional :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Image] }

+ # @!attribute output_format
+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+ #
+ # @return [Symbol, OpenAI::Models::ImagesResponse::OutputFormat, nil]
+ optional :output_format, enum: -> { OpenAI::ImagesResponse::OutputFormat }
+
+ # @!attribute quality
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
+ #
+ # @return [Symbol, OpenAI::Models::ImagesResponse::Quality, nil]
+ optional :quality, enum: -> { OpenAI::ImagesResponse::Quality }
+
+ # @!attribute size
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
+ # `1536x1024`.
+ #
+ # @return [Symbol, OpenAI::Models::ImagesResponse::Size, nil]
+ optional :size, enum: -> { OpenAI::ImagesResponse::Size }
+
  # @!attribute usage
  # For `gpt-image-1` only, the token usage information for the image generation.
  #
  # @return [OpenAI::Models::ImagesResponse::Usage, nil]
  optional :usage, -> { OpenAI::ImagesResponse::Usage }

- # @!method initialize(created:, data: nil, usage: nil)
+ # @!method initialize(created:, background: nil, data: nil, output_format: nil, quality: nil, size: nil, usage: nil)
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::ImagesResponse} for more details.
  #
@@ -30,10 +56,75 @@
  #
  # @param created [Integer] The Unix timestamp (in seconds) of when the image was created.
  #
+ # @param background [Symbol, OpenAI::Models::ImagesResponse::Background] The background parameter used for the image generation. Either `transparent` or
+ #
  # @param data [Array<OpenAI::Models::Image>] The list of generated images.
  #
+ # @param output_format [Symbol, OpenAI::Models::ImagesResponse::OutputFormat] The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+ #
+ # @param quality [Symbol, OpenAI::Models::ImagesResponse::Quality] The quality of the image generated. Either `low`, `medium`, or `high`.
+ #
+ # @param size [Symbol, OpenAI::Models::ImagesResponse::Size] The size of the image generated. Either `1024x1024`, `1024x1536`, or `1536x1024`
+ #
  # @param usage [OpenAI::Models::ImagesResponse::Usage] For `gpt-image-1` only, the token usage information for the image generation.

+ # The background parameter used for the image generation. Either `transparent` or
+ # `opaque`.
+ #
+ # @see OpenAI::Models::ImagesResponse#background
+ module Background
+   extend OpenAI::Internal::Type::Enum
+
+   TRANSPARENT = :transparent
+   OPAQUE = :opaque
+
+   # @!method self.values
+   # @return [Array<Symbol>]
+ end
+
+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+ #
+ # @see OpenAI::Models::ImagesResponse#output_format
+ module OutputFormat
+   extend OpenAI::Internal::Type::Enum
+
+   PNG = :png
+   WEBP = :webp
+   JPEG = :jpeg
+
+   # @!method self.values
+   # @return [Array<Symbol>]
+ end
+
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
+ #
+ # @see OpenAI::Models::ImagesResponse#quality
+ module Quality
+   extend OpenAI::Internal::Type::Enum
+
+   LOW = :low
+   MEDIUM = :medium
+   HIGH = :high
+
+   # @!method self.values
+   # @return [Array<Symbol>]
+ end
+
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
+ # `1536x1024`.
+ #
+ # @see OpenAI::Models::ImagesResponse#size
+ module Size
+   extend OpenAI::Internal::Type::Enum
+
+   SIZE_1024X1024 = :"1024x1024"
+   SIZE_1024X1536 = :"1024x1536"
+   SIZE_1536X1024 = :"1536x1024"
+
+   # @!method self.values
+   # @return [Array<Symbol>]
+ end
+
  # @see OpenAI::Models::ImagesResponse#usage
  class Usage < OpenAI::Internal::Type::BaseModel
  # @!attribute input_tokens
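For reference, the new response-level metadata above is readable directly off the result of an image call. A hedged sketch; the parameter values are illustrative, and the new fields are documented for `gpt-image-1`:

```ruby
image = client.images.generate(
  model: "gpt-image-1",
  prompt: "A watercolor fox in the snow",
  size: "1024x1024"
)

# Each new accessor is optional and returns nil when the API omits the field.
puts(image.background)    # e.g. :opaque or :transparent
puts(image.output_format) # e.g. :png, :webp, or :jpeg
puts(image.quality)       # e.g. :low, :medium, or :high
puts(image.size)          # e.g. :"1024x1024"
```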