openai 0.10.0 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +36 -0
  3. data/README.md +83 -7
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +3 -0
  6. data/lib/openai/helpers/streaming/events.rb +23 -0
  7. data/lib/openai/helpers/streaming/response_stream.rb +232 -0
  8. data/lib/openai/helpers/structured_output/parsed_json.rb +39 -0
  9. data/lib/openai/internal/stream.rb +2 -1
  10. data/lib/openai/internal/transport/base_client.rb +10 -2
  11. data/lib/openai/internal/type/base_stream.rb +3 -1
  12. data/lib/openai/models/all_models.rb +4 -0
  13. data/lib/openai/models/chat/chat_completion.rb +32 -31
  14. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  15. data/lib/openai/models/chat/chat_completion_message.rb +1 -1
  16. data/lib/openai/models/chat/chat_completion_message_tool_call.rb +1 -1
  17. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  18. data/lib/openai/models/images_response.rb +92 -1
  19. data/lib/openai/models/responses/response.rb +59 -35
  20. data/lib/openai/models/responses/response_create_params.rb +64 -39
  21. data/lib/openai/models/responses/response_function_tool_call.rb +1 -1
  22. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  23. data/lib/openai/models/responses/response_includable.rb +8 -6
  24. data/lib/openai/models/responses/response_output_text.rb +1 -1
  25. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  26. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  27. data/lib/openai/models/responses_model.rb +4 -0
  28. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  29. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  30. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  31. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  32. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  33. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  34. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  35. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  36. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  37. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  38. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  39. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  40. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  41. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  42. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  43. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  44. data/lib/openai/models.rb +2 -0
  45. data/lib/openai/resources/chat/completions.rb +14 -6
  46. data/lib/openai/resources/responses.rb +262 -81
  47. data/lib/openai/resources/webhooks.rb +124 -0
  48. data/lib/openai/streaming.rb +5 -0
  49. data/lib/openai/version.rb +1 -1
  50. data/lib/openai.rb +22 -0
  51. data/rbi/openai/client.rbi +3 -0
  52. data/rbi/openai/helpers/streaming/events.rbi +31 -0
  53. data/rbi/openai/helpers/streaming/response_stream.rbi +104 -0
  54. data/rbi/openai/internal/type/base_stream.rbi +8 -1
  55. data/rbi/openai/models/all_models.rbi +20 -0
  56. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  57. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  58. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  59. data/rbi/openai/models/images_response.rbi +146 -0
  60. data/rbi/openai/models/responses/response.rbi +75 -44
  61. data/rbi/openai/models/responses/response_create_params.rbi +91 -55
  62. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  63. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  64. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  65. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  66. data/rbi/openai/models/responses_model.rbi +20 -0
  67. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  68. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  69. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  70. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  71. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  72. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  73. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  74. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  75. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  76. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  77. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  78. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  79. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  80. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  81. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  82. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  83. data/rbi/openai/models.rbi +2 -0
  84. data/rbi/openai/resources/chat/completions.rbi +34 -30
  85. data/rbi/openai/resources/responses.rbi +188 -39
  86. data/rbi/openai/resources/webhooks.rbi +68 -0
  87. data/rbi/openai/streaming.rbi +5 -0
  88. data/sig/openai/client.rbs +2 -0
  89. data/sig/openai/internal/type/base_stream.rbs +4 -0
  90. data/sig/openai/models/all_models.rbs +8 -0
  91. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  92. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  93. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  94. data/sig/openai/models/images_response.rbs +83 -0
  95. data/sig/openai/models/responses/response.rbs +13 -1
  96. data/sig/openai/models/responses/response_create_params.rbs +13 -1
  97. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  98. data/sig/openai/models/responses/response_includable.rbs +7 -5
  99. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  100. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  101. data/sig/openai/models/responses_model.rbs +8 -0
  102. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  103. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  104. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  105. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  106. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  107. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  108. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  109. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  110. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  111. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  112. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  113. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  114. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  115. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  116. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  117. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  118. data/sig/openai/models.rbs +2 -0
  119. data/sig/openai/resources/responses.rbs +4 -0
  120. data/sig/openai/resources/webhooks.rbs +33 -0
  121. metadata +63 -2
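The headline additions in this range are the new webhooks surface (`data/lib/openai/resources/webhooks.rb` plus the event models under `models/webhooks/`) and the Responses streaming helpers (`helpers/streaming/response_stream.rb`). Below is a minimal sketch of how the webhook flow is likely wired up, inferred from the file names above; the `webhook_secret` client option and the `unwrap` signature are assumptions, so confirm them against the shipped README before relying on this.

```ruby
require "openai"

# Assumed: `webhook_secret` is the new client option from data/lib/openai/client.rb,
# and `unwrap` is the verification entry point from resources/webhooks.rb.
client = OpenAI::Client.new(
  api_key: ENV["OPENAI_API_KEY"],
  webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"]
)

# `payload` is the raw HTTP request body and `headers` the incoming headers,
# both supplied by your web framework. unwrap is expected to verify the
# signature and return one of the typed webhook event models listed above.
event = client.webhooks.unwrap(payload, headers)

case event
when OpenAI::Models::Webhooks::BatchCompletedWebhookEvent
  puts "batch #{event.data.id} completed"
when OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent
  puts "response #{event.data.id} completed"
end
```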
data/lib/openai/models/chat/chat_completion.rb

@@ -39,23 +39,23 @@ module OpenAI
   required :object, const: :"chat.completion"

   # @!attribute service_tier
-  # Specifies the latency tier to use for processing the request. This parameter is
-  # relevant for customers subscribed to the scale tier service:
-  #
-  # - If set to 'auto', and the Project is Scale tier enabled, the system will
-  #   utilize scale tier credits until they are exhausted.
-  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-  #   be processed using the default service tier with a lower uptime SLA and no
-  #   latency guarantee.
-  # - If set to 'default', the request will be processed using the default service
-  #   tier with a lower uptime SLA and no latency guarantee.
-  # - If set to 'flex', the request will be processed with the Flex Processing
-  #   service tier.
-  #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+  # Specifies the processing type used for serving the request.
+  #
+  # - If set to 'auto', then the request will be processed with the service tier
+  #   configured in the Project settings. Unless otherwise configured, the Project
+  #   will use 'default'.
+  # - If set to 'default', then the request will be processed with the standard
+  #   pricing and performance for the selected model.
+  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+  #   'priority', then the request will be processed with the corresponding service
+  #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+  #   Priority processing.
   # - When not set, the default behavior is 'auto'.
   #
-  # When this parameter is set, the response body will include the `service_tier`
-  # utilized.
+  # When the `service_tier` parameter is set, the response body will include the
+  # `service_tier` value based on the processing mode actually used to serve the
+  # request. This response value may be different from the value set in the
+  # parameter.
   #
   # @return [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil]
   optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletion::ServiceTier }, nil?: true

@@ -90,7 +90,7 @@ module OpenAI
   #
   # @param model [String] The model used for the chat completion.
   #
-  # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+  # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] Specifies the processing type used for serving the request.
   #
   # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
   #

@@ -188,23 +188,23 @@ module OpenAI
   end
   end

-  # Specifies the latency tier to use for processing the request. This parameter is
-  # relevant for customers subscribed to the scale tier service:
-  #
-  # - If set to 'auto', and the Project is Scale tier enabled, the system will
-  #   utilize scale tier credits until they are exhausted.
-  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-  #   be processed using the default service tier with a lower uptime SLA and no
-  #   latency guarantee.
-  # - If set to 'default', the request will be processed using the default service
-  #   tier with a lower uptime SLA and no latency guarantee.
-  # - If set to 'flex', the request will be processed with the Flex Processing
-  #   service tier.
-  #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+  # Specifies the processing type used for serving the request.
+  #
+  # - If set to 'auto', then the request will be processed with the service tier
+  #   configured in the Project settings. Unless otherwise configured, the Project
+  #   will use 'default'.
+  # - If set to 'default', then the request will be processed with the standard
+  #   pricing and performance for the selected model.
+  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+  #   'priority', then the request will be processed with the corresponding service
+  #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+  #   Priority processing.
   # - When not set, the default behavior is 'auto'.
   #
-  # When this parameter is set, the response body will include the `service_tier`
-  # utilized.
+  # When the `service_tier` parameter is set, the response body will include the
+  # `service_tier` value based on the processing mode actually used to serve the
+  # request. This response value may be different from the value set in the
+  # parameter.
   #
   # @see OpenAI::Models::Chat::ChatCompletion#service_tier
   module ServiceTier

@@ -214,6 +214,7 @@ module OpenAI
   DEFAULT = :default
   FLEX = :flex
   SCALE = :scale
+  PRIORITY = :priority

   # @!method self.values
   # @return [Array<Symbol>]
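Since `:priority` is now a possible resolved tier, a short sketch of reading the tier that actually served a request (reusing `client` from the first sketch; the resource call follows the gem's usual layout):

```ruby
# Per the updated docs above, the response's service_tier reflects the
# processing mode actually used, which may differ from what was requested
# (e.g. :auto resolving to :default).
completion = client.chat.completions.create(
  model: "gpt-4.1",
  messages: [{role: :user, content: "Hello!"}],
  service_tier: :auto
)

puts completion.service_tier #=> :default, :flex, :scale, or :priority
```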
data/lib/openai/models/chat/chat_completion_chunk.rb

@@ -38,23 +38,23 @@ module OpenAI
   required :object, const: :"chat.completion.chunk"

   # @!attribute service_tier
-  # Specifies the latency tier to use for processing the request. This parameter is
-  # relevant for customers subscribed to the scale tier service:
+  # Specifies the processing type used for serving the request.
   #
-  # - If set to 'auto', and the Project is Scale tier enabled, the system will
-  #   utilize scale tier credits until they are exhausted.
-  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-  #   be processed using the default service tier with a lower uptime SLA and no
-  #   latency guarantee.
-  # - If set to 'default', the request will be processed using the default service
-  #   tier with a lower uptime SLA and no latency guarantee.
-  # - If set to 'flex', the request will be processed with the Flex Processing
-  #   service tier.
-  #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+  # - If set to 'auto', then the request will be processed with the service tier
+  #   configured in the Project settings. Unless otherwise configured, the Project
+  #   will use 'default'.
+  # - If set to 'default', then the request will be processed with the standard
+  #   pricing and performance for the selected model.
+  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+  #   'priority', then the request will be processed with the corresponding service
+  #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+  #   Priority processing.
   # - When not set, the default behavior is 'auto'.
   #
-  # When this parameter is set, the response body will include the `service_tier`
-  # utilized.
+  # When the `service_tier` parameter is set, the response body will include the
+  # `service_tier` value based on the processing mode actually used to serve the
+  # request. This response value may be different from the value set in the
+  # parameter.
   #
   # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil]
   optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletionChunk::ServiceTier }, nil?: true

@@ -95,7 +95,7 @@ module OpenAI
   #
   # @param model [String] The model to generate the completion.
   #
-  # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+  # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] Specifies the processing type used for serving the request.
   #
   # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
   #

@@ -371,23 +371,23 @@ module OpenAI
   end
   end

-  # Specifies the latency tier to use for processing the request. This parameter is
-  # relevant for customers subscribed to the scale tier service:
+  # Specifies the processing type used for serving the request.
   #
-  # - If set to 'auto', and the Project is Scale tier enabled, the system will
-  #   utilize scale tier credits until they are exhausted.
-  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-  #   be processed using the default service tier with a lower uptime SLA and no
-  #   latency guarantee.
-  # - If set to 'default', the request will be processed using the default service
-  #   tier with a lower uptime SLA and no latency guarantee.
-  # - If set to 'flex', the request will be processed with the Flex Processing
-  #   service tier.
-  #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+  # - If set to 'auto', then the request will be processed with the service tier
+  #   configured in the Project settings. Unless otherwise configured, the Project
+  #   will use 'default'.
+  # - If set to 'default', then the request will be processed with the standard
+  #   pricing and performance for the selected model.
+  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+  #   'priority', then the request will be processed with the corresponding service
+  #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+  #   Priority processing.
   # - When not set, the default behavior is 'auto'.
   #
-  # When this parameter is set, the response body will include the `service_tier`
-  # utilized.
+  # When the `service_tier` parameter is set, the response body will include the
+  # `service_tier` value based on the processing mode actually used to serve the
+  # request. This response value may be different from the value set in the
+  # parameter.
   #
   # @see OpenAI::Models::Chat::ChatCompletionChunk#service_tier
   module ServiceTier

@@ -397,6 +397,7 @@ module OpenAI
   DEFAULT = :default
   FLEX = :flex
   SCALE = :scale
+  PRIORITY = :priority

   # @!method self.values
   # @return [Array<Symbol>]
data/lib/openai/models/chat/chat_completion_message.rb

@@ -14,7 +14,7 @@ module OpenAI
   # The parsed contents of the message, if JSON schema is specified.
   #
   # @return [Object, nil]
-  optional :parsed, OpenAI::Internal::Type::Unknown
+  optional :parsed, OpenAI::StructuredOutput::ParsedJson

   # @!attribute refusal
   # The refusal message generated by the model.

data/lib/openai/models/chat/chat_completion_message_tool_call.rb

@@ -44,7 +44,7 @@ module OpenAI
   # The parsed contents of the arguments.
   #
   # @return [Object, nil]
-  required :parsed, OpenAI::Internal::Type::Unknown
+  required :parsed, OpenAI::StructuredOutput::ParsedJson

   # @!attribute name
   # The name of the function to call.
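Both `parsed` attributes are retyped from the opaque `OpenAI::Internal::Type::Unknown` to the new `OpenAI::StructuredOutput::ParsedJson` converter (added in `helpers/structured_output/parsed_json.rb`). A sketch of where these fields surface, assuming the structured-output flow described in the gem's README; the schema class here is hypothetical:

```ruby
# Hypothetical schema: OpenAI::BaseModel subclasses double as JSON schemas
# in the gem's structured-output helpers.
class CalendarEvent < OpenAI::BaseModel
  required :name, String
  required :date, String
end

completion = client.chat.completions.create(
  model: "gpt-4.1",
  messages: [{role: :user, content: "Alice and Bob meet next Friday."}],
  response_format: CalendarEvent
)

# message.parsed is the field retyped above: a ParsedJson-coerced object
# rather than a raw Unknown value.
event = completion.choices.first.message.parsed
puts "#{event.name} on #{event.date}"
```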
data/lib/openai/models/chat/completion_create_params.rb

@@ -219,23 +219,23 @@ module OpenAI
   optional :seed, Integer, nil?: true

   # @!attribute service_tier
-  # Specifies the latency tier to use for processing the request. This parameter is
-  # relevant for customers subscribed to the scale tier service:
-  #
-  # - If set to 'auto', and the Project is Scale tier enabled, the system will
-  #   utilize scale tier credits until they are exhausted.
-  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-  #   be processed using the default service tier with a lower uptime SLA and no
-  #   latency guarantee.
-  # - If set to 'default', the request will be processed using the default service
-  #   tier with a lower uptime SLA and no latency guarantee.
-  # - If set to 'flex', the request will be processed with the Flex Processing
-  #   service tier.
-  #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+  # Specifies the processing type used for serving the request.
+  #
+  # - If set to 'auto', then the request will be processed with the service tier
+  #   configured in the Project settings. Unless otherwise configured, the Project
+  #   will use 'default'.
+  # - If set to 'default', then the request will be processed with the standard
+  #   pricing and performance for the selected model.
+  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+  #   'priority', then the request will be processed with the corresponding service
+  #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+  #   Priority processing.
   # - When not set, the default behavior is 'auto'.
   #
-  # When this parameter is set, the response body will include the `service_tier`
-  # utilized.
+  # When the `service_tier` parameter is set, the response body will include the
+  # `service_tier` value based on the processing mode actually used to serve the
+  # request. This response value may be different from the value set in the
+  # parameter.
   #
   # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil]
   optional :service_tier, enum: -> { OpenAI::Chat::CompletionCreateParams::ServiceTier }, nil?: true

@@ -254,6 +254,8 @@ module OpenAI
   # our [model distillation](https://platform.openai.com/docs/guides/distillation)
   # or [evals](https://platform.openai.com/docs/guides/evals) products.
   #
+  # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+  #
   # @return [Boolean, nil]
   optional :store, OpenAI::Internal::Type::Boolean, nil?: true

@@ -375,7 +377,7 @@ module OpenAI
   #
   # @param seed [Integer, nil] This feature is in Beta.
   #
-  # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+  # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
   #
   # @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
   #

@@ -546,23 +548,23 @@ module OpenAI
   # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)]
   end

-  # Specifies the latency tier to use for processing the request. This parameter is
-  # relevant for customers subscribed to the scale tier service:
-  #
-  # - If set to 'auto', and the Project is Scale tier enabled, the system will
-  #   utilize scale tier credits until they are exhausted.
-  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-  #   be processed using the default service tier with a lower uptime SLA and no
-  #   latency guarantee.
-  # - If set to 'default', the request will be processed using the default service
-  #   tier with a lower uptime SLA and no latency guarantee.
-  # - If set to 'flex', the request will be processed with the Flex Processing
-  #   service tier.
-  #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+  # Specifies the processing type used for serving the request.
+  #
+  # - If set to 'auto', then the request will be processed with the service tier
+  #   configured in the Project settings. Unless otherwise configured, the Project
+  #   will use 'default'.
+  # - If set to 'default', then the request will be processed with the standard
+  #   pricing and performance for the selected model.
+  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+  #   'priority', then the request will be processed with the corresponding service
+  #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+  #   Priority processing.
   # - When not set, the default behavior is 'auto'.
   #
-  # When this parameter is set, the response body will include the `service_tier`
-  # utilized.
+  # When the `service_tier` parameter is set, the response body will include the
+  # `service_tier` value based on the processing mode actually used to serve the
+  # request. This response value may be different from the value set in the
+  # parameter.
   module ServiceTier
   extend OpenAI::Internal::Type::Enum

@@ -570,6 +572,7 @@ module OpenAI
   DEFAULT = :default
   FLEX = :flex
   SCALE = :scale
+  PRIORITY = :priority

   # @!method self.values
   # @return [Array<Symbol>]
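On the request side, the same enum gained `PRIORITY`, so a tier can be chosen per call. A minimal sketch, again reusing `client`:

```ruby
# Opt a single request into Flex processing; :priority works the same way
# on accounts with access to Priority processing.
completion = client.chat.completions.create(
  model: "o3",
  messages: [{role: :user, content: "Summarize the attached report."}],
  service_tier: :flex
)
```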
data/lib/openai/models/images_response.rb

@@ -10,19 +10,45 @@ module OpenAI
   # @return [Integer]
   required :created, Integer

+  # @!attribute background
+  # The background parameter used for the image generation. Either `transparent` or
+  # `opaque`.
+  #
+  # @return [Symbol, OpenAI::Models::ImagesResponse::Background, nil]
+  optional :background, enum: -> { OpenAI::ImagesResponse::Background }
+
   # @!attribute data
   # The list of generated images.
   #
   # @return [Array<OpenAI::Models::Image>, nil]
   optional :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Image] }

+  # @!attribute output_format
+  # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+  #
+  # @return [Symbol, OpenAI::Models::ImagesResponse::OutputFormat, nil]
+  optional :output_format, enum: -> { OpenAI::ImagesResponse::OutputFormat }
+
+  # @!attribute quality
+  # The quality of the image generated. Either `low`, `medium`, or `high`.
+  #
+  # @return [Symbol, OpenAI::Models::ImagesResponse::Quality, nil]
+  optional :quality, enum: -> { OpenAI::ImagesResponse::Quality }
+
+  # @!attribute size
+  # The size of the image generated. Either `1024x1024`, `1024x1536`, or
+  # `1536x1024`.
+  #
+  # @return [Symbol, OpenAI::Models::ImagesResponse::Size, nil]
+  optional :size, enum: -> { OpenAI::ImagesResponse::Size }
+
   # @!attribute usage
   # For `gpt-image-1` only, the token usage information for the image generation.
   #
   # @return [OpenAI::Models::ImagesResponse::Usage, nil]
   optional :usage, -> { OpenAI::ImagesResponse::Usage }

-  # @!method initialize(created:, data: nil, usage: nil)
+  # @!method initialize(created:, background: nil, data: nil, output_format: nil, quality: nil, size: nil, usage: nil)
   # Some parameter documentation has been truncated, see
   # {OpenAI::Models::ImagesResponse} for more details.
   #

@@ -30,10 +56,75 @@ module OpenAI
   #
   # @param created [Integer] The Unix timestamp (in seconds) of when the image was created.
   #
+  # @param background [Symbol, OpenAI::Models::ImagesResponse::Background] The background parameter used for the image generation. Either `transparent` or
+  #
   # @param data [Array<OpenAI::Models::Image>] The list of generated images.
   #
+  # @param output_format [Symbol, OpenAI::Models::ImagesResponse::OutputFormat] The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+  #
+  # @param quality [Symbol, OpenAI::Models::ImagesResponse::Quality] The quality of the image generated. Either `low`, `medium`, or `high`.
+  #
+  # @param size [Symbol, OpenAI::Models::ImagesResponse::Size] The size of the image generated. Either `1024x1024`, `1024x1536`, or `1536x1024`
+  #
   # @param usage [OpenAI::Models::ImagesResponse::Usage] For `gpt-image-1` only, the token usage information for the image generation.

+  # The background parameter used for the image generation. Either `transparent` or
+  # `opaque`.
+  #
+  # @see OpenAI::Models::ImagesResponse#background
+  module Background
+    extend OpenAI::Internal::Type::Enum
+
+    TRANSPARENT = :transparent
+    OPAQUE = :opaque
+
+    # @!method self.values
+    # @return [Array<Symbol>]
+  end
+
+  # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+  #
+  # @see OpenAI::Models::ImagesResponse#output_format
+  module OutputFormat
+    extend OpenAI::Internal::Type::Enum
+
+    PNG = :png
+    WEBP = :webp
+    JPEG = :jpeg
+
+    # @!method self.values
+    # @return [Array<Symbol>]
+  end
+
+  # The quality of the image generated. Either `low`, `medium`, or `high`.
+  #
+  # @see OpenAI::Models::ImagesResponse#quality
+  module Quality
+    extend OpenAI::Internal::Type::Enum
+
+    LOW = :low
+    MEDIUM = :medium
+    HIGH = :high
+
+    # @!method self.values
+    # @return [Array<Symbol>]
+  end
+
+  # The size of the image generated. Either `1024x1024`, `1024x1536`, or
+  # `1536x1024`.
+  #
+  # @see OpenAI::Models::ImagesResponse#size
+  module Size
+    extend OpenAI::Internal::Type::Enum
+
+    SIZE_1024X1024 = :"1024x1024"
+    SIZE_1024X1536 = :"1024x1536"
+    SIZE_1536X1024 = :"1536x1024"
+
+    # @!method self.values
+    # @return [Array<Symbol>]
+  end
+
   # @see OpenAI::Models::ImagesResponse#usage
   class Usage < OpenAI::Internal::Type::BaseModel
   # @!attribute input_tokens
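A sketch of the new response-level image metadata, which echoes back what `gpt-image-1` actually used and is handy when the request relied on defaults (reusing `client`):

```ruby
image = client.images.generate(
  model: "gpt-image-1",
  prompt: "a watercolor lighthouse at dusk"
)

puts image.background    #=> :transparent or :opaque
puts image.output_format #=> :png, :webp, or :jpeg
puts image.quality       #=> :low, :medium, or :high
puts image.size          #=> :"1024x1024", :"1024x1536", or :"1536x1024"
```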
data/lib/openai/models/responses/response.rb

@@ -100,7 +100,7 @@ module OpenAI
   # response. See the `tools` parameter to see how to specify which tools the model
   # can call.
   #
-  # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction]
+  # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp]
   required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice }

   # @!attribute tools

@@ -147,6 +147,15 @@ module OpenAI
   # @return [Integer, nil]
   optional :max_output_tokens, Integer, nil?: true

+  # @!attribute max_tool_calls
+  # The maximum number of total calls to built-in tools that can be processed in a
+  # response. This maximum number applies across all built-in tool calls, not per
+  # individual tool. Any further attempts to call a tool by the model will be
+  # ignored.
+  #
+  # @return [Integer, nil]
+  optional :max_tool_calls, Integer, nil?: true
+
   # @!attribute previous_response_id
   # The unique ID of the previous response to the model. Use this to create
   # multi-turn conversations. Learn more about
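A sketch of the new `max_tool_calls` cap on a Responses call. Per the attribute docs above, the cap counts all built-in tool calls together, not per tool, and further attempts are ignored (reusing `client`; the web search tool shape follows the gem's tool models):

```ruby
response = client.responses.create(
  model: "gpt-4.1",
  input: "Compare the three largest Ruby conferences.",
  tools: [{type: :web_search_preview}],
  max_tool_calls: 3 # applies across all built-in tools combined
)
```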
@@ -172,23 +181,23 @@ module OpenAI
   optional :reasoning, -> { OpenAI::Reasoning }, nil?: true

   # @!attribute service_tier
-  # Specifies the latency tier to use for processing the request. This parameter is
-  # relevant for customers subscribed to the scale tier service:
-  #
-  # - If set to 'auto', and the Project is Scale tier enabled, the system will
-  #   utilize scale tier credits until they are exhausted.
-  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-  #   be processed using the default service tier with a lower uptime SLA and no
-  #   latency guarantee.
-  # - If set to 'default', the request will be processed using the default service
-  #   tier with a lower uptime SLA and no latency guarantee.
-  # - If set to 'flex', the request will be processed with the Flex Processing
-  #   service tier.
-  #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+  # Specifies the processing type used for serving the request.
+  #
+  # - If set to 'auto', then the request will be processed with the service tier
+  #   configured in the Project settings. Unless otherwise configured, the Project
+  #   will use 'default'.
+  # - If set to 'default', then the request will be processed with the standard
+  #   pricing and performance for the selected model.
+  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+  #   'priority', then the request will be processed with the corresponding service
+  #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+  #   Priority processing.
   # - When not set, the default behavior is 'auto'.
   #
-  # When this parameter is set, the response body will include the `service_tier`
-  # utilized.
+  # When the `service_tier` parameter is set, the response body will include the
+  # `service_tier` value based on the processing mode actually used to serve the
+  # request. This response value may be different from the value set in the
+  # parameter.
   #
   # @return [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil]
   optional :service_tier, enum: -> { OpenAI::Responses::Response::ServiceTier }, nil?: true

@@ -210,6 +219,13 @@ module OpenAI
   # @return [OpenAI::Models::Responses::ResponseTextConfig, nil]
   optional :text, -> { OpenAI::Responses::ResponseTextConfig }

+  # @!attribute top_logprobs
+  # An integer between 0 and 20 specifying the number of most likely tokens to
+  # return at each token position, each with an associated log probability.
+  #
+  # @return [Integer, nil]
+  optional :top_logprobs, Integer, nil?: true
+
   # @!attribute truncation
   # The truncation strategy to use for the model response.
   #
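And a sketch of requesting log probabilities through the new `top_logprobs` attribute. The `include` entry name is an assumption inferred from the `response_includable.rb` change in the file list, so verify it against the shipped model before use:

```ruby
response = client.responses.create(
  model: "gpt-4.1",
  input: "The capital of France is",
  include: [:"message.output_text.logprobs"], # assumed includable name
  top_logprobs: 5 # must be an integer between 0 and 20
)
```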
@@ -237,7 +253,7 @@ module OpenAI
   # @return [String, nil]
   optional :user, String

-  # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
+  # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
   # Some parameter documentation has been truncated, see
   # {OpenAI::Models::Responses::Response} for more details.
   #

@@ -261,7 +277,7 @@ module OpenAI
   #
   # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
   #
-  # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+  # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
   #
   # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
   #

@@ -271,18 +287,22 @@ module OpenAI
   #
   # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
   #
+  # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+  #
   # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
   #
   # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
   #
   # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
   #
-  # @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+  # @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
   #
   # @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
   #
   # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
   #
+  # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+  #
   # @param truncation [Symbol, OpenAI::Models::Responses::Response::Truncation, nil] The truncation strategy to use for the model response.
   #
   # @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
@@ -369,27 +389,30 @@ module OpenAI
   # Use this option to force the model to call a specific function.
   variant -> { OpenAI::Responses::ToolChoiceFunction }

+  # Use this option to force the model to call a specific tool on a remote MCP server.
+  variant -> { OpenAI::Responses::ToolChoiceMcp }
+
   # @!method self.variants
-  # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction)]
+  # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
   end

-  # Specifies the latency tier to use for processing the request. This parameter is
-  # relevant for customers subscribed to the scale tier service:
-  #
-  # - If set to 'auto', and the Project is Scale tier enabled, the system will
-  #   utilize scale tier credits until they are exhausted.
-  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
-  #   be processed using the default service tier with a lower uptime SLA and no
-  #   latency guarantee.
-  # - If set to 'default', the request will be processed using the default service
-  #   tier with a lower uptime SLA and no latency guarantee.
-  # - If set to 'flex', the request will be processed with the Flex Processing
-  #   service tier.
-  #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+  # Specifies the processing type used for serving the request.
+  #
+  # - If set to 'auto', then the request will be processed with the service tier
+  #   configured in the Project settings. Unless otherwise configured, the Project
+  #   will use 'default'.
+  # - If set to 'default', then the request will be processed with the standard
+  #   pricing and performance for the selected model.
+  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+  #   'priority', then the request will be processed with the corresponding service
+  #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+  #   Priority processing.
   # - When not set, the default behavior is 'auto'.
   #
-  # When this parameter is set, the response body will include the `service_tier`
-  # utilized.
+  # When the `service_tier` parameter is set, the response body will include the
+  # `service_tier` value based on the processing mode actually used to serve the
+  # request. This response value may be different from the value set in the
+  # parameter.
   #
   # @see OpenAI::Models::Responses::Response#service_tier
   module ServiceTier

@@ -399,6 +422,7 @@ module OpenAI
   DEFAULT = :default
   FLEX = :flex
   SCALE = :scale
+  PRIORITY = :priority

   # @!method self.values
   # @return [Array<Symbol>]
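Finally, a sketch of the new `ToolChoiceMcp` variant, which forces the model to call a specific tool on a remote MCP server. The `server_label`/`name` fields follow the API's MCP `tool_choice` shape; the MCP server and tool name here are hypothetical, and the `tools` entry must declare a matching `server_label`:

```ruby
response = client.responses.create(
  model: "gpt-4.1",
  input: "File a ticket summarizing the flaky CI build.",
  tools: [
    {
      type: :mcp,
      server_label: "issue_tracker",             # hypothetical MCP server
      server_url: "https://mcp.example.com/sse", # hypothetical endpoint
      require_approval: :never
    }
  ],
  # The new union variant: force one specific remote tool instead of
  # letting the model pick.
  tool_choice: {type: :mcp, server_label: "issue_tracker", name: "create_ticket"}
)
```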