openai 0.10.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +3 -0
  6. data/lib/openai/models/all_models.rb +4 -0
  7. data/lib/openai/models/chat/chat_completion.rb +32 -31
  8. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  9. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  10. data/lib/openai/models/images_response.rb +92 -1
  11. data/lib/openai/models/responses/response.rb +59 -35
  12. data/lib/openai/models/responses/response_create_params.rb +64 -39
  13. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  14. data/lib/openai/models/responses/response_includable.rb +8 -6
  15. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  16. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  17. data/lib/openai/models/responses_model.rb +4 -0
  18. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  19. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  20. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  21. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  22. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  23. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  24. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  25. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  26. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  27. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  28. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  29. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  30. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  31. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  32. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  33. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  34. data/lib/openai/models.rb +2 -0
  35. data/lib/openai/resources/chat/completions.rb +2 -2
  36. data/lib/openai/resources/responses.rb +14 -6
  37. data/lib/openai/resources/webhooks.rb +124 -0
  38. data/lib/openai/version.rb +1 -1
  39. data/lib/openai.rb +18 -0
  40. data/rbi/openai/client.rbi +3 -0
  41. data/rbi/openai/models/all_models.rbi +20 -0
  42. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  43. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  44. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  45. data/rbi/openai/models/images_response.rbi +146 -0
  46. data/rbi/openai/models/responses/response.rbi +75 -44
  47. data/rbi/openai/models/responses/response_create_params.rbi +91 -55
  48. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  49. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  50. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  51. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  52. data/rbi/openai/models/responses_model.rbi +20 -0
  53. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  54. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  55. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  56. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  57. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  58. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  59. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  60. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  61. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  62. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  63. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  64. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  65. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  66. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  67. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  68. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  69. data/rbi/openai/models.rbi +2 -0
  70. data/rbi/openai/resources/chat/completions.rbi +34 -30
  71. data/rbi/openai/resources/responses.rbi +62 -38
  72. data/rbi/openai/resources/webhooks.rbi +68 -0
  73. data/sig/openai/client.rbs +2 -0
  74. data/sig/openai/models/all_models.rbs +8 -0
  75. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  76. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  77. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  78. data/sig/openai/models/images_response.rbs +83 -0
  79. data/sig/openai/models/responses/response.rbs +13 -1
  80. data/sig/openai/models/responses/response_create_params.rbs +13 -1
  81. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  82. data/sig/openai/models/responses/response_includable.rbs +7 -5
  83. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  84. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  85. data/sig/openai/models/responses_model.rbs +8 -0
  86. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  87. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  88. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  89. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  90. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  91. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  92. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  93. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  94. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  95. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  96. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  97. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  98. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  99. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  100. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  101. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  102. data/sig/openai/models.rbs +2 -0
  103. data/sig/openai/resources/responses.rbs +4 -0
  104. data/sig/openai/resources/webhooks.rbs +33 -0
  105. metadata +56 -2
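The biggest addition in this release is the webhooks surface: a new `OpenAI::Resources::Webhooks` resource (`data/lib/openai/resources/webhooks.rb`) plus typed event models for batch, eval run, fine-tuning job, and response lifecycle events. The hunks below cover the other notable changes: the reworded `service_tier` semantics and new `priority` tier, `ImagesResponse` metadata fields, and new Responses parameters. As a rough sketch of the webhook flow — with the method name and keyword arguments inferred from the file list, not confirmed against this version's README:

```ruby
# Hypothetical handler inside a Rack/Rails controller action; check
# data/README.md in 0.11.0 for the exact signature of Webhooks#unwrap.
event = client.webhooks.unwrap(
  request.body.read,                            # raw JSON payload from OpenAI
  request.env,                                  # assumed: headers, for signature verification
  webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"]  # assumed keyword; may be set on the client instead
)

# Event class names follow the new model files added in this release.
case event
when OpenAI::Models::Webhooks::BatchCompletedWebhookEvent
  puts "batch #{event.data.id} completed"
when OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent
  puts "fine-tune #{event.data.id} failed"
end
```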
@@ -270,23 +270,23 @@ module OpenAI
  sig { returns(T.nilable(Integer)) }
  attr_accessor :seed

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  sig do
  returns(
  T.nilable(
@@ -310,6 +310,8 @@ module OpenAI
  # Whether or not to store the output of this chat completion request for use in
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
+ #
+ # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
  sig { returns(T.nilable(T::Boolean)) }
  attr_accessor :store

@@ -625,23 +627,23 @@ module OpenAI
  # should refer to the `system_fingerprint` response parameter to monitor changes
  # in the backend.
  seed: nil,
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  service_tier: nil,
  # Not supported with latest reasoning models `o3` and `o4-mini`.
  #
@@ -651,6 +653,8 @@ module OpenAI
  # Whether or not to store the output of this chat completion request for use in
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
+ #
+ # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
  store: nil,
  # Options for streaming response. Only set this when you set `stream: true`.
  stream_options: nil,
@@ -1008,23 +1012,23 @@ module OpenAI
  end
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  module ServiceTier
  extend OpenAI::Internal::Type::Enum

@@ -1054,6 +1058,11 @@ module OpenAI
  :scale,
  OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
  )
+ PRIORITY =
+   T.let(
+     :priority,
+     OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
+   )

  sig do
  override.returns(
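In plain terms, the hunks above replace the old "scale tier" wording with the new processing-type semantics, document the 10MB cap on stored image inputs, and add `:priority` as an accepted `service_tier` for chat completions. A minimal sketch of opting in, assuming an `OpenAI::Client` configured with an API key (Priority processing itself is account-gated):

```ruby
require "openai"

client = OpenAI::Client.new # picks up OPENAI_API_KEY by default

chat = client.chat.completions.create(
  model: :"gpt-4o",
  messages: [{role: :user, content: "One-line haiku about diffs"}],
  service_tier: :priority # new enum member added in this release
)

# The response echoes the tier actually used, which may differ from
# the requested value, per the updated doc comment above.
puts chat.service_tier
```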
@@ -12,6 +12,18 @@ module OpenAI
  sig { returns(Integer) }
  attr_accessor :created

+ # The background parameter used for the image generation. Either `transparent` or
+ # `opaque`.
+ sig do
+   returns(T.nilable(OpenAI::ImagesResponse::Background::TaggedSymbol))
+ end
+ attr_reader :background
+
+ sig do
+   params(background: OpenAI::ImagesResponse::Background::OrSymbol).void
+ end
+ attr_writer :background
+
  # The list of generated images.
  sig { returns(T.nilable(T::Array[OpenAI::Image])) }
  attr_reader :data
@@ -19,6 +31,34 @@ module OpenAI
  sig { params(data: T::Array[OpenAI::Image::OrHash]).void }
  attr_writer :data

+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+ sig do
+   returns(T.nilable(OpenAI::ImagesResponse::OutputFormat::TaggedSymbol))
+ end
+ attr_reader :output_format
+
+ sig do
+   params(
+     output_format: OpenAI::ImagesResponse::OutputFormat::OrSymbol
+   ).void
+ end
+ attr_writer :output_format
+
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
+ sig { returns(T.nilable(OpenAI::ImagesResponse::Quality::TaggedSymbol)) }
+ attr_reader :quality
+
+ sig { params(quality: OpenAI::ImagesResponse::Quality::OrSymbol).void }
+ attr_writer :quality
+
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
+ # `1536x1024`.
+ sig { returns(T.nilable(OpenAI::ImagesResponse::Size::TaggedSymbol)) }
+ attr_reader :size
+
+ sig { params(size: OpenAI::ImagesResponse::Size::OrSymbol).void }
+ attr_writer :size
+
  # For `gpt-image-1` only, the token usage information for the image generation.
  sig { returns(T.nilable(OpenAI::ImagesResponse::Usage)) }
  attr_reader :usage
@@ -30,15 +70,29 @@ module OpenAI
  sig do
  params(
  created: Integer,
+ background: OpenAI::ImagesResponse::Background::OrSymbol,
  data: T::Array[OpenAI::Image::OrHash],
+ output_format: OpenAI::ImagesResponse::OutputFormat::OrSymbol,
+ quality: OpenAI::ImagesResponse::Quality::OrSymbol,
+ size: OpenAI::ImagesResponse::Size::OrSymbol,
  usage: OpenAI::ImagesResponse::Usage::OrHash
  ).returns(T.attached_class)
  end
  def self.new(
  # The Unix timestamp (in seconds) of when the image was created.
  created:,
+ # The background parameter used for the image generation. Either `transparent` or
+ # `opaque`.
+ background: nil,
  # The list of generated images.
  data: nil,
+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+ output_format: nil,
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
+ quality: nil,
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
+ # `1536x1024`.
+ size: nil,
  # For `gpt-image-1` only, the token usage information for the image generation.
  usage: nil
  )
@@ -48,7 +102,11 @@ module OpenAI
  override.returns(
  {
  created: Integer,
+ background: OpenAI::ImagesResponse::Background::TaggedSymbol,
  data: T::Array[OpenAI::Image],
+ output_format: OpenAI::ImagesResponse::OutputFormat::TaggedSymbol,
+ quality: OpenAI::ImagesResponse::Quality::TaggedSymbol,
+ size: OpenAI::ImagesResponse::Size::TaggedSymbol,
  usage: OpenAI::ImagesResponse::Usage
  }
  )
@@ -56,6 +114,94 @@ module OpenAI
  def to_hash
  end

+ # The background parameter used for the image generation. Either `transparent` or
+ # `opaque`.
+ module Background
+   extend OpenAI::Internal::Type::Enum
+
+   TaggedSymbol =
+     T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Background) }
+   OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+   TRANSPARENT =
+     T.let(:transparent, OpenAI::ImagesResponse::Background::TaggedSymbol)
+   OPAQUE =
+     T.let(:opaque, OpenAI::ImagesResponse::Background::TaggedSymbol)
+
+   sig do
+     override.returns(
+       T::Array[OpenAI::ImagesResponse::Background::TaggedSymbol]
+     )
+   end
+   def self.values
+   end
+ end
+
+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+ module OutputFormat
+   extend OpenAI::Internal::Type::Enum
+
+   TaggedSymbol =
+     T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::OutputFormat) }
+   OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+   PNG = T.let(:png, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
+   WEBP = T.let(:webp, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
+   JPEG = T.let(:jpeg, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
+
+   sig do
+     override.returns(
+       T::Array[OpenAI::ImagesResponse::OutputFormat::TaggedSymbol]
+     )
+   end
+   def self.values
+   end
+ end
+
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
+ module Quality
+   extend OpenAI::Internal::Type::Enum
+
+   TaggedSymbol =
+     T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Quality) }
+   OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+   LOW = T.let(:low, OpenAI::ImagesResponse::Quality::TaggedSymbol)
+   MEDIUM = T.let(:medium, OpenAI::ImagesResponse::Quality::TaggedSymbol)
+   HIGH = T.let(:high, OpenAI::ImagesResponse::Quality::TaggedSymbol)
+
+   sig do
+     override.returns(
+       T::Array[OpenAI::ImagesResponse::Quality::TaggedSymbol]
+     )
+   end
+   def self.values
+   end
+ end
+
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
+ # `1536x1024`.
+ module Size
+   extend OpenAI::Internal::Type::Enum
+
+   TaggedSymbol =
+     T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Size) }
+   OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+   SIZE_1024X1024 =
+     T.let(:"1024x1024", OpenAI::ImagesResponse::Size::TaggedSymbol)
+   SIZE_1024X1536 =
+     T.let(:"1024x1536", OpenAI::ImagesResponse::Size::TaggedSymbol)
+   SIZE_1536X1024 =
+     T.let(:"1536x1024", OpenAI::ImagesResponse::Size::TaggedSymbol)
+
+   sig do
+     override.returns(T::Array[OpenAI::ImagesResponse::Size::TaggedSymbol])
+   end
+   def self.values
+   end
+ end
+
  class Usage < OpenAI::Internal::Type::BaseModel
  OrHash =
  T.type_alias do
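The `ImagesResponse` hunks above add four nilable metadata fields (`background`, `output_format`, `quality`, `size`) that echo the settings actually used for generation. A short sketch of reading them back, assuming the same configured client and access to `gpt-image-1`:

```ruby
images = client.images.generate(
  model: :"gpt-image-1",
  prompt: "A paper crane, flat vector style"
)

# Each field is nilable; values follow the enums defined above.
puts images.background    # => :transparent or :opaque
puts images.output_format # => :png, :webp, or :jpeg
puts images.quality       # => :low, :medium, or :high
puts images.size          # => :"1024x1024", :"1024x1536", or :"1536x1024"
```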
@@ -140,6 +140,13 @@ module OpenAI
  sig { returns(T.nilable(Integer)) }
  attr_accessor :max_output_tokens

+ # The maximum number of total calls to built-in tools that can be processed in a
+ # response. This maximum number applies across all built-in tool calls, not per
+ # individual tool. Any further attempts to call a tool by the model will be
+ # ignored.
+ sig { returns(T.nilable(Integer)) }
+ attr_accessor :max_tool_calls
+
  # The unique ID of the previous response to the model. Use this to create
  # multi-turn conversations. Learn more about
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
@@ -168,23 +175,23 @@ module OpenAI
  sig { params(reasoning: T.nilable(OpenAI::Reasoning::OrHash)).void }
  attr_writer :reasoning

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  sig do
  returns(
  T.nilable(OpenAI::Responses::Response::ServiceTier::TaggedSymbol)
@@ -213,6 +220,11 @@ module OpenAI
  sig { params(text: OpenAI::Responses::ResponseTextConfig::OrHash).void }
  attr_writer :text

+ # An integer between 0 and 20 specifying the number of most likely tokens to
+ # return at each token position, each with an associated log probability.
+ sig { returns(T.nilable(Integer)) }
+ attr_accessor :top_logprobs
+
  # The truncation strategy to use for the model response.
  #
  # - `auto`: If the context of this response and previous ones exceeds the model's
@@ -283,7 +295,8 @@ module OpenAI
  T.any(
  OpenAI::Responses::ToolChoiceOptions::OrSymbol,
  OpenAI::Responses::ToolChoiceTypes::OrHash,
- OpenAI::Responses::ToolChoiceFunction::OrHash
+ OpenAI::Responses::ToolChoiceFunction::OrHash,
+ OpenAI::Responses::ToolChoiceMcp::OrHash
  ),
  tools:
  T::Array[
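The last hunk above adds `ToolChoiceMcp` to the `tool_choice` union, so a Responses call can be pinned to a specific MCP server rather than just a tool type. A hedged sketch: the server label and URL are placeholders, and the exact hash shape should be checked against the new `tool_choice_mcp.rb` in this release:

```ruby
response = client.responses.create(
  model: :"gpt-4.1",
  input: "What tools does this MCP server expose?",
  tools: [
    {
      type: :mcp,
      server_label: "docs_server",           # placeholder label
      server_url: "https://example.com/mcp", # placeholder URL
      require_approval: :never
    }
  ],
  tool_choice: {type: :mcp, server_label: "docs_server"} # force the MCP tool
)
```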
@@ -301,6 +314,7 @@ module OpenAI
  top_p: T.nilable(Float),
  background: T.nilable(T::Boolean),
  max_output_tokens: T.nilable(Integer),
+ max_tool_calls: T.nilable(Integer),
  previous_response_id: T.nilable(String),
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
@@ -308,6 +322,7 @@ module OpenAI
  T.nilable(OpenAI::Responses::Response::ServiceTier::OrSymbol),
  status: OpenAI::Responses::ResponseStatus::OrSymbol,
  text: OpenAI::Responses::ResponseTextConfig::OrHash,
+ top_logprobs: T.nilable(Integer),
  truncation:
  T.nilable(OpenAI::Responses::Response::Truncation::OrSymbol),
  usage: OpenAI::Responses::ResponseUsage::OrHash,
@@ -390,6 +405,11 @@ module OpenAI
  # including visible output tokens and
  # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
  max_output_tokens: nil,
+ # The maximum number of total calls to built-in tools that can be processed in a
+ # response. This maximum number applies across all built-in tool calls, not per
+ # individual tool. Any further attempts to call a tool by the model will be
+ # ignored.
+ max_tool_calls: nil,
  # The unique ID of the previous response to the model. Use this to create
  # multi-turn conversations. Learn more about
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
@@ -402,23 +422,23 @@ module OpenAI
  # Configuration options for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
  reasoning: nil,
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  service_tier: nil,
  # The status of the response generation. One of `completed`, `failed`,
  # `in_progress`, `cancelled`, `queued`, or `incomplete`.
@@ -429,6 +449,9 @@ module OpenAI
  # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
  # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
  text: nil,
+ # An integer between 0 and 20 specifying the number of most likely tokens to
+ # return at each token position, each with an associated log probability.
+ top_logprobs: nil,
  # The truncation strategy to use for the model response.
  #
  # - `auto`: If the context of this response and previous ones exceeds the model's
@@ -470,6 +493,7 @@ module OpenAI
  top_p: T.nilable(Float),
  background: T.nilable(T::Boolean),
  max_output_tokens: T.nilable(Integer),
+ max_tool_calls: T.nilable(Integer),
  previous_response_id: T.nilable(String),
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
  reasoning: T.nilable(OpenAI::Reasoning),
@@ -479,6 +503,7 @@ module OpenAI
  ),
  status: OpenAI::Responses::ResponseStatus::TaggedSymbol,
  text: OpenAI::Responses::ResponseTextConfig,
+ top_logprobs: T.nilable(Integer),
  truncation:
  T.nilable(
  OpenAI::Responses::Response::Truncation::TaggedSymbol
@@ -622,7 +647,8 @@ module OpenAI
  T.any(
  OpenAI::Responses::ToolChoiceOptions::TaggedSymbol,
  OpenAI::Responses::ToolChoiceTypes,
- OpenAI::Responses::ToolChoiceFunction
+ OpenAI::Responses::ToolChoiceFunction,
+ OpenAI::Responses::ToolChoiceMcp
  )
  end

@@ -635,23 +661,23 @@ module OpenAI
  end
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- #   utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- #   be processed using the default service tier with a lower uptime SLA and no
- #   latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- #   tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- #   service tier.
- #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ #   configured in the Project settings. Unless otherwise configured, the Project
+ #   will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ #   pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ #   'priority', then the request will be processed with the corresponding service
+ #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ #   Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  module ServiceTier
  extend OpenAI::Internal::Type::Enum

@@ -675,6 +701,11 @@ module OpenAI
  :scale,
  OpenAI::Responses::Response::ServiceTier::TaggedSymbol
  )
+ PRIORITY =
+   T.let(
+     :priority,
+     OpenAI::Responses::Response::ServiceTier::TaggedSymbol
+   )

  sig do
  override.returns(
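The remaining Responses hunks wire `max_tool_calls` and `top_logprobs` through the `Response` model and add the `:priority` member to its `ServiceTier` enum. An illustrative sketch combining the two new request knobs, under the same client assumption:

```ruby
response = client.responses.create(
  model: :"gpt-4.1",
  input: "Look up today's Ruby news and summarize one headline",
  tools: [{type: :web_search_preview}],
  max_tool_calls: 1, # cap across all built-in tool calls in this response
  top_logprobs: 3    # 0..20 most likely tokens per output position
)

# Inspect the typed output items returned by the model.
response.output.each { |item| puts item.class }
```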