openai 0.9.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +40 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +25 -0
  6. data/lib/openai/internal/type/array_of.rb +6 -1
  7. data/lib/openai/internal/type/base_model.rb +76 -24
  8. data/lib/openai/internal/type/boolean.rb +7 -1
  9. data/lib/openai/internal/type/converter.rb +42 -34
  10. data/lib/openai/internal/type/enum.rb +10 -2
  11. data/lib/openai/internal/type/file_input.rb +6 -1
  12. data/lib/openai/internal/type/hash_of.rb +6 -1
  13. data/lib/openai/internal/type/union.rb +12 -7
  14. data/lib/openai/internal/type/unknown.rb +7 -1
  15. data/lib/openai/models/all_models.rb +4 -0
  16. data/lib/openai/models/audio/speech_create_params.rb +23 -2
  17. data/lib/openai/models/audio/transcription.rb +118 -1
  18. data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
  19. data/lib/openai/models/audio/transcription_verbose.rb +31 -1
  20. data/lib/openai/models/chat/chat_completion.rb +32 -31
  21. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  22. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  23. data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
  24. data/lib/openai/models/images_response.rb +92 -1
  25. data/lib/openai/models/responses/response.rb +59 -35
  26. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
  27. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
  28. data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
  29. data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
  30. data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
  31. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
  32. data/lib/openai/models/responses/response_create_params.rb +92 -67
  33. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  34. data/lib/openai/models/responses/response_includable.rb +8 -6
  35. data/lib/openai/models/responses/response_output_text.rb +18 -2
  36. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  37. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  38. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  39. data/lib/openai/models/responses_model.rb +4 -0
  40. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  41. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  42. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  43. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  44. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  45. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  46. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  47. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  48. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  49. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  50. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  51. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  52. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  53. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  54. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  55. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  56. data/lib/openai/models.rb +2 -0
  57. data/lib/openai/resources/audio/speech.rb +3 -1
  58. data/lib/openai/resources/chat/completions.rb +10 -2
  59. data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
  60. data/lib/openai/resources/responses.rb +24 -16
  61. data/lib/openai/resources/webhooks.rb +124 -0
  62. data/lib/openai/version.rb +1 -1
  63. data/lib/openai.rb +18 -0
  64. data/rbi/openai/client.rbi +3 -0
  65. data/rbi/openai/errors.rbi +16 -0
  66. data/rbi/openai/internal/type/boolean.rbi +2 -0
  67. data/rbi/openai/internal/type/converter.rbi +15 -15
  68. data/rbi/openai/internal/type/union.rbi +5 -0
  69. data/rbi/openai/internal/type/unknown.rbi +2 -0
  70. data/rbi/openai/models/all_models.rbi +20 -0
  71. data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
  72. data/rbi/openai/models/audio/transcription.rbi +213 -3
  73. data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
  74. data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
  75. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  76. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  77. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  78. data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
  79. data/rbi/openai/models/images_response.rbi +146 -0
  80. data/rbi/openai/models/responses/response.rbi +75 -44
  81. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
  82. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
  83. data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
  84. data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
  85. data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
  86. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
  87. data/rbi/openai/models/responses/response_create_params.rbi +174 -115
  88. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  89. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  90. data/rbi/openai/models/responses/response_output_text.rbi +26 -4
  91. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  92. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  93. data/rbi/openai/models/responses_model.rbi +20 -0
  94. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  95. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  96. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  97. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  98. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  99. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  100. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  101. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  102. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  103. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  104. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  105. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  106. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  107. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  108. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  109. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  110. data/rbi/openai/models.rbi +2 -0
  111. data/rbi/openai/resources/audio/speech.rbi +6 -1
  112. data/rbi/openai/resources/chat/completions.rbi +34 -30
  113. data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
  114. data/rbi/openai/resources/responses.rbi +108 -84
  115. data/rbi/openai/resources/webhooks.rbi +68 -0
  116. data/sig/openai/client.rbs +2 -0
  117. data/sig/openai/errors.rbs +9 -0
  118. data/sig/openai/internal/type/converter.rbs +7 -1
  119. data/sig/openai/models/all_models.rbs +8 -0
  120. data/sig/openai/models/audio/speech_create_params.rbs +21 -1
  121. data/sig/openai/models/audio/transcription.rbs +95 -3
  122. data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
  123. data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
  124. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  125. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  126. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  127. data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
  128. data/sig/openai/models/images_response.rbs +83 -0
  129. data/sig/openai/models/responses/response.rbs +13 -1
  130. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
  131. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
  132. data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
  133. data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
  134. data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
  135. data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
  136. data/sig/openai/models/responses/response_create_params.rbs +31 -11
  137. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  138. data/sig/openai/models/responses/response_includable.rbs +7 -5
  139. data/sig/openai/models/responses/response_output_text.rbs +15 -1
  140. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  141. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  142. data/sig/openai/models/responses_model.rbs +8 -0
  143. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  144. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  145. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  146. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  147. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  148. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  149. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  150. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  151. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  152. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  153. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  154. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  155. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  156. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  157. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  158. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  159. data/sig/openai/models.rbs +2 -0
  160. data/sig/openai/resources/audio/speech.rbs +1 -0
  161. data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
  162. data/sig/openai/resources/responses.rbs +8 -4
  163. data/sig/openai/resources/webhooks.rbs +33 -0
  164. metadata +56 -2
@@ -140,6 +140,13 @@ module OpenAI
140
140
  sig { returns(T.nilable(Integer)) }
141
141
  attr_accessor :max_output_tokens
142
142
 
143
+ # The maximum number of total calls to built-in tools that can be processed in a
144
+ # response. This maximum number applies across all built-in tool calls, not per
145
+ # individual tool. Any further attempts to call a tool by the model will be
146
+ # ignored.
147
+ sig { returns(T.nilable(Integer)) }
148
+ attr_accessor :max_tool_calls
149
+
143
150
  # The unique ID of the previous response to the model. Use this to create
144
151
  # multi-turn conversations. Learn more about
145
152
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
@@ -168,23 +175,23 @@ module OpenAI
168
175
  sig { params(reasoning: T.nilable(OpenAI::Reasoning::OrHash)).void }
169
176
  attr_writer :reasoning
170
177
 
171
- # Specifies the latency tier to use for processing the request. This parameter is
172
- # relevant for customers subscribed to the scale tier service:
178
+ # Specifies the processing type used for serving the request.
173
179
  #
174
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
175
- # utilize scale tier credits until they are exhausted.
176
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
177
- # be processed using the default service tier with a lower uptime SLA and no
178
- # latency guarantee.
179
- # - If set to 'default', the request will be processed using the default service
180
- # tier with a lower uptime SLA and no latency guarantee.
181
- # - If set to 'flex', the request will be processed with the Flex Processing
182
- # service tier.
183
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
180
+ # - If set to 'auto', then the request will be processed with the service tier
181
+ # configured in the Project settings. Unless otherwise configured, the Project
182
+ # will use 'default'.
183
+ # - If set to 'default', then the request will be processed with the standard
184
+ # pricing and performance for the selected model.
185
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
186
+ # 'priority', then the request will be processed with the corresponding service
187
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
188
+ # Priority processing.
184
189
  # - When not set, the default behavior is 'auto'.
185
190
  #
186
- # When this parameter is set, the response body will include the `service_tier`
187
- # utilized.
191
+ # When the `service_tier` parameter is set, the response body will include the
192
+ # `service_tier` value based on the processing mode actually used to serve the
193
+ # request. This response value may be different from the value set in the
194
+ # parameter.
188
195
  sig do
189
196
  returns(
190
197
  T.nilable(OpenAI::Responses::Response::ServiceTier::TaggedSymbol)
@@ -213,6 +220,11 @@ module OpenAI
213
220
  sig { params(text: OpenAI::Responses::ResponseTextConfig::OrHash).void }
214
221
  attr_writer :text
215
222
 
223
+ # An integer between 0 and 20 specifying the number of most likely tokens to
224
+ # return at each token position, each with an associated log probability.
225
+ sig { returns(T.nilable(Integer)) }
226
+ attr_accessor :top_logprobs
227
+
216
228
  # The truncation strategy to use for the model response.
217
229
  #
218
230
  # - `auto`: If the context of this response and previous ones exceeds the model's
@@ -283,7 +295,8 @@ module OpenAI
283
295
  T.any(
284
296
  OpenAI::Responses::ToolChoiceOptions::OrSymbol,
285
297
  OpenAI::Responses::ToolChoiceTypes::OrHash,
286
- OpenAI::Responses::ToolChoiceFunction::OrHash
298
+ OpenAI::Responses::ToolChoiceFunction::OrHash,
299
+ OpenAI::Responses::ToolChoiceMcp::OrHash
287
300
  ),
288
301
  tools:
289
302
  T::Array[
@@ -301,6 +314,7 @@ module OpenAI
301
314
  top_p: T.nilable(Float),
302
315
  background: T.nilable(T::Boolean),
303
316
  max_output_tokens: T.nilable(Integer),
317
+ max_tool_calls: T.nilable(Integer),
304
318
  previous_response_id: T.nilable(String),
305
319
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
306
320
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
@@ -308,6 +322,7 @@ module OpenAI
308
322
  T.nilable(OpenAI::Responses::Response::ServiceTier::OrSymbol),
309
323
  status: OpenAI::Responses::ResponseStatus::OrSymbol,
310
324
  text: OpenAI::Responses::ResponseTextConfig::OrHash,
325
+ top_logprobs: T.nilable(Integer),
311
326
  truncation:
312
327
  T.nilable(OpenAI::Responses::Response::Truncation::OrSymbol),
313
328
  usage: OpenAI::Responses::ResponseUsage::OrHash,
@@ -390,6 +405,11 @@ module OpenAI
390
405
  # including visible output tokens and
391
406
  # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
392
407
  max_output_tokens: nil,
408
+ # The maximum number of total calls to built-in tools that can be processed in a
409
+ # response. This maximum number applies across all built-in tool calls, not per
410
+ # individual tool. Any further attempts to call a tool by the model will be
411
+ # ignored.
412
+ max_tool_calls: nil,
393
413
  # The unique ID of the previous response to the model. Use this to create
394
414
  # multi-turn conversations. Learn more about
395
415
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
@@ -402,23 +422,23 @@ module OpenAI
402
422
  # Configuration options for
403
423
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
404
424
  reasoning: nil,
405
- # Specifies the latency tier to use for processing the request. This parameter is
406
- # relevant for customers subscribed to the scale tier service:
425
+ # Specifies the processing type used for serving the request.
407
426
  #
408
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
409
- # utilize scale tier credits until they are exhausted.
410
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
411
- # be processed using the default service tier with a lower uptime SLA and no
412
- # latency guarantee.
413
- # - If set to 'default', the request will be processed using the default service
414
- # tier with a lower uptime SLA and no latency guarantee.
415
- # - If set to 'flex', the request will be processed with the Flex Processing
416
- # service tier.
417
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
427
+ # - If set to 'auto', then the request will be processed with the service tier
428
+ # configured in the Project settings. Unless otherwise configured, the Project
429
+ # will use 'default'.
430
+ # - If set to 'default', then the request will be processed with the standard
431
+ # pricing and performance for the selected model.
432
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
433
+ # 'priority', then the request will be processed with the corresponding service
434
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
435
+ # Priority processing.
418
436
  # - When not set, the default behavior is 'auto'.
419
437
  #
420
- # When this parameter is set, the response body will include the `service_tier`
421
- # utilized.
438
+ # When the `service_tier` parameter is set, the response body will include the
439
+ # `service_tier` value based on the processing mode actually used to serve the
440
+ # request. This response value may be different from the value set in the
441
+ # parameter.
422
442
  service_tier: nil,
423
443
  # The status of the response generation. One of `completed`, `failed`,
424
444
  # `in_progress`, `cancelled`, `queued`, or `incomplete`.
@@ -429,6 +449,9 @@ module OpenAI
429
449
  # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
430
450
  # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
431
451
  text: nil,
452
+ # An integer between 0 and 20 specifying the number of most likely tokens to
453
+ # return at each token position, each with an associated log probability.
454
+ top_logprobs: nil,
432
455
  # The truncation strategy to use for the model response.
433
456
  #
434
457
  # - `auto`: If the context of this response and previous ones exceeds the model's
@@ -470,6 +493,7 @@ module OpenAI
470
493
  top_p: T.nilable(Float),
471
494
  background: T.nilable(T::Boolean),
472
495
  max_output_tokens: T.nilable(Integer),
496
+ max_tool_calls: T.nilable(Integer),
473
497
  previous_response_id: T.nilable(String),
474
498
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
475
499
  reasoning: T.nilable(OpenAI::Reasoning),
@@ -479,6 +503,7 @@ module OpenAI
479
503
  ),
480
504
  status: OpenAI::Responses::ResponseStatus::TaggedSymbol,
481
505
  text: OpenAI::Responses::ResponseTextConfig,
506
+ top_logprobs: T.nilable(Integer),
482
507
  truncation:
483
508
  T.nilable(
484
509
  OpenAI::Responses::Response::Truncation::TaggedSymbol
@@ -622,7 +647,8 @@ module OpenAI
622
647
  T.any(
623
648
  OpenAI::Responses::ToolChoiceOptions::TaggedSymbol,
624
649
  OpenAI::Responses::ToolChoiceTypes,
625
- OpenAI::Responses::ToolChoiceFunction
650
+ OpenAI::Responses::ToolChoiceFunction,
651
+ OpenAI::Responses::ToolChoiceMcp
626
652
  )
627
653
  end
628
654
 
@@ -635,23 +661,23 @@ module OpenAI
635
661
  end
636
662
  end
637
663
 
638
- # Specifies the latency tier to use for processing the request. This parameter is
639
- # relevant for customers subscribed to the scale tier service:
664
+ # Specifies the processing type used for serving the request.
640
665
  #
641
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
642
- # utilize scale tier credits until they are exhausted.
643
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
644
- # be processed using the default service tier with a lower uptime SLA and no
645
- # latency guarantee.
646
- # - If set to 'default', the request will be processed using the default service
647
- # tier with a lower uptime SLA and no latency guarantee.
648
- # - If set to 'flex', the request will be processed with the Flex Processing
649
- # service tier.
650
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
666
+ # - If set to 'auto', then the request will be processed with the service tier
667
+ # configured in the Project settings. Unless otherwise configured, the Project
668
+ # will use 'default'.
669
+ # - If set to 'default', then the request will be processed with the standard
670
+ # pricing and performance for the selected model.
671
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
672
+ # 'priority', then the request will be processed with the corresponding service
673
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
674
+ # Priority processing.
651
675
  # - When not set, the default behavior is 'auto'.
652
676
  #
653
- # When this parameter is set, the response body will include the `service_tier`
654
- # utilized.
677
+ # When the `service_tier` parameter is set, the response body will include the
678
+ # `service_tier` value based on the processing mode actually used to serve the
679
+ # request. This response value may be different from the value set in the
680
+ # parameter.
655
681
  module ServiceTier
656
682
  extend OpenAI::Internal::Type::Enum
657
683
 
@@ -675,6 +701,11 @@ module OpenAI
675
701
  :scale,
676
702
  OpenAI::Responses::Response::ServiceTier::TaggedSymbol
677
703
  )
704
+ PRIORITY =
705
+ T.let(
706
+ :priority,
707
+ OpenAI::Responses::Response::ServiceTier::TaggedSymbol
708
+ )
678
709
 
679
710
  sig do
680
711
  override.returns(
@@ -12,15 +12,20 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
- # The partial code snippet added by the code interpreter.
15
+ # The partial code snippet being streamed by the code interpreter.
16
16
  sig { returns(String) }
17
17
  attr_accessor :delta
18
18
 
19
- # The index of the output item that the code interpreter call is in progress.
19
+ # The unique identifier of the code interpreter tool call item.
20
+ sig { returns(String) }
21
+ attr_accessor :item_id
22
+
23
+ # The index of the output item in the response for which the code is being
24
+ # streamed.
20
25
  sig { returns(Integer) }
21
26
  attr_accessor :output_index
22
27
 
23
- # The sequence number of this event.
28
+ # The sequence number of this event, used to order streaming events.
24
29
  sig { returns(Integer) }
25
30
  attr_accessor :sequence_number
26
31
 
@@ -28,21 +33,25 @@ module OpenAI
28
33
  sig { returns(Symbol) }
29
34
  attr_accessor :type
30
35
 
31
- # Emitted when a partial code snippet is added by the code interpreter.
36
+ # Emitted when a partial code snippet is streamed by the code interpreter.
32
37
  sig do
33
38
  params(
34
39
  delta: String,
40
+ item_id: String,
35
41
  output_index: Integer,
36
42
  sequence_number: Integer,
37
43
  type: Symbol
38
44
  ).returns(T.attached_class)
39
45
  end
40
46
  def self.new(
41
- # The partial code snippet added by the code interpreter.
47
+ # The partial code snippet being streamed by the code interpreter.
42
48
  delta:,
43
- # The index of the output item that the code interpreter call is in progress.
49
+ # The unique identifier of the code interpreter tool call item.
50
+ item_id:,
51
+ # The index of the output item in the response for which the code is being
52
+ # streamed.
44
53
  output_index:,
45
- # The sequence number of this event.
54
+ # The sequence number of this event, used to order streaming events.
46
55
  sequence_number:,
47
56
  # The type of the event. Always `response.code_interpreter_call_code.delta`.
48
57
  type: :"response.code_interpreter_call_code.delta"
@@ -53,6 +62,7 @@ module OpenAI
53
62
  override.returns(
54
63
  {
55
64
  delta: String,
65
+ item_id: String,
56
66
  output_index: Integer,
57
67
  sequence_number: Integer,
58
68
  type: Symbol
@@ -16,11 +16,15 @@ module OpenAI
16
16
  sig { returns(String) }
17
17
  attr_accessor :code
18
18
 
19
- # The index of the output item that the code interpreter call is in progress.
19
+ # The unique identifier of the code interpreter tool call item.
20
+ sig { returns(String) }
21
+ attr_accessor :item_id
22
+
23
+ # The index of the output item in the response for which the code is finalized.
20
24
  sig { returns(Integer) }
21
25
  attr_accessor :output_index
22
26
 
23
- # The sequence number of this event.
27
+ # The sequence number of this event, used to order streaming events.
24
28
  sig { returns(Integer) }
25
29
  attr_accessor :sequence_number
26
30
 
@@ -28,10 +32,11 @@ module OpenAI
28
32
  sig { returns(Symbol) }
29
33
  attr_accessor :type
30
34
 
31
- # Emitted when code snippet output is finalized by the code interpreter.
35
+ # Emitted when the code snippet is finalized by the code interpreter.
32
36
  sig do
33
37
  params(
34
38
  code: String,
39
+ item_id: String,
35
40
  output_index: Integer,
36
41
  sequence_number: Integer,
37
42
  type: Symbol
@@ -40,9 +45,11 @@ module OpenAI
40
45
  def self.new(
41
46
  # The final code snippet output by the code interpreter.
42
47
  code:,
43
- # The index of the output item that the code interpreter call is in progress.
48
+ # The unique identifier of the code interpreter tool call item.
49
+ item_id:,
50
+ # The index of the output item in the response for which the code is finalized.
44
51
  output_index:,
45
- # The sequence number of this event.
52
+ # The sequence number of this event, used to order streaming events.
46
53
  sequence_number:,
47
54
  # The type of the event. Always `response.code_interpreter_call_code.done`.
48
55
  type: :"response.code_interpreter_call_code.done"
@@ -53,6 +60,7 @@ module OpenAI
53
60
  override.returns(
54
61
  {
55
62
  code: String,
63
+ item_id: String,
56
64
  output_index: Integer,
57
65
  sequence_number: Integer,
58
66
  type: Symbol
@@ -12,23 +12,16 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
- # A tool call to run code.
16
- sig { returns(OpenAI::Responses::ResponseCodeInterpreterToolCall) }
17
- attr_reader :code_interpreter_call
15
+ # The unique identifier of the code interpreter tool call item.
16
+ sig { returns(String) }
17
+ attr_accessor :item_id
18
18
 
19
- sig do
20
- params(
21
- code_interpreter_call:
22
- OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash
23
- ).void
24
- end
25
- attr_writer :code_interpreter_call
26
-
27
- # The index of the output item that the code interpreter call is in progress.
19
+ # The index of the output item in the response for which the code interpreter call
20
+ # is completed.
28
21
  sig { returns(Integer) }
29
22
  attr_accessor :output_index
30
23
 
31
- # The sequence number of this event.
24
+ # The sequence number of this event, used to order streaming events.
32
25
  sig { returns(Integer) }
33
26
  attr_accessor :sequence_number
34
27
 
@@ -39,19 +32,19 @@ module OpenAI
39
32
  # Emitted when the code interpreter call is completed.
40
33
  sig do
41
34
  params(
42
- code_interpreter_call:
43
- OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
35
+ item_id: String,
44
36
  output_index: Integer,
45
37
  sequence_number: Integer,
46
38
  type: Symbol
47
39
  ).returns(T.attached_class)
48
40
  end
49
41
  def self.new(
50
- # A tool call to run code.
51
- code_interpreter_call:,
52
- # The index of the output item that the code interpreter call is in progress.
42
+ # The unique identifier of the code interpreter tool call item.
43
+ item_id:,
44
+ # The index of the output item in the response for which the code interpreter call
45
+ # is completed.
53
46
  output_index:,
54
- # The sequence number of this event.
47
+ # The sequence number of this event, used to order streaming events.
55
48
  sequence_number:,
56
49
  # The type of the event. Always `response.code_interpreter_call.completed`.
57
50
  type: :"response.code_interpreter_call.completed"
@@ -61,8 +54,7 @@ module OpenAI
61
54
  sig do
62
55
  override.returns(
63
56
  {
64
- code_interpreter_call:
65
- OpenAI::Responses::ResponseCodeInterpreterToolCall,
57
+ item_id: String,
66
58
  output_index: Integer,
67
59
  sequence_number: Integer,
68
60
  type: Symbol
@@ -12,23 +12,16 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
- # A tool call to run code.
16
- sig { returns(OpenAI::Responses::ResponseCodeInterpreterToolCall) }
17
- attr_reader :code_interpreter_call
15
+ # The unique identifier of the code interpreter tool call item.
16
+ sig { returns(String) }
17
+ attr_accessor :item_id
18
18
 
19
- sig do
20
- params(
21
- code_interpreter_call:
22
- OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash
23
- ).void
24
- end
25
- attr_writer :code_interpreter_call
26
-
27
- # The index of the output item that the code interpreter call is in progress.
19
+ # The index of the output item in the response for which the code interpreter call
20
+ # is in progress.
28
21
  sig { returns(Integer) }
29
22
  attr_accessor :output_index
30
23
 
31
- # The sequence number of this event.
24
+ # The sequence number of this event, used to order streaming events.
32
25
  sig { returns(Integer) }
33
26
  attr_accessor :sequence_number
34
27
 
@@ -39,19 +32,19 @@ module OpenAI
39
32
  # Emitted when a code interpreter call is in progress.
40
33
  sig do
41
34
  params(
42
- code_interpreter_call:
43
- OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
35
+ item_id: String,
44
36
  output_index: Integer,
45
37
  sequence_number: Integer,
46
38
  type: Symbol
47
39
  ).returns(T.attached_class)
48
40
  end
49
41
  def self.new(
50
- # A tool call to run code.
51
- code_interpreter_call:,
52
- # The index of the output item that the code interpreter call is in progress.
42
+ # The unique identifier of the code interpreter tool call item.
43
+ item_id:,
44
+ # The index of the output item in the response for which the code interpreter call
45
+ # is in progress.
53
46
  output_index:,
54
- # The sequence number of this event.
47
+ # The sequence number of this event, used to order streaming events.
55
48
  sequence_number:,
56
49
  # The type of the event. Always `response.code_interpreter_call.in_progress`.
57
50
  type: :"response.code_interpreter_call.in_progress"
@@ -61,8 +54,7 @@ module OpenAI
61
54
  sig do
62
55
  override.returns(
63
56
  {
64
- code_interpreter_call:
65
- OpenAI::Responses::ResponseCodeInterpreterToolCall,
57
+ item_id: String,
66
58
  output_index: Integer,
67
59
  sequence_number: Integer,
68
60
  type: Symbol
@@ -12,23 +12,16 @@ module OpenAI
12
12
  )
13
13
  end
14
14
 
15
- # A tool call to run code.
16
- sig { returns(OpenAI::Responses::ResponseCodeInterpreterToolCall) }
17
- attr_reader :code_interpreter_call
15
+ # The unique identifier of the code interpreter tool call item.
16
+ sig { returns(String) }
17
+ attr_accessor :item_id
18
18
 
19
- sig do
20
- params(
21
- code_interpreter_call:
22
- OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash
23
- ).void
24
- end
25
- attr_writer :code_interpreter_call
26
-
27
- # The index of the output item that the code interpreter call is in progress.
19
+ # The index of the output item in the response for which the code interpreter is
20
+ # interpreting code.
28
21
  sig { returns(Integer) }
29
22
  attr_accessor :output_index
30
23
 
31
- # The sequence number of this event.
24
+ # The sequence number of this event, used to order streaming events.
32
25
  sig { returns(Integer) }
33
26
  attr_accessor :sequence_number
34
27
 
@@ -39,19 +32,19 @@ module OpenAI
39
32
  # Emitted when the code interpreter is actively interpreting the code snippet.
40
33
  sig do
41
34
  params(
42
- code_interpreter_call:
43
- OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
35
+ item_id: String,
44
36
  output_index: Integer,
45
37
  sequence_number: Integer,
46
38
  type: Symbol
47
39
  ).returns(T.attached_class)
48
40
  end
49
41
  def self.new(
50
- # A tool call to run code.
51
- code_interpreter_call:,
52
- # The index of the output item that the code interpreter call is in progress.
42
+ # The unique identifier of the code interpreter tool call item.
43
+ item_id:,
44
+ # The index of the output item in the response for which the code interpreter is
45
+ # interpreting code.
53
46
  output_index:,
54
- # The sequence number of this event.
47
+ # The sequence number of this event, used to order streaming events.
55
48
  sequence_number:,
56
49
  # The type of the event. Always `response.code_interpreter_call.interpreting`.
57
50
  type: :"response.code_interpreter_call.interpreting"
@@ -61,8 +54,7 @@ module OpenAI
61
54
  sig do
62
55
  override.returns(
63
56
  {
64
- code_interpreter_call:
65
- OpenAI::Responses::ResponseCodeInterpreterToolCall,
57
+ item_id: String,
66
58
  output_index: Integer,
67
59
  sequence_number: Integer,
68
60
  type: Symbol