openai 0.9.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +40 -0
- data/README.md +79 -1
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +25 -0
- data/lib/openai/internal/type/array_of.rb +6 -1
- data/lib/openai/internal/type/base_model.rb +76 -24
- data/lib/openai/internal/type/boolean.rb +7 -1
- data/lib/openai/internal/type/converter.rb +42 -34
- data/lib/openai/internal/type/enum.rb +10 -2
- data/lib/openai/internal/type/file_input.rb +6 -1
- data/lib/openai/internal/type/hash_of.rb +6 -1
- data/lib/openai/internal/type/union.rb +12 -7
- data/lib/openai/internal/type/unknown.rb +7 -1
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/audio/speech_create_params.rb +23 -2
- data/lib/openai/models/audio/transcription.rb +118 -1
- data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
- data/lib/openai/models/audio/transcription_verbose.rb +31 -1
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
- data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
- data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
- data/lib/openai/models/responses/response_create_params.rb +92 -67
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/response_output_text.rb +18 -2
- data/lib/openai/models/responses/response_stream_event.rb +2 -2
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/audio/speech.rb +3 -1
- data/lib/openai/resources/chat/completions.rb +10 -2
- data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
- data/lib/openai/resources/responses.rb +24 -16
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +18 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/errors.rbi +16 -0
- data/rbi/openai/internal/type/boolean.rbi +2 -0
- data/rbi/openai/internal/type/converter.rbi +15 -15
- data/rbi/openai/internal/type/union.rbi +5 -0
- data/rbi/openai/internal/type/unknown.rbi +2 -0
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
- data/rbi/openai/models/audio/transcription.rbi +213 -3
- data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
- data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
- data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
- data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
- data/rbi/openai/models/responses/response_create_params.rbi +174 -115
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/response_output_text.rbi +26 -4
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/audio/speech.rbi +6 -1
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
- data/rbi/openai/resources/responses.rbi +108 -84
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/errors.rbs +9 -0
- data/sig/openai/internal/type/converter.rbs +7 -1
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/audio/speech_create_params.rbs +21 -1
- data/sig/openai/models/audio/transcription.rbs +95 -3
- data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
- data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
- data/sig/openai/models/responses/response_create_params.rbs +31 -11
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/response_output_text.rbs +15 -1
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/audio/speech.rbs +1 -0
- data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
- data/sig/openai/resources/responses.rbs +8 -4
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +56 -2
@@ -140,6 +140,13 @@ module OpenAI
|
|
140
140
|
sig { returns(T.nilable(Integer)) }
|
141
141
|
attr_accessor :max_output_tokens
|
142
142
|
|
143
|
+
# The maximum number of total calls to built-in tools that can be processed in a
|
144
|
+
# response. This maximum number applies across all built-in tool calls, not per
|
145
|
+
# individual tool. Any further attempts to call a tool by the model will be
|
146
|
+
# ignored.
|
147
|
+
sig { returns(T.nilable(Integer)) }
|
148
|
+
attr_accessor :max_tool_calls
|
149
|
+
|
143
150
|
# The unique ID of the previous response to the model. Use this to create
|
144
151
|
# multi-turn conversations. Learn more about
|
145
152
|
# [conversation state](https://platform.openai.com/docs/guides/conversation-state).
|
@@ -168,23 +175,23 @@ module OpenAI
|
|
168
175
|
sig { params(reasoning: T.nilable(OpenAI::Reasoning::OrHash)).void }
|
169
176
|
attr_writer :reasoning
|
170
177
|
|
171
|
-
# Specifies the
|
172
|
-
# relevant for customers subscribed to the scale tier service:
|
178
|
+
# Specifies the processing type used for serving the request.
|
173
179
|
#
|
174
|
-
# - If set to 'auto',
|
175
|
-
#
|
176
|
-
#
|
177
|
-
#
|
178
|
-
#
|
179
|
-
# - If set to '
|
180
|
-
#
|
181
|
-
# -
|
182
|
-
#
|
183
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
180
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
181
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
182
|
+
# will use 'default'.
|
183
|
+
# - If set to 'default', then the request will be processed with the standard
|
184
|
+
# pricing and performance for the selected model.
|
185
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
186
|
+
# 'priority', then the request will be processed with the corresponding service
|
187
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
188
|
+
# Priority processing.
|
184
189
|
# - When not set, the default behavior is 'auto'.
|
185
190
|
#
|
186
|
-
# When
|
187
|
-
#
|
191
|
+
# When the `service_tier` parameter is set, the response body will include the
|
192
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
193
|
+
# request. This response value may be different from the value set in the
|
194
|
+
# parameter.
|
188
195
|
sig do
|
189
196
|
returns(
|
190
197
|
T.nilable(OpenAI::Responses::Response::ServiceTier::TaggedSymbol)
|
@@ -213,6 +220,11 @@ module OpenAI
|
|
213
220
|
sig { params(text: OpenAI::Responses::ResponseTextConfig::OrHash).void }
|
214
221
|
attr_writer :text
|
215
222
|
|
223
|
+
# An integer between 0 and 20 specifying the number of most likely tokens to
|
224
|
+
# return at each token position, each with an associated log probability.
|
225
|
+
sig { returns(T.nilable(Integer)) }
|
226
|
+
attr_accessor :top_logprobs
|
227
|
+
|
216
228
|
# The truncation strategy to use for the model response.
|
217
229
|
#
|
218
230
|
# - `auto`: If the context of this response and previous ones exceeds the model's
|
@@ -283,7 +295,8 @@ module OpenAI
|
|
283
295
|
T.any(
|
284
296
|
OpenAI::Responses::ToolChoiceOptions::OrSymbol,
|
285
297
|
OpenAI::Responses::ToolChoiceTypes::OrHash,
|
286
|
-
OpenAI::Responses::ToolChoiceFunction::OrHash
|
298
|
+
OpenAI::Responses::ToolChoiceFunction::OrHash,
|
299
|
+
OpenAI::Responses::ToolChoiceMcp::OrHash
|
287
300
|
),
|
288
301
|
tools:
|
289
302
|
T::Array[
|
@@ -301,6 +314,7 @@ module OpenAI
|
|
301
314
|
top_p: T.nilable(Float),
|
302
315
|
background: T.nilable(T::Boolean),
|
303
316
|
max_output_tokens: T.nilable(Integer),
|
317
|
+
max_tool_calls: T.nilable(Integer),
|
304
318
|
previous_response_id: T.nilable(String),
|
305
319
|
prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
|
306
320
|
reasoning: T.nilable(OpenAI::Reasoning::OrHash),
|
@@ -308,6 +322,7 @@ module OpenAI
|
|
308
322
|
T.nilable(OpenAI::Responses::Response::ServiceTier::OrSymbol),
|
309
323
|
status: OpenAI::Responses::ResponseStatus::OrSymbol,
|
310
324
|
text: OpenAI::Responses::ResponseTextConfig::OrHash,
|
325
|
+
top_logprobs: T.nilable(Integer),
|
311
326
|
truncation:
|
312
327
|
T.nilable(OpenAI::Responses::Response::Truncation::OrSymbol),
|
313
328
|
usage: OpenAI::Responses::ResponseUsage::OrHash,
|
@@ -390,6 +405,11 @@ module OpenAI
|
|
390
405
|
# including visible output tokens and
|
391
406
|
# [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
|
392
407
|
max_output_tokens: nil,
|
408
|
+
# The maximum number of total calls to built-in tools that can be processed in a
|
409
|
+
# response. This maximum number applies across all built-in tool calls, not per
|
410
|
+
# individual tool. Any further attempts to call a tool by the model will be
|
411
|
+
# ignored.
|
412
|
+
max_tool_calls: nil,
|
393
413
|
# The unique ID of the previous response to the model. Use this to create
|
394
414
|
# multi-turn conversations. Learn more about
|
395
415
|
# [conversation state](https://platform.openai.com/docs/guides/conversation-state).
|
@@ -402,23 +422,23 @@ module OpenAI
|
|
402
422
|
# Configuration options for
|
403
423
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning).
|
404
424
|
reasoning: nil,
|
405
|
-
# Specifies the
|
406
|
-
# relevant for customers subscribed to the scale tier service:
|
425
|
+
# Specifies the processing type used for serving the request.
|
407
426
|
#
|
408
|
-
# - If set to 'auto',
|
409
|
-
#
|
410
|
-
#
|
411
|
-
#
|
412
|
-
#
|
413
|
-
# - If set to '
|
414
|
-
#
|
415
|
-
# -
|
416
|
-
#
|
417
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
427
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
428
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
429
|
+
# will use 'default'.
|
430
|
+
# - If set to 'default', then the request will be processed with the standard
|
431
|
+
# pricing and performance for the selected model.
|
432
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
433
|
+
# 'priority', then the request will be processed with the corresponding service
|
434
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
435
|
+
# Priority processing.
|
418
436
|
# - When not set, the default behavior is 'auto'.
|
419
437
|
#
|
420
|
-
# When
|
421
|
-
#
|
438
|
+
# When the `service_tier` parameter is set, the response body will include the
|
439
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
440
|
+
# request. This response value may be different from the value set in the
|
441
|
+
# parameter.
|
422
442
|
service_tier: nil,
|
423
443
|
# The status of the response generation. One of `completed`, `failed`,
|
424
444
|
# `in_progress`, `cancelled`, `queued`, or `incomplete`.
|
@@ -429,6 +449,9 @@ module OpenAI
|
|
429
449
|
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
|
430
450
|
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
|
431
451
|
text: nil,
|
452
|
+
# An integer between 0 and 20 specifying the number of most likely tokens to
|
453
|
+
# return at each token position, each with an associated log probability.
|
454
|
+
top_logprobs: nil,
|
432
455
|
# The truncation strategy to use for the model response.
|
433
456
|
#
|
434
457
|
# - `auto`: If the context of this response and previous ones exceeds the model's
|
@@ -470,6 +493,7 @@ module OpenAI
|
|
470
493
|
top_p: T.nilable(Float),
|
471
494
|
background: T.nilable(T::Boolean),
|
472
495
|
max_output_tokens: T.nilable(Integer),
|
496
|
+
max_tool_calls: T.nilable(Integer),
|
473
497
|
previous_response_id: T.nilable(String),
|
474
498
|
prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
|
475
499
|
reasoning: T.nilable(OpenAI::Reasoning),
|
@@ -479,6 +503,7 @@ module OpenAI
|
|
479
503
|
),
|
480
504
|
status: OpenAI::Responses::ResponseStatus::TaggedSymbol,
|
481
505
|
text: OpenAI::Responses::ResponseTextConfig,
|
506
|
+
top_logprobs: T.nilable(Integer),
|
482
507
|
truncation:
|
483
508
|
T.nilable(
|
484
509
|
OpenAI::Responses::Response::Truncation::TaggedSymbol
|
@@ -622,7 +647,8 @@ module OpenAI
|
|
622
647
|
T.any(
|
623
648
|
OpenAI::Responses::ToolChoiceOptions::TaggedSymbol,
|
624
649
|
OpenAI::Responses::ToolChoiceTypes,
|
625
|
-
OpenAI::Responses::ToolChoiceFunction
|
650
|
+
OpenAI::Responses::ToolChoiceFunction,
|
651
|
+
OpenAI::Responses::ToolChoiceMcp
|
626
652
|
)
|
627
653
|
end
|
628
654
|
|
@@ -635,23 +661,23 @@ module OpenAI
|
|
635
661
|
end
|
636
662
|
end
|
637
663
|
|
638
|
-
# Specifies the
|
639
|
-
# relevant for customers subscribed to the scale tier service:
|
664
|
+
# Specifies the processing type used for serving the request.
|
640
665
|
#
|
641
|
-
# - If set to 'auto',
|
642
|
-
#
|
643
|
-
#
|
644
|
-
#
|
645
|
-
#
|
646
|
-
# - If set to '
|
647
|
-
#
|
648
|
-
# -
|
649
|
-
#
|
650
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
666
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
667
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
668
|
+
# will use 'default'.
|
669
|
+
# - If set to 'default', then the request will be processed with the standard
|
670
|
+
# pricing and performance for the selected model.
|
671
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
672
|
+
# 'priority', then the request will be processed with the corresponding service
|
673
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
674
|
+
# Priority processing.
|
651
675
|
# - When not set, the default behavior is 'auto'.
|
652
676
|
#
|
653
|
-
# When
|
654
|
-
#
|
677
|
+
# When the `service_tier` parameter is set, the response body will include the
|
678
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
679
|
+
# request. This response value may be different from the value set in the
|
680
|
+
# parameter.
|
655
681
|
module ServiceTier
|
656
682
|
extend OpenAI::Internal::Type::Enum
|
657
683
|
|
@@ -675,6 +701,11 @@ module OpenAI
|
|
675
701
|
:scale,
|
676
702
|
OpenAI::Responses::Response::ServiceTier::TaggedSymbol
|
677
703
|
)
|
704
|
+
PRIORITY =
|
705
|
+
T.let(
|
706
|
+
:priority,
|
707
|
+
OpenAI::Responses::Response::ServiceTier::TaggedSymbol
|
708
|
+
)
|
678
709
|
|
679
710
|
sig do
|
680
711
|
override.returns(
|
@@ -12,15 +12,20 @@ module OpenAI
|
|
12
12
|
)
|
13
13
|
end
|
14
14
|
|
15
|
-
# The partial code snippet
|
15
|
+
# The partial code snippet being streamed by the code interpreter.
|
16
16
|
sig { returns(String) }
|
17
17
|
attr_accessor :delta
|
18
18
|
|
19
|
-
# The
|
19
|
+
# The unique identifier of the code interpreter tool call item.
|
20
|
+
sig { returns(String) }
|
21
|
+
attr_accessor :item_id
|
22
|
+
|
23
|
+
# The index of the output item in the response for which the code is being
|
24
|
+
# streamed.
|
20
25
|
sig { returns(Integer) }
|
21
26
|
attr_accessor :output_index
|
22
27
|
|
23
|
-
# The sequence number of this event.
|
28
|
+
# The sequence number of this event, used to order streaming events.
|
24
29
|
sig { returns(Integer) }
|
25
30
|
attr_accessor :sequence_number
|
26
31
|
|
@@ -28,21 +33,25 @@ module OpenAI
|
|
28
33
|
sig { returns(Symbol) }
|
29
34
|
attr_accessor :type
|
30
35
|
|
31
|
-
# Emitted when a partial code snippet is
|
36
|
+
# Emitted when a partial code snippet is streamed by the code interpreter.
|
32
37
|
sig do
|
33
38
|
params(
|
34
39
|
delta: String,
|
40
|
+
item_id: String,
|
35
41
|
output_index: Integer,
|
36
42
|
sequence_number: Integer,
|
37
43
|
type: Symbol
|
38
44
|
).returns(T.attached_class)
|
39
45
|
end
|
40
46
|
def self.new(
|
41
|
-
# The partial code snippet
|
47
|
+
# The partial code snippet being streamed by the code interpreter.
|
42
48
|
delta:,
|
43
|
-
# The
|
49
|
+
# The unique identifier of the code interpreter tool call item.
|
50
|
+
item_id:,
|
51
|
+
# The index of the output item in the response for which the code is being
|
52
|
+
# streamed.
|
44
53
|
output_index:,
|
45
|
-
# The sequence number of this event.
|
54
|
+
# The sequence number of this event, used to order streaming events.
|
46
55
|
sequence_number:,
|
47
56
|
# The type of the event. Always `response.code_interpreter_call_code.delta`.
|
48
57
|
type: :"response.code_interpreter_call_code.delta"
|
@@ -53,6 +62,7 @@ module OpenAI
|
|
53
62
|
override.returns(
|
54
63
|
{
|
55
64
|
delta: String,
|
65
|
+
item_id: String,
|
56
66
|
output_index: Integer,
|
57
67
|
sequence_number: Integer,
|
58
68
|
type: Symbol
|
@@ -16,11 +16,15 @@ module OpenAI
|
|
16
16
|
sig { returns(String) }
|
17
17
|
attr_accessor :code
|
18
18
|
|
19
|
-
# The
|
19
|
+
# The unique identifier of the code interpreter tool call item.
|
20
|
+
sig { returns(String) }
|
21
|
+
attr_accessor :item_id
|
22
|
+
|
23
|
+
# The index of the output item in the response for which the code is finalized.
|
20
24
|
sig { returns(Integer) }
|
21
25
|
attr_accessor :output_index
|
22
26
|
|
23
|
-
# The sequence number of this event.
|
27
|
+
# The sequence number of this event, used to order streaming events.
|
24
28
|
sig { returns(Integer) }
|
25
29
|
attr_accessor :sequence_number
|
26
30
|
|
@@ -28,10 +32,11 @@ module OpenAI
|
|
28
32
|
sig { returns(Symbol) }
|
29
33
|
attr_accessor :type
|
30
34
|
|
31
|
-
# Emitted when code snippet
|
35
|
+
# Emitted when the code snippet is finalized by the code interpreter.
|
32
36
|
sig do
|
33
37
|
params(
|
34
38
|
code: String,
|
39
|
+
item_id: String,
|
35
40
|
output_index: Integer,
|
36
41
|
sequence_number: Integer,
|
37
42
|
type: Symbol
|
@@ -40,9 +45,11 @@ module OpenAI
|
|
40
45
|
def self.new(
|
41
46
|
# The final code snippet output by the code interpreter.
|
42
47
|
code:,
|
43
|
-
# The
|
48
|
+
# The unique identifier of the code interpreter tool call item.
|
49
|
+
item_id:,
|
50
|
+
# The index of the output item in the response for which the code is finalized.
|
44
51
|
output_index:,
|
45
|
-
# The sequence number of this event.
|
52
|
+
# The sequence number of this event, used to order streaming events.
|
46
53
|
sequence_number:,
|
47
54
|
# The type of the event. Always `response.code_interpreter_call_code.done`.
|
48
55
|
type: :"response.code_interpreter_call_code.done"
|
@@ -53,6 +60,7 @@ module OpenAI
|
|
53
60
|
override.returns(
|
54
61
|
{
|
55
62
|
code: String,
|
63
|
+
item_id: String,
|
56
64
|
output_index: Integer,
|
57
65
|
sequence_number: Integer,
|
58
66
|
type: Symbol
|
@@ -12,23 +12,16 @@ module OpenAI
|
|
12
12
|
)
|
13
13
|
end
|
14
14
|
|
15
|
-
#
|
16
|
-
sig { returns(
|
17
|
-
|
15
|
+
# The unique identifier of the code interpreter tool call item.
|
16
|
+
sig { returns(String) }
|
17
|
+
attr_accessor :item_id
|
18
18
|
|
19
|
-
|
20
|
-
|
21
|
-
code_interpreter_call:
|
22
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash
|
23
|
-
).void
|
24
|
-
end
|
25
|
-
attr_writer :code_interpreter_call
|
26
|
-
|
27
|
-
# The index of the output item that the code interpreter call is in progress.
|
19
|
+
# The index of the output item in the response for which the code interpreter call
|
20
|
+
# is completed.
|
28
21
|
sig { returns(Integer) }
|
29
22
|
attr_accessor :output_index
|
30
23
|
|
31
|
-
# The sequence number of this event.
|
24
|
+
# The sequence number of this event, used to order streaming events.
|
32
25
|
sig { returns(Integer) }
|
33
26
|
attr_accessor :sequence_number
|
34
27
|
|
@@ -39,19 +32,19 @@ module OpenAI
|
|
39
32
|
# Emitted when the code interpreter call is completed.
|
40
33
|
sig do
|
41
34
|
params(
|
42
|
-
|
43
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
|
35
|
+
item_id: String,
|
44
36
|
output_index: Integer,
|
45
37
|
sequence_number: Integer,
|
46
38
|
type: Symbol
|
47
39
|
).returns(T.attached_class)
|
48
40
|
end
|
49
41
|
def self.new(
|
50
|
-
#
|
51
|
-
|
52
|
-
# The index of the output item
|
42
|
+
# The unique identifier of the code interpreter tool call item.
|
43
|
+
item_id:,
|
44
|
+
# The index of the output item in the response for which the code interpreter call
|
45
|
+
# is completed.
|
53
46
|
output_index:,
|
54
|
-
# The sequence number of this event.
|
47
|
+
# The sequence number of this event, used to order streaming events.
|
55
48
|
sequence_number:,
|
56
49
|
# The type of the event. Always `response.code_interpreter_call.completed`.
|
57
50
|
type: :"response.code_interpreter_call.completed"
|
@@ -61,8 +54,7 @@ module OpenAI
|
|
61
54
|
sig do
|
62
55
|
override.returns(
|
63
56
|
{
|
64
|
-
|
65
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall,
|
57
|
+
item_id: String,
|
66
58
|
output_index: Integer,
|
67
59
|
sequence_number: Integer,
|
68
60
|
type: Symbol
|
@@ -12,23 +12,16 @@ module OpenAI
|
|
12
12
|
)
|
13
13
|
end
|
14
14
|
|
15
|
-
#
|
16
|
-
sig { returns(
|
17
|
-
|
15
|
+
# The unique identifier of the code interpreter tool call item.
|
16
|
+
sig { returns(String) }
|
17
|
+
attr_accessor :item_id
|
18
18
|
|
19
|
-
|
20
|
-
|
21
|
-
code_interpreter_call:
|
22
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash
|
23
|
-
).void
|
24
|
-
end
|
25
|
-
attr_writer :code_interpreter_call
|
26
|
-
|
27
|
-
# The index of the output item that the code interpreter call is in progress.
|
19
|
+
# The index of the output item in the response for which the code interpreter call
|
20
|
+
# is in progress.
|
28
21
|
sig { returns(Integer) }
|
29
22
|
attr_accessor :output_index
|
30
23
|
|
31
|
-
# The sequence number of this event.
|
24
|
+
# The sequence number of this event, used to order streaming events.
|
32
25
|
sig { returns(Integer) }
|
33
26
|
attr_accessor :sequence_number
|
34
27
|
|
@@ -39,19 +32,19 @@ module OpenAI
|
|
39
32
|
# Emitted when a code interpreter call is in progress.
|
40
33
|
sig do
|
41
34
|
params(
|
42
|
-
|
43
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
|
35
|
+
item_id: String,
|
44
36
|
output_index: Integer,
|
45
37
|
sequence_number: Integer,
|
46
38
|
type: Symbol
|
47
39
|
).returns(T.attached_class)
|
48
40
|
end
|
49
41
|
def self.new(
|
50
|
-
#
|
51
|
-
|
52
|
-
# The index of the output item
|
42
|
+
# The unique identifier of the code interpreter tool call item.
|
43
|
+
item_id:,
|
44
|
+
# The index of the output item in the response for which the code interpreter call
|
45
|
+
# is in progress.
|
53
46
|
output_index:,
|
54
|
-
# The sequence number of this event.
|
47
|
+
# The sequence number of this event, used to order streaming events.
|
55
48
|
sequence_number:,
|
56
49
|
# The type of the event. Always `response.code_interpreter_call.in_progress`.
|
57
50
|
type: :"response.code_interpreter_call.in_progress"
|
@@ -61,8 +54,7 @@ module OpenAI
|
|
61
54
|
sig do
|
62
55
|
override.returns(
|
63
56
|
{
|
64
|
-
|
65
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall,
|
57
|
+
item_id: String,
|
66
58
|
output_index: Integer,
|
67
59
|
sequence_number: Integer,
|
68
60
|
type: Symbol
|
@@ -12,23 +12,16 @@ module OpenAI
|
|
12
12
|
)
|
13
13
|
end
|
14
14
|
|
15
|
-
#
|
16
|
-
sig { returns(
|
17
|
-
|
15
|
+
# The unique identifier of the code interpreter tool call item.
|
16
|
+
sig { returns(String) }
|
17
|
+
attr_accessor :item_id
|
18
18
|
|
19
|
-
|
20
|
-
|
21
|
-
code_interpreter_call:
|
22
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash
|
23
|
-
).void
|
24
|
-
end
|
25
|
-
attr_writer :code_interpreter_call
|
26
|
-
|
27
|
-
# The index of the output item that the code interpreter call is in progress.
|
19
|
+
# The index of the output item in the response for which the code interpreter is
|
20
|
+
# interpreting code.
|
28
21
|
sig { returns(Integer) }
|
29
22
|
attr_accessor :output_index
|
30
23
|
|
31
|
-
# The sequence number of this event.
|
24
|
+
# The sequence number of this event, used to order streaming events.
|
32
25
|
sig { returns(Integer) }
|
33
26
|
attr_accessor :sequence_number
|
34
27
|
|
@@ -39,19 +32,19 @@ module OpenAI
|
|
39
32
|
# Emitted when the code interpreter is actively interpreting the code snippet.
|
40
33
|
sig do
|
41
34
|
params(
|
42
|
-
|
43
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
|
35
|
+
item_id: String,
|
44
36
|
output_index: Integer,
|
45
37
|
sequence_number: Integer,
|
46
38
|
type: Symbol
|
47
39
|
).returns(T.attached_class)
|
48
40
|
end
|
49
41
|
def self.new(
|
50
|
-
#
|
51
|
-
|
52
|
-
# The index of the output item
|
42
|
+
# The unique identifier of the code interpreter tool call item.
|
43
|
+
item_id:,
|
44
|
+
# The index of the output item in the response for which the code interpreter is
|
45
|
+
# interpreting code.
|
53
46
|
output_index:,
|
54
|
-
# The sequence number of this event.
|
47
|
+
# The sequence number of this event, used to order streaming events.
|
55
48
|
sequence_number:,
|
56
49
|
# The type of the event. Always `response.code_interpreter_call.interpreting`.
|
57
50
|
type: :"response.code_interpreter_call.interpreting"
|
@@ -61,8 +54,7 @@ module OpenAI
|
|
61
54
|
sig do
|
62
55
|
override.returns(
|
63
56
|
{
|
64
|
-
|
65
|
-
OpenAI::Responses::ResponseCodeInterpreterToolCall,
|
57
|
+
item_id: String,
|
66
58
|
output_index: Integer,
|
67
59
|
sequence_number: Integer,
|
68
60
|
type: Symbol
|