openai 0.36.1 → 0.37.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +14 -0
- data/README.md +1 -1
- data/lib/openai/models/all_models.rb +1 -0
- data/lib/openai/models/beta/assistant_create_params.rb +4 -3
- data/lib/openai/models/beta/assistant_update_params.rb +4 -3
- data/lib/openai/models/beta/threads/run_create_params.rb +4 -3
- data/lib/openai/models/chat/completion_create_params.rb +4 -3
- data/lib/openai/models/container_create_params.rb +22 -1
- data/lib/openai/models/container_create_response.rb +32 -1
- data/lib/openai/models/container_list_response.rb +32 -1
- data/lib/openai/models/container_retrieve_response.rb +32 -1
- data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
- data/lib/openai/models/conversations/item_create_params.rb +2 -2
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +4 -3
- data/lib/openai/models/evals/run_cancel_response.rb +8 -6
- data/lib/openai/models/evals/run_create_params.rb +8 -6
- data/lib/openai/models/evals/run_create_response.rb +8 -6
- data/lib/openai/models/evals/run_list_response.rb +8 -6
- data/lib/openai/models/evals/run_retrieve_response.rb +8 -6
- data/lib/openai/models/graders/score_model_grader.rb +4 -3
- data/lib/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rb +43 -0
- data/lib/openai/models/realtime/output_audio_buffer_clear_event.rb +4 -4
- data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +13 -5
- data/lib/openai/models/realtime/realtime_client_event.rb +1 -1
- data/lib/openai/models/realtime/realtime_server_event.rb +16 -9
- data/lib/openai/models/realtime/realtime_session.rb +13 -5
- data/lib/openai/models/realtime/realtime_session_create_request.rb +14 -9
- data/lib/openai/models/realtime/realtime_session_create_response.rb +27 -14
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +13 -5
- data/lib/openai/models/realtime/realtime_truncation.rb +14 -9
- data/lib/openai/models/reasoning.rb +4 -3
- data/lib/openai/models/reasoning_effort.rb +5 -3
- data/lib/openai/models/responses/compacted_response.rb +56 -0
- data/lib/openai/models/responses/input_token_count_params.rb +4 -4
- data/lib/openai/models/responses/response.rb +6 -6
- data/lib/openai/models/responses/response_compact_params.rb +344 -0
- data/lib/openai/models/responses/response_compaction_item.rb +43 -0
- data/lib/openai/models/responses/response_compaction_item_param.rb +36 -0
- data/lib/openai/models/responses/response_create_params.rb +4 -4
- data/lib/openai/models/responses/response_function_shell_call_output_content.rb +10 -10
- data/lib/openai/models/responses/response_function_shell_tool_call.rb +5 -5
- data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +2 -2
- data/lib/openai/models/responses/response_input_item.rb +20 -17
- data/lib/openai/models/responses/response_output_item.rb +4 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/tool.rb +4 -2
- data/lib/openai/models/responses/tool_choice_shell.rb +1 -1
- data/lib/openai/models/responses_model.rb +1 -0
- data/lib/openai/models/video_create_params.rb +11 -6
- data/lib/openai/resources/containers.rb +3 -1
- data/lib/openai/resources/conversations/items.rb +1 -1
- data/lib/openai/resources/conversations.rb +1 -1
- data/lib/openai/resources/responses/input_tokens.rb +1 -1
- data/lib/openai/resources/responses.rb +33 -2
- data/lib/openai/resources/videos.rb +6 -3
- data/lib/openai/resources/webhooks.rb +0 -3
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/manifest.yaml +1 -0
- data/rbi/openai/models/all_models.rbi +5 -0
- data/rbi/openai/models/beta/assistant_create_params.rbi +8 -6
- data/rbi/openai/models/beta/assistant_update_params.rbi +8 -6
- data/rbi/openai/models/beta/threads/run_create_params.rbi +8 -6
- data/rbi/openai/models/chat/completion_create_params.rbi +8 -6
- data/rbi/openai/models/container_create_params.rbi +51 -0
- data/rbi/openai/models/container_create_response.rbi +81 -3
- data/rbi/openai/models/container_list_response.rbi +80 -3
- data/rbi/openai/models/container_retrieve_response.rbi +83 -3
- data/rbi/openai/models/conversations/conversation_create_params.rbi +3 -0
- data/rbi/openai/models/conversations/item_create_params.rbi +3 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -6
- data/rbi/openai/models/evals/run_cancel_response.rbi +16 -12
- data/rbi/openai/models/evals/run_create_params.rbi +16 -12
- data/rbi/openai/models/evals/run_create_response.rbi +16 -12
- data/rbi/openai/models/evals/run_list_response.rbi +16 -12
- data/rbi/openai/models/evals/run_retrieve_response.rbi +16 -12
- data/rbi/openai/models/graders/score_model_grader.rbi +8 -6
- data/rbi/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbi +56 -0
- data/rbi/openai/models/realtime/output_audio_buffer_clear_event.rbi +4 -4
- data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_server_event.rbi +6 -5
- data/rbi/openai/models/realtime/realtime_session.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +28 -18
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +52 -26
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +24 -8
- data/rbi/openai/models/realtime/realtime_truncation.rbi +14 -9
- data/rbi/openai/models/reasoning.rbi +8 -6
- data/rbi/openai/models/reasoning_effort.rbi +5 -3
- data/rbi/openai/models/responses/compacted_response.rbi +105 -0
- data/rbi/openai/models/responses/response.rbi +1 -0
- data/rbi/openai/models/responses/response_compact_params.rbi +593 -0
- data/rbi/openai/models/responses/response_compaction_item.rbi +67 -0
- data/rbi/openai/models/responses/response_compaction_item_param.rbi +54 -0
- data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +9 -9
- data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +6 -6
- data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +1 -1
- data/rbi/openai/models/responses/response_input_item.rbi +18 -17
- data/rbi/openai/models/responses/response_output_item.rbi +1 -0
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +1 -0
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +1 -0
- data/rbi/openai/models/responses/tool.rbi +6 -3
- data/rbi/openai/models/responses/tool_choice_shell.rbi +1 -1
- data/rbi/openai/models/responses_model.rbi +5 -0
- data/rbi/openai/models/video_create_params.rbi +10 -6
- data/rbi/openai/resources/beta/assistants.rbi +8 -6
- data/rbi/openai/resources/beta/threads/runs.rbi +8 -6
- data/rbi/openai/resources/chat/completions.rbi +8 -6
- data/rbi/openai/resources/containers.rbi +3 -0
- data/rbi/openai/resources/conversations/items.rbi +1 -0
- data/rbi/openai/resources/conversations.rbi +1 -0
- data/rbi/openai/resources/realtime/calls.rbi +14 -9
- data/rbi/openai/resources/responses.rbi +42 -0
- data/rbi/openai/resources/videos.rbi +5 -3
- data/sig/openai/models/all_models.rbs +2 -0
- data/sig/openai/models/container_create_params.rbs +23 -1
- data/sig/openai/models/container_create_response.rbs +32 -3
- data/sig/openai/models/container_list_response.rbs +32 -3
- data/sig/openai/models/container_retrieve_response.rbs +32 -3
- data/sig/openai/models/realtime/input_audio_buffer_dtmf_event_received_event.rbs +32 -0
- data/sig/openai/models/realtime/realtime_server_event.rbs +1 -0
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/responses/compacted_response.rbs +42 -0
- data/sig/openai/models/responses/response_compact_params.rbs +226 -0
- data/sig/openai/models/responses/response_compaction_item.rbs +39 -0
- data/sig/openai/models/responses/response_compaction_item_param.rbs +28 -0
- data/sig/openai/models/responses/response_input_item.rbs +1 -0
- data/sig/openai/models/responses/response_output_item.rbs +1 -0
- data/sig/openai/models/responses_model.rbs +2 -0
- data/sig/openai/resources/containers.rbs +1 -0
- data/sig/openai/resources/responses.rbs +8 -0
- metadata +31 -2
|
@@ -889,9 +889,9 @@ module OpenAI
|
|
|
889
889
|
|
|
890
890
|
# Constrains effort on reasoning for
|
|
891
891
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
892
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
893
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
894
|
-
# reasoning in a response.
|
|
892
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
893
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
894
|
+
# on reasoning in a response.
|
|
895
895
|
#
|
|
896
896
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
897
897
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -899,6 +899,7 @@ module OpenAI
|
|
|
899
899
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
900
900
|
# support `none`.
|
|
901
901
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
902
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
902
903
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
|
|
903
904
|
attr_accessor :reasoning_effort
|
|
904
905
|
|
|
@@ -996,9 +997,9 @@ module OpenAI
|
|
|
996
997
|
max_completion_tokens: nil,
|
|
997
998
|
# Constrains effort on reasoning for
|
|
998
999
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
999
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1000
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1001
|
-
# reasoning in a response.
|
|
1000
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1001
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1002
|
+
# on reasoning in a response.
|
|
1002
1003
|
#
|
|
1003
1004
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1004
1005
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1006,6 +1007,7 @@ module OpenAI
|
|
|
1006
1007
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1007
1008
|
# support `none`.
|
|
1008
1009
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1010
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1009
1011
|
reasoning_effort: nil,
|
|
1010
1012
|
# An object specifying the format that the model must output.
|
|
1011
1013
|
#
|
|
@@ -512,9 +512,9 @@ module OpenAI
|
|
|
512
512
|
|
|
513
513
|
# Constrains effort on reasoning for
|
|
514
514
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
515
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
516
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
517
|
-
# reasoning in a response.
|
|
515
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
516
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
517
|
+
# on reasoning in a response.
|
|
518
518
|
#
|
|
519
519
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
520
520
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -522,6 +522,7 @@ module OpenAI
|
|
|
522
522
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
523
523
|
# support `none`.
|
|
524
524
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
525
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
525
526
|
sig do
|
|
526
527
|
returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
|
|
527
528
|
end
|
|
@@ -578,9 +579,9 @@ module OpenAI
|
|
|
578
579
|
model: nil,
|
|
579
580
|
# Constrains effort on reasoning for
|
|
580
581
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
581
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
582
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
583
|
-
# reasoning in a response.
|
|
582
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
583
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
584
|
+
# on reasoning in a response.
|
|
584
585
|
#
|
|
585
586
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
586
587
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -588,6 +589,7 @@ module OpenAI
|
|
|
588
589
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
589
590
|
# support `none`.
|
|
590
591
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
592
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
591
593
|
reasoning_effort: nil,
|
|
592
594
|
# Sampling temperature. This is a query parameter used to select responses.
|
|
593
595
|
temperature: nil,
|
|
@@ -1131,9 +1133,9 @@ module OpenAI
|
|
|
1131
1133
|
|
|
1132
1134
|
# Constrains effort on reasoning for
|
|
1133
1135
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1134
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1135
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1136
|
-
# reasoning in a response.
|
|
1136
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1137
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1138
|
+
# on reasoning in a response.
|
|
1137
1139
|
#
|
|
1138
1140
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1139
1141
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1141,6 +1143,7 @@ module OpenAI
|
|
|
1141
1143
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1142
1144
|
# support `none`.
|
|
1143
1145
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1146
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1144
1147
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
|
|
1145
1148
|
attr_accessor :reasoning_effort
|
|
1146
1149
|
|
|
@@ -1263,9 +1266,9 @@ module OpenAI
|
|
|
1263
1266
|
max_completion_tokens: nil,
|
|
1264
1267
|
# Constrains effort on reasoning for
|
|
1265
1268
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1266
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1267
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1268
|
-
# reasoning in a response.
|
|
1269
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1270
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1271
|
+
# on reasoning in a response.
|
|
1269
1272
|
#
|
|
1270
1273
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1271
1274
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1273,6 +1276,7 @@ module OpenAI
|
|
|
1273
1276
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1274
1277
|
# support `none`.
|
|
1275
1278
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1279
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1276
1280
|
reasoning_effort: nil,
|
|
1277
1281
|
# A seed value to initialize the randomness, during sampling.
|
|
1278
1282
|
seed: nil,
|
|
@@ -422,9 +422,9 @@ module OpenAI
|
|
|
422
422
|
|
|
423
423
|
# Constrains effort on reasoning for
|
|
424
424
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
425
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
426
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
427
|
-
# reasoning in a response.
|
|
425
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
426
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
427
|
+
# on reasoning in a response.
|
|
428
428
|
#
|
|
429
429
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
430
430
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -432,6 +432,7 @@ module OpenAI
|
|
|
432
432
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
433
433
|
# support `none`.
|
|
434
434
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
435
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
435
436
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
|
|
436
437
|
attr_accessor :reasoning_effort
|
|
437
438
|
|
|
@@ -486,9 +487,9 @@ module OpenAI
|
|
|
486
487
|
model: nil,
|
|
487
488
|
# Constrains effort on reasoning for
|
|
488
489
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
489
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
490
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
491
|
-
# reasoning in a response.
|
|
490
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
491
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
492
|
+
# on reasoning in a response.
|
|
492
493
|
#
|
|
493
494
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
494
495
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -496,6 +497,7 @@ module OpenAI
|
|
|
496
497
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
497
498
|
# support `none`.
|
|
498
499
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
500
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
499
501
|
reasoning_effort: nil,
|
|
500
502
|
# Sampling temperature. This is a query parameter used to select responses.
|
|
501
503
|
temperature: nil,
|
|
@@ -1089,9 +1091,9 @@ module OpenAI
|
|
|
1089
1091
|
|
|
1090
1092
|
# Constrains effort on reasoning for
|
|
1091
1093
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1092
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1093
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1094
|
-
# reasoning in a response.
|
|
1094
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1095
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1096
|
+
# on reasoning in a response.
|
|
1095
1097
|
#
|
|
1096
1098
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1097
1099
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1099,6 +1101,7 @@ module OpenAI
|
|
|
1099
1101
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1100
1102
|
# support `none`.
|
|
1101
1103
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1104
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1102
1105
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
|
|
1103
1106
|
attr_accessor :reasoning_effort
|
|
1104
1107
|
|
|
@@ -1240,9 +1243,9 @@ module OpenAI
|
|
|
1240
1243
|
max_completion_tokens: nil,
|
|
1241
1244
|
# Constrains effort on reasoning for
|
|
1242
1245
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1243
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1244
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1245
|
-
# reasoning in a response.
|
|
1246
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1247
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1248
|
+
# on reasoning in a response.
|
|
1246
1249
|
#
|
|
1247
1250
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1248
1251
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1250,6 +1253,7 @@ module OpenAI
|
|
|
1250
1253
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1251
1254
|
# support `none`.
|
|
1252
1255
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1256
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1253
1257
|
reasoning_effort: nil,
|
|
1254
1258
|
# A seed value to initialize the randomness, during sampling.
|
|
1255
1259
|
seed: nil,
|
|
@@ -512,9 +512,9 @@ module OpenAI
|
|
|
512
512
|
|
|
513
513
|
# Constrains effort on reasoning for
|
|
514
514
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
515
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
516
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
517
|
-
# reasoning in a response.
|
|
515
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
516
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
517
|
+
# on reasoning in a response.
|
|
518
518
|
#
|
|
519
519
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
520
520
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -522,6 +522,7 @@ module OpenAI
|
|
|
522
522
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
523
523
|
# support `none`.
|
|
524
524
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
525
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
525
526
|
sig do
|
|
526
527
|
returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
|
|
527
528
|
end
|
|
@@ -578,9 +579,9 @@ module OpenAI
|
|
|
578
579
|
model: nil,
|
|
579
580
|
# Constrains effort on reasoning for
|
|
580
581
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
581
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
582
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
583
|
-
# reasoning in a response.
|
|
582
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
583
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
584
|
+
# on reasoning in a response.
|
|
584
585
|
#
|
|
585
586
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
586
587
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -588,6 +589,7 @@ module OpenAI
|
|
|
588
589
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
589
590
|
# support `none`.
|
|
590
591
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
592
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
591
593
|
reasoning_effort: nil,
|
|
592
594
|
# Sampling temperature. This is a query parameter used to select responses.
|
|
593
595
|
temperature: nil,
|
|
@@ -1131,9 +1133,9 @@ module OpenAI
|
|
|
1131
1133
|
|
|
1132
1134
|
# Constrains effort on reasoning for
|
|
1133
1135
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1134
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1135
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1136
|
-
# reasoning in a response.
|
|
1136
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1137
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1138
|
+
# on reasoning in a response.
|
|
1137
1139
|
#
|
|
1138
1140
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1139
1141
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1141,6 +1143,7 @@ module OpenAI
|
|
|
1141
1143
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1142
1144
|
# support `none`.
|
|
1143
1145
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1146
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1144
1147
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
|
|
1145
1148
|
attr_accessor :reasoning_effort
|
|
1146
1149
|
|
|
@@ -1263,9 +1266,9 @@ module OpenAI
|
|
|
1263
1266
|
max_completion_tokens: nil,
|
|
1264
1267
|
# Constrains effort on reasoning for
|
|
1265
1268
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1266
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1267
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1268
|
-
# reasoning in a response.
|
|
1269
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1270
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1271
|
+
# on reasoning in a response.
|
|
1269
1272
|
#
|
|
1270
1273
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1271
1274
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1273,6 +1276,7 @@ module OpenAI
|
|
|
1273
1276
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1274
1277
|
# support `none`.
|
|
1275
1278
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1279
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1276
1280
|
reasoning_effort: nil,
|
|
1277
1281
|
# A seed value to initialize the randomness, during sampling.
|
|
1278
1282
|
seed: nil,
|
|
@@ -508,9 +508,9 @@ module OpenAI
|
|
|
508
508
|
|
|
509
509
|
# Constrains effort on reasoning for
|
|
510
510
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
511
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
512
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
513
|
-
# reasoning in a response.
|
|
511
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
512
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
513
|
+
# on reasoning in a response.
|
|
514
514
|
#
|
|
515
515
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
516
516
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -518,6 +518,7 @@ module OpenAI
|
|
|
518
518
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
519
519
|
# support `none`.
|
|
520
520
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
521
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
521
522
|
sig do
|
|
522
523
|
returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
|
|
523
524
|
end
|
|
@@ -574,9 +575,9 @@ module OpenAI
|
|
|
574
575
|
model: nil,
|
|
575
576
|
# Constrains effort on reasoning for
|
|
576
577
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
577
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
578
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
579
|
-
# reasoning in a response.
|
|
578
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
579
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
580
|
+
# on reasoning in a response.
|
|
580
581
|
#
|
|
581
582
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
582
583
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -584,6 +585,7 @@ module OpenAI
|
|
|
584
585
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
585
586
|
# support `none`.
|
|
586
587
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
588
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
587
589
|
reasoning_effort: nil,
|
|
588
590
|
# Sampling temperature. This is a query parameter used to select responses.
|
|
589
591
|
temperature: nil,
|
|
@@ -1127,9 +1129,9 @@ module OpenAI
|
|
|
1127
1129
|
|
|
1128
1130
|
# Constrains effort on reasoning for
|
|
1129
1131
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1130
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1131
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1132
|
-
# reasoning in a response.
|
|
1132
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1133
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1134
|
+
# on reasoning in a response.
|
|
1133
1135
|
#
|
|
1134
1136
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1135
1137
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1137,6 +1139,7 @@ module OpenAI
|
|
|
1137
1139
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1138
1140
|
# support `none`.
|
|
1139
1141
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1142
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1140
1143
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
|
|
1141
1144
|
attr_accessor :reasoning_effort
|
|
1142
1145
|
|
|
@@ -1259,9 +1262,9 @@ module OpenAI
|
|
|
1259
1262
|
max_completion_tokens: nil,
|
|
1260
1263
|
# Constrains effort on reasoning for
|
|
1261
1264
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1262
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1263
|
-
# reasoning effort can result in faster responses and fewer tokens used on
|
|
1264
|
-
# reasoning in a response.
|
|
1265
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1266
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1267
|
+
# on reasoning in a response.
|
|
1265
1268
|
#
|
|
1266
1269
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1267
1270
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1269,6 +1272,7 @@ module OpenAI
|
|
|
1269
1272
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1270
1273
|
# support `none`.
|
|
1271
1274
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1275
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1272
1276
|
reasoning_effort: nil,
|
|
1273
1277
|
# A seed value to initialize the randomness, during sampling.
|
|
1274
1278
|
seed: nil,
|
|
@@ -514,9 +514,9 @@ module OpenAI
|
|
|
514
514
|
|
|
515
515
|
# Constrains effort on reasoning for
|
|
516
516
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
517
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `
|
|
518
|
-
# reasoning effort can result in faster responses and fewer tokens used
|
|
519
|
-
# reasoning in a response.
|
|
517
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
518
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
519
|
+
# on reasoning in a response.
|
|
520
520
|
#
|
|
521
521
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
522
522
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -524,6 +524,7 @@ module OpenAI
|
|
|
524
524
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
525
525
|
# support `none`.
|
|
526
526
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
527
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
527
528
|
sig do
|
|
528
529
|
returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
|
|
529
530
|
end
|
|
@@ -580,9 +581,9 @@ module OpenAI
|
|
|
580
581
|
model: nil,
|
|
581
582
|
# Constrains effort on reasoning for
|
|
582
583
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
583
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `
|
|
584
|
-
# reasoning effort can result in faster responses and fewer tokens used
|
|
585
|
-
# reasoning in a response.
|
|
584
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
585
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
586
|
+
# on reasoning in a response.
|
|
586
587
|
#
|
|
587
588
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
588
589
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -590,6 +591,7 @@ module OpenAI
|
|
|
590
591
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
591
592
|
# support `none`.
|
|
592
593
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
594
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
593
595
|
reasoning_effort: nil,
|
|
594
596
|
# Sampling temperature. This is a query parameter used to select responses.
|
|
595
597
|
temperature: nil,
|
|
@@ -1133,9 +1135,9 @@ module OpenAI
|
|
|
1133
1135
|
|
|
1134
1136
|
# Constrains effort on reasoning for
|
|
1135
1137
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1136
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `
|
|
1137
|
-
# reasoning effort can result in faster responses and fewer tokens used
|
|
1138
|
-
# reasoning in a response.
|
|
1138
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1139
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1140
|
+
# on reasoning in a response.
|
|
1139
1141
|
#
|
|
1140
1142
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1141
1143
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1143,6 +1145,7 @@ module OpenAI
|
|
|
1143
1145
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1144
1146
|
# support `none`.
|
|
1145
1147
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1148
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1146
1149
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
|
|
1147
1150
|
attr_accessor :reasoning_effort
|
|
1148
1151
|
|
|
@@ -1265,9 +1268,9 @@ module OpenAI
|
|
|
1265
1268
|
max_completion_tokens: nil,
|
|
1266
1269
|
# Constrains effort on reasoning for
|
|
1267
1270
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1268
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `
|
|
1269
|
-
# reasoning effort can result in faster responses and fewer tokens used
|
|
1270
|
-
# reasoning in a response.
|
|
1271
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
1272
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
1273
|
+
# on reasoning in a response.
|
|
1271
1274
|
#
|
|
1272
1275
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1273
1276
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -1275,6 +1278,7 @@ module OpenAI
|
|
|
1275
1278
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1276
1279
|
# support `none`.
|
|
1277
1280
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1281
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
1278
1282
|
reasoning_effort: nil,
|
|
1279
1283
|
# A seed value to initialize the randomness, during sampling.
|
|
1280
1284
|
seed: nil,
|
|
@@ -396,9 +396,9 @@ module OpenAI
|
|
|
396
396
|
|
|
397
397
|
# Constrains effort on reasoning for
|
|
398
398
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
399
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `
|
|
400
|
-
# reasoning effort can result in faster responses and fewer tokens used
|
|
401
|
-
# reasoning in a response.
|
|
399
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
400
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
401
|
+
# on reasoning in a response.
|
|
402
402
|
#
|
|
403
403
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
404
404
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -406,6 +406,7 @@ module OpenAI
|
|
|
406
406
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
407
407
|
# support `none`.
|
|
408
408
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
409
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
409
410
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
|
|
410
411
|
attr_accessor :reasoning_effort
|
|
411
412
|
|
|
@@ -436,9 +437,9 @@ module OpenAI
|
|
|
436
437
|
max_completions_tokens: nil,
|
|
437
438
|
# Constrains effort on reasoning for
|
|
438
439
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
439
|
-
# supported values are `none`, `minimal`, `low`, `medium`, and `
|
|
440
|
-
# reasoning effort can result in faster responses and fewer tokens used
|
|
441
|
-
# reasoning in a response.
|
|
440
|
+
# supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
|
|
441
|
+
# Reducing reasoning effort can result in faster responses and fewer tokens used
|
|
442
|
+
# on reasoning in a response.
|
|
442
443
|
#
|
|
443
444
|
# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
444
445
|
# reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
@@ -446,6 +447,7 @@ module OpenAI
|
|
|
446
447
|
# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
447
448
|
# support `none`.
|
|
448
449
|
# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
450
|
+
# - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
|
|
449
451
|
reasoning_effort: nil,
|
|
450
452
|
# A seed value to initialize the randomness, during sampling.
|
|
451
453
|
seed: nil,
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
# typed: strong
|
|
2
|
+
|
|
3
|
+
module OpenAI
|
|
4
|
+
module Models
|
|
5
|
+
module Realtime
|
|
6
|
+
class InputAudioBufferDtmfEventReceivedEvent < OpenAI::Internal::Type::BaseModel
|
|
7
|
+
OrHash =
|
|
8
|
+
T.type_alias do
|
|
9
|
+
T.any(
|
|
10
|
+
OpenAI::Realtime::InputAudioBufferDtmfEventReceivedEvent,
|
|
11
|
+
OpenAI::Internal::AnyHash
|
|
12
|
+
)
|
|
13
|
+
end
|
|
14
|
+
|
|
15
|
+
# The telephone keypad that was pressed by the user.
|
|
16
|
+
sig { returns(String) }
|
|
17
|
+
attr_accessor :event
|
|
18
|
+
|
|
19
|
+
# UTC Unix Timestamp when DTMF Event was received by server.
|
|
20
|
+
sig { returns(Integer) }
|
|
21
|
+
attr_accessor :received_at
|
|
22
|
+
|
|
23
|
+
# The event type, must be `input_audio_buffer.dtmf_event_received`.
|
|
24
|
+
sig { returns(Symbol) }
|
|
25
|
+
attr_accessor :type
|
|
26
|
+
|
|
27
|
+
# **SIP Only:** Returned when an DTMF event is received. A DTMF event is a message
|
|
28
|
+
# that represents a telephone keypad press (0–9, \*, #, A–D). The `event` property
|
|
29
|
+
# is the keypad that the user press. The `received_at` is the UTC Unix Timestamp
|
|
30
|
+
# that the server received the event.
|
|
31
|
+
sig do
|
|
32
|
+
params(event: String, received_at: Integer, type: Symbol).returns(
|
|
33
|
+
T.attached_class
|
|
34
|
+
)
|
|
35
|
+
end
|
|
36
|
+
def self.new(
|
|
37
|
+
# The telephone keypad that was pressed by the user.
|
|
38
|
+
event:,
|
|
39
|
+
# UTC Unix Timestamp when DTMF Event was received by server.
|
|
40
|
+
received_at:,
|
|
41
|
+
# The event type, must be `input_audio_buffer.dtmf_event_received`.
|
|
42
|
+
type: :"input_audio_buffer.dtmf_event_received"
|
|
43
|
+
)
|
|
44
|
+
end
|
|
45
|
+
|
|
46
|
+
sig do
|
|
47
|
+
override.returns(
|
|
48
|
+
{ event: String, received_at: Integer, type: Symbol }
|
|
49
|
+
)
|
|
50
|
+
end
|
|
51
|
+
def to_hash
|
|
52
|
+
end
|
|
53
|
+
end
|
|
54
|
+
end
|
|
55
|
+
end
|
|
56
|
+
end
|
|
@@ -23,10 +23,10 @@ module OpenAI
|
|
|
23
23
|
sig { params(event_id: String).void }
|
|
24
24
|
attr_writer :event_id
|
|
25
25
|
|
|
26
|
-
# **WebRTC Only:** Emit to cut off the current audio response. This will
|
|
27
|
-
# the server to stop generating audio and emit a
|
|
28
|
-
# event. This event should be preceded by a
|
|
29
|
-
# the generation of the current response.
|
|
26
|
+
# **WebRTC/SIP Only:** Emit to cut off the current audio response. This will
|
|
27
|
+
# trigger the server to stop generating audio and emit a
|
|
28
|
+
# `output_audio_buffer.cleared` event. This event should be preceded by a
|
|
29
|
+
# `response.cancel` client event to stop the generation of the current response.
|
|
30
30
|
# [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
|
|
31
31
|
sig { params(event_id: String, type: Symbol).returns(T.attached_class) }
|
|
32
32
|
def self.new(
|
|
@@ -41,7 +41,11 @@ module OpenAI
|
|
|
41
41
|
attr_accessor :type
|
|
42
42
|
|
|
43
43
|
# Whether or not to automatically generate a response when a VAD stop event
|
|
44
|
-
# occurs.
|
|
44
|
+
# occurs. If `interrupt_response` is set to `false` this may fail to create a
|
|
45
|
+
# response if the model is already responding.
|
|
46
|
+
#
|
|
47
|
+
# If both `create_response` and `interrupt_response` are set to `false`, the model
|
|
48
|
+
# will never respond automatically but VAD events will still be emitted.
|
|
45
49
|
sig { returns(T.nilable(T::Boolean)) }
|
|
46
50
|
attr_reader :create_response
|
|
47
51
|
|
|
@@ -63,9 +67,13 @@ module OpenAI
|
|
|
63
67
|
sig { returns(T.nilable(Integer)) }
|
|
64
68
|
attr_accessor :idle_timeout_ms
|
|
65
69
|
|
|
66
|
-
# Whether or not to automatically interrupt any ongoing response with
|
|
67
|
-
# the default conversation (i.e. `conversation` of `auto`) when a VAD
|
|
68
|
-
# occurs.
|
|
70
|
+
# Whether or not to automatically interrupt (cancel) any ongoing response with
|
|
71
|
+
# output to the default conversation (i.e. `conversation` of `auto`) when a VAD
|
|
72
|
+
# start event occurs. If `true` then the response will be cancelled, otherwise it
|
|
73
|
+
# will continue until complete.
|
|
74
|
+
#
|
|
75
|
+
# If both `create_response` and `interrupt_response` are set to `false`, the model
|
|
76
|
+
# will never respond automatically but VAD events will still be emitted.
|
|
69
77
|
sig { returns(T.nilable(T::Boolean)) }
|
|
70
78
|
attr_reader :interrupt_response
|
|
71
79
|
|
|
@@ -113,7 +121,11 @@ module OpenAI
|
|
|
113
121
|
end
|
|
114
122
|
def self.new(
|
|
115
123
|
# Whether or not to automatically generate a response when a VAD stop event
|
|
116
|
-
# occurs.
|
|
124
|
+
# occurs. If `interrupt_response` is set to `false` this may fail to create a
|
|
125
|
+
# response if the model is already responding.
|
|
126
|
+
#
|
|
127
|
+
# If both `create_response` and `interrupt_response` are set to `false`, the model
|
|
128
|
+
# will never respond automatically but VAD events will still be emitted.
|
|
117
129
|
create_response: nil,
|
|
118
130
|
# Optional timeout after which a model response will be triggered automatically.
|
|
119
131
|
# This is useful for situations in which a long pause from the user is unexpected,
|
|
@@ -128,9 +140,13 @@ module OpenAI
|
|
|
128
140
|
# Response) will be emitted when the timeout is reached. Idle timeout is currently
|
|
129
141
|
# only supported for `server_vad` mode.
|
|
130
142
|
idle_timeout_ms: nil,
|
|
131
|
-
# Whether or not to automatically interrupt any ongoing response with
|
|
132
|
-
# the default conversation (i.e. `conversation` of `auto`) when a VAD
|
|
133
|
-
# occurs.
|
|
143
|
+
# Whether or not to automatically interrupt (cancel) any ongoing response with
|
|
144
|
+
# output to the default conversation (i.e. `conversation` of `auto`) when a VAD
|
|
145
|
+
# start event occurs. If `true` then the response will be cancelled, otherwise it
|
|
146
|
+
# will continue until complete.
|
|
147
|
+
#
|
|
148
|
+
# If both `create_response` and `interrupt_response` are set to `false`, the model
|
|
149
|
+
# will never respond automatically but VAD events will still be emitted.
|
|
134
150
|
interrupt_response: nil,
|
|
135
151
|
# Used only for `server_vad` mode. Amount of audio to include before the VAD
|
|
136
152
|
# detected speech (in milliseconds). Defaults to 300ms.
|