openai 0.23.1 → 0.23.2
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +1 -1
- data/lib/openai/models/realtime/input_audio_buffer_timeout_triggered.rb +25 -5
- data/lib/openai/models/realtime/realtime_audio_config_input.rb +14 -11
- data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +173 -117
- data/lib/openai/models/realtime/realtime_server_event.rb +13 -1
- data/lib/openai/models/realtime/realtime_session.rb +179 -118
- data/lib/openai/models/realtime/realtime_session_create_response.rb +184 -122
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input.rb +16 -11
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +175 -117
- data/lib/openai/models/responses/response.rb +8 -8
- data/lib/openai/models/responses/response_create_params.rb +8 -8
- data/lib/openai/version.rb +1 -1
- data/rbi/openai/models/realtime/input_audio_buffer_timeout_triggered.rbi +24 -5
- data/rbi/openai/models/realtime/realtime_audio_config_input.rbi +44 -28
- data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +264 -203
- data/rbi/openai/models/realtime/realtime_session.rbi +306 -231
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +298 -232
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input.rbi +39 -28
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +264 -200
- data/rbi/openai/models/responses/response.rbi +12 -12
- data/rbi/openai/models/responses/response_create_params.rbi +12 -12
- data/rbi/openai/resources/responses.rbi +8 -8
- data/sig/openai/models/realtime/realtime_audio_config_input.rbs +4 -8
- data/sig/openai/models/realtime/realtime_audio_input_turn_detection.rbs +91 -65
- data/sig/openai/models/realtime/realtime_session.rbs +95 -69
- data/sig/openai/models/realtime/realtime_session_create_response.rbs +95 -73
- data/sig/openai/models/realtime/realtime_transcription_session_audio_input.rbs +4 -8
- data/sig/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbs +91 -65
- metadata +2 -2
@@ -265,10 +265,10 @@ module OpenAI

 # The truncation strategy to use for the model response.
 #
-# - `auto`: If the
-#
-#
-# - `disabled` (default): If
+# - `auto`: If the input to this Response exceeds the model's context window size,
+# the model will truncate the response to fit the context window by dropping
+# items from the beginning of the conversation.
+# - `disabled` (default): If the input size will exceed the context window size
 # for a model, the request will fail with a 400 error.
 sig do
 returns(

@@ -521,10 +521,10 @@ module OpenAI
 top_logprobs: nil,
 # The truncation strategy to use for the model response.
 #
-# - `auto`: If the
-#
-#
-# - `disabled` (default): If
+# - `auto`: If the input to this Response exceeds the model's context window size,
+# the model will truncate the response to fit the context window by dropping
+# items from the beginning of the conversation.
+# - `disabled` (default): If the input size will exceed the context window size
 # for a model, the request will fail with a 400 error.
 truncation: nil,
 # Represents token usage details including input tokens, output tokens, a

@@ -819,10 +819,10 @@ module OpenAI

 # The truncation strategy to use for the model response.
 #
-# - `auto`: If the
-#
-#
-# - `disabled` (default): If
+# - `auto`: If the input to this Response exceeds the model's context window size,
+# the model will truncate the response to fit the context window by dropping
+# items from the beginning of the conversation.
+# - `disabled` (default): If the input size will exceed the context window size
 # for a model, the request will fail with a 400 error.
 module Truncation
 extend OpenAI::Internal::Type::Enum

@@ -378,10 +378,10 @@ module OpenAI

 # The truncation strategy to use for the model response.
 #
-# - `auto`: If the
-#
-#
-# - `disabled` (default): If
+# - `auto`: If the input to this Response exceeds the model's context window size,
+# the model will truncate the response to fit the context window by dropping
+# items from the beginning of the conversation.
+# - `disabled` (default): If the input size will exceed the context window size
 # for a model, the request will fail with a 400 error.
 sig do
 returns(

@@ -637,10 +637,10 @@ module OpenAI
 top_p: nil,
 # The truncation strategy to use for the model response.
 #
-# - `auto`: If the
-#
-#
-# - `disabled` (default): If
+# - `auto`: If the input to this Response exceeds the model's context window size,
+# the model will truncate the response to fit the context window by dropping
+# items from the beginning of the conversation.
+# - `disabled` (default): If the input size will exceed the context window size
 # for a model, the request will fail with a 400 error.
 truncation: nil,
 # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use

@@ -920,10 +920,10 @@ module OpenAI

 # The truncation strategy to use for the model response.
 #
-# - `auto`: If the
-#
-#
-# - `disabled` (default): If
+# - `auto`: If the input to this Response exceeds the model's context window size,
+# the model will truncate the response to fit the context window by dropping
+# items from the beginning of the conversation.
+# - `disabled` (default): If the input size will exceed the context window size
 # for a model, the request will fail with a 400 error.
 module Truncation
 extend OpenAI::Internal::Type::Enum

@@ -258,10 +258,10 @@ module OpenAI
 top_p: nil,
 # The truncation strategy to use for the model response.
 #
-# - `auto`: If the
-#
-#
-# - `disabled` (default): If
+# - `auto`: If the input to this Response exceeds the model's context window size,
+# the model will truncate the response to fit the context window by dropping
+# items from the beginning of the conversation.
+# - `disabled` (default): If the input size will exceed the context window size
 # for a model, the request will fail with a 400 error.
 truncation: nil,
 # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use

@@ -535,10 +535,10 @@ module OpenAI
 top_p: nil,
 # The truncation strategy to use for the model response.
 #
-# - `auto`: If the
-#
-#
-# - `disabled` (default): If
+# - `auto`: If the input to this Response exceeds the model's context window size,
+# the model will truncate the response to fit the context window by dropping
+# items from the beginning of the conversation.
+# - `disabled` (default): If the input size will exceed the context window size
 # for a model, the request will fail with a 400 error.
 truncation: nil,
 # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use

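The comment change above documents the `truncation` option on the Responses API more completely. As a minimal sketch of using that option from this gem, assuming a configured `OpenAI::Client`; the model name and input are illustrative, and only the `truncation:` argument is the point of the example:

```ruby
require "openai"

# Assumes OPENAI_API_KEY is set in the environment.
client = OpenAI::Client.new

# truncation: :auto lets the model drop items from the beginning of the
# conversation when the input exceeds the context window, instead of the
# request failing with a 400 error (the :disabled default).
response = client.responses.create(
  model: "gpt-4.1", # illustrative model name
  input: "Summarize our conversation so far.",
  truncation: :auto
)

p response.output
```
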
@@ -6,7 +6,7 @@ module OpenAI
 format_: OpenAI::Models::Realtime::realtime_audio_formats,
 noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
 transcription: OpenAI::Realtime::AudioTranscription,
-turn_detection: OpenAI::Realtime::
+turn_detection: OpenAI::Models::Realtime::realtime_audio_input_turn_detection?
 }

 class RealtimeAudioConfigInput < OpenAI::Internal::Type::BaseModel

@@ -28,24 +28,20 @@ module OpenAI
 OpenAI::Realtime::AudioTranscription
 ) -> OpenAI::Realtime::AudioTranscription

-
-
-def turn_detection=: (
-OpenAI::Realtime::RealtimeAudioInputTurnDetection
-) -> OpenAI::Realtime::RealtimeAudioInputTurnDetection
+attr_accessor turn_detection: OpenAI::Models::Realtime::realtime_audio_input_turn_detection?

 def initialize: (
 ?format_: OpenAI::Models::Realtime::realtime_audio_formats,
 ?noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
 ?transcription: OpenAI::Realtime::AudioTranscription,
-?turn_detection: OpenAI::Realtime::
+?turn_detection: OpenAI::Models::Realtime::realtime_audio_input_turn_detection?
 ) -> void

 def to_hash: -> {
 format_: OpenAI::Models::Realtime::realtime_audio_formats,
 noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
 transcription: OpenAI::Realtime::AudioTranscription,
-turn_detection: OpenAI::Realtime::
+turn_detection: OpenAI::Models::Realtime::realtime_audio_input_turn_detection?
 }

 type noise_reduction =

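With this change, `turn_detection` on `RealtimeAudioConfigInput` is typed as nullable. A minimal sketch, using only the constructor and `to_hash` declared above, of passing `nil` explicitly (which, per the nullable type, appears to be how turn detection is left disabled):

```ruby
require "openai"

# Hedged sketch: the updated signature allows turn_detection to be nil,
# so an input audio config can be built without any turn detection.
audio_input = OpenAI::Realtime::RealtimeAudioConfigInput.new(turn_detection: nil)

p audio_input.to_hash # hash form of the config; turn_detection is nil here
```
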
@@ -2,97 +2,123 @@ module OpenAI
 module Models
 module Realtime
 type realtime_audio_input_turn_detection =
-
-
-eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness,
-idle_timeout_ms: Integer?,
-interrupt_response: bool,
-prefix_padding_ms: Integer,
-silence_duration_ms: Integer,
-threshold: Float,
-type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
-}
+OpenAI::Realtime::RealtimeAudioInputTurnDetection::ServerVad
+| OpenAI::Realtime::RealtimeAudioInputTurnDetection::SemanticVad

-
-
+module RealtimeAudioInputTurnDetection
+extend OpenAI::Internal::Type::Union

-
+type server_vad =
+{
+type: :server_vad,
+create_response: bool,
+idle_timeout_ms: Integer?,
+interrupt_response: bool,
+prefix_padding_ms: Integer,
+silence_duration_ms: Integer,
+threshold: Float
+}

-
+class ServerVad < OpenAI::Internal::Type::BaseModel
+attr_accessor type: :server_vad

-
-OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness
-) -> OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness
+attr_reader create_response: bool?

-
+def create_response=: (bool) -> bool

-
+attr_accessor idle_timeout_ms: Integer?

-
+attr_reader interrupt_response: bool?

-
+def interrupt_response=: (bool) -> bool

-
+attr_reader prefix_padding_ms: Integer?

-
+def prefix_padding_ms=: (Integer) -> Integer

-
+attr_reader silence_duration_ms: Integer?

-
+def silence_duration_ms=: (Integer) -> Integer

-
+attr_reader threshold: Float?

-
+def threshold=: (Float) -> Float

-
-
-
+def initialize: (
+?create_response: bool,
+?idle_timeout_ms: Integer?,
+?interrupt_response: bool,
+?prefix_padding_ms: Integer,
+?silence_duration_ms: Integer,
+?threshold: Float,
+?type: :server_vad
+) -> void

-
-
-
-
-
-
-
-
-
-
+def to_hash: -> {
+type: :server_vad,
+create_response: bool,
+idle_timeout_ms: Integer?,
+interrupt_response: bool,
+prefix_padding_ms: Integer,
+silence_duration_ms: Integer,
+threshold: Float
+}
+end

-
-
-
-
-
-
-
-threshold: Float,
-type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
-}
+type semantic_vad =
+{
+type: :semantic_vad,
+create_response: bool,
+eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness,
+interrupt_response: bool
+}

-
+class SemanticVad < OpenAI::Internal::Type::BaseModel
+attr_accessor type: :semantic_vad

-
-extend OpenAI::Internal::Type::Enum
+attr_reader create_response: bool?

-
-MEDIUM: :medium
-HIGH: :high
-AUTO: :auto
+def create_response=: (bool) -> bool

-
-
+attr_reader eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness?
+
+def eagerness=: (
+OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness
+) -> OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness
+
+attr_reader interrupt_response: bool?

-
+def interrupt_response=: (bool) -> bool

-
-
+def initialize: (
+?create_response: bool,
+?eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness,
+?interrupt_response: bool,
+?type: :semantic_vad
+) -> void

-
-
+def to_hash: -> {
+type: :semantic_vad,
+create_response: bool,
+eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness,
+interrupt_response: bool
+}

-
+type eagerness = :low | :medium | :high | :auto
+
+module Eagerness
+extend OpenAI::Internal::Type::Enum
+
+LOW: :low
+MEDIUM: :medium
+HIGH: :high
+AUTO: :auto
+
+def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness]
+end
 end
+
+def self?.variants: -> ::Array[OpenAI::Models::Realtime::realtime_audio_input_turn_detection]
 end
 end
 end

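`realtime_audio_input_turn_detection` changes from a single struct type to a union of `ServerVad` and `SemanticVad`. A minimal sketch of building the server-VAD variant, using only the fields declared in the signatures above; the values themselves are illustrative:

```ruby
require "openai"

# Server-VAD variant of the new turn-detection union; field names come from
# the RBS above, values are illustrative.
vad = OpenAI::Realtime::RealtimeAudioInputTurnDetection::ServerVad.new(
  threshold: 0.5,
  prefix_padding_ms: 300,
  silence_duration_ms: 500,
  create_response: true
)

p vad.to_hash # hash with type: :server_vad plus the fields set above
```
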
@@ -21,7 +21,7 @@ module OpenAI
 tool_choice: String,
 tools: ::Array[OpenAI::Realtime::RealtimeFunctionTool],
 tracing: OpenAI::Models::Realtime::RealtimeSession::tracing?,
-turn_detection: OpenAI::Realtime::RealtimeSession::
+turn_detection: OpenAI::Models::Realtime::RealtimeSession::turn_detection?,
 voice: OpenAI::Models::Realtime::RealtimeSession::voice
 }

@@ -106,7 +106,7 @@ module OpenAI

 attr_accessor tracing: OpenAI::Models::Realtime::RealtimeSession::tracing?

-attr_accessor turn_detection: OpenAI::Realtime::RealtimeSession::
+attr_accessor turn_detection: OpenAI::Models::Realtime::RealtimeSession::turn_detection?

 attr_reader voice: OpenAI::Models::Realtime::RealtimeSession::voice?

@@ -133,7 +133,7 @@ module OpenAI
 ?tool_choice: String,
 ?tools: ::Array[OpenAI::Realtime::RealtimeFunctionTool],
 ?tracing: OpenAI::Models::Realtime::RealtimeSession::tracing?,
-?turn_detection: OpenAI::Realtime::RealtimeSession::
+?turn_detection: OpenAI::Models::Realtime::RealtimeSession::turn_detection?,
 ?voice: OpenAI::Models::Realtime::RealtimeSession::voice
 ) -> void

@@ -156,7 +156,7 @@ module OpenAI
 tool_choice: String,
 tools: ::Array[OpenAI::Realtime::RealtimeFunctionTool],
 tracing: OpenAI::Models::Realtime::RealtimeSession::tracing?,
-turn_detection: OpenAI::Realtime::RealtimeSession::
+turn_detection: OpenAI::Models::Realtime::RealtimeSession::turn_detection?,
 voice: OpenAI::Models::Realtime::RealtimeSession::voice
 }

@@ -307,97 +307,123 @@ module OpenAI
 end

 type turn_detection =
-
-
-eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness,
-idle_timeout_ms: Integer?,
-interrupt_response: bool,
-prefix_padding_ms: Integer,
-silence_duration_ms: Integer,
-threshold: Float,
-type: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_
-}
+OpenAI::Realtime::RealtimeSession::TurnDetection::ServerVad
+| OpenAI::Realtime::RealtimeSession::TurnDetection::SemanticVad

-
-
+module TurnDetection
+extend OpenAI::Internal::Type::Union

-
+type server_vad =
+{
+type: :server_vad,
+create_response: bool,
+idle_timeout_ms: Integer?,
+interrupt_response: bool,
+prefix_padding_ms: Integer,
+silence_duration_ms: Integer,
+threshold: Float
+}

-
+class ServerVad < OpenAI::Internal::Type::BaseModel
+attr_accessor type: :server_vad

-
-OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness
-) -> OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness
+attr_reader create_response: bool?

-
+def create_response=: (bool) -> bool

-
+attr_accessor idle_timeout_ms: Integer?

-
+attr_reader interrupt_response: bool?

-
+def interrupt_response=: (bool) -> bool

-
+attr_reader prefix_padding_ms: Integer?

-
+def prefix_padding_ms=: (Integer) -> Integer

-
+attr_reader silence_duration_ms: Integer?

-
+def silence_duration_ms=: (Integer) -> Integer

-
+attr_reader threshold: Float?

-
+def threshold=: (Float) -> Float

-
-
-
+def initialize: (
+?create_response: bool,
+?idle_timeout_ms: Integer?,
+?interrupt_response: bool,
+?prefix_padding_ms: Integer,
+?silence_duration_ms: Integer,
+?threshold: Float,
+?type: :server_vad
+) -> void

-
-
-
-
-
-
-
-
-
-
+def to_hash: -> {
+type: :server_vad,
+create_response: bool,
+idle_timeout_ms: Integer?,
+interrupt_response: bool,
+prefix_padding_ms: Integer,
+silence_duration_ms: Integer,
+threshold: Float
+}
+end

-
-
-
-
-
-
-
-threshold: Float,
-type: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_
-}
+type semantic_vad =
+{
+type: :semantic_vad,
+create_response: bool,
+eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness,
+interrupt_response: bool
+}

-
+class SemanticVad < OpenAI::Internal::Type::BaseModel
+attr_accessor type: :semantic_vad

-
-extend OpenAI::Internal::Type::Enum
+attr_reader create_response: bool?

-
-MEDIUM: :medium
-HIGH: :high
-AUTO: :auto
+def create_response=: (bool) -> bool

-
-
+attr_reader eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness?
+
+def eagerness=: (
+OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness
+) -> OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness
+
+attr_reader interrupt_response: bool?
+
+def interrupt_response=: (bool) -> bool
+
+def initialize: (
+?create_response: bool,
+?eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness,
+?interrupt_response: bool,
+?type: :semantic_vad
+) -> void
+
+def to_hash: -> {
+type: :semantic_vad,
+create_response: bool,
+eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness,
+interrupt_response: bool
+}

-
+type eagerness = :low | :medium | :high | :auto

-
-
+module Eagerness
+extend OpenAI::Internal::Type::Enum

-
-
+LOW: :low
+MEDIUM: :medium
+HIGH: :high
+AUTO: :auto

-
+def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness]
+end
 end
+
+def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::turn_detection]
 end

 type voice =

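`RealtimeSession::TurnDetection` gets the same union treatment. For the other variant, a hedged sketch of a semantic-VAD configuration using the `eagerness` enum declared above; the values are illustrative:

```ruby
require "openai"

# Semantic-VAD variant on the session's turn detection; eagerness is one of
# :low, :medium, :high, :auto per the enum declared above.
semantic = OpenAI::Realtime::RealtimeSession::TurnDetection::SemanticVad.new(
  eagerness: :high,
  create_response: true,
  interrupt_response: true
)

p semantic.to_hash # hash with type: :semantic_vad plus the fields set above
```
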