openai 0.23.1 → 0.23.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/realtime/input_audio_buffer_timeout_triggered.rb +25 -5
  5. data/lib/openai/models/realtime/realtime_audio_config_input.rb +14 -11
  6. data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +173 -117
  7. data/lib/openai/models/realtime/realtime_server_event.rb +13 -1
  8. data/lib/openai/models/realtime/realtime_session.rb +179 -118
  9. data/lib/openai/models/realtime/realtime_session_create_response.rb +184 -122
  10. data/lib/openai/models/realtime/realtime_transcription_session_audio_input.rb +16 -11
  11. data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +175 -117
  12. data/lib/openai/models/responses/response.rb +8 -8
  13. data/lib/openai/models/responses/response_create_params.rb +8 -8
  14. data/lib/openai/version.rb +1 -1
  15. data/rbi/openai/models/realtime/input_audio_buffer_timeout_triggered.rbi +24 -5
  16. data/rbi/openai/models/realtime/realtime_audio_config_input.rbi +44 -28
  17. data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +264 -203
  18. data/rbi/openai/models/realtime/realtime_session.rbi +306 -231
  19. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +298 -232
  20. data/rbi/openai/models/realtime/realtime_transcription_session_audio_input.rbi +39 -28
  21. data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +264 -200
  22. data/rbi/openai/models/responses/response.rbi +12 -12
  23. data/rbi/openai/models/responses/response_create_params.rbi +12 -12
  24. data/rbi/openai/resources/responses.rbi +8 -8
  25. data/sig/openai/models/realtime/realtime_audio_config_input.rbs +4 -8
  26. data/sig/openai/models/realtime/realtime_audio_input_turn_detection.rbs +91 -65
  27. data/sig/openai/models/realtime/realtime_session.rbs +95 -69
  28. data/sig/openai/models/realtime/realtime_session_create_response.rbs +95 -73
  29. data/sig/openai/models/realtime/realtime_transcription_session_audio_input.rbs +4 -8
  30. data/sig/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbs +91 -65
  31. metadata +2 -2
@@ -265,10 +265,10 @@ module OpenAI
 
   # The truncation strategy to use for the model response.
   #
-  # - `auto`: If the context of this response and previous ones exceeds the model's
-  #   context window size, the model will truncate the response to fit the context
-  #   window by dropping input items in the middle of the conversation.
-  # - `disabled` (default): If a model response will exceed the context window size
+  # - `auto`: If the input to this Response exceeds the model's context window size,
+  #   the model will truncate the response to fit the context window by dropping
+  #   items from the beginning of the conversation.
+  # - `disabled` (default): If the input size will exceed the context window size
   #   for a model, the request will fail with a 400 error.
   sig do
     returns(
@@ -521,10 +521,10 @@ module OpenAI
   top_logprobs: nil,
   # The truncation strategy to use for the model response.
   #
-  # - `auto`: If the context of this response and previous ones exceeds the model's
-  #   context window size, the model will truncate the response to fit the context
-  #   window by dropping input items in the middle of the conversation.
-  # - `disabled` (default): If a model response will exceed the context window size
+  # - `auto`: If the input to this Response exceeds the model's context window size,
+  #   the model will truncate the response to fit the context window by dropping
+  #   items from the beginning of the conversation.
+  # - `disabled` (default): If the input size will exceed the context window size
   #   for a model, the request will fail with a 400 error.
   truncation: nil,
   # Represents token usage details including input tokens, output tokens, a
@@ -819,10 +819,10 @@ module OpenAI
 
   # The truncation strategy to use for the model response.
   #
-  # - `auto`: If the context of this response and previous ones exceeds the model's
-  #   context window size, the model will truncate the response to fit the context
-  #   window by dropping input items in the middle of the conversation.
-  # - `disabled` (default): If a model response will exceed the context window size
+  # - `auto`: If the input to this Response exceeds the model's context window size,
+  #   the model will truncate the response to fit the context window by dropping
+  #   items from the beginning of the conversation.
+  # - `disabled` (default): If the input size will exceed the context window size
   #   for a model, the request will fail with a 400 error.
   module Truncation
     extend OpenAI::Internal::Type::Enum
@@ -378,10 +378,10 @@ module OpenAI
 
   # The truncation strategy to use for the model response.
   #
-  # - `auto`: If the context of this response and previous ones exceeds the model's
-  #   context window size, the model will truncate the response to fit the context
-  #   window by dropping input items in the middle of the conversation.
-  # - `disabled` (default): If a model response will exceed the context window size
+  # - `auto`: If the input to this Response exceeds the model's context window size,
+  #   the model will truncate the response to fit the context window by dropping
+  #   items from the beginning of the conversation.
+  # - `disabled` (default): If the input size will exceed the context window size
   #   for a model, the request will fail with a 400 error.
   sig do
     returns(
@@ -637,10 +637,10 @@ module OpenAI
   top_p: nil,
   # The truncation strategy to use for the model response.
   #
-  # - `auto`: If the context of this response and previous ones exceeds the model's
-  #   context window size, the model will truncate the response to fit the context
-  #   window by dropping input items in the middle of the conversation.
-  # - `disabled` (default): If a model response will exceed the context window size
+  # - `auto`: If the input to this Response exceeds the model's context window size,
+  #   the model will truncate the response to fit the context window by dropping
+  #   items from the beginning of the conversation.
+  # - `disabled` (default): If the input size will exceed the context window size
   #   for a model, the request will fail with a 400 error.
   truncation: nil,
   # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
@@ -920,10 +920,10 @@ module OpenAI
 
   # The truncation strategy to use for the model response.
   #
-  # - `auto`: If the context of this response and previous ones exceeds the model's
-  #   context window size, the model will truncate the response to fit the context
-  #   window by dropping input items in the middle of the conversation.
-  # - `disabled` (default): If a model response will exceed the context window size
+  # - `auto`: If the input to this Response exceeds the model's context window size,
+  #   the model will truncate the response to fit the context window by dropping
+  #   items from the beginning of the conversation.
+  # - `disabled` (default): If the input size will exceed the context window size
   #   for a model, the request will fail with a 400 error.
   module Truncation
     extend OpenAI::Internal::Type::Enum
@@ -258,10 +258,10 @@ module OpenAI
   top_p: nil,
   # The truncation strategy to use for the model response.
  #
-  # - `auto`: If the context of this response and previous ones exceeds the model's
-  #   context window size, the model will truncate the response to fit the context
-  #   window by dropping input items in the middle of the conversation.
-  # - `disabled` (default): If a model response will exceed the context window size
+  # - `auto`: If the input to this Response exceeds the model's context window size,
+  #   the model will truncate the response to fit the context window by dropping
+  #   items from the beginning of the conversation.
+  # - `disabled` (default): If the input size will exceed the context window size
   #   for a model, the request will fail with a 400 error.
   truncation: nil,
   # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
@@ -535,10 +535,10 @@ module OpenAI
   top_p: nil,
   # The truncation strategy to use for the model response.
   #
-  # - `auto`: If the context of this response and previous ones exceeds the model's
-  #   context window size, the model will truncate the response to fit the context
-  #   window by dropping input items in the middle of the conversation.
-  # - `disabled` (default): If a model response will exceed the context window size
+  # - `auto`: If the input to this Response exceeds the model's context window size,
+  #   the model will truncate the response to fit the context window by dropping
+  #   items from the beginning of the conversation.
+  # - `disabled` (default): If the input size will exceed the context window size
   #   for a model, the request will fail with a 400 error.
   truncation: nil,
   # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
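
The only change to the `responses` surface in this release is documentation: the `truncation` values themselves are unchanged (`auto` and `disabled`); only the description of what `auto` drops was corrected. A minimal sketch of passing the option through the Ruby client, assuming the standard client setup from the gem's README (the model name and prompt are placeholders, not taken from this diff):

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# With `truncation: :auto`, the API may drop items from the beginning of the
# conversation when the input exceeds the model's context window; the default
# `:disabled` makes such a request fail with a 400 error instead.
response = client.responses.create(
  model: "gpt-4.1",
  input: "Summarize the conversation so far.",
  truncation: :auto
)

puts response.id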
@@ -6,7 +6,7 @@ module OpenAI
         format_: OpenAI::Models::Realtime::realtime_audio_formats,
         noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
         transcription: OpenAI::Realtime::AudioTranscription,
-        turn_detection: OpenAI::Realtime::RealtimeAudioInputTurnDetection
+        turn_detection: OpenAI::Models::Realtime::realtime_audio_input_turn_detection?
       }
 
       class RealtimeAudioConfigInput < OpenAI::Internal::Type::BaseModel
@@ -28,24 +28,20 @@ module OpenAI
          OpenAI::Realtime::AudioTranscription
        ) -> OpenAI::Realtime::AudioTranscription
 
-       attr_reader turn_detection: OpenAI::Realtime::RealtimeAudioInputTurnDetection?
-
-       def turn_detection=: (
-         OpenAI::Realtime::RealtimeAudioInputTurnDetection
-       ) -> OpenAI::Realtime::RealtimeAudioInputTurnDetection
+       attr_accessor turn_detection: OpenAI::Models::Realtime::realtime_audio_input_turn_detection?
 
        def initialize: (
          ?format_: OpenAI::Models::Realtime::realtime_audio_formats,
          ?noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
          ?transcription: OpenAI::Realtime::AudioTranscription,
-         ?turn_detection: OpenAI::Realtime::RealtimeAudioInputTurnDetection
+         ?turn_detection: OpenAI::Models::Realtime::realtime_audio_input_turn_detection?
        ) -> void
 
        def to_hash: -> {
          format_: OpenAI::Models::Realtime::realtime_audio_formats,
          noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
          transcription: OpenAI::Realtime::AudioTranscription,
-         turn_detection: OpenAI::Realtime::RealtimeAudioInputTurnDetection
+         turn_detection: OpenAI::Models::Realtime::realtime_audio_input_turn_detection?
        }
 
        type noise_reduction =
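
In `RealtimeAudioConfigInput` (and, per the file list, its transcription-session counterpart), `turn_detection` changes from a plain `attr_reader` of the old model class to a nullable `attr_accessor` of the new union type, so an explicit `nil` is now representable. A minimal sketch of what that allows, constructing the generated model directly (how the value is then sent in a session update is not shown here):

# An explicit nil turn_detection can now be carried by the model; the Realtime
# API treats a null turn_detection as disabling server-side turn detection.
audio_input = OpenAI::Realtime::RealtimeAudioConfigInput.new(turn_detection: nil)
audio_input.turn_detection # => nil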
@@ -2,97 +2,123 @@ module OpenAI
   module Models
     module Realtime
       type realtime_audio_input_turn_detection =
-        {
-          create_response: bool,
-          eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness,
-          idle_timeout_ms: Integer?,
-          interrupt_response: bool,
-          prefix_padding_ms: Integer,
-          silence_duration_ms: Integer,
-          threshold: Float,
-          type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
-        }
+        OpenAI::Realtime::RealtimeAudioInputTurnDetection::ServerVad
+        | OpenAI::Realtime::RealtimeAudioInputTurnDetection::SemanticVad
 
-      class RealtimeAudioInputTurnDetection < OpenAI::Internal::Type::BaseModel
-        attr_reader create_response: bool?
+      module RealtimeAudioInputTurnDetection
+        extend OpenAI::Internal::Type::Union
 
-        def create_response=: (bool) -> bool
+        type server_vad =
+          {
+            type: :server_vad,
+            create_response: bool,
+            idle_timeout_ms: Integer?,
+            interrupt_response: bool,
+            prefix_padding_ms: Integer,
+            silence_duration_ms: Integer,
+            threshold: Float
+          }
 
-        attr_reader eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness?
+        class ServerVad < OpenAI::Internal::Type::BaseModel
+          attr_accessor type: :server_vad
 
-        def eagerness=: (
-          OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness
-        ) -> OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness
+          attr_reader create_response: bool?
 
-        attr_accessor idle_timeout_ms: Integer?
+          def create_response=: (bool) -> bool
 
-        attr_reader interrupt_response: bool?
+          attr_accessor idle_timeout_ms: Integer?
 
-        def interrupt_response=: (bool) -> bool
+          attr_reader interrupt_response: bool?
 
-        attr_reader prefix_padding_ms: Integer?
+          def interrupt_response=: (bool) -> bool
 
-        def prefix_padding_ms=: (Integer) -> Integer
+          attr_reader prefix_padding_ms: Integer?
 
-        attr_reader silence_duration_ms: Integer?
+          def prefix_padding_ms=: (Integer) -> Integer
 
-        def silence_duration_ms=: (Integer) -> Integer
+          attr_reader silence_duration_ms: Integer?
 
-        attr_reader threshold: Float?
+          def silence_duration_ms=: (Integer) -> Integer
 
-        def threshold=: (Float) -> Float
+          attr_reader threshold: Float?
 
-        attr_reader type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_?
+          def threshold=: (Float) -> Float
 
-        def type=: (
-          OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
-        ) -> OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
+          def initialize: (
+            ?create_response: bool,
+            ?idle_timeout_ms: Integer?,
+            ?interrupt_response: bool,
+            ?prefix_padding_ms: Integer,
+            ?silence_duration_ms: Integer,
+            ?threshold: Float,
+            ?type: :server_vad
+          ) -> void
 
-        def initialize: (
-          ?create_response: bool,
-          ?eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness,
-          ?idle_timeout_ms: Integer?,
-          ?interrupt_response: bool,
-          ?prefix_padding_ms: Integer,
-          ?silence_duration_ms: Integer,
-          ?threshold: Float,
-          ?type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
-        ) -> void
+          def to_hash: -> {
+            type: :server_vad,
+            create_response: bool,
+            idle_timeout_ms: Integer?,
+            interrupt_response: bool,
+            prefix_padding_ms: Integer,
+            silence_duration_ms: Integer,
+            threshold: Float
+          }
+        end
 
-        def to_hash: -> {
-          create_response: bool,
-          eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness,
-          idle_timeout_ms: Integer?,
-          interrupt_response: bool,
-          prefix_padding_ms: Integer,
-          silence_duration_ms: Integer,
-          threshold: Float,
-          type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
-        }
+        type semantic_vad =
+          {
+            type: :semantic_vad,
+            create_response: bool,
+            eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness,
+            interrupt_response: bool
+          }
 
-        type eagerness = :low | :medium | :high | :auto
+        class SemanticVad < OpenAI::Internal::Type::BaseModel
+          attr_accessor type: :semantic_vad
 
-        module Eagerness
-          extend OpenAI::Internal::Type::Enum
+          attr_reader create_response: bool?
 
-          LOW: :low
-          MEDIUM: :medium
-          HIGH: :high
-          AUTO: :auto
+          def create_response=: (bool) -> bool
 
-          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness]
-        end
+          attr_reader eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness?
+
+          def eagerness=: (
+            OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness
+          ) -> OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness
+
+          attr_reader interrupt_response: bool?
 
-        type type_ = :server_vad | :semantic_vad
+          def interrupt_response=: (bool) -> bool
 
-        module Type
-          extend OpenAI::Internal::Type::Enum
+          def initialize: (
+            ?create_response: bool,
+            ?eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness,
+            ?interrupt_response: bool,
+            ?type: :semantic_vad
+          ) -> void
 
-          SERVER_VAD: :server_vad
-          SEMANTIC_VAD: :semantic_vad
+          def to_hash: -> {
+            type: :semantic_vad,
+            create_response: bool,
+            eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness,
+            interrupt_response: bool
+          }
 
-          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_]
+          type eagerness = :low | :medium | :high | :auto
+
+          module Eagerness
+            extend OpenAI::Internal::Type::Enum
+
+            LOW: :low
+            MEDIUM: :medium
+            HIGH: :high
+            AUTO: :auto
+
+            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::SemanticVad::eagerness]
+          end
         end
+
+        def self?.variants: -> ::Array[OpenAI::Models::Realtime::realtime_audio_input_turn_detection]
       end
     end
   end
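
`RealtimeAudioInputTurnDetection` is no longer a single model carrying a `type` enum; it is now a union of two variant classes, `ServerVad` and `SemanticVad`, each exposing only the fields that apply to it. A minimal sketch of constructing each variant, using only the initializers declared in the signatures above (the field values are illustrative):

# Server VAD: silence-based end-of-turn detection with tunable thresholds.
server_vad = OpenAI::Realtime::RealtimeAudioInputTurnDetection::ServerVad.new(
  threshold: 0.5,
  prefix_padding_ms: 300,
  silence_duration_ms: 500,
  create_response: true
)

# Semantic VAD: model-driven end-of-turn detection with an eagerness setting.
semantic_vad = OpenAI::Realtime::RealtimeAudioInputTurnDetection::SemanticVad.new(
  eagerness: :high,
  interrupt_response: true
)

server_vad.type   # => :server_vad
semantic_vad.type # => :semantic_vad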
@@ -21,7 +21,7 @@ module OpenAI
         tool_choice: String,
         tools: ::Array[OpenAI::Realtime::RealtimeFunctionTool],
         tracing: OpenAI::Models::Realtime::RealtimeSession::tracing?,
-        turn_detection: OpenAI::Realtime::RealtimeSession::TurnDetection?,
+        turn_detection: OpenAI::Models::Realtime::RealtimeSession::turn_detection?,
         voice: OpenAI::Models::Realtime::RealtimeSession::voice
       }
 
@@ -106,7 +106,7 @@ module OpenAI
 
       attr_accessor tracing: OpenAI::Models::Realtime::RealtimeSession::tracing?
 
-      attr_accessor turn_detection: OpenAI::Realtime::RealtimeSession::TurnDetection?
+      attr_accessor turn_detection: OpenAI::Models::Realtime::RealtimeSession::turn_detection?
 
       attr_reader voice: OpenAI::Models::Realtime::RealtimeSession::voice?
 
@@ -133,7 +133,7 @@ module OpenAI
        ?tool_choice: String,
        ?tools: ::Array[OpenAI::Realtime::RealtimeFunctionTool],
        ?tracing: OpenAI::Models::Realtime::RealtimeSession::tracing?,
-       ?turn_detection: OpenAI::Realtime::RealtimeSession::TurnDetection?,
+       ?turn_detection: OpenAI::Models::Realtime::RealtimeSession::turn_detection?,
        ?voice: OpenAI::Models::Realtime::RealtimeSession::voice
      ) -> void
 
@@ -156,7 +156,7 @@ module OpenAI
        tool_choice: String,
        tools: ::Array[OpenAI::Realtime::RealtimeFunctionTool],
        tracing: OpenAI::Models::Realtime::RealtimeSession::tracing?,
-       turn_detection: OpenAI::Realtime::RealtimeSession::TurnDetection?,
+       turn_detection: OpenAI::Models::Realtime::RealtimeSession::turn_detection?,
        voice: OpenAI::Models::Realtime::RealtimeSession::voice
      }
 
@@ -307,97 +307,123 @@ module OpenAI
       end
 
       type turn_detection =
-        {
-          create_response: bool,
-          eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness,
-          idle_timeout_ms: Integer?,
-          interrupt_response: bool,
-          prefix_padding_ms: Integer,
-          silence_duration_ms: Integer,
-          threshold: Float,
-          type: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_
-        }
+        OpenAI::Realtime::RealtimeSession::TurnDetection::ServerVad
+        | OpenAI::Realtime::RealtimeSession::TurnDetection::SemanticVad
 
-      class TurnDetection < OpenAI::Internal::Type::BaseModel
-        attr_reader create_response: bool?
+      module TurnDetection
+        extend OpenAI::Internal::Type::Union
 
-        def create_response=: (bool) -> bool
+        type server_vad =
+          {
+            type: :server_vad,
+            create_response: bool,
+            idle_timeout_ms: Integer?,
+            interrupt_response: bool,
+            prefix_padding_ms: Integer,
+            silence_duration_ms: Integer,
+            threshold: Float
+          }
 
-        attr_reader eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness?
+        class ServerVad < OpenAI::Internal::Type::BaseModel
+          attr_accessor type: :server_vad
 
-        def eagerness=: (
-          OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness
-        ) -> OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness
+          attr_reader create_response: bool?
 
-        attr_accessor idle_timeout_ms: Integer?
+          def create_response=: (bool) -> bool
 
-        attr_reader interrupt_response: bool?
+          attr_accessor idle_timeout_ms: Integer?
 
-        def interrupt_response=: (bool) -> bool
+          attr_reader interrupt_response: bool?
 
-        attr_reader prefix_padding_ms: Integer?
+          def interrupt_response=: (bool) -> bool
 
-        def prefix_padding_ms=: (Integer) -> Integer
+          attr_reader prefix_padding_ms: Integer?
 
-        attr_reader silence_duration_ms: Integer?
+          def prefix_padding_ms=: (Integer) -> Integer
 
-        def silence_duration_ms=: (Integer) -> Integer
+          attr_reader silence_duration_ms: Integer?
 
-        attr_reader threshold: Float?
+          def silence_duration_ms=: (Integer) -> Integer
 
-        def threshold=: (Float) -> Float
+          attr_reader threshold: Float?
 
-        attr_reader type: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_?
+          def threshold=: (Float) -> Float
 
-        def type=: (
-          OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_
-        ) -> OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_
+          def initialize: (
+            ?create_response: bool,
+            ?idle_timeout_ms: Integer?,
+            ?interrupt_response: bool,
+            ?prefix_padding_ms: Integer,
+            ?silence_duration_ms: Integer,
+            ?threshold: Float,
+            ?type: :server_vad
+          ) -> void
 
-        def initialize: (
-          ?create_response: bool,
-          ?eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness,
-          ?idle_timeout_ms: Integer?,
-          ?interrupt_response: bool,
-          ?prefix_padding_ms: Integer,
-          ?silence_duration_ms: Integer,
-          ?threshold: Float,
-          ?type: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_
-        ) -> void
+          def to_hash: -> {
+            type: :server_vad,
+            create_response: bool,
+            idle_timeout_ms: Integer?,
+            interrupt_response: bool,
+            prefix_padding_ms: Integer,
+            silence_duration_ms: Integer,
+            threshold: Float
+          }
+        end
 
-        def to_hash: -> {
-          create_response: bool,
-          eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness,
-          idle_timeout_ms: Integer?,
-          interrupt_response: bool,
-          prefix_padding_ms: Integer,
-          silence_duration_ms: Integer,
-          threshold: Float,
-          type: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_
-        }
+        type semantic_vad =
+          {
+            type: :semantic_vad,
+            create_response: bool,
+            eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness,
+            interrupt_response: bool
+          }
 
-        type eagerness = :low | :medium | :high | :auto
+        class SemanticVad < OpenAI::Internal::Type::BaseModel
+          attr_accessor type: :semantic_vad
 
-        module Eagerness
-          extend OpenAI::Internal::Type::Enum
+          attr_reader create_response: bool?
 
-          LOW: :low
-          MEDIUM: :medium
-          HIGH: :high
-          AUTO: :auto
+          def create_response=: (bool) -> bool
 
-          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::TurnDetection::eagerness]
-        end
+          attr_reader eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness?
+
+          def eagerness=: (
+            OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness
+          ) -> OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness
+
+          attr_reader interrupt_response: bool?
+
+          def interrupt_response=: (bool) -> bool
+
+          def initialize: (
+            ?create_response: bool,
+            ?eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness,
+            ?interrupt_response: bool,
+            ?type: :semantic_vad
+          ) -> void
+
+          def to_hash: -> {
+            type: :semantic_vad,
+            create_response: bool,
+            eagerness: OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness,
+            interrupt_response: bool
+          }
 
-        type type_ = :server_vad | :semantic_vad
+          type eagerness = :low | :medium | :high | :auto
 
-        module Type
-          extend OpenAI::Internal::Type::Enum
+          module Eagerness
+            extend OpenAI::Internal::Type::Enum
 
-          SERVER_VAD: :server_vad
-          SEMANTIC_VAD: :semantic_vad
+            LOW: :low
+            MEDIUM: :medium
+            HIGH: :high
+            AUTO: :auto
 
-          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::TurnDetection::type_]
+            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::TurnDetection::SemanticVad::eagerness]
+          end
         end
+
+        def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::turn_detection]
       end
 
       type voice =
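
`RealtimeSession#turn_detection` (and, per the file list, the same field on the session create response and transcription session models) now yields one of the two variant classes, or `nil`, rather than a single `TurnDetection` model whose `type` field had to be inspected. A minimal sketch of branching on the variant, where `session` stands in for a deserialized `OpenAI::Realtime::RealtimeSession` obtained elsewhere:

# Branch on the concrete variant class instead of a `type` enum field.
case session.turn_detection
when OpenAI::Realtime::RealtimeSession::TurnDetection::ServerVad
  puts "server VAD, silence_duration_ms=#{session.turn_detection.silence_duration_ms}"
when OpenAI::Realtime::RealtimeSession::TurnDetection::SemanticVad
  puts "semantic VAD, eagerness=#{session.turn_detection.eagerness}"
when nil
  puts "turn detection disabled"
end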