openai 0.22.1 → 0.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +1 -1
- data/lib/openai/models/realtime/audio_transcription.rb +60 -0
- data/lib/openai/models/realtime/client_secret_create_params.rb +18 -9
- data/lib/openai/models/realtime/client_secret_create_response.rb +11 -250
- data/lib/openai/models/realtime/conversation_item.rb +1 -1
- data/lib/openai/models/realtime/conversation_item_added.rb +14 -1
- data/lib/openai/models/realtime/conversation_item_done.rb +3 -0
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rb +10 -8
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rb +14 -5
- data/lib/openai/models/realtime/conversation_item_truncate_event.rb +2 -2
- data/lib/openai/models/realtime/input_audio_buffer_append_event.rb +10 -5
- data/lib/openai/models/realtime/models.rb +58 -0
- data/lib/openai/models/realtime/noise_reduction_type.rb +20 -0
- data/lib/openai/models/realtime/realtime_audio_config.rb +6 -427
- data/lib/openai/models/realtime/realtime_audio_config_input.rb +89 -0
- data/lib/openai/models/realtime/realtime_audio_config_output.rb +100 -0
- data/lib/openai/models/realtime/realtime_audio_formats.rb +121 -0
- data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +131 -0
- data/lib/openai/models/realtime/realtime_client_event.rb +31 -23
- data/lib/openai/models/realtime/realtime_conversation_item_assistant_message.rb +43 -10
- data/lib/openai/models/realtime/realtime_conversation_item_function_call.rb +16 -7
- data/lib/openai/models/realtime/realtime_conversation_item_function_call_output.rb +15 -7
- data/lib/openai/models/realtime/realtime_conversation_item_system_message.rb +18 -6
- data/lib/openai/models/realtime/realtime_conversation_item_user_message.rb +62 -13
- data/lib/openai/models/realtime/realtime_response.rb +117 -107
- data/lib/openai/models/realtime/realtime_response_create_audio_output.rb +100 -0
- data/lib/openai/models/realtime/realtime_response_create_mcp_tool.rb +310 -0
- data/lib/openai/models/realtime/realtime_response_create_params.rb +225 -0
- data/lib/openai/models/realtime/realtime_response_status.rb +1 -1
- data/lib/openai/models/realtime/realtime_response_usage.rb +5 -2
- data/lib/openai/models/realtime/realtime_response_usage_input_token_details.rb +58 -8
- data/lib/openai/models/realtime/realtime_server_event.rb +21 -5
- data/lib/openai/models/realtime/realtime_session.rb +9 -125
- data/lib/openai/models/realtime/realtime_session_client_secret.rb +36 -0
- data/lib/openai/models/realtime/realtime_session_create_request.rb +50 -71
- data/lib/openai/models/realtime/realtime_session_create_response.rb +621 -219
- data/lib/openai/models/realtime/realtime_tools_config_union.rb +2 -53
- data/lib/openai/models/realtime/realtime_tracing_config.rb +7 -6
- data/lib/openai/models/realtime/realtime_transcription_session_audio.rb +19 -0
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input.rb +90 -0
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +131 -0
- data/lib/openai/models/realtime/realtime_transcription_session_client_secret.rb +38 -0
- data/lib/openai/models/realtime/realtime_transcription_session_create_request.rb +12 -270
- data/lib/openai/models/realtime/realtime_transcription_session_create_response.rb +78 -0
- data/lib/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rb +66 -0
- data/lib/openai/models/realtime/realtime_transcription_session_turn_detection.rb +57 -0
- data/lib/openai/models/realtime/realtime_truncation.rb +8 -40
- data/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb +34 -0
- data/lib/openai/models/realtime/response_cancel_event.rb +3 -1
- data/lib/openai/models/realtime/response_create_event.rb +18 -348
- data/lib/openai/models/realtime/response_done_event.rb +7 -0
- data/lib/openai/models/realtime/session_created_event.rb +20 -4
- data/lib/openai/models/realtime/session_update_event.rb +36 -12
- data/lib/openai/models/realtime/session_updated_event.rb +20 -4
- data/lib/openai/models/realtime/transcription_session_created.rb +8 -243
- data/lib/openai/models/realtime/transcription_session_update.rb +179 -3
- data/lib/openai/models/realtime/transcription_session_updated_event.rb +8 -243
- data/lib/openai/resources/realtime/client_secrets.rb +2 -3
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +19 -1
- data/rbi/openai/models/realtime/audio_transcription.rbi +132 -0
- data/rbi/openai/models/realtime/client_secret_create_params.rbi +25 -11
- data/rbi/openai/models/realtime/client_secret_create_response.rbi +2 -587
- data/rbi/openai/models/realtime/conversation_item_added.rbi +14 -1
- data/rbi/openai/models/realtime/conversation_item_done.rbi +3 -0
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rbi +11 -8
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rbi +15 -5
- data/rbi/openai/models/realtime/conversation_item_truncate_event.rbi +2 -2
- data/rbi/openai/models/realtime/input_audio_buffer_append_event.rbi +10 -5
- data/rbi/openai/models/realtime/models.rbi +97 -0
- data/rbi/openai/models/realtime/noise_reduction_type.rbi +31 -0
- data/rbi/openai/models/realtime/realtime_audio_config.rbi +8 -956
- data/rbi/openai/models/realtime/realtime_audio_config_input.rbi +221 -0
- data/rbi/openai/models/realtime/realtime_audio_config_output.rbi +222 -0
- data/rbi/openai/models/realtime/realtime_audio_formats.rbi +329 -0
- data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +262 -0
- data/rbi/openai/models/realtime/realtime_conversation_item_assistant_message.rbi +51 -10
- data/rbi/openai/models/realtime/realtime_conversation_item_function_call.rbi +16 -7
- data/rbi/openai/models/realtime/realtime_conversation_item_function_call_output.rbi +14 -7
- data/rbi/openai/models/realtime/realtime_conversation_item_system_message.rbi +16 -6
- data/rbi/openai/models/realtime/realtime_conversation_item_user_message.rbi +110 -12
- data/rbi/openai/models/realtime/realtime_response.rbi +287 -212
- data/rbi/openai/models/realtime/realtime_response_create_audio_output.rbi +250 -0
- data/rbi/openai/models/realtime/realtime_response_create_mcp_tool.rbi +616 -0
- data/rbi/openai/models/realtime/realtime_response_create_params.rbi +529 -0
- data/rbi/openai/models/realtime/realtime_response_usage.rbi +8 -2
- data/rbi/openai/models/realtime/realtime_response_usage_input_token_details.rbi +106 -7
- data/rbi/openai/models/realtime/realtime_server_event.rbi +4 -1
- data/rbi/openai/models/realtime/realtime_session.rbi +12 -262
- data/rbi/openai/models/realtime/realtime_session_client_secret.rbi +49 -0
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +112 -133
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +1229 -405
- data/rbi/openai/models/realtime/realtime_tools_config_union.rbi +1 -117
- data/rbi/openai/models/realtime/realtime_tracing_config.rbi +11 -10
- data/rbi/openai/models/realtime/realtime_transcription_session_audio.rbi +50 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input.rbi +226 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +259 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_client_secret.rbi +51 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_create_request.rbi +25 -597
- data/rbi/openai/models/realtime/realtime_transcription_session_create_response.rbi +195 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rbi +144 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_turn_detection.rbi +94 -0
- data/rbi/openai/models/realtime/realtime_truncation.rbi +5 -56
- data/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi +45 -0
- data/rbi/openai/models/realtime/response_cancel_event.rbi +3 -1
- data/rbi/openai/models/realtime/response_create_event.rbi +19 -786
- data/rbi/openai/models/realtime/response_done_event.rbi +7 -0
- data/rbi/openai/models/realtime/session_created_event.rbi +42 -9
- data/rbi/openai/models/realtime/session_update_event.rbi +57 -19
- data/rbi/openai/models/realtime/session_updated_event.rbi +42 -9
- data/rbi/openai/models/realtime/transcription_session_created.rbi +17 -591
- data/rbi/openai/models/realtime/transcription_session_update.rbi +425 -7
- data/rbi/openai/models/realtime/transcription_session_updated_event.rbi +14 -591
- data/rbi/openai/resources/realtime/client_secrets.rbi +5 -3
- data/sig/openai/models/realtime/audio_transcription.rbs +57 -0
- data/sig/openai/models/realtime/client_secret_create_response.rbs +1 -251
- data/sig/openai/models/realtime/models.rbs +57 -0
- data/sig/openai/models/realtime/noise_reduction_type.rbs +16 -0
- data/sig/openai/models/realtime/realtime_audio_config.rbs +12 -331
- data/sig/openai/models/realtime/realtime_audio_config_input.rbs +72 -0
- data/sig/openai/models/realtime/realtime_audio_config_output.rbs +72 -0
- data/sig/openai/models/realtime/realtime_audio_formats.rbs +128 -0
- data/sig/openai/models/realtime/realtime_audio_input_turn_detection.rbs +99 -0
- data/sig/openai/models/realtime/realtime_conversation_item_assistant_message.rbs +17 -2
- data/sig/openai/models/realtime/realtime_conversation_item_user_message.rbs +30 -1
- data/sig/openai/models/realtime/realtime_response.rbs +103 -82
- data/sig/openai/models/realtime/realtime_response_create_audio_output.rbs +84 -0
- data/sig/openai/models/realtime/realtime_response_create_mcp_tool.rbs +218 -0
- data/sig/openai/models/realtime/realtime_response_create_params.rbs +148 -0
- data/sig/openai/models/realtime/realtime_response_usage_input_token_details.rbs +50 -1
- data/sig/openai/models/realtime/realtime_session.rbs +16 -106
- data/sig/openai/models/realtime/realtime_session_client_secret.rbs +20 -0
- data/sig/openai/models/realtime/realtime_session_create_request.rbs +27 -43
- data/sig/openai/models/realtime/realtime_session_create_response.rbs +389 -187
- data/sig/openai/models/realtime/realtime_tools_config_union.rbs +1 -53
- data/sig/openai/models/realtime/realtime_transcription_session_audio.rbs +24 -0
- data/sig/openai/models/realtime/realtime_transcription_session_audio_input.rbs +72 -0
- data/sig/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbs +99 -0
- data/sig/openai/models/realtime/realtime_transcription_session_client_secret.rbs +20 -0
- data/sig/openai/models/realtime/realtime_transcription_session_create_request.rbs +11 -203
- data/sig/openai/models/realtime/realtime_transcription_session_create_response.rbs +69 -0
- data/sig/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rbs +59 -0
- data/sig/openai/models/realtime/realtime_transcription_session_turn_detection.rbs +47 -0
- data/sig/openai/models/realtime/realtime_truncation.rbs +1 -28
- data/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs +21 -0
- data/sig/openai/models/realtime/response_create_event.rbs +6 -249
- data/sig/openai/models/realtime/session_created_event.rbs +14 -4
- data/sig/openai/models/realtime/session_update_event.rbs +14 -4
- data/sig/openai/models/realtime/session_updated_event.rbs +14 -4
- data/sig/openai/models/realtime/transcription_session_created.rbs +4 -254
- data/sig/openai/models/realtime/transcription_session_update.rbs +154 -4
- data/sig/openai/models/realtime/transcription_session_updated_event.rbs +4 -254
- metadata +59 -5
- data/lib/openai/models/realtime/realtime_client_secret_config.rb +0 -64
- data/rbi/openai/models/realtime/realtime_client_secret_config.rbi +0 -147
- data/sig/openai/models/realtime/realtime_client_secret_config.rbs +0 -60

data/sig/openai/models/realtime/realtime_audio_config.rbs

@@ -3,351 +3,32 @@ module OpenAI
     module Realtime
       type realtime_audio_config =
         {
-          input: OpenAI::Realtime::RealtimeAudioConfig::Input,
-          output: OpenAI::Realtime::RealtimeAudioConfig::Output
+          input: OpenAI::Realtime::RealtimeAudioConfigInput,
+          output: OpenAI::Realtime::RealtimeAudioConfigOutput
         }

       class RealtimeAudioConfig < OpenAI::Internal::Type::BaseModel
-        attr_reader input: OpenAI::Realtime::RealtimeAudioConfig::Input?
+        attr_reader input: OpenAI::Realtime::RealtimeAudioConfigInput?

         def input=: (
-          OpenAI::Realtime::RealtimeAudioConfig::Input
-        ) -> OpenAI::Realtime::RealtimeAudioConfig::Input
+          OpenAI::Realtime::RealtimeAudioConfigInput
+        ) -> OpenAI::Realtime::RealtimeAudioConfigInput

-        attr_reader output: OpenAI::Realtime::RealtimeAudioConfig::Output?
+        attr_reader output: OpenAI::Realtime::RealtimeAudioConfigOutput?

         def output=: (
-          OpenAI::Realtime::RealtimeAudioConfig::Output
-        ) -> OpenAI::Realtime::RealtimeAudioConfig::Output
+          OpenAI::Realtime::RealtimeAudioConfigOutput
+        ) -> OpenAI::Realtime::RealtimeAudioConfigOutput

         def initialize: (
-          ?input: OpenAI::Realtime::RealtimeAudioConfig::Input,
-          ?output: OpenAI::Realtime::RealtimeAudioConfig::Output
+          ?input: OpenAI::Realtime::RealtimeAudioConfigInput,
+          ?output: OpenAI::Realtime::RealtimeAudioConfigOutput
         ) -> void

         def to_hash: -> {
-          input: OpenAI::Realtime::RealtimeAudioConfig::Input,
-          output: OpenAI::Realtime::RealtimeAudioConfig::Output
+          input: OpenAI::Realtime::RealtimeAudioConfigInput,
+          output: OpenAI::Realtime::RealtimeAudioConfigOutput
         }
-
-        type input =
-          {
-            format_: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::format_,
-            noise_reduction: OpenAI::Realtime::RealtimeAudioConfig::Input::NoiseReduction,
-            transcription: OpenAI::Realtime::RealtimeAudioConfig::Input::Transcription,
-            turn_detection: OpenAI::Realtime::RealtimeAudioConfig::Input::TurnDetection
-          }
-
-        class Input < OpenAI::Internal::Type::BaseModel
-          attr_reader format_: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::format_?
-
-          def format_=: (
-            OpenAI::Models::Realtime::RealtimeAudioConfig::Input::format_
-          ) -> OpenAI::Models::Realtime::RealtimeAudioConfig::Input::format_
-
-          attr_reader noise_reduction: OpenAI::Realtime::RealtimeAudioConfig::Input::NoiseReduction?
-
-          def noise_reduction=: (
-            OpenAI::Realtime::RealtimeAudioConfig::Input::NoiseReduction
-          ) -> OpenAI::Realtime::RealtimeAudioConfig::Input::NoiseReduction
-
-          attr_reader transcription: OpenAI::Realtime::RealtimeAudioConfig::Input::Transcription?
-
-          def transcription=: (
-            OpenAI::Realtime::RealtimeAudioConfig::Input::Transcription
-          ) -> OpenAI::Realtime::RealtimeAudioConfig::Input::Transcription
-
-          attr_reader turn_detection: OpenAI::Realtime::RealtimeAudioConfig::Input::TurnDetection?
-
-          def turn_detection=: (
-            OpenAI::Realtime::RealtimeAudioConfig::Input::TurnDetection
-          ) -> OpenAI::Realtime::RealtimeAudioConfig::Input::TurnDetection
-
-          def initialize: (
-            ?format_: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::format_,
-            ?noise_reduction: OpenAI::Realtime::RealtimeAudioConfig::Input::NoiseReduction,
-            ?transcription: OpenAI::Realtime::RealtimeAudioConfig::Input::Transcription,
-            ?turn_detection: OpenAI::Realtime::RealtimeAudioConfig::Input::TurnDetection
-          ) -> void
-
-          def to_hash: -> {
-            format_: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::format_,
-            noise_reduction: OpenAI::Realtime::RealtimeAudioConfig::Input::NoiseReduction,
-            transcription: OpenAI::Realtime::RealtimeAudioConfig::Input::Transcription,
-            turn_detection: OpenAI::Realtime::RealtimeAudioConfig::Input::TurnDetection
-          }
-
-          type format_ = :pcm16 | :g711_ulaw | :g711_alaw
-
-          module Format
-            extend OpenAI::Internal::Type::Enum
-
-            PCM16: :pcm16
-            G711_ULAW: :g711_ulaw
-            G711_ALAW: :g711_alaw
-
-            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioConfig::Input::format_]
-          end
-
-          type noise_reduction =
-            {
-              type: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::NoiseReduction::type_
-            }
-
-          class NoiseReduction < OpenAI::Internal::Type::BaseModel
-            attr_reader type: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::NoiseReduction::type_?
-
-            def type=: (
-              OpenAI::Models::Realtime::RealtimeAudioConfig::Input::NoiseReduction::type_
-            ) -> OpenAI::Models::Realtime::RealtimeAudioConfig::Input::NoiseReduction::type_
-
-            def initialize: (
-              ?type: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::NoiseReduction::type_
-            ) -> void
-
-            def to_hash: -> {
-              type: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::NoiseReduction::type_
-            }
-
-            type type_ = :near_field | :far_field
-
-            module Type
-              extend OpenAI::Internal::Type::Enum
-
-              NEAR_FIELD: :near_field
-              FAR_FIELD: :far_field
-
-              def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioConfig::Input::NoiseReduction::type_]
-            end
-          end
-
-          type transcription =
-            {
-              language: String,
-              model: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::Transcription::model,
-              prompt: String
-            }
-
-          class Transcription < OpenAI::Internal::Type::BaseModel
-            attr_reader language: String?
-
-            def language=: (String) -> String
-
-            attr_reader model: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::Transcription::model?
-
-            def model=: (
-              OpenAI::Models::Realtime::RealtimeAudioConfig::Input::Transcription::model
-            ) -> OpenAI::Models::Realtime::RealtimeAudioConfig::Input::Transcription::model
-
-            attr_reader prompt: String?
-
-            def prompt=: (String) -> String
-
-            def initialize: (
-              ?language: String,
-              ?model: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::Transcription::model,
-              ?prompt: String
-            ) -> void
-
-            def to_hash: -> {
-              language: String,
-              model: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::Transcription::model,
-              prompt: String
-            }
-
-            type model =
-              :"whisper-1"
-              | :"gpt-4o-transcribe-latest"
-              | :"gpt-4o-mini-transcribe"
-              | :"gpt-4o-transcribe"
-              | :"gpt-4o-transcribe-diarize"
-
-            module Model
-              extend OpenAI::Internal::Type::Enum
-
-              WHISPER_1: :"whisper-1"
-              GPT_4O_TRANSCRIBE_LATEST: :"gpt-4o-transcribe-latest"
-              GPT_4O_MINI_TRANSCRIBE: :"gpt-4o-mini-transcribe"
-              GPT_4O_TRANSCRIBE: :"gpt-4o-transcribe"
-              GPT_4O_TRANSCRIBE_DIARIZE: :"gpt-4o-transcribe-diarize"
-
-              def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioConfig::Input::Transcription::model]
-            end
-          end
-
-          type turn_detection =
-            {
-              create_response: bool,
-              eagerness: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::eagerness,
-              idle_timeout_ms: Integer?,
-              interrupt_response: bool,
-              prefix_padding_ms: Integer,
-              silence_duration_ms: Integer,
-              threshold: Float,
-              type: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::type_
-            }
-
-          class TurnDetection < OpenAI::Internal::Type::BaseModel
-            attr_reader create_response: bool?
-
-            def create_response=: (bool) -> bool
-
-            attr_reader eagerness: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::eagerness?
-
-            def eagerness=: (
-              OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::eagerness
-            ) -> OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::eagerness
-
-            attr_accessor idle_timeout_ms: Integer?
-
-            attr_reader interrupt_response: bool?
-
-            def interrupt_response=: (bool) -> bool
-
-            attr_reader prefix_padding_ms: Integer?
-
-            def prefix_padding_ms=: (Integer) -> Integer
-
-            attr_reader silence_duration_ms: Integer?
-
-            def silence_duration_ms=: (Integer) -> Integer
-
-            attr_reader threshold: Float?
-
-            def threshold=: (Float) -> Float
-
-            attr_reader type: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::type_?
-
-            def type=: (
-              OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::type_
-            ) -> OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::type_
-
-            def initialize: (
-              ?create_response: bool,
-              ?eagerness: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::eagerness,
-              ?idle_timeout_ms: Integer?,
-              ?interrupt_response: bool,
-              ?prefix_padding_ms: Integer,
-              ?silence_duration_ms: Integer,
-              ?threshold: Float,
-              ?type: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::type_
-            ) -> void
-
-            def to_hash: -> {
-              create_response: bool,
-              eagerness: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::eagerness,
-              idle_timeout_ms: Integer?,
-              interrupt_response: bool,
-              prefix_padding_ms: Integer,
-              silence_duration_ms: Integer,
-              threshold: Float,
-              type: OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::type_
-            }
-
-            type eagerness = :low | :medium | :high | :auto
-
-            module Eagerness
-              extend OpenAI::Internal::Type::Enum
-
-              LOW: :low
-              MEDIUM: :medium
-              HIGH: :high
-              AUTO: :auto
-
-              def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::eagerness]
-            end
-
-            type type_ = :server_vad | :semantic_vad
-
-            module Type
-              extend OpenAI::Internal::Type::Enum
-
-              SERVER_VAD: :server_vad
-              SEMANTIC_VAD: :semantic_vad
-
-              def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioConfig::Input::TurnDetection::type_]
-            end
-          end
-        end
-
-        type output =
-          {
-            format_: OpenAI::Models::Realtime::RealtimeAudioConfig::Output::format_,
-            speed: Float,
-            voice: OpenAI::Models::Realtime::RealtimeAudioConfig::Output::voice
-          }
-
-        class Output < OpenAI::Internal::Type::BaseModel
-          attr_reader format_: OpenAI::Models::Realtime::RealtimeAudioConfig::Output::format_?
-
-          def format_=: (
-            OpenAI::Models::Realtime::RealtimeAudioConfig::Output::format_
-          ) -> OpenAI::Models::Realtime::RealtimeAudioConfig::Output::format_
-
-          attr_reader speed: Float?
-
-          def speed=: (Float) -> Float
-
-          attr_reader voice: OpenAI::Models::Realtime::RealtimeAudioConfig::Output::voice?
-
-          def voice=: (
-            OpenAI::Models::Realtime::RealtimeAudioConfig::Output::voice
-          ) -> OpenAI::Models::Realtime::RealtimeAudioConfig::Output::voice
-
-          def initialize: (
-            ?format_: OpenAI::Models::Realtime::RealtimeAudioConfig::Output::format_,
-            ?speed: Float,
-            ?voice: OpenAI::Models::Realtime::RealtimeAudioConfig::Output::voice
-          ) -> void
-
-          def to_hash: -> {
-            format_: OpenAI::Models::Realtime::RealtimeAudioConfig::Output::format_,
-            speed: Float,
-            voice: OpenAI::Models::Realtime::RealtimeAudioConfig::Output::voice
-          }
-
-          type format_ = :pcm16 | :g711_ulaw | :g711_alaw
-
-          module Format
-            extend OpenAI::Internal::Type::Enum
-
-            PCM16: :pcm16
-            G711_ULAW: :g711_ulaw
-            G711_ALAW: :g711_alaw
-
-            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioConfig::Output::format_]
-          end
-
-          type voice =
-            String
-            | :alloy
-            | :ash
-            | :ballad
-            | :coral
-            | :echo
-            | :sage
-            | :shimmer
-            | :verse
-            | :marin
-            | :cedar
-
-          module Voice
-            extend OpenAI::Internal::Type::Union
-
-            def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioConfig::Output::voice]
-
-            ALLOY: :alloy
-            ASH: :ash
-            BALLAD: :ballad
-            CORAL: :coral
-            ECHO: :echo
-            SAGE: :sage
-            SHIMMER: :shimmer
-            VERSE: :verse
-            MARIN: :marin
-            CEDAR: :cedar
-          end
-        end
       end
     end
   end
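The practical upshot of this hunk: `RealtimeAudioConfig` now points at the top-level `RealtimeAudioConfigInput` and `RealtimeAudioConfigOutput` models instead of its removed nested `Input`/`Output` classes. Below is a minimal sketch of building the reshaped config against the signatures above; the keyword values are illustrative, and `:near_field` assumes the old noise-reduction values carried over into the new `noise_reduction_type` alias (whose signature is not shown in this section).

```ruby
require "openai"

input = OpenAI::Realtime::RealtimeAudioConfigInput.new(
  # Assumption: near_field/far_field survive as values of noise_reduction_type.
  noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction.new(type: :near_field)
)
output = OpenAI::Realtime::RealtimeAudioConfigOutput.new(voice: :marin, speed: 1.0)

# RealtimeAudioConfig keeps the same input:/output: keywords, but with the new model classes.
audio = OpenAI::Realtime::RealtimeAudioConfig.new(input: input, output: output)
audio.to_hash # => { input: ..., output: ... }
```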
data/sig/openai/models/realtime/realtime_audio_config_input.rbs

@@ -0,0 +1,72 @@
+module OpenAI
+  module Models
+    module Realtime
+      type realtime_audio_config_input =
+        {
+          format_: OpenAI::Models::Realtime::realtime_audio_formats,
+          noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
+          transcription: OpenAI::Realtime::AudioTranscription,
+          turn_detection: OpenAI::Realtime::RealtimeAudioInputTurnDetection
+        }
+
+      class RealtimeAudioConfigInput < OpenAI::Internal::Type::BaseModel
+        attr_reader format_: OpenAI::Models::Realtime::realtime_audio_formats?
+
+        def format_=: (
+          OpenAI::Models::Realtime::realtime_audio_formats
+        ) -> OpenAI::Models::Realtime::realtime_audio_formats
+
+        attr_reader noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction?
+
+        def noise_reduction=: (
+          OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction
+        ) -> OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction
+
+        attr_reader transcription: OpenAI::Realtime::AudioTranscription?
+
+        def transcription=: (
+          OpenAI::Realtime::AudioTranscription
+        ) -> OpenAI::Realtime::AudioTranscription
+
+        attr_reader turn_detection: OpenAI::Realtime::RealtimeAudioInputTurnDetection?
+
+        def turn_detection=: (
+          OpenAI::Realtime::RealtimeAudioInputTurnDetection
+        ) -> OpenAI::Realtime::RealtimeAudioInputTurnDetection
+
+        def initialize: (
+          ?format_: OpenAI::Models::Realtime::realtime_audio_formats,
+          ?noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
+          ?transcription: OpenAI::Realtime::AudioTranscription,
+          ?turn_detection: OpenAI::Realtime::RealtimeAudioInputTurnDetection
+        ) -> void
+
+        def to_hash: -> {
+          format_: OpenAI::Models::Realtime::realtime_audio_formats,
+          noise_reduction: OpenAI::Realtime::RealtimeAudioConfigInput::NoiseReduction,
+          transcription: OpenAI::Realtime::AudioTranscription,
+          turn_detection: OpenAI::Realtime::RealtimeAudioInputTurnDetection
+        }
+
+        type noise_reduction =
+          { type: OpenAI::Models::Realtime::noise_reduction_type }
+
+        class NoiseReduction < OpenAI::Internal::Type::BaseModel
+          attr_reader type: OpenAI::Models::Realtime::noise_reduction_type?
+
+          def type=: (
+            OpenAI::Models::Realtime::noise_reduction_type
+          ) -> OpenAI::Models::Realtime::noise_reduction_type
+
+          def initialize: (
+            ?type: OpenAI::Models::Realtime::noise_reduction_type
+          ) -> void
+
+          def to_hash: -> {
+            type: OpenAI::Models::Realtime::noise_reduction_type
+          }
+        end
+      end
+    end
+  end
+end
data/sig/openai/models/realtime/realtime_audio_config_output.rbs

@@ -0,0 +1,72 @@
+module OpenAI
+  module Models
+    module Realtime
+      type realtime_audio_config_output =
+        {
+          format_: OpenAI::Models::Realtime::realtime_audio_formats,
+          speed: Float,
+          voice: OpenAI::Models::Realtime::RealtimeAudioConfigOutput::voice
+        }
+
+      class RealtimeAudioConfigOutput < OpenAI::Internal::Type::BaseModel
+        attr_reader format_: OpenAI::Models::Realtime::realtime_audio_formats?
+
+        def format_=: (
+          OpenAI::Models::Realtime::realtime_audio_formats
+        ) -> OpenAI::Models::Realtime::realtime_audio_formats
+
+        attr_reader speed: Float?
+
+        def speed=: (Float) -> Float
+
+        attr_reader voice: OpenAI::Models::Realtime::RealtimeAudioConfigOutput::voice?
+
+        def voice=: (
+          OpenAI::Models::Realtime::RealtimeAudioConfigOutput::voice
+        ) -> OpenAI::Models::Realtime::RealtimeAudioConfigOutput::voice
+
+        def initialize: (
+          ?format_: OpenAI::Models::Realtime::realtime_audio_formats,
+          ?speed: Float,
+          ?voice: OpenAI::Models::Realtime::RealtimeAudioConfigOutput::voice
+        ) -> void
+
+        def to_hash: -> {
+          format_: OpenAI::Models::Realtime::realtime_audio_formats,
+          speed: Float,
+          voice: OpenAI::Models::Realtime::RealtimeAudioConfigOutput::voice
+        }
+
+        type voice =
+          String
+          | :alloy
+          | :ash
+          | :ballad
+          | :coral
+          | :echo
+          | :sage
+          | :shimmer
+          | :verse
+          | :marin
+          | :cedar
+
+        module Voice
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioConfigOutput::voice]
+
+          ALLOY: :alloy
+          ASH: :ash
+          BALLAD: :ballad
+          CORAL: :coral
+          ECHO: :echo
+          SAGE: :sage
+          SHIMMER: :shimmer
+          VERSE: :verse
+          MARIN: :marin
+          CEDAR: :cedar
+        end
+      end
+    end
+  end
+end
data/sig/openai/models/realtime/realtime_audio_formats.rbs

@@ -0,0 +1,128 @@
+module OpenAI
+  module Models
+    module Realtime
+      type realtime_audio_formats =
+        OpenAI::Realtime::RealtimeAudioFormats::AudioPCM
+        | OpenAI::Realtime::RealtimeAudioFormats::AudioPCMU
+        | OpenAI::Realtime::RealtimeAudioFormats::AudioPCMA
+
+      module RealtimeAudioFormats
+        extend OpenAI::Internal::Type::Union
+
+        type audio_pcm =
+          {
+            rate: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::rate,
+            type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::type_
+          }
+
+        class AudioPCM < OpenAI::Internal::Type::BaseModel
+          attr_reader rate: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::rate?
+
+          def rate=: (
+            OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::rate
+          ) -> OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::rate
+
+          attr_reader type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::type_?
+
+          def type=: (
+            OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::type_
+          ) -> OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::type_
+
+          def initialize: (
+            ?rate: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::rate,
+            ?type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::type_
+          ) -> void
+
+          def to_hash: -> {
+            rate: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::rate,
+            type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::type_
+          }
+
+          type rate = 24000
+
+          module Rate
+            extend OpenAI::Internal::Type::Enum
+
+            RATE_24000: 24000
+
+            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::rate]
+          end
+
+          type type_ = :"audio/pcm"
+
+          module Type
+            extend OpenAI::Internal::Type::Enum
+
+            AUDIO_PCM: :"audio/pcm"
+
+            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM::type_]
+          end
+        end
+
+        type audio_pcmu =
+          {
+            type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMU::type_
+          }
+
+        class AudioPCMU < OpenAI::Internal::Type::BaseModel
+          attr_reader type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMU::type_?
+
+          def type=: (
+            OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMU::type_
+          ) -> OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMU::type_
+
+          def initialize: (
+            ?type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMU::type_
+          ) -> void
+
+          def to_hash: -> {
+            type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMU::type_
+          }
+
+          type type_ = :"audio/pcmu"
+
+          module Type
+            extend OpenAI::Internal::Type::Enum
+
+            AUDIO_PCMU: :"audio/pcmu"
+
+            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMU::type_]
+          end
+        end
+
+        type audio_pcma =
+          {
+            type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMA::type_
+          }
+
+        class AudioPCMA < OpenAI::Internal::Type::BaseModel
+          attr_reader type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMA::type_?
+
+          def type=: (
+            OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMA::type_
+          ) -> OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMA::type_
+
+          def initialize: (
+            ?type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMA::type_
+          ) -> void
+
+          def to_hash: -> {
+            type: OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMA::type_
+          }
+
+          type type_ = :"audio/pcma"
+
+          module Type
+            extend OpenAI::Internal::Type::Enum
+
+            AUDIO_PCMA: :"audio/pcma"
+
+            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMA::type_]
+          end
+        end
+
+        def self?.variants: -> ::Array[OpenAI::Models::Realtime::realtime_audio_formats]
+      end
+    end
+  end
+end
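With these definitions, audio formats become structured union variants rather than the bare `:pcm16` / `:g711_ulaw` / `:g711_alaw` symbols removed from `realtime_audio_config.rbs` above. A hedged sketch of constructing each variant and feeding one into the new input config, based only on the signatures in this diff; the values are illustrative:

```ruby
require "openai"

pcm  = OpenAI::Realtime::RealtimeAudioFormats::AudioPCM.new(type: :"audio/pcm", rate: 24_000)
ulaw = OpenAI::Realtime::RealtimeAudioFormats::AudioPCMU.new(type: :"audio/pcmu")
alaw = OpenAI::Realtime::RealtimeAudioFormats::AudioPCMA.new(type: :"audio/pcma")

# Any of the three variants satisfies the format_ slot on the new config models.
input = OpenAI::Realtime::RealtimeAudioConfigInput.new(format_: pcm)
input.to_hash # => { format_: ... }
```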