openai 0.22.1 → 0.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +1 -1
- data/lib/openai/models/realtime/audio_transcription.rb +60 -0
- data/lib/openai/models/realtime/client_secret_create_params.rb +18 -9
- data/lib/openai/models/realtime/client_secret_create_response.rb +11 -250
- data/lib/openai/models/realtime/conversation_item.rb +1 -1
- data/lib/openai/models/realtime/conversation_item_added.rb +14 -1
- data/lib/openai/models/realtime/conversation_item_done.rb +3 -0
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rb +10 -8
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rb +14 -5
- data/lib/openai/models/realtime/conversation_item_truncate_event.rb +2 -2
- data/lib/openai/models/realtime/input_audio_buffer_append_event.rb +10 -5
- data/lib/openai/models/realtime/models.rb +58 -0
- data/lib/openai/models/realtime/noise_reduction_type.rb +20 -0
- data/lib/openai/models/realtime/realtime_audio_config.rb +6 -427
- data/lib/openai/models/realtime/realtime_audio_config_input.rb +89 -0
- data/lib/openai/models/realtime/realtime_audio_config_output.rb +100 -0
- data/lib/openai/models/realtime/realtime_audio_formats.rb +121 -0
- data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +131 -0
- data/lib/openai/models/realtime/realtime_client_event.rb +31 -23
- data/lib/openai/models/realtime/realtime_conversation_item_assistant_message.rb +43 -10
- data/lib/openai/models/realtime/realtime_conversation_item_function_call.rb +16 -7
- data/lib/openai/models/realtime/realtime_conversation_item_function_call_output.rb +15 -7
- data/lib/openai/models/realtime/realtime_conversation_item_system_message.rb +18 -6
- data/lib/openai/models/realtime/realtime_conversation_item_user_message.rb +62 -13
- data/lib/openai/models/realtime/realtime_response.rb +117 -107
- data/lib/openai/models/realtime/realtime_response_create_audio_output.rb +100 -0
- data/lib/openai/models/realtime/realtime_response_create_mcp_tool.rb +310 -0
- data/lib/openai/models/realtime/realtime_response_create_params.rb +225 -0
- data/lib/openai/models/realtime/realtime_response_status.rb +1 -1
- data/lib/openai/models/realtime/realtime_response_usage.rb +5 -2
- data/lib/openai/models/realtime/realtime_response_usage_input_token_details.rb +58 -8
- data/lib/openai/models/realtime/realtime_server_event.rb +21 -5
- data/lib/openai/models/realtime/realtime_session.rb +9 -125
- data/lib/openai/models/realtime/realtime_session_client_secret.rb +36 -0
- data/lib/openai/models/realtime/realtime_session_create_request.rb +50 -71
- data/lib/openai/models/realtime/realtime_session_create_response.rb +621 -219
- data/lib/openai/models/realtime/realtime_tools_config_union.rb +2 -53
- data/lib/openai/models/realtime/realtime_tracing_config.rb +7 -6
- data/lib/openai/models/realtime/realtime_transcription_session_audio.rb +19 -0
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input.rb +90 -0
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +131 -0
- data/lib/openai/models/realtime/realtime_transcription_session_client_secret.rb +38 -0
- data/lib/openai/models/realtime/realtime_transcription_session_create_request.rb +12 -270
- data/lib/openai/models/realtime/realtime_transcription_session_create_response.rb +78 -0
- data/lib/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rb +66 -0
- data/lib/openai/models/realtime/realtime_transcription_session_turn_detection.rb +57 -0
- data/lib/openai/models/realtime/realtime_truncation.rb +8 -40
- data/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb +34 -0
- data/lib/openai/models/realtime/response_cancel_event.rb +3 -1
- data/lib/openai/models/realtime/response_create_event.rb +18 -348
- data/lib/openai/models/realtime/response_done_event.rb +7 -0
- data/lib/openai/models/realtime/session_created_event.rb +20 -4
- data/lib/openai/models/realtime/session_update_event.rb +36 -12
- data/lib/openai/models/realtime/session_updated_event.rb +20 -4
- data/lib/openai/models/realtime/transcription_session_created.rb +8 -243
- data/lib/openai/models/realtime/transcription_session_update.rb +179 -3
- data/lib/openai/models/realtime/transcription_session_updated_event.rb +8 -243
- data/lib/openai/resources/realtime/client_secrets.rb +2 -3
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +19 -1
- data/rbi/openai/models/realtime/audio_transcription.rbi +132 -0
- data/rbi/openai/models/realtime/client_secret_create_params.rbi +25 -11
- data/rbi/openai/models/realtime/client_secret_create_response.rbi +2 -587
- data/rbi/openai/models/realtime/conversation_item_added.rbi +14 -1
- data/rbi/openai/models/realtime/conversation_item_done.rbi +3 -0
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rbi +11 -8
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rbi +15 -5
- data/rbi/openai/models/realtime/conversation_item_truncate_event.rbi +2 -2
- data/rbi/openai/models/realtime/input_audio_buffer_append_event.rbi +10 -5
- data/rbi/openai/models/realtime/models.rbi +97 -0
- data/rbi/openai/models/realtime/noise_reduction_type.rbi +31 -0
- data/rbi/openai/models/realtime/realtime_audio_config.rbi +8 -956
- data/rbi/openai/models/realtime/realtime_audio_config_input.rbi +221 -0
- data/rbi/openai/models/realtime/realtime_audio_config_output.rbi +222 -0
- data/rbi/openai/models/realtime/realtime_audio_formats.rbi +329 -0
- data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +262 -0
- data/rbi/openai/models/realtime/realtime_conversation_item_assistant_message.rbi +51 -10
- data/rbi/openai/models/realtime/realtime_conversation_item_function_call.rbi +16 -7
- data/rbi/openai/models/realtime/realtime_conversation_item_function_call_output.rbi +14 -7
- data/rbi/openai/models/realtime/realtime_conversation_item_system_message.rbi +16 -6
- data/rbi/openai/models/realtime/realtime_conversation_item_user_message.rbi +110 -12
- data/rbi/openai/models/realtime/realtime_response.rbi +287 -212
- data/rbi/openai/models/realtime/realtime_response_create_audio_output.rbi +250 -0
- data/rbi/openai/models/realtime/realtime_response_create_mcp_tool.rbi +616 -0
- data/rbi/openai/models/realtime/realtime_response_create_params.rbi +529 -0
- data/rbi/openai/models/realtime/realtime_response_usage.rbi +8 -2
- data/rbi/openai/models/realtime/realtime_response_usage_input_token_details.rbi +106 -7
- data/rbi/openai/models/realtime/realtime_server_event.rbi +4 -1
- data/rbi/openai/models/realtime/realtime_session.rbi +12 -262
- data/rbi/openai/models/realtime/realtime_session_client_secret.rbi +49 -0
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +112 -133
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +1229 -405
- data/rbi/openai/models/realtime/realtime_tools_config_union.rbi +1 -117
- data/rbi/openai/models/realtime/realtime_tracing_config.rbi +11 -10
- data/rbi/openai/models/realtime/realtime_transcription_session_audio.rbi +50 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input.rbi +226 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +259 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_client_secret.rbi +51 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_create_request.rbi +25 -597
- data/rbi/openai/models/realtime/realtime_transcription_session_create_response.rbi +195 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rbi +144 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_turn_detection.rbi +94 -0
- data/rbi/openai/models/realtime/realtime_truncation.rbi +5 -56
- data/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi +45 -0
- data/rbi/openai/models/realtime/response_cancel_event.rbi +3 -1
- data/rbi/openai/models/realtime/response_create_event.rbi +19 -786
- data/rbi/openai/models/realtime/response_done_event.rbi +7 -0
- data/rbi/openai/models/realtime/session_created_event.rbi +42 -9
- data/rbi/openai/models/realtime/session_update_event.rbi +57 -19
- data/rbi/openai/models/realtime/session_updated_event.rbi +42 -9
- data/rbi/openai/models/realtime/transcription_session_created.rbi +17 -591
- data/rbi/openai/models/realtime/transcription_session_update.rbi +425 -7
- data/rbi/openai/models/realtime/transcription_session_updated_event.rbi +14 -591
- data/rbi/openai/resources/realtime/client_secrets.rbi +5 -3
- data/sig/openai/models/realtime/audio_transcription.rbs +57 -0
- data/sig/openai/models/realtime/client_secret_create_response.rbs +1 -251
- data/sig/openai/models/realtime/models.rbs +57 -0
- data/sig/openai/models/realtime/noise_reduction_type.rbs +16 -0
- data/sig/openai/models/realtime/realtime_audio_config.rbs +12 -331
- data/sig/openai/models/realtime/realtime_audio_config_input.rbs +72 -0
- data/sig/openai/models/realtime/realtime_audio_config_output.rbs +72 -0
- data/sig/openai/models/realtime/realtime_audio_formats.rbs +128 -0
- data/sig/openai/models/realtime/realtime_audio_input_turn_detection.rbs +99 -0
- data/sig/openai/models/realtime/realtime_conversation_item_assistant_message.rbs +17 -2
- data/sig/openai/models/realtime/realtime_conversation_item_user_message.rbs +30 -1
- data/sig/openai/models/realtime/realtime_response.rbs +103 -82
- data/sig/openai/models/realtime/realtime_response_create_audio_output.rbs +84 -0
- data/sig/openai/models/realtime/realtime_response_create_mcp_tool.rbs +218 -0
- data/sig/openai/models/realtime/realtime_response_create_params.rbs +148 -0
- data/sig/openai/models/realtime/realtime_response_usage_input_token_details.rbs +50 -1
- data/sig/openai/models/realtime/realtime_session.rbs +16 -106
- data/sig/openai/models/realtime/realtime_session_client_secret.rbs +20 -0
- data/sig/openai/models/realtime/realtime_session_create_request.rbs +27 -43
- data/sig/openai/models/realtime/realtime_session_create_response.rbs +389 -187
- data/sig/openai/models/realtime/realtime_tools_config_union.rbs +1 -53
- data/sig/openai/models/realtime/realtime_transcription_session_audio.rbs +24 -0
- data/sig/openai/models/realtime/realtime_transcription_session_audio_input.rbs +72 -0
- data/sig/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbs +99 -0
- data/sig/openai/models/realtime/realtime_transcription_session_client_secret.rbs +20 -0
- data/sig/openai/models/realtime/realtime_transcription_session_create_request.rbs +11 -203
- data/sig/openai/models/realtime/realtime_transcription_session_create_response.rbs +69 -0
- data/sig/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rbs +59 -0
- data/sig/openai/models/realtime/realtime_transcription_session_turn_detection.rbs +47 -0
- data/sig/openai/models/realtime/realtime_truncation.rbs +1 -28
- data/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs +21 -0
- data/sig/openai/models/realtime/response_create_event.rbs +6 -249
- data/sig/openai/models/realtime/session_created_event.rbs +14 -4
- data/sig/openai/models/realtime/session_update_event.rbs +14 -4
- data/sig/openai/models/realtime/session_updated_event.rbs +14 -4
- data/sig/openai/models/realtime/transcription_session_created.rbs +4 -254
- data/sig/openai/models/realtime/transcription_session_update.rbs +154 -4
- data/sig/openai/models/realtime/transcription_session_updated_event.rbs +4 -254
- metadata +59 -5
- data/lib/openai/models/realtime/realtime_client_secret_config.rb +0 -64
- data/rbi/openai/models/realtime/realtime_client_secret_config.rbi +0 -147
- data/sig/openai/models/realtime/realtime_client_secret_config.rbs +0 -60
data/sig/openai/models/realtime/realtime_audio_input_turn_detection.rbs
@@ -0,0 +1,99 @@
+module OpenAI
+  module Models
+    module Realtime
+      type realtime_audio_input_turn_detection =
+        {
+          create_response: bool,
+          eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness,
+          idle_timeout_ms: Integer?,
+          interrupt_response: bool,
+          prefix_padding_ms: Integer,
+          silence_duration_ms: Integer,
+          threshold: Float,
+          type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
+        }
+
+      class RealtimeAudioInputTurnDetection < OpenAI::Internal::Type::BaseModel
+        attr_reader create_response: bool?
+
+        def create_response=: (bool) -> bool
+
+        attr_reader eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness?
+
+        def eagerness=: (
+          OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness
+        ) -> OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness
+
+        attr_accessor idle_timeout_ms: Integer?
+
+        attr_reader interrupt_response: bool?
+
+        def interrupt_response=: (bool) -> bool
+
+        attr_reader prefix_padding_ms: Integer?
+
+        def prefix_padding_ms=: (Integer) -> Integer
+
+        attr_reader silence_duration_ms: Integer?
+
+        def silence_duration_ms=: (Integer) -> Integer
+
+        attr_reader threshold: Float?
+
+        def threshold=: (Float) -> Float
+
+        attr_reader type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_?
+
+        def type=: (
+          OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
+        ) -> OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
+
+        def initialize: (
+          ?create_response: bool,
+          ?eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness,
+          ?idle_timeout_ms: Integer?,
+          ?interrupt_response: bool,
+          ?prefix_padding_ms: Integer,
+          ?silence_duration_ms: Integer,
+          ?threshold: Float,
+          ?type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
+        ) -> void
+
+        def to_hash: -> {
+          create_response: bool,
+          eagerness: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness,
+          idle_timeout_ms: Integer?,
+          interrupt_response: bool,
+          prefix_padding_ms: Integer,
+          silence_duration_ms: Integer,
+          threshold: Float,
+          type: OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_
+        }
+
+        type eagerness = :low | :medium | :high | :auto
+
+        module Eagerness
+          extend OpenAI::Internal::Type::Enum
+
+          LOW: :low
+          MEDIUM: :medium
+          HIGH: :high
+          AUTO: :auto
+
+          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::eagerness]
+        end
+
+        type type_ = :server_vad | :semantic_vad
+
+        module Type
+          extend OpenAI::Internal::Type::Enum
+
+          SERVER_VAD: :server_vad
+          SEMANTIC_VAD: :semantic_vad
+
+          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection::type_]
+        end
+      end
+    end
+  end
+end
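For orientation, a minimal Ruby sketch of constructing the new turn-detection model from the keyword interface declared above; the chosen values are illustrative assumptions, and only the class name and keyword arguments come from these signatures:

    # Hypothetical values; class and keywords are taken from the RBS above.
    turn_detection = OpenAI::Models::Realtime::RealtimeAudioInputTurnDetection.new(
      type: :semantic_vad,     # or :server_vad
      eagerness: :auto,        # :low | :medium | :high | :auto
      create_response: true,
      interrupt_response: true,
      idle_timeout_ms: nil     # nullable Integer per attr_accessor idle_timeout_ms
    )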
data/sig/openai/models/realtime/realtime_conversation_item_assistant_message.rbs
@@ -54,15 +54,25 @@ module OpenAI

         type content =
           {
+            audio: String,
             text: String,
+            transcript: String,
             type: OpenAI::Models::Realtime::RealtimeConversationItemAssistantMessage::Content::type_
           }

         class Content < OpenAI::Internal::Type::BaseModel
+          attr_reader audio: String?
+
+          def audio=: (String) -> String
+
           attr_reader text: String?

           def text=: (String) -> String

+          attr_reader transcript: String?
+
+          def transcript=: (String) -> String
+
           attr_reader type: OpenAI::Models::Realtime::RealtimeConversationItemAssistantMessage::Content::type_?

           def type=: (
@@ -70,21 +80,26 @@ module OpenAI
           ) -> OpenAI::Models::Realtime::RealtimeConversationItemAssistantMessage::Content::type_

           def initialize: (
+            ?audio: String,
             ?text: String,
+            ?transcript: String,
             ?type: OpenAI::Models::Realtime::RealtimeConversationItemAssistantMessage::Content::type_
           ) -> void

           def to_hash: -> {
+            audio: String,
             text: String,
+            transcript: String,
             type: OpenAI::Models::Realtime::RealtimeConversationItemAssistantMessage::Content::type_
           }

-          type type_ = :
+          type type_ = :output_text | :output_audio

           module Type
            extend OpenAI::Internal::Type::Enum

-
+            OUTPUT_TEXT: :output_text
+            OUTPUT_AUDIO: :output_audio

            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeConversationItemAssistantMessage::Content::type_]
           end
data/sig/openai/models/realtime/realtime_conversation_item_user_message.rbs
@@ -55,6 +55,8 @@ module OpenAI
         type content =
           {
             audio: String,
+            detail: OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::detail,
+            image_url: String,
             text: String,
             transcript: String,
             type: OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::type_
@@ -65,6 +67,16 @@ module OpenAI

           def audio=: (String) -> String

+          attr_reader detail: OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::detail?
+
+          def detail=: (
+            OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::detail
+          ) -> OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::detail
+
+          attr_reader image_url: String?
+
+          def image_url=: (String) -> String
+
           attr_reader text: String?

           def text=: (String) -> String
@@ -81,6 +93,8 @@ module OpenAI

           def initialize: (
             ?audio: String,
+            ?detail: OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::detail,
+            ?image_url: String,
             ?text: String,
             ?transcript: String,
             ?type: OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::type_
@@ -88,18 +102,33 @@ module OpenAI

           def to_hash: -> {
             audio: String,
+            detail: OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::detail,
+            image_url: String,
             text: String,
             transcript: String,
             type: OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::type_
           }

-          type
+          type detail = :auto | :low | :high
+
+          module Detail
+            extend OpenAI::Internal::Type::Enum
+
+            AUTO: :auto
+            LOW: :low
+            HIGH: :high
+
+            def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::detail]
+          end
+
+          type type_ = :input_text | :input_audio | :input_image

           module Type
             extend OpenAI::Internal::Type::Enum

             INPUT_TEXT: :input_text
             INPUT_AUDIO: :input_audio
+            INPUT_IMAGE: :input_image

             def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content::type_]
           end
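A minimal sketch of the new input_image content variant on user messages, using only the fields visible in the signatures above (type, image_url, detail); the URL value is a placeholder:

    # Placeholder data URL; fields come from the Content signatures above.
    image_part = OpenAI::Models::Realtime::RealtimeConversationItemUserMessage::Content.new(
      type: :input_image,
      image_url: "data:image/png;base64,...",
      detail: :auto            # :auto | :low | :high
    )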
data/sig/openai/models/realtime/realtime_response.rbs
@@ -4,18 +4,16 @@ module OpenAI
       type realtime_response =
         {
           id: String,
+          audio: OpenAI::Realtime::RealtimeResponse::Audio,
           conversation_id: String,
           max_output_tokens: OpenAI::Models::Realtime::RealtimeResponse::max_output_tokens,
           metadata: OpenAI::Models::metadata?,
-          modalities: ::Array[OpenAI::Models::Realtime::RealtimeResponse::modality],
           object: OpenAI::Models::Realtime::RealtimeResponse::object,
           output: ::Array[OpenAI::Models::Realtime::conversation_item],
-
+          output_modalities: ::Array[OpenAI::Models::Realtime::RealtimeResponse::output_modality],
           status: OpenAI::Models::Realtime::RealtimeResponse::status,
           status_details: OpenAI::Realtime::RealtimeResponseStatus,
-
-          usage: OpenAI::Realtime::RealtimeResponseUsage,
-          voice: OpenAI::Models::Realtime::RealtimeResponse::voice
+          usage: OpenAI::Realtime::RealtimeResponseUsage
         }

       class RealtimeResponse < OpenAI::Internal::Type::BaseModel
@@ -23,6 +21,12 @@ module OpenAI

         def id=: (String) -> String

+        attr_reader audio: OpenAI::Realtime::RealtimeResponse::Audio?
+
+        def audio=: (
+          OpenAI::Realtime::RealtimeResponse::Audio
+        ) -> OpenAI::Realtime::RealtimeResponse::Audio
+
         attr_reader conversation_id: String?

         def conversation_id=: (String) -> String
@@ -35,12 +39,6 @@ module OpenAI

         attr_accessor metadata: OpenAI::Models::metadata?

-        attr_reader modalities: ::Array[OpenAI::Models::Realtime::RealtimeResponse::modality]?
-
-        def modalities=: (
-          ::Array[OpenAI::Models::Realtime::RealtimeResponse::modality]
-        ) -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::modality]
-
         attr_reader object: OpenAI::Models::Realtime::RealtimeResponse::object?

         def object=: (
@@ -53,11 +51,11 @@ module OpenAI
           ::Array[OpenAI::Models::Realtime::conversation_item]
         ) -> ::Array[OpenAI::Models::Realtime::conversation_item]

-        attr_reader
+        attr_reader output_modalities: ::Array[OpenAI::Models::Realtime::RealtimeResponse::output_modality]?

-        def
-          OpenAI::Models::Realtime::RealtimeResponse::
-        ) -> OpenAI::Models::Realtime::RealtimeResponse::
+        def output_modalities=: (
+          ::Array[OpenAI::Models::Realtime::RealtimeResponse::output_modality]
+        ) -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::output_modality]

         attr_reader status: OpenAI::Models::Realtime::RealtimeResponse::status?

@@ -71,54 +69,119 @@ module OpenAI
           OpenAI::Realtime::RealtimeResponseStatus
         ) -> OpenAI::Realtime::RealtimeResponseStatus

-        attr_reader temperature: Float?
-
-        def temperature=: (Float) -> Float
-
         attr_reader usage: OpenAI::Realtime::RealtimeResponseUsage?

         def usage=: (
           OpenAI::Realtime::RealtimeResponseUsage
         ) -> OpenAI::Realtime::RealtimeResponseUsage

-        attr_reader voice: OpenAI::Models::Realtime::RealtimeResponse::voice?
-
-        def voice=: (
-          OpenAI::Models::Realtime::RealtimeResponse::voice
-        ) -> OpenAI::Models::Realtime::RealtimeResponse::voice
-
         def initialize: (
           ?id: String,
+          ?audio: OpenAI::Realtime::RealtimeResponse::Audio,
           ?conversation_id: String,
           ?max_output_tokens: OpenAI::Models::Realtime::RealtimeResponse::max_output_tokens,
           ?metadata: OpenAI::Models::metadata?,
-          ?modalities: ::Array[OpenAI::Models::Realtime::RealtimeResponse::modality],
           ?object: OpenAI::Models::Realtime::RealtimeResponse::object,
           ?output: ::Array[OpenAI::Models::Realtime::conversation_item],
-          ?
+          ?output_modalities: ::Array[OpenAI::Models::Realtime::RealtimeResponse::output_modality],
           ?status: OpenAI::Models::Realtime::RealtimeResponse::status,
           ?status_details: OpenAI::Realtime::RealtimeResponseStatus,
-          ?
-          ?usage: OpenAI::Realtime::RealtimeResponseUsage,
-          ?voice: OpenAI::Models::Realtime::RealtimeResponse::voice
+          ?usage: OpenAI::Realtime::RealtimeResponseUsage
         ) -> void

         def to_hash: -> {
           id: String,
+          audio: OpenAI::Realtime::RealtimeResponse::Audio,
           conversation_id: String,
           max_output_tokens: OpenAI::Models::Realtime::RealtimeResponse::max_output_tokens,
           metadata: OpenAI::Models::metadata?,
-          modalities: ::Array[OpenAI::Models::Realtime::RealtimeResponse::modality],
           object: OpenAI::Models::Realtime::RealtimeResponse::object,
           output: ::Array[OpenAI::Models::Realtime::conversation_item],
-
+          output_modalities: ::Array[OpenAI::Models::Realtime::RealtimeResponse::output_modality],
           status: OpenAI::Models::Realtime::RealtimeResponse::status,
           status_details: OpenAI::Realtime::RealtimeResponseStatus,
-
-          usage: OpenAI::Realtime::RealtimeResponseUsage,
-          voice: OpenAI::Models::Realtime::RealtimeResponse::voice
+          usage: OpenAI::Realtime::RealtimeResponseUsage
         }

+        type audio =
+          { output: OpenAI::Realtime::RealtimeResponse::Audio::Output }
+
+        class Audio < OpenAI::Internal::Type::BaseModel
+          attr_reader output: OpenAI::Realtime::RealtimeResponse::Audio::Output?
+
+          def output=: (
+            OpenAI::Realtime::RealtimeResponse::Audio::Output
+          ) -> OpenAI::Realtime::RealtimeResponse::Audio::Output
+
+          def initialize: (
+            ?output: OpenAI::Realtime::RealtimeResponse::Audio::Output
+          ) -> void
+
+          def to_hash: -> {
+            output: OpenAI::Realtime::RealtimeResponse::Audio::Output
+          }
+
+          type output =
+            {
+              format_: OpenAI::Models::Realtime::realtime_audio_formats,
+              voice: OpenAI::Models::Realtime::RealtimeResponse::Audio::Output::voice
+            }
+
+          class Output < OpenAI::Internal::Type::BaseModel
+            attr_reader format_: OpenAI::Models::Realtime::realtime_audio_formats?
+
+            def format_=: (
+              OpenAI::Models::Realtime::realtime_audio_formats
+            ) -> OpenAI::Models::Realtime::realtime_audio_formats
+
+            attr_reader voice: OpenAI::Models::Realtime::RealtimeResponse::Audio::Output::voice?
+
+            def voice=: (
+              OpenAI::Models::Realtime::RealtimeResponse::Audio::Output::voice
+            ) -> OpenAI::Models::Realtime::RealtimeResponse::Audio::Output::voice
+
+            def initialize: (
+              ?format_: OpenAI::Models::Realtime::realtime_audio_formats,
+              ?voice: OpenAI::Models::Realtime::RealtimeResponse::Audio::Output::voice
+            ) -> void
+
+            def to_hash: -> {
+              format_: OpenAI::Models::Realtime::realtime_audio_formats,
+              voice: OpenAI::Models::Realtime::RealtimeResponse::Audio::Output::voice
+            }
+
+            type voice =
+              String
+              | :alloy
+              | :ash
+              | :ballad
+              | :coral
+              | :echo
+              | :sage
+              | :shimmer
+              | :verse
+              | :marin
+              | :cedar
+
+            module Voice
+              extend OpenAI::Internal::Type::Union
+
+              def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::Audio::Output::voice]
+
+              ALLOY: :alloy
+              ASH: :ash
+              BALLAD: :ballad
+              CORAL: :coral
+              ECHO: :echo
+              SAGE: :sage
+              SHIMMER: :shimmer
+              VERSE: :verse
+              MARIN: :marin
+              CEDAR: :cedar
+            end
+          end
+        end
+
         type max_output_tokens = Integer | :inf

         module MaxOutputTokens
@@ -127,17 +190,6 @@ module OpenAI
           def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::max_output_tokens]
         end

-        type modality = :text | :audio
-
-        module Modality
-          extend OpenAI::Internal::Type::Enum
-
-          TEXT: :text
-          AUDIO: :audio
-
-          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::modality]
-        end
-
         type object = :"realtime.response"

         module Object
@@ -148,16 +200,15 @@ module OpenAI
           def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::object]
         end

-        type
+        type output_modality = :text | :audio

-        module
+        module OutputModality
          extend OpenAI::Internal::Type::Enum

-
-
-          G711_ALAW: :g711_alaw
+          TEXT: :text
+          AUDIO: :audio

-          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::
+          def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::output_modality]
         end

         type status =
@@ -174,36 +225,6 @@ module OpenAI

           def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::status]
         end
-
-        type voice =
-          String
-          | :alloy
-          | :ash
-          | :ballad
-          | :coral
-          | :echo
-          | :sage
-          | :shimmer
-          | :verse
-          | :marin
-          | :cedar
-
-        module Voice
-          extend OpenAI::Internal::Type::Union
-
-          def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeResponse::voice]
-
-          ALLOY: :alloy
-          ASH: :ash
-          BALLAD: :ballad
-          CORAL: :coral
-          ECHO: :echo
-          SAGE: :sage
-          SHIMMER: :shimmer
-          VERSE: :verse
-          MARIN: :marin
-          CEDAR: :cedar
-        end
       end
     end
   end
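Per the diff above, modalities becomes output_modalities and the top-level voice reader moves under the new nested audio.output object. A hedged sketch of reading the relocated fields, assuming response is an already-received OpenAI::Models::Realtime::RealtimeResponse instance:

    # response is assumed to be an OpenAI::Models::Realtime::RealtimeResponse.
    modalities = response.output_modalities       # e.g. [:audio, :text]; replaces response.modalities
    voice      = response.audio&.output&.voice    # replaces the removed top-level response.voice
    format     = response.audio&.output&.format_  # one of the realtime_audio_formats variants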
data/sig/openai/models/realtime/realtime_response_create_audio_output.rbs
@@ -0,0 +1,84 @@
+module OpenAI
+  module Models
+    module Realtime
+      type realtime_response_create_audio_output =
+        { output: OpenAI::Realtime::RealtimeResponseCreateAudioOutput::Output }
+
+      class RealtimeResponseCreateAudioOutput < OpenAI::Internal::Type::BaseModel
+        attr_reader output: OpenAI::Realtime::RealtimeResponseCreateAudioOutput::Output?
+
+        def output=: (
+          OpenAI::Realtime::RealtimeResponseCreateAudioOutput::Output
+        ) -> OpenAI::Realtime::RealtimeResponseCreateAudioOutput::Output
+
+        def initialize: (
+          ?output: OpenAI::Realtime::RealtimeResponseCreateAudioOutput::Output
+        ) -> void
+
+        def to_hash: -> {
+          output: OpenAI::Realtime::RealtimeResponseCreateAudioOutput::Output
+        }
+
+        type output =
+          {
+            format_: OpenAI::Models::Realtime::realtime_audio_formats,
+            voice: OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::voice
+          }
+
+        class Output < OpenAI::Internal::Type::BaseModel
+          attr_reader format_: OpenAI::Models::Realtime::realtime_audio_formats?
+
+          def format_=: (
+            OpenAI::Models::Realtime::realtime_audio_formats
+          ) -> OpenAI::Models::Realtime::realtime_audio_formats
+
+          attr_reader voice: OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::voice?
+
+          def voice=: (
+            OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::voice
+          ) -> OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::voice
+
+          def initialize: (
+            ?format_: OpenAI::Models::Realtime::realtime_audio_formats,
+            ?voice: OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::voice
+          ) -> void
+
+          def to_hash: -> {
+            format_: OpenAI::Models::Realtime::realtime_audio_formats,
+            voice: OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::voice
+          }
+
+          type voice =
+            String
+            | :alloy
+            | :ash
+            | :ballad
+            | :coral
+            | :echo
+            | :sage
+            | :shimmer
+            | :verse
+            | :marin
+            | :cedar
+
+          module Voice
+            extend OpenAI::Internal::Type::Union
+
+            def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::voice]
+
+            ALLOY: :alloy
+            ASH: :ash
+            BALLAD: :ballad
+            CORAL: :coral
+            ECHO: :echo
+            SAGE: :sage
+            SHIMMER: :shimmer
+            VERSE: :verse
+            MARIN: :marin
+            CEDAR: :cedar
+          end
+        end
+      end
+    end
+  end
+end