openai 0.23.0 → 0.23.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/realtime/client_secret_create_response.rb +6 -8
  5. data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +1 -1
  6. data/lib/openai/models/realtime/realtime_client_event.rb +2 -6
  7. data/lib/openai/models/realtime/{models.rb → realtime_function_tool.rb} +6 -6
  8. data/lib/openai/models/realtime/realtime_response_create_params.rb +4 -4
  9. data/lib/openai/models/realtime/realtime_server_event.rb +1 -8
  10. data/lib/openai/models/realtime/realtime_session.rb +3 -3
  11. data/lib/openai/models/realtime/realtime_session_create_request.rb +2 -2
  12. data/lib/openai/models/realtime/realtime_session_create_response.rb +21 -33
  13. data/lib/openai/models/realtime/realtime_tools_config_union.rb +2 -2
  14. data/lib/openai/models/realtime/realtime_transcription_session_create_response.rb +117 -40
  15. data/lib/openai/models/realtime/transcription_session_updated_event.rb +152 -3
  16. data/lib/openai/version.rb +1 -1
  17. data/lib/openai.rb +1 -4
  18. data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +2 -2
  19. data/rbi/openai/models/realtime/realtime_client_event.rbi +1 -2
  20. data/rbi/openai/models/realtime/{models.rbi → realtime_function_tool.rbi} +27 -9
  21. data/rbi/openai/models/realtime/realtime_response_create_params.rbi +5 -5
  22. data/rbi/openai/models/realtime/realtime_server_event.rbi +0 -2
  23. data/rbi/openai/models/realtime/realtime_session.rbi +10 -4
  24. data/rbi/openai/models/realtime/realtime_session_create_request.rbi +4 -4
  25. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +29 -77
  26. data/rbi/openai/models/realtime/realtime_tools_config_union.rbi +1 -1
  27. data/rbi/openai/models/realtime/realtime_transcription_session_create_response.rbi +290 -101
  28. data/rbi/openai/models/realtime/transcription_session_updated_event.rbi +311 -4
  29. data/sig/openai/models/realtime/realtime_client_event.rbs +0 -1
  30. data/sig/openai/models/realtime/{models.rbs → realtime_function_tool.rbs} +9 -9
  31. data/sig/openai/models/realtime/realtime_response_create_params.rbs +1 -1
  32. data/sig/openai/models/realtime/realtime_server_event.rbs +0 -2
  33. data/sig/openai/models/realtime/realtime_session.rbs +6 -6
  34. data/sig/openai/models/realtime/realtime_session_create_response.rbs +13 -31
  35. data/sig/openai/models/realtime/realtime_tools_config_union.rbs +1 -1
  36. data/sig/openai/models/realtime/realtime_transcription_session_create_response.rbs +123 -35
  37. data/sig/openai/models/realtime/transcription_session_updated_event.rbs +118 -4
  38. metadata +5 -14
  39. data/lib/openai/models/realtime/realtime_transcription_session_client_secret.rb +0 -38
  40. data/lib/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rb +0 -66
  41. data/lib/openai/models/realtime/transcription_session_created.rb +0 -43
  42. data/rbi/openai/models/realtime/realtime_transcription_session_client_secret.rbi +0 -51
  43. data/rbi/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rbi +0 -144
  44. data/rbi/openai/models/realtime/transcription_session_created.rbi +0 -79
  45. data/sig/openai/models/realtime/realtime_transcription_session_client_secret.rbs +0 -20
  46. data/sig/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rbs +0 -59
  47. data/sig/openai/models/realtime/transcription_session_created.rbs +0 -32
@@ -1,144 +0,0 @@
1
- # typed: strong
2
-
3
- module OpenAI
4
- module Models
5
- RealtimeTranscriptionSessionInputAudioTranscription =
6
- Realtime::RealtimeTranscriptionSessionInputAudioTranscription
7
-
8
- module Realtime
9
- class RealtimeTranscriptionSessionInputAudioTranscription < OpenAI::Internal::Type::BaseModel
10
- OrHash =
11
- T.type_alias do
12
- T.any(
13
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription,
14
- OpenAI::Internal::AnyHash
15
- )
16
- end
17
-
18
- # The language of the input audio. Supplying the input language in
19
- # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
20
- # format will improve accuracy and latency.
21
- sig { returns(T.nilable(String)) }
22
- attr_reader :language
23
-
24
- sig { params(language: String).void }
25
- attr_writer :language
26
-
27
- # The model to use for transcription. Current options are `whisper-1`,
28
- # `gpt-4o-transcribe-latest`, `gpt-4o-mini-transcribe`, and `gpt-4o-transcribe`.
29
- sig do
30
- returns(
31
- T.nilable(
32
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::TaggedSymbol
33
- )
34
- )
35
- end
36
- attr_reader :model
37
-
38
- sig do
39
- params(
40
- model:
41
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::OrSymbol
42
- ).void
43
- end
44
- attr_writer :model
45
-
46
- # An optional text to guide the model's style or continue a previous audio
47
- # segment. For `whisper-1`, the
48
- # [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
49
- # For `gpt-4o-transcribe` models, the prompt is a free text string, for example
50
- # "expect words related to technology".
51
- sig { returns(T.nilable(String)) }
52
- attr_reader :prompt
53
-
54
- sig { params(prompt: String).void }
55
- attr_writer :prompt
56
-
57
- # Configuration of the transcription model.
58
- sig do
59
- params(
60
- language: String,
61
- model:
62
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::OrSymbol,
63
- prompt: String
64
- ).returns(T.attached_class)
65
- end
66
- def self.new(
67
- # The language of the input audio. Supplying the input language in
68
- # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
69
- # format will improve accuracy and latency.
70
- language: nil,
71
- # The model to use for transcription. Current options are `whisper-1`,
72
- # `gpt-4o-transcribe-latest`, `gpt-4o-mini-transcribe`, and `gpt-4o-transcribe`.
73
- model: nil,
74
- # An optional text to guide the model's style or continue a previous audio
75
- # segment. For `whisper-1`, the
76
- # [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
77
- # For `gpt-4o-transcribe` models, the prompt is a free text string, for example
78
- # "expect words related to technology".
79
- prompt: nil
80
- )
81
- end
82
-
83
- sig do
84
- override.returns(
85
- {
86
- language: String,
87
- model:
88
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::TaggedSymbol,
89
- prompt: String
90
- }
91
- )
92
- end
93
- def to_hash
94
- end
95
-
96
- # The model to use for transcription. Current options are `whisper-1`,
97
- # `gpt-4o-transcribe-latest`, `gpt-4o-mini-transcribe`, and `gpt-4o-transcribe`.
98
- module Model
99
- extend OpenAI::Internal::Type::Enum
100
-
101
- TaggedSymbol =
102
- T.type_alias do
103
- T.all(
104
- Symbol,
105
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model
106
- )
107
- end
108
- OrSymbol = T.type_alias { T.any(Symbol, String) }
109
-
110
- WHISPER_1 =
111
- T.let(
112
- :"whisper-1",
113
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::TaggedSymbol
114
- )
115
- GPT_4O_TRANSCRIBE_LATEST =
116
- T.let(
117
- :"gpt-4o-transcribe-latest",
118
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::TaggedSymbol
119
- )
120
- GPT_4O_MINI_TRANSCRIBE =
121
- T.let(
122
- :"gpt-4o-mini-transcribe",
123
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::TaggedSymbol
124
- )
125
- GPT_4O_TRANSCRIBE =
126
- T.let(
127
- :"gpt-4o-transcribe",
128
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::TaggedSymbol
129
- )
130
-
131
- sig do
132
- override.returns(
133
- T::Array[
134
- OpenAI::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::Model::TaggedSymbol
135
- ]
136
- )
137
- end
138
- def self.values
139
- end
140
- end
141
- end
142
- end
143
- end
144
- end
@@ -1,79 +0,0 @@
1
- # typed: strong
2
-
3
- module OpenAI
4
- module Models
5
- module Realtime
6
- class TranscriptionSessionCreated < OpenAI::Internal::Type::BaseModel
7
- OrHash =
8
- T.type_alias do
9
- T.any(
10
- OpenAI::Realtime::TranscriptionSessionCreated,
11
- OpenAI::Internal::AnyHash
12
- )
13
- end
14
-
15
- # The unique ID of the server event.
16
- sig { returns(String) }
17
- attr_accessor :event_id
18
-
19
- # A new Realtime transcription session configuration.
20
- #
21
- # When a session is created on the server via REST API, the session object also
22
- # contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
23
- # not present when a session is updated via the WebSocket API.
24
- sig do
25
- returns(OpenAI::Realtime::RealtimeTranscriptionSessionCreateResponse)
26
- end
27
- attr_reader :session
28
-
29
- sig do
30
- params(
31
- session:
32
- OpenAI::Realtime::RealtimeTranscriptionSessionCreateResponse::OrHash
33
- ).void
34
- end
35
- attr_writer :session
36
-
37
- # The event type, must be `transcription_session.created`.
38
- sig { returns(Symbol) }
39
- attr_accessor :type
40
-
41
- # Returned when a transcription session is created.
42
- sig do
43
- params(
44
- event_id: String,
45
- session:
46
- OpenAI::Realtime::RealtimeTranscriptionSessionCreateResponse::OrHash,
47
- type: Symbol
48
- ).returns(T.attached_class)
49
- end
50
- def self.new(
51
- # The unique ID of the server event.
52
- event_id:,
53
- # A new Realtime transcription session configuration.
54
- #
55
- # When a session is created on the server via REST API, the session object also
56
- # contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
57
- # not present when a session is updated via the WebSocket API.
58
- session:,
59
- # The event type, must be `transcription_session.created`.
60
- type: :"transcription_session.created"
61
- )
62
- end
63
-
64
- sig do
65
- override.returns(
66
- {
67
- event_id: String,
68
- session:
69
- OpenAI::Realtime::RealtimeTranscriptionSessionCreateResponse,
70
- type: Symbol
71
- }
72
- )
73
- end
74
- def to_hash
75
- end
76
- end
77
- end
78
- end
79
- end
@@ -1,20 +0,0 @@
1
- module OpenAI
2
- module Models
3
- class RealtimeTranscriptionSessionClientSecret = Realtime::RealtimeTranscriptionSessionClientSecret
4
-
5
- module Realtime
6
- type realtime_transcription_session_client_secret =
7
- { expires_at: Integer, value: String }
8
-
9
- class RealtimeTranscriptionSessionClientSecret < OpenAI::Internal::Type::BaseModel
10
- attr_accessor expires_at: Integer
11
-
12
- attr_accessor value: String
13
-
14
- def initialize: (expires_at: Integer, value: String) -> void
15
-
16
- def to_hash: -> { expires_at: Integer, value: String }
17
- end
18
- end
19
- end
20
- end
@@ -1,59 +0,0 @@
1
- module OpenAI
2
- module Models
3
- class RealtimeTranscriptionSessionInputAudioTranscription = Realtime::RealtimeTranscriptionSessionInputAudioTranscription
4
-
5
- module Realtime
6
- type realtime_transcription_session_input_audio_transcription =
7
- {
8
- language: String,
9
- model: OpenAI::Models::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::model,
10
- prompt: String
11
- }
12
-
13
- class RealtimeTranscriptionSessionInputAudioTranscription < OpenAI::Internal::Type::BaseModel
14
- attr_reader language: String?
15
-
16
- def language=: (String) -> String
17
-
18
- attr_reader model: OpenAI::Models::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::model?
19
-
20
- def model=: (
21
- OpenAI::Models::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::model
22
- ) -> OpenAI::Models::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::model
23
-
24
- attr_reader prompt: String?
25
-
26
- def prompt=: (String) -> String
27
-
28
- def initialize: (
29
- ?language: String,
30
- ?model: OpenAI::Models::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::model,
31
- ?prompt: String
32
- ) -> void
33
-
34
- def to_hash: -> {
35
- language: String,
36
- model: OpenAI::Models::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::model,
37
- prompt: String
38
- }
39
-
40
- type model =
41
- :"whisper-1"
42
- | :"gpt-4o-transcribe-latest"
43
- | :"gpt-4o-mini-transcribe"
44
- | :"gpt-4o-transcribe"
45
-
46
- module Model
47
- extend OpenAI::Internal::Type::Enum
48
-
49
- WHISPER_1: :"whisper-1"
50
- GPT_4O_TRANSCRIBE_LATEST: :"gpt-4o-transcribe-latest"
51
- GPT_4O_MINI_TRANSCRIBE: :"gpt-4o-mini-transcribe"
52
- GPT_4O_TRANSCRIBE: :"gpt-4o-transcribe"
53
-
54
- def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeTranscriptionSessionInputAudioTranscription::model]
55
- end
56
- end
57
- end
58
- end
59
- end
@@ -1,32 +0,0 @@
1
- module OpenAI
2
- module Models
3
- module Realtime
4
- type transcription_session_created =
5
- {
6
- event_id: String,
7
- session: OpenAI::Realtime::RealtimeTranscriptionSessionCreateResponse,
8
- type: :"transcription_session.created"
9
- }
10
-
11
- class TranscriptionSessionCreated < OpenAI::Internal::Type::BaseModel
12
- attr_accessor event_id: String
13
-
14
- attr_accessor session: OpenAI::Realtime::RealtimeTranscriptionSessionCreateResponse
15
-
16
- attr_accessor type: :"transcription_session.created"
17
-
18
- def initialize: (
19
- event_id: String,
20
- session: OpenAI::Realtime::RealtimeTranscriptionSessionCreateResponse,
21
- ?type: :"transcription_session.created"
22
- ) -> void
23
-
24
- def to_hash: -> {
25
- event_id: String,
26
- session: OpenAI::Realtime::RealtimeTranscriptionSessionCreateResponse,
27
- type: :"transcription_session.created"
28
- }
29
- end
30
- end
31
- end
32
- end