openai 0.41.0 → 0.42.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +9 -0
- data/README.md +1 -1
- data/lib/openai/internal/util.rb +7 -2
- data/lib/openai/models/audio/speech_create_params.rb +3 -3
- data/lib/openai/models/audio/speech_model.rb +1 -0
- data/lib/openai/models/audio/transcription_create_params.rb +10 -8
- data/lib/openai/models/audio_model.rb +1 -0
- data/lib/openai/models/realtime/audio_transcription.rb +33 -10
- data/lib/openai/models/realtime/realtime_session.rb +46 -6
- data/lib/openai/models/realtime/realtime_session_create_request.rb +6 -0
- data/lib/openai/models/realtime/realtime_session_create_response.rb +6 -0
- data/lib/openai/models/video.rb +3 -3
- data/lib/openai/models/video_create_params.rb +3 -3
- data/lib/openai/models/video_model.rb +23 -3
- data/lib/openai/resources/videos.rb +1 -1
- data/lib/openai/version.rb +1 -1
- data/rbi/openai/models/audio/speech_create_params.rbi +3 -3
- data/rbi/openai/models/audio/speech_model.rbi +5 -0
- data/rbi/openai/models/audio/transcription_create_params.rbi +15 -12
- data/rbi/openai/models/audio_model.rbi +5 -0
- data/rbi/openai/models/realtime/audio_transcription.rbi +52 -21
- data/rbi/openai/models/realtime/realtime_session.rbi +42 -12
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +10 -0
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +10 -0
- data/rbi/openai/models/video.rbi +3 -3
- data/rbi/openai/models/video_create_params.rbi +4 -4
- data/rbi/openai/models/video_model.rbi +8 -5
- data/rbi/openai/resources/audio/speech.rbi +1 -1
- data/rbi/openai/resources/audio/transcriptions.rbi +12 -10
- data/rbi/openai/resources/videos.rbi +1 -1
- data/sig/openai/models/audio/speech_model.rbs +6 -1
- data/sig/openai/models/audio_model.rbs +2 -0
- data/sig/openai/models/realtime/audio_transcription.rbs +7 -4
- data/sig/openai/models/realtime/realtime_session.rbs +9 -4
- data/sig/openai/models/realtime/realtime_session_create_request.rbs +4 -0
- data/sig/openai/models/realtime/realtime_session_create_response.rbs +4 -0
- data/sig/openai/models/video_model.rbs +5 -4
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 464227db3c30bad373f3cd4600bb7409c8ace214249c3257016363d533d848c8
+  data.tar.gz: a3828202f21732efa015e97198bf160fff5dca941a25cc1010f688a583ac8ccb
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c062d515f082c87524e10ccce322304923f4db54cd489218248f63e2d47e1f976db323f9e1cff5753f66a603112d8b56df3a9ed4f9a144d4e8867c6cdc075e52
+  data.tar.gz: 566f420ae6db1b332fe1dd253544271c470f9863d4278ebcf22a873ff2cc4102999d0ed741f68671011c5a761e39d988e3679e41e139f2a7d526334ba48cd452
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,14 @@
 # Changelog
 
+## 0.42.0 (2025-12-19)
+
+Full Changelog: [v0.41.0...v0.42.0](https://github.com/openai/openai-ruby/compare/v0.41.0...v0.42.0)
+
+### Bug Fixes
+
+* issue where JSON.parse errors when receiving HTTP 204 with no body ([7984c03](https://github.com/openai/openai-ruby/commit/7984c0396f5acd1b801514e280415090deb0cd06))
+* rebuild ([52b19e9](https://github.com/openai/openai-ruby/commit/52b19e9b4b0344c77bec4603df3d4f16ee4cd720))
+
 ## 0.41.0 (2025-12-16)
 
 Full Changelog: [v0.40.0...v0.41.0](https://github.com/openai/openai-ruby/compare/v0.40.0...v0.41.0)
data/README.md
CHANGED
data/lib/openai/internal/util.rb
CHANGED
@@ -657,7 +657,8 @@ module OpenAI
       def decode_content(headers, stream:, suppress_error: false)
         case (content_type = headers["content-type"])
         in OpenAI::Internal::Util::JSON_CONTENT
-          json = stream.to_a.join
+          return nil if (json = stream.to_a.join).empty?
+
           begin
             JSON.parse(json, symbolize_names: true)
           rescue JSON::ParserError => e
@@ -667,7 +668,11 @@ module OpenAI
         in OpenAI::Internal::Util::JSONL_CONTENT
           lines = decode_lines(stream)
           chain_fused(lines) do |y|
-            lines.each { y << JSON.parse(_1, symbolize_names: true) }
+            lines.each do
+              next if _1.empty?
+
+              y << JSON.parse(_1, symbolize_names: true)
+            end
           end
         in %r{^text/event-stream}
           lines = decode_lines(stream)
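Both guards address the same failure mode: `JSON.parse` raises `JSON::ParserError` on an empty string, so an HTTP 204 (No Content) body, or a blank JSONL line, used to error inside `decode_content`. A minimal sketch of the new behavior, using a hypothetical standalone `decode_json` helper in place of the SDK's internal stream plumbing:

```ruby
require "json"

# Hypothetical helper mirroring the guard added to decode_content:
# an empty body now decodes to nil instead of raising JSON::ParserError.
def decode_json(body)
  return nil if body.empty?

  JSON.parse(body, symbolize_names: true)
end

decode_json("")                # => nil (previously raised JSON::ParserError)
decode_json(%({"id":"v_1"}))   # => {:id=>"v_1"}
```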
data/lib/openai/models/audio/speech_create_params.rb
CHANGED
@@ -16,7 +16,7 @@ module OpenAI
 
       # @!attribute model
       #   One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-      #   `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+      #   `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
       #
       #   @return [String, Symbol, OpenAI::Models::Audio::SpeechModel]
       required :model, union: -> { OpenAI::Audio::SpeechCreateParams::Model }
@@ -79,13 +79,13 @@ module OpenAI
       #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
 
       # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-      # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+      # `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
       module Model
         extend OpenAI::Internal::Type::Union
 
         variant String
 
-        # One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+        # One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
         variant enum: -> { OpenAI::Audio::SpeechModel }
 
         # @!method self.variants
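Because `Model` unions a plain `String` with the `SpeechModel` enum, the new dated snapshot can be passed straight through. A rough usage sketch, assuming `OPENAI_API_KEY` is set in the environment (the input text is illustrative):

```ruby
require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

# Raw strings and OpenAI::Audio::SpeechModel constants are both accepted here.
audio = client.audio.speech.create(
  model: "gpt-4o-mini-tts-2025-12-15",
  input: "Hello from openai 0.42.0.",
  voice: :alloy
)
```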
data/lib/openai/models/audio/transcription_create_params.rb
CHANGED
@@ -19,8 +19,9 @@ module OpenAI
 
       # @!attribute model
       #   ID of the model to use. The options are `gpt-4o-transcribe`,
-      #   `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-      #   Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+      #   `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+      #   (which is powered by our open source Whisper V2 model), and
+      #   `gpt-4o-transcribe-diarize`.
       #
       #   @return [String, Symbol, OpenAI::Models::AudioModel]
       required :model, union: -> { OpenAI::Audio::TranscriptionCreateParams::Model }
@@ -42,9 +43,9 @@ module OpenAI
       #   Additional information to include in the transcription response. `logprobs` will
       #   return the log probabilities of the tokens in the response to understand the
       #   model's confidence in the transcription. `logprobs` only works with
-      #   response_format set to `json` and only with the models `gpt-4o-transcribe` and
-      #   `gpt-4o-mini-transcribe`. This field is not supported when using
-      #   `gpt-4o-transcribe-diarize`.
+      #   response_format set to `json` and only with the models `gpt-4o-transcribe`,
+      #   `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+      #   not supported when using `gpt-4o-transcribe-diarize`.
       #
       #   @return [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>, nil]
       optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionInclude] }
@@ -146,14 +147,15 @@ module OpenAI
       #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
 
       # ID of the model to use. The options are `gpt-4o-transcribe`,
-      # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-      # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+      # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+      # (which is powered by our open source Whisper V2 model), and
+      # `gpt-4o-transcribe-diarize`.
       module Model
         extend OpenAI::Internal::Type::Union
 
         variant String
 
-        # ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+        # ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` (which is powered by our open source Whisper V2 model), and `gpt-4o-transcribe-diarize`.
         variant enum: -> { OpenAI::AudioModel }
 
         # @!method self.variants
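The transcription params have the same union shape, so the dated snapshot works as a raw string or via the new `OpenAI::AudioModel` constant. A sketch (the file path is illustrative):

```ruby
require "openai"
require "pathname"

client = OpenAI::Client.new

transcription = client.audio.transcriptions.create(
  file: Pathname.new("meeting.mp3"), # illustrative path
  model: OpenAI::AudioModel::GPT_4O_MINI_TRANSCRIBE_2025_12_15
)
puts transcription.text
```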
data/lib/openai/models/audio_model.rb
CHANGED
@@ -8,6 +8,7 @@ module OpenAI
       WHISPER_1 = :"whisper-1"
       GPT_4O_TRANSCRIBE = :"gpt-4o-transcribe"
       GPT_4O_MINI_TRANSCRIBE = :"gpt-4o-mini-transcribe"
+      GPT_4O_MINI_TRANSCRIBE_2025_12_15 = :"gpt-4o-mini-transcribe-2025-12-15"
       GPT_4O_TRANSCRIBE_DIARIZE = :"gpt-4o-transcribe-diarize"
 
       # @!method self.values
data/lib/openai/models/realtime/audio_transcription.rb
CHANGED
@@ -14,11 +14,12 @@ module OpenAI
 
         # @!attribute model
         #   The model to use for transcription. Current options are `whisper-1`,
-        #   `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
-        #   Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+        #   `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+        #   `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+        #   `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
         #
-        #   @return [Symbol, OpenAI::Models::Realtime::AudioTranscription::Model, nil]
-        optional :model, enum: -> { OpenAI::Realtime::AudioTranscription::Model }
+        #   @return [String, Symbol, OpenAI::Models::Realtime::AudioTranscription::Model, nil]
+        optional :model, union: -> { OpenAI::Realtime::AudioTranscription::Model }
 
         # @!attribute prompt
         #   An optional text to guide the model's style or continue a previous audio
@@ -36,25 +37,47 @@ module OpenAI
         #
         #   @param language [String] The language of the input audio. Supplying the input language in
         #
-        #   @param model [Symbol, OpenAI::Models::Realtime::AudioTranscription::Model] The model to use for transcription. Current options are `whisper-1`, `gpt-4o-min
+        #   @param model [String, Symbol, OpenAI::Models::Realtime::AudioTranscription::Model] The model to use for transcription. Current options are `whisper-1`, `gpt-4o-min
         #
         #   @param prompt [String] An optional text to guide the model's style or continue a previous audio
 
         # The model to use for transcription. Current options are `whisper-1`,
-        # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
-        # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+        # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+        # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+        # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
         #
         # @see OpenAI::Models::Realtime::AudioTranscription#model
         module Model
-          extend OpenAI::Internal::Type::Enum
+          extend OpenAI::Internal::Type::Union
+
+          variant String
+
+          variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::WHISPER_1 }
+
+          variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::GPT_4O_MINI_TRANSCRIBE }
+
+          variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::GPT_4O_MINI_TRANSCRIBE_2025_12_15 }
+
+          variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::GPT_4O_TRANSCRIBE }
+
+          variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::GPT_4O_TRANSCRIBE_DIARIZE }
+
+          # @!method self.variants
+          #   @return [Array(String, Symbol)]
+
+          define_sorbet_constant!(:Variants) do
+            T.type_alias { T.any(String, OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol) }
+          end
+
+          # @!group
 
           WHISPER_1 = :"whisper-1"
           GPT_4O_MINI_TRANSCRIBE = :"gpt-4o-mini-transcribe"
+          GPT_4O_MINI_TRANSCRIBE_2025_12_15 = :"gpt-4o-mini-transcribe-2025-12-15"
           GPT_4O_TRANSCRIBE = :"gpt-4o-transcribe"
           GPT_4O_TRANSCRIBE_DIARIZE = :"gpt-4o-transcribe-diarize"
 
-          # @!method self.values
-          #   @return [Array<Symbol>]
+          # @!endgroup
         end
       end
     end
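With `Model` converted from an enum to a union that leads with `variant String`, a transcription config naming a model this SDK release does not know about should no longer fail coercion. A sketch (the second model name is made up):

```ruby
require "openai"

# Known snapshots can still use the constants kept on Model...
known = OpenAI::Realtime::AudioTranscription.new(
  model: OpenAI::Realtime::AudioTranscription::Model::GPT_4O_MINI_TRANSCRIBE_2025_12_15
)

# ...while unrecognized names ride the String variant instead of erroring.
future = OpenAI::Realtime::AudioTranscription.new(model: "gpt-6o-transcribe-preview")
```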
data/lib/openai/models/realtime/realtime_session.rb
CHANGED
@@ -94,8 +94,8 @@ module OpenAI
       # @!attribute model
       #   The Realtime model used for this session.
       #
-      #   @return [Symbol, OpenAI::Models::Realtime::RealtimeSession::Model, nil]
-      optional :model, enum: -> { OpenAI::Realtime::RealtimeSession::Model }
+      #   @return [String, Symbol, OpenAI::Models::Realtime::RealtimeSession::Model, nil]
+      optional :model, union: -> { OpenAI::Realtime::RealtimeSession::Model }
 
       # @!attribute object
       #   The object type. Always `realtime.session`.
@@ -205,7 +205,7 @@ module OpenAI
       #
       #   @param modalities [Array<Symbol, OpenAI::Models::Realtime::RealtimeSession::Modality>] The set of modalities the model can respond with. To disable audio,
       #
-      #   @param model [Symbol, OpenAI::Models::Realtime::RealtimeSession::Model] The Realtime model used for this session.
+      #   @param model [String, Symbol, OpenAI::Models::Realtime::RealtimeSession::Model] The Realtime model used for this session.
       #
       #   @param object [Symbol, OpenAI::Models::Realtime::RealtimeSession::Object] The object type. Always `realtime.session`.
       #
@@ -306,7 +306,46 @@ module OpenAI
       #
       # @see OpenAI::Models::Realtime::RealtimeSession#model
       module Model
-        extend OpenAI::Internal::Type::Enum
+        extend OpenAI::Internal::Type::Union
+
+        variant String
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME_2025_08_28 }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_REALTIME_PREVIEW }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_REALTIME_PREVIEW_2024_10_01 }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_REALTIME_PREVIEW_2024_12_17 }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_REALTIME_PREVIEW_2025_06_03 }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_MINI_REALTIME_PREVIEW }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME_MINI }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME_MINI_2025_10_06 }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME_MINI_2025_12_15 }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_AUDIO_MINI }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_AUDIO_MINI_2025_10_06 }
+
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_AUDIO_MINI_2025_12_15 }
+
+        # @!method self.variants
+        #   @return [Array(String, Symbol)]
+
+        define_sorbet_constant!(:Variants) do
+          T.type_alias { T.any(String, OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol) }
+        end
+
+        # @!group
 
         GPT_REALTIME = :"gpt-realtime"
         GPT_REALTIME_2025_08_28 = :"gpt-realtime-2025-08-28"
@@ -318,11 +357,12 @@ module OpenAI
         GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
         GPT_REALTIME_MINI = :"gpt-realtime-mini"
         GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15 = :"gpt-realtime-mini-2025-12-15"
         GPT_AUDIO_MINI = :"gpt-audio-mini"
         GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"
+        GPT_AUDIO_MINI_2025_12_15 = :"gpt-audio-mini-2025-12-15"
 
-        # @!method self.values
-        #   @return [Array<Symbol>]
+        # @!endgroup
       end
 
       # The object type. Always `realtime.session`.
data/lib/openai/models/realtime/realtime_session_create_request.rb
CHANGED
@@ -203,10 +203,14 @@ module OpenAI
 
         variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_REALTIME_MINI_2025_10_06 }
 
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_REALTIME_MINI_2025_12_15 }
+
         variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_AUDIO_MINI }
 
         variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_AUDIO_MINI_2025_10_06 }
 
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_AUDIO_MINI_2025_12_15 }
+
         # @!method self.variants
         #   @return [Array(String, Symbol)]
 
@@ -226,8 +230,10 @@ module OpenAI
         GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
         GPT_REALTIME_MINI = :"gpt-realtime-mini"
         GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15 = :"gpt-realtime-mini-2025-12-15"
         GPT_AUDIO_MINI = :"gpt-audio-mini"
         GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"
+        GPT_AUDIO_MINI_2025_12_15 = :"gpt-audio-mini-2025-12-15"
 
         # @!endgroup
       end
data/lib/openai/models/realtime/realtime_session_create_response.rb
CHANGED
@@ -612,10 +612,14 @@ module OpenAI
 
         variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_REALTIME_MINI_2025_10_06 }
 
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_REALTIME_MINI_2025_12_15 }
+
         variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_AUDIO_MINI }
 
         variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_AUDIO_MINI_2025_10_06 }
 
+        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_AUDIO_MINI_2025_12_15 }
+
         # @!method self.variants
         #   @return [Array(String, Symbol)]
 
@@ -635,8 +639,10 @@ module OpenAI
         GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
         GPT_REALTIME_MINI = :"gpt-realtime-mini"
         GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15 = :"gpt-realtime-mini-2025-12-15"
         GPT_AUDIO_MINI = :"gpt-audio-mini"
         GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"
+        GPT_AUDIO_MINI_2025_12_15 = :"gpt-audio-mini-2025-12-15"
 
         # @!endgroup
       end
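The request and response `Model` unions pick up the same two December snapshots. A sketch of the intended effect (the second model name is hypothetical):

```ruby
require "openai"

# The new dated snapshot is a first-class enum member...
req = OpenAI::Realtime::RealtimeSessionCreateRequest.new(
  model: :"gpt-realtime-mini-2025-12-15"
)

# ...and, since the union leads with `variant String`, a model name newer
# than this SDK release still passes through rather than failing validation.
newer = OpenAI::Realtime::RealtimeSessionCreateRequest.new(
  model: "gpt-realtime-mini-2026-01-01" # hypothetical future snapshot
)
```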
data/lib/openai/models/video.rb
CHANGED
@@ -37,8 +37,8 @@ module OpenAI
     # @!attribute model
     #   The video generation model that produced the job.
     #
-    #   @return [Symbol, OpenAI::Models::VideoModel]
-    required :model, enum: -> { OpenAI::VideoModel }
+    #   @return [String, Symbol, OpenAI::Models::VideoModel]
+    required :model, union: -> { OpenAI::VideoModel }
 
     # @!attribute object
     #   The object type, which is always `video`.
@@ -95,7 +95,7 @@ module OpenAI
     #
     #   @param expires_at [Integer, nil] Unix timestamp (seconds) for when the downloadable assets expire, if set.
     #
-    #   @param model [Symbol, OpenAI::Models::VideoModel] The video generation model that produced the job.
+    #   @param model [String, Symbol, OpenAI::Models::VideoModel] The video generation model that produced the job.
     #
     #   @param progress [Integer] Approximate completion percentage for the generation task.
     #
data/lib/openai/models/video_create_params.rb
CHANGED
@@ -23,8 +23,8 @@ module OpenAI
     #   The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
     #   to `sora-2`.
     #
-    #   @return [Symbol, OpenAI::Models::VideoModel, nil]
-    optional :model, enum: -> { OpenAI::VideoModel }
+    #   @return [String, Symbol, OpenAI::Models::VideoModel, nil]
+    optional :model, union: -> { OpenAI::VideoModel }
 
     # @!attribute seconds
     #   Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
@@ -47,7 +47,7 @@ module OpenAI
     #
     #   @param input_reference [Pathname, StringIO, IO, String, OpenAI::FilePart] Optional image reference that guides generation.
     #
-    #   @param model [Symbol, OpenAI::Models::VideoModel] The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
+    #   @param model [String, Symbol, OpenAI::Models::VideoModel] The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
     #
     #   @param seconds [Symbol, OpenAI::Models::VideoSeconds] Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
     #
data/lib/openai/models/video_model.rb
CHANGED
@@ -3,7 +3,28 @@
 module OpenAI
   module Models
     module VideoModel
-      extend OpenAI::Internal::Type::Enum
+      extend OpenAI::Internal::Type::Union
+
+      variant String
+
+      variant const: -> { OpenAI::Models::VideoModel::SORA_2 }
+
+      variant const: -> { OpenAI::Models::VideoModel::SORA_2_PRO }
+
+      variant const: -> { OpenAI::Models::VideoModel::SORA_2_2025_10_06 }
+
+      variant const: -> { OpenAI::Models::VideoModel::SORA_2_PRO_2025_10_06 }
+
+      variant const: -> { OpenAI::Models::VideoModel::SORA_2_2025_12_08 }
+
+      # @!method self.variants
+      #   @return [Array(String, Symbol)]
+
+      define_sorbet_constant!(:Variants) do
+        T.type_alias { T.any(String, OpenAI::VideoModel::TaggedSymbol) }
+      end
+
+      # @!group
 
       SORA_2 = :"sora-2"
       SORA_2_PRO = :"sora-2-pro"
@@ -11,8 +32,7 @@ module OpenAI
       SORA_2_PRO_2025_10_06 = :"sora-2-pro-2025-10-06"
       SORA_2_2025_12_08 = :"sora-2-2025-12-08"
 
-      # @!method self.values
-      #   @return [Array<Symbol>]
+      # @!endgroup
     end
   end
 end
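The video-side change is identical in spirit: `model` now accepts the known `sora-2` family symbols or any string. A request sketch (the prompt is illustrative):

```ruby
require "openai"

client = OpenAI::Client.new

video = client.videos.create(
  prompt: "A timelapse of a harbor at dusk", # illustrative
  model: "sora-2-2025-12-08"
)
puts video.status
```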
data/lib/openai/resources/videos.rb
CHANGED
@@ -14,7 +14,7 @@ module OpenAI
     #
     # @param input_reference [Pathname, StringIO, IO, String, OpenAI::FilePart] Optional image reference that guides generation.
     #
-    # @param model [Symbol, OpenAI::Models::VideoModel] The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
+    # @param model [String, Symbol, OpenAI::Models::VideoModel] The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
     #
     # @param seconds [Symbol, OpenAI::Models::VideoSeconds] Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
     #
data/lib/openai/version.rb
CHANGED
data/rbi/openai/models/audio/speech_create_params.rbi
CHANGED
@@ -17,7 +17,7 @@ module OpenAI
       attr_accessor :input
 
       # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-      # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+      # `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
       sig { returns(T.any(String, OpenAI::Audio::SpeechModel::OrSymbol)) }
       attr_accessor :model
 
@@ -103,7 +103,7 @@ module OpenAI
         # The text to generate audio for. The maximum length is 4096 characters.
         input:,
         # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-        # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+        # `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
         model:,
         # The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
         # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
@@ -150,7 +150,7 @@ module OpenAI
       end
 
       # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-      # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+      # `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
       module Model
         extend OpenAI::Internal::Type::Union
 
data/rbi/openai/models/audio/speech_model.rbi
CHANGED
@@ -14,6 +14,11 @@ module OpenAI
       TTS_1_HD = T.let(:"tts-1-hd", OpenAI::Audio::SpeechModel::TaggedSymbol)
       GPT_4O_MINI_TTS =
         T.let(:"gpt-4o-mini-tts", OpenAI::Audio::SpeechModel::TaggedSymbol)
+      GPT_4O_MINI_TTS_2025_12_15 =
+        T.let(
+          :"gpt-4o-mini-tts-2025-12-15",
+          OpenAI::Audio::SpeechModel::TaggedSymbol
+        )
 
       sig do
         override.returns(T::Array[OpenAI::Audio::SpeechModel::TaggedSymbol])
data/rbi/openai/models/audio/transcription_create_params.rbi
CHANGED
@@ -21,8 +21,9 @@ module OpenAI
       attr_accessor :file
 
       # ID of the model to use. The options are `gpt-4o-transcribe`,
-      # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-      # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+      # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+      # (which is powered by our open source Whisper V2 model), and
+      # `gpt-4o-transcribe-diarize`.
       sig { returns(T.any(String, OpenAI::AudioModel::OrSymbol)) }
       attr_accessor :model
 
@@ -47,9 +48,9 @@ module OpenAI
       # Additional information to include in the transcription response. `logprobs` will
       # return the log probabilities of the tokens in the response to understand the
       # model's confidence in the transcription. `logprobs` only works with
-      # response_format set to `json` and only with the models `gpt-4o-transcribe` and
-      # `gpt-4o-mini-transcribe`. This field is not supported when using
-      # `gpt-4o-transcribe-diarize`.
+      # response_format set to `json` and only with the models `gpt-4o-transcribe`,
+      # `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+      # not supported when using `gpt-4o-transcribe-diarize`.
       sig do
         returns(
           T.nilable(T::Array[OpenAI::Audio::TranscriptionInclude::OrSymbol])
@@ -185,8 +186,9 @@ module OpenAI
         # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
         file:,
         # ID of the model to use. The options are `gpt-4o-transcribe`,
-        # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-        # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+        # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+        # (which is powered by our open source Whisper V2 model), and
+        # `gpt-4o-transcribe-diarize`.
         model:,
         # Controls how the audio is cut into chunks. When set to `"auto"`, the server
         # first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -198,9 +200,9 @@ module OpenAI
         # Additional information to include in the transcription response. `logprobs` will
         # return the log probabilities of the tokens in the response to understand the
         # model's confidence in the transcription. `logprobs` only works with
-        # response_format set to `json` and only with the models `gpt-4o-transcribe` and
-        # `gpt-4o-mini-transcribe`. This field is not supported when using
-        # `gpt-4o-transcribe-diarize`.
+        # response_format set to `json` and only with the models `gpt-4o-transcribe`,
+        # `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+        # not supported when using `gpt-4o-transcribe-diarize`.
         include: nil,
         # Optional list of speaker names that correspond to the audio samples provided in
         # `known_speaker_references[]`. Each entry should be a short identifier (for
@@ -276,8 +278,9 @@ module OpenAI
       end
 
       # ID of the model to use. The options are `gpt-4o-transcribe`,
-      # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-      # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+      # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+      # (which is powered by our open source Whisper V2 model), and
+      # `gpt-4o-transcribe-diarize`.
       module Model
         extend OpenAI::Internal::Type::Union
 
data/rbi/openai/models/audio_model.rbi
CHANGED
@@ -13,6 +13,11 @@ module OpenAI
         T.let(:"gpt-4o-transcribe", OpenAI::AudioModel::TaggedSymbol)
       GPT_4O_MINI_TRANSCRIBE =
         T.let(:"gpt-4o-mini-transcribe", OpenAI::AudioModel::TaggedSymbol)
+      GPT_4O_MINI_TRANSCRIBE_2025_12_15 =
+        T.let(
+          :"gpt-4o-mini-transcribe-2025-12-15",
+          OpenAI::AudioModel::TaggedSymbol
+        )
       GPT_4O_TRANSCRIBE_DIARIZE =
         T.let(:"gpt-4o-transcribe-diarize", OpenAI::AudioModel::TaggedSymbol)
 
data/rbi/openai/models/realtime/audio_transcription.rbi
CHANGED
@@ -22,18 +22,28 @@ module OpenAI
        attr_writer :language
 
        # The model to use for transcription. Current options are `whisper-1`,
-        # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
-        # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+        # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+        # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+        # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
        sig do
          returns(
-            T.nilable(OpenAI::Realtime::AudioTranscription::Model::OrSymbol)
+            T.nilable(
+              T.any(
+                String,
+                OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+              )
+            )
          )
        end
        attr_reader :model
 
        sig do
          params(
-            model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+            model:
+              T.any(
+                String,
+                OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+              )
          ).void
        end
        attr_writer :model
@@ -52,7 +62,11 @@ module OpenAI
        sig do
          params(
            language: String,
-            model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol,
+            model:
+              T.any(
+                String,
+                OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+              ),
            prompt: String
          ).returns(T.attached_class)
        end
@@ -62,8 +76,9 @@ module OpenAI
          # format will improve accuracy and latency.
          language: nil,
          # The model to use for transcription. Current options are `whisper-1`,
-          # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
-          # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+          # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+          # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+          # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
          model: nil,
          # An optional text to guide the model's style or continue a previous audio
          # segment. For `whisper-1`, the
@@ -78,7 +93,11 @@ module OpenAI
          override.returns(
            {
              language: String,
-              model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol,
+              model:
+                T.any(
+                  String,
+                  OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+                ),
              prompt: String
            }
          )
@@ -87,10 +106,27 @@ module OpenAI
        end
 
        # The model to use for transcription. Current options are `whisper-1`,
-        # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
-        # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+        # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+        # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+        # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
        module Model
-          extend OpenAI::Internal::Type::Enum
+          extend OpenAI::Internal::Type::Union
+
+          Variants =
+            T.type_alias do
+              T.any(
+                String,
+                OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
+              )
+            end
+
+          sig do
+            override.returns(
+              T::Array[OpenAI::Realtime::AudioTranscription::Model::Variants]
+            )
+          end
+          def self.variants
+          end
 
          TaggedSymbol =
            T.type_alias do
@@ -108,6 +144,11 @@ module OpenAI
              :"gpt-4o-mini-transcribe",
              OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
            )
+          GPT_4O_MINI_TRANSCRIBE_2025_12_15 =
+            T.let(
+              :"gpt-4o-mini-transcribe-2025-12-15",
+              OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
+            )
          GPT_4O_TRANSCRIBE =
            T.let(
              :"gpt-4o-transcribe",
@@ -118,16 +159,6 @@ module OpenAI
              :"gpt-4o-transcribe-diarize",
              OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
            )
-
-          sig do
-            override.returns(
-              T::Array[
-                OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
-              ]
-            )
-          end
-          def self.values
-          end
        end
      end
    end
data/rbi/openai/models/realtime/realtime_session.rbi
CHANGED
@@ -144,12 +144,19 @@ module OpenAI
 
      # The Realtime model used for this session.
      sig do
-        returns(T.nilable(OpenAI::Realtime::RealtimeSession::Model::OrSymbol))
+        returns(
+          T.nilable(
+            T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol)
+          )
+        )
      end
      attr_reader :model
 
      sig do
-        params(model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol).void
+        params(
+          model:
+            T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol)
+        ).void
      end
      attr_writer :model
 
@@ -318,7 +325,8 @@ module OpenAI
          max_response_output_tokens: T.any(Integer, Symbol),
          modalities:
            T::Array[OpenAI::Realtime::RealtimeSession::Modality::OrSymbol],
-          model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol,
+          model:
+            T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol),
          object: OpenAI::Realtime::RealtimeSession::Object::OrSymbol,
          output_audio_format:
            OpenAI::Realtime::RealtimeSession::OutputAudioFormat::OrSymbol,
@@ -461,7 +469,11 @@ module OpenAI
          max_response_output_tokens: T.any(Integer, Symbol),
          modalities:
            T::Array[OpenAI::Realtime::RealtimeSession::Modality::OrSymbol],
-          model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol,
+          model:
+            T.any(
+              String,
+              OpenAI::Realtime::RealtimeSession::Model::OrSymbol
+            ),
          object: OpenAI::Realtime::RealtimeSession::Object::OrSymbol,
          output_audio_format:
            OpenAI::Realtime::RealtimeSession::OutputAudioFormat::OrSymbol,
@@ -659,7 +671,23 @@ module OpenAI
 
      # The Realtime model used for this session.
      module Model
-        extend OpenAI::Internal::Type::Enum
+        extend OpenAI::Internal::Type::Union
+
+        Variants =
+          T.type_alias do
+            T.any(
+              String,
+              OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
+            )
+          end
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::Realtime::RealtimeSession::Model::Variants]
+          )
+        end
+        def self.variants
+        end
 
        TaggedSymbol =
          T.type_alias do
@@ -717,6 +745,11 @@ module OpenAI
            :"gpt-realtime-mini-2025-10-06",
            OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
          )
+        GPT_REALTIME_MINI_2025_12_15 =
+          T.let(
+            :"gpt-realtime-mini-2025-12-15",
+            OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
+          )
        GPT_AUDIO_MINI =
          T.let(
            :"gpt-audio-mini",
@@ -727,14 +760,11 @@ module OpenAI
            :"gpt-audio-mini-2025-10-06",
            OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
          )
-
-        sig do
-          override.returns(
-            T::Array[OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol]
+        GPT_AUDIO_MINI_2025_12_15 =
+          T.let(
+            :"gpt-audio-mini-2025-12-15",
+            OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
          )
-        end
-        def self.values
-        end
      end
 
      # The object type. Always `realtime.session`.
data/rbi/openai/models/realtime/realtime_session_create_request.rbi
CHANGED
@@ -550,6 +550,11 @@ module OpenAI
            :"gpt-realtime-mini-2025-10-06",
            OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
          )
+        GPT_REALTIME_MINI_2025_12_15 =
+          T.let(
+            :"gpt-realtime-mini-2025-12-15",
+            OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
+          )
        GPT_AUDIO_MINI =
          T.let(
            :"gpt-audio-mini",
@@ -560,6 +565,11 @@ module OpenAI
            :"gpt-audio-mini-2025-10-06",
            OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
          )
+        GPT_AUDIO_MINI_2025_12_15 =
+          T.let(
+            :"gpt-audio-mini-2025-12-15",
+            OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
+          )
      end
 
      module OutputModality
data/rbi/openai/models/realtime/realtime_session_create_response.rbi
CHANGED
@@ -1366,6 +1366,11 @@ module OpenAI
            :"gpt-realtime-mini-2025-10-06",
            OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
          )
+        GPT_REALTIME_MINI_2025_12_15 =
+          T.let(
+            :"gpt-realtime-mini-2025-12-15",
+            OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
+          )
        GPT_AUDIO_MINI =
          T.let(
            :"gpt-audio-mini",
@@ -1376,6 +1381,11 @@ module OpenAI
            :"gpt-audio-mini-2025-10-06",
            OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
          )
+        GPT_AUDIO_MINI_2025_12_15 =
+          T.let(
+            :"gpt-audio-mini-2025-12-15",
+            OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
+          )
      end
 
      module OutputModality
data/rbi/openai/models/video.rbi
CHANGED
@@ -29,7 +29,7 @@ module OpenAI
    attr_accessor :expires_at
 
    # The video generation model that produced the job.
-    sig { returns(OpenAI::VideoModel::TaggedSymbol) }
+    sig { returns(OpenAI::VideoModel::Variants) }
    attr_accessor :model
 
    # The object type, which is always `video`.
@@ -68,7 +68,7 @@ module OpenAI
        created_at: Integer,
        error: T.nilable(OpenAI::VideoCreateError::OrHash),
        expires_at: T.nilable(Integer),
-        model: OpenAI::VideoModel::OrSymbol,
+        model: T.any(String, OpenAI::VideoModel::OrSymbol),
        progress: Integer,
        prompt: T.nilable(String),
        remixed_from_video_id: T.nilable(String),
@@ -116,7 +116,7 @@ module OpenAI
        created_at: Integer,
        error: T.nilable(OpenAI::VideoCreateError),
        expires_at: T.nilable(Integer),
-        model: OpenAI::VideoModel::TaggedSymbol,
+        model: OpenAI::VideoModel::Variants,
        object: Symbol,
        progress: Integer,
        prompt: T.nilable(String),
data/rbi/openai/models/video_create_params.rbi
CHANGED
@@ -24,10 +24,10 @@ module OpenAI
 
    # The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
    # to `sora-2`.
-    sig { returns(T.nilable(OpenAI::VideoModel::OrSymbol)) }
+    sig { returns(T.nilable(T.any(String, OpenAI::VideoModel::OrSymbol))) }
    attr_reader :model
 
-    sig { params(model: OpenAI::VideoModel::OrSymbol).void }
+    sig { params(model: T.any(String, OpenAI::VideoModel::OrSymbol)).void }
    attr_writer :model
 
    # Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
@@ -49,7 +49,7 @@ module OpenAI
      params(
        prompt: String,
        input_reference: OpenAI::Internal::FileInput,
-        model: OpenAI::VideoModel::OrSymbol,
+        model: T.any(String, OpenAI::VideoModel::OrSymbol),
        seconds: OpenAI::VideoSeconds::OrSymbol,
        size: OpenAI::VideoSize::OrSymbol,
        request_options: OpenAI::RequestOptions::OrHash
@@ -77,7 +77,7 @@ module OpenAI
      {
        prompt: String,
        input_reference: OpenAI::Internal::FileInput,
-        model: OpenAI::VideoModel::OrSymbol,
+        model: T.any(String, OpenAI::VideoModel::OrSymbol),
        seconds: OpenAI::VideoSeconds::OrSymbol,
        size: OpenAI::VideoSize::OrSymbol,
        request_options: OpenAI::RequestOptions
data/rbi/openai/models/video_model.rbi
CHANGED
@@ -3,7 +3,14 @@
 module OpenAI
   module Models
     module VideoModel
-      extend OpenAI::Internal::Type::Enum
+      extend OpenAI::Internal::Type::Union
+
+      Variants =
+        T.type_alias { T.any(String, OpenAI::VideoModel::TaggedSymbol) }
+
+      sig { override.returns(T::Array[OpenAI::VideoModel::Variants]) }
+      def self.variants
+      end
 
       TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::VideoModel) }
       OrSymbol = T.type_alias { T.any(Symbol, String) }
@@ -16,10 +23,6 @@ module OpenAI
         T.let(:"sora-2-pro-2025-10-06", OpenAI::VideoModel::TaggedSymbol)
       SORA_2_2025_12_08 =
         T.let(:"sora-2-2025-12-08", OpenAI::VideoModel::TaggedSymbol)
-
-      sig { override.returns(T::Array[OpenAI::VideoModel::TaggedSymbol]) }
-      def self.values
-      end
     end
   end
 end
data/rbi/openai/resources/audio/speech.rbi
CHANGED
@@ -24,7 +24,7 @@ module OpenAI
        # The text to generate audio for. The maximum length is 4096 characters.
        input:,
        # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-        # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+        # `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
        model:,
        # The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
        # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
data/rbi/openai/resources/audio/transcriptions.rbi
CHANGED
@@ -41,8 +41,9 @@ module OpenAI
        # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        file:,
        # ID of the model to use. The options are `gpt-4o-transcribe`,
-        # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-        # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+        # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+        # (which is powered by our open source Whisper V2 model), and
+        # `gpt-4o-transcribe-diarize`.
        model:,
        # Controls how the audio is cut into chunks. When set to `"auto"`, the server
        # first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -54,9 +55,9 @@ module OpenAI
        # Additional information to include in the transcription response. `logprobs` will
        # return the log probabilities of the tokens in the response to understand the
        # model's confidence in the transcription. `logprobs` only works with
-        # response_format set to `json` and only with the models `gpt-4o-transcribe` and
-        # `gpt-4o-mini-transcribe`. This field is not supported when using
-        # `gpt-4o-transcribe-diarize`.
+        # response_format set to `json` and only with the models `gpt-4o-transcribe`,
+        # `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+        # not supported when using `gpt-4o-transcribe-diarize`.
        include: nil,
        # Optional list of speaker names that correspond to the audio samples provided in
        # `known_speaker_references[]`. Each entry should be a short identifier (for
@@ -143,8 +144,9 @@ module OpenAI
        # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        file:,
        # ID of the model to use. The options are `gpt-4o-transcribe`,
-        # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-        # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+        # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+        # (which is powered by our open source Whisper V2 model), and
+        # `gpt-4o-transcribe-diarize`.
        model:,
        # Controls how the audio is cut into chunks. When set to `"auto"`, the server
        # first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -156,9 +158,9 @@ module OpenAI
        # Additional information to include in the transcription response. `logprobs` will
        # return the log probabilities of the tokens in the response to understand the
        # model's confidence in the transcription. `logprobs` only works with
-        # response_format set to `json` and only with the models `gpt-4o-transcribe` and
-        # `gpt-4o-mini-transcribe`. This field is not supported when using
-        # `gpt-4o-transcribe-diarize`.
+        # response_format set to `json` and only with the models `gpt-4o-transcribe`,
+        # `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+        # not supported when using `gpt-4o-transcribe-diarize`.
        include: nil,
        # Optional list of speaker names that correspond to the audio samples provided in
        # `known_speaker_references[]`. Each entry should be a short identifier (for
data/rbi/openai/resources/videos.rbi
CHANGED
@@ -8,7 +8,7 @@ module OpenAI
      params(
        prompt: String,
        input_reference: OpenAI::Internal::FileInput,
-        model: OpenAI::VideoModel::OrSymbol,
+        model: T.any(String, OpenAI::VideoModel::OrSymbol),
        seconds: OpenAI::VideoSeconds::OrSymbol,
        size: OpenAI::VideoSize::OrSymbol,
        request_options: OpenAI::RequestOptions::OrHash
data/sig/openai/models/audio/speech_model.rbs
CHANGED
@@ -1,7 +1,11 @@
 module OpenAI
   module Models
     module Audio
-      type speech_model = :"tts-1" | :"tts-1-hd" | :"gpt-4o-mini-tts"
+      type speech_model =
+        :"tts-1"
+        | :"tts-1-hd"
+        | :"gpt-4o-mini-tts"
+        | :"gpt-4o-mini-tts-2025-12-15"
 
       module SpeechModel
         extend OpenAI::Internal::Type::Enum
@@ -9,6 +13,7 @@ module OpenAI
         TTS_1: :"tts-1"
         TTS_1_HD: :"tts-1-hd"
         GPT_4O_MINI_TTS: :"gpt-4o-mini-tts"
+        GPT_4O_MINI_TTS_2025_12_15: :"gpt-4o-mini-tts-2025-12-15"
 
         def self?.values: -> ::Array[OpenAI::Models::Audio::speech_model]
       end
data/sig/openai/models/audio_model.rbs
CHANGED
@@ -4,6 +4,7 @@ module OpenAI
      :"whisper-1"
      | :"gpt-4o-transcribe"
      | :"gpt-4o-mini-transcribe"
+      | :"gpt-4o-mini-transcribe-2025-12-15"
      | :"gpt-4o-transcribe-diarize"
 
    module AudioModel
@@ -12,6 +13,7 @@ module OpenAI
      WHISPER_1: :"whisper-1"
      GPT_4O_TRANSCRIBE: :"gpt-4o-transcribe"
      GPT_4O_MINI_TRANSCRIBE: :"gpt-4o-mini-transcribe"
+      GPT_4O_MINI_TRANSCRIBE_2025_12_15: :"gpt-4o-mini-transcribe-2025-12-15"
      GPT_4O_TRANSCRIBE_DIARIZE: :"gpt-4o-transcribe-diarize"
 
      def self?.values: -> ::Array[OpenAI::Models::audio_model]
data/sig/openai/models/realtime/audio_transcription.rbs
CHANGED
@@ -36,20 +36,23 @@ module OpenAI
        }
 
        type model =
-          :"whisper-1"
+          String
+          | :"whisper-1"
          | :"gpt-4o-mini-transcribe"
+          | :"gpt-4o-mini-transcribe-2025-12-15"
          | :"gpt-4o-transcribe"
          | :"gpt-4o-transcribe-diarize"
 
        module Model
-          extend OpenAI::Internal::Type::Enum
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Realtime::AudioTranscription::model]
 
          WHISPER_1: :"whisper-1"
          GPT_4O_MINI_TRANSCRIBE: :"gpt-4o-mini-transcribe"
+          GPT_4O_MINI_TRANSCRIBE_2025_12_15: :"gpt-4o-mini-transcribe-2025-12-15"
          GPT_4O_TRANSCRIBE: :"gpt-4o-transcribe"
          GPT_4O_TRANSCRIBE_DIARIZE: :"gpt-4o-transcribe-diarize"
-
-          def self?.values: -> ::Array[OpenAI::Models::Realtime::AudioTranscription::model]
        end
      end
    end
data/sig/openai/models/realtime/realtime_session.rbs
CHANGED
@@ -221,7 +221,8 @@ module OpenAI
      end
 
      type model =
-        :"gpt-realtime"
+        String
+        | :"gpt-realtime"
        | :"gpt-realtime-2025-08-28"
        | :"gpt-4o-realtime-preview"
        | :"gpt-4o-realtime-preview-2024-10-01"
@@ -231,11 +232,15 @@ module OpenAI
        | :"gpt-4o-mini-realtime-preview-2024-12-17"
        | :"gpt-realtime-mini"
        | :"gpt-realtime-mini-2025-10-06"
+        | :"gpt-realtime-mini-2025-12-15"
        | :"gpt-audio-mini"
        | :"gpt-audio-mini-2025-10-06"
+        | :"gpt-audio-mini-2025-12-15"
 
      module Model
-        extend OpenAI::Internal::Type::Enum
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::model]
 
        GPT_REALTIME: :"gpt-realtime"
        GPT_REALTIME_2025_08_28: :"gpt-realtime-2025-08-28"
@@ -247,10 +252,10 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17: :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI: :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06: :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15: :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI: :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06: :"gpt-audio-mini-2025-10-06"
-
-        def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::model]
+        GPT_AUDIO_MINI_2025_12_15: :"gpt-audio-mini-2025-12-15"
      end
 
      type object = :"realtime.session"
data/sig/openai/models/realtime/realtime_session_create_request.rbs
CHANGED
@@ -136,8 +136,10 @@ module OpenAI
        | :"gpt-4o-mini-realtime-preview-2024-12-17"
        | :"gpt-realtime-mini"
        | :"gpt-realtime-mini-2025-10-06"
+        | :"gpt-realtime-mini-2025-12-15"
        | :"gpt-audio-mini"
        | :"gpt-audio-mini-2025-10-06"
+        | :"gpt-audio-mini-2025-12-15"
 
      module Model
        extend OpenAI::Internal::Type::Union
@@ -154,8 +156,10 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17: :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI: :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06: :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15: :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI: :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06: :"gpt-audio-mini-2025-10-06"
+        GPT_AUDIO_MINI_2025_12_15: :"gpt-audio-mini-2025-12-15"
      end
 
      type output_modality = :text | :audio
data/sig/openai/models/realtime/realtime_session_create_response.rbs
CHANGED
@@ -423,8 +423,10 @@ module OpenAI
        | :"gpt-4o-mini-realtime-preview-2024-12-17"
        | :"gpt-realtime-mini"
        | :"gpt-realtime-mini-2025-10-06"
+        | :"gpt-realtime-mini-2025-12-15"
        | :"gpt-audio-mini"
        | :"gpt-audio-mini-2025-10-06"
+        | :"gpt-audio-mini-2025-12-15"
 
      module Model
        extend OpenAI::Internal::Type::Union
@@ -441,8 +443,10 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17: :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI: :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06: :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15: :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI: :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06: :"gpt-audio-mini-2025-10-06"
+        GPT_AUDIO_MINI_2025_12_15: :"gpt-audio-mini-2025-12-15"
      end
 
      type output_modality = :text | :audio
data/sig/openai/models/video_model.rbs
CHANGED
@@ -1,22 +1,23 @@
 module OpenAI
   module Models
     type video_model =
-      :"sora-2"
+      String
+      | :"sora-2"
      | :"sora-2-pro"
      | :"sora-2-2025-10-06"
      | :"sora-2-pro-2025-10-06"
      | :"sora-2-2025-12-08"
 
    module VideoModel
-      extend OpenAI::Internal::Type::Enum
+      extend OpenAI::Internal::Type::Union
+
+      def self?.variants: -> ::Array[OpenAI::Models::video_model]
 
      SORA_2: :"sora-2"
      SORA_2_PRO: :"sora-2-pro"
      SORA_2_2025_10_06: :"sora-2-2025-10-06"
      SORA_2_PRO_2025_10_06: :"sora-2-pro-2025-10-06"
      SORA_2_2025_12_08: :"sora-2-2025-12-08"
-
-      def self?.values: -> ::Array[OpenAI::Models::video_model]
    end
  end
end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: openai
 version: !ruby/object:Gem::Version
-  version: 0.41.0
+  version: 0.42.0
 platform: ruby
 authors:
 - OpenAI
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-12-16 00:00:00.000000000 Z
+date: 2025-12-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: base64