openai 0.40.0 → 0.42.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/README.md +1 -1
  4. data/lib/openai/internal/transport/pooled_net_requester.rb +12 -10
  5. data/lib/openai/internal/util.rb +7 -2
  6. data/lib/openai/models/audio/speech_create_params.rb +3 -3
  7. data/lib/openai/models/audio/speech_model.rb +1 -0
  8. data/lib/openai/models/audio/transcription_create_params.rb +10 -8
  9. data/lib/openai/models/audio_model.rb +1 -0
  10. data/lib/openai/models/graders/grader_input_item.rb +87 -0
  11. data/lib/openai/models/graders/grader_inputs.rb +0 -80
  12. data/lib/openai/models/image.rb +6 -6
  13. data/lib/openai/models/image_edit_completed_event.rb +5 -3
  14. data/lib/openai/models/image_edit_params.rb +34 -32
  15. data/lib/openai/models/image_gen_completed_event.rb +5 -3
  16. data/lib/openai/models/image_generate_params.rb +38 -36
  17. data/lib/openai/models/image_model.rb +1 -0
  18. data/lib/openai/models/images_response.rb +31 -1
  19. data/lib/openai/models/realtime/audio_transcription.rb +33 -10
  20. data/lib/openai/models/realtime/realtime_session.rb +46 -6
  21. data/lib/openai/models/realtime/realtime_session_create_request.rb +6 -0
  22. data/lib/openai/models/realtime/realtime_session_create_response.rb +6 -0
  23. data/lib/openai/models/responses/tool.rb +22 -8
  24. data/lib/openai/models/video.rb +3 -3
  25. data/lib/openai/models/video_create_params.rb +3 -3
  26. data/lib/openai/models/video_model.rb +23 -3
  27. data/lib/openai/resources/images.rb +6 -6
  28. data/lib/openai/resources/videos.rb +1 -1
  29. data/lib/openai/version.rb +1 -1
  30. data/lib/openai.rb +1 -0
  31. data/rbi/openai/models/audio/speech_create_params.rbi +3 -3
  32. data/rbi/openai/models/audio/speech_model.rbi +5 -0
  33. data/rbi/openai/models/audio/transcription_create_params.rbi +15 -12
  34. data/rbi/openai/models/audio_model.rbi +5 -0
  35. data/rbi/openai/models/graders/grader_input_item.rbi +112 -0
  36. data/rbi/openai/models/graders/grader_inputs.rbi +0 -105
  37. data/rbi/openai/models/image.rbi +10 -10
  38. data/rbi/openai/models/image_edit_completed_event.rbi +6 -3
  39. data/rbi/openai/models/image_edit_params.rbi +49 -46
  40. data/rbi/openai/models/image_gen_completed_event.rbi +6 -3
  41. data/rbi/openai/models/image_generate_params.rbi +54 -51
  42. data/rbi/openai/models/image_model.rbi +1 -0
  43. data/rbi/openai/models/images_response.rbi +61 -3
  44. data/rbi/openai/models/realtime/audio_transcription.rbi +52 -21
  45. data/rbi/openai/models/realtime/realtime_session.rbi +42 -12
  46. data/rbi/openai/models/realtime/realtime_session_create_request.rbi +10 -0
  47. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +10 -0
  48. data/rbi/openai/models/responses/tool.rbi +38 -16
  49. data/rbi/openai/models/video.rbi +3 -3
  50. data/rbi/openai/models/video_create_params.rbi +4 -4
  51. data/rbi/openai/models/video_model.rbi +8 -5
  52. data/rbi/openai/resources/audio/speech.rbi +1 -1
  53. data/rbi/openai/resources/audio/transcriptions.rbi +12 -10
  54. data/rbi/openai/resources/images.rbi +72 -68
  55. data/rbi/openai/resources/videos.rbi +1 -1
  56. data/sig/openai/models/audio/speech_model.rbs +6 -1
  57. data/sig/openai/models/audio_model.rbs +2 -0
  58. data/sig/openai/models/graders/grader_input_item.rbs +55 -0
  59. data/sig/openai/models/graders/grader_inputs.rbs +0 -50
  60. data/sig/openai/models/image_model.rbs +6 -1
  61. data/sig/openai/models/images_response.rbs +25 -3
  62. data/sig/openai/models/realtime/audio_transcription.rbs +7 -4
  63. data/sig/openai/models/realtime/realtime_session.rbs +9 -4
  64. data/sig/openai/models/realtime/realtime_session_create_request.rbs +4 -0
  65. data/sig/openai/models/realtime/realtime_session_create_response.rbs +4 -0
  66. data/sig/openai/models/responses/tool.rbs +4 -4
  67. data/sig/openai/models/video_model.rbs +5 -4
  68. metadata +5 -2
data/rbi/openai/models/realtime/audio_transcription.rbi
@@ -22,18 +22,28 @@ module OpenAI
         attr_writer :language
 
         # The model to use for transcription. Current options are `whisper-1`,
-        # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
-        # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+        # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+        # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+        # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
         sig do
           returns(
-            T.nilable(OpenAI::Realtime::AudioTranscription::Model::OrSymbol)
+            T.nilable(
+              T.any(
+                String,
+                OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+              )
+            )
           )
         end
         attr_reader :model
 
         sig do
           params(
-            model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+            model:
+              T.any(
+                String,
+                OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+              )
           ).void
         end
         attr_writer :model
@@ -52,7 +62,11 @@ module OpenAI
         sig do
           params(
             language: String,
-            model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol,
+            model:
+              T.any(
+                String,
+                OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+              ),
             prompt: String
           ).returns(T.attached_class)
         end
@@ -62,8 +76,9 @@ module OpenAI
           # format will improve accuracy and latency.
           language: nil,
           # The model to use for transcription. Current options are `whisper-1`,
-          # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
-          # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+          # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+          # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+          # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
           model: nil,
           # An optional text to guide the model's style or continue a previous audio
           # segment. For `whisper-1`, the
@@ -78,7 +93,11 @@ module OpenAI
          override.returns(
            {
              language: String,
-             model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol,
+             model:
+               T.any(
+                 String,
+                 OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+               ),
              prompt: String
            }
          )
@@ -87,10 +106,27 @@ module OpenAI
        end
 
        # The model to use for transcription. Current options are `whisper-1`,
-       # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
-       # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+       # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+       # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+       # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
        module Model
-         extend OpenAI::Internal::Type::Enum
+         extend OpenAI::Internal::Type::Union
+
+         Variants =
+           T.type_alias do
+             T.any(
+               String,
+               OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
+             )
+           end
+
+         sig do
+           override.returns(
+             T::Array[OpenAI::Realtime::AudioTranscription::Model::Variants]
+           )
+         end
+         def self.variants
+         end
 
          TaggedSymbol =
            T.type_alias do
@@ -108,6 +144,11 @@ module OpenAI
              :"gpt-4o-mini-transcribe",
              OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
            )
+         GPT_4O_MINI_TRANSCRIBE_2025_12_15 =
+           T.let(
+             :"gpt-4o-mini-transcribe-2025-12-15",
+             OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
+           )
          GPT_4O_TRANSCRIBE =
            T.let(
              :"gpt-4o-transcribe",
@@ -118,16 +159,6 @@ module OpenAI
              :"gpt-4o-transcribe-diarize",
              OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
            )
-
-         sig do
-           override.returns(
-             T::Array[
-               OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
-             ]
-           )
-         end
-         def self.values
-         end
        end
      end
    end
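
The Enum to Union switch above is the pattern repeated throughout this release: `model` fields that previously admitted only the generated enum symbols are now typed as `T.any(String, ...)`. A minimal sketch of what that permits, assuming the gem is installed; the `-preview` model ID below is hypothetical, purely for illustration:

require "openai"

# Known snapshots still work as the generated symbols.
transcription = OpenAI::Realtime::AudioTranscription.new(
  model: :"gpt-4o-mini-transcribe-2025-12-15",
  language: "en"
)

# The union also admits a plain String, so a model ID this gem version
# has no constant for still satisfies the Sorbet signature.
transcription = OpenAI::Realtime::AudioTranscription.new(
  model: "gpt-4o-transcribe-preview", # hypothetical ID, for illustration
  language: "en"
)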
data/rbi/openai/models/realtime/realtime_session.rbi
@@ -144,12 +144,19 @@ module OpenAI
 
         # The Realtime model used for this session.
         sig do
-          returns(T.nilable(OpenAI::Realtime::RealtimeSession::Model::OrSymbol))
+          returns(
+            T.nilable(
+              T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol)
+            )
+          )
         end
         attr_reader :model
 
         sig do
-          params(model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol).void
+          params(
+            model:
+              T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol)
+          ).void
         end
         attr_writer :model
 
@@ -318,7 +325,8 @@ module OpenAI
            max_response_output_tokens: T.any(Integer, Symbol),
            modalities:
              T::Array[OpenAI::Realtime::RealtimeSession::Modality::OrSymbol],
-           model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol,
+           model:
+             T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol),
            object: OpenAI::Realtime::RealtimeSession::Object::OrSymbol,
            output_audio_format:
              OpenAI::Realtime::RealtimeSession::OutputAudioFormat::OrSymbol,
@@ -461,7 +469,11 @@ module OpenAI
              max_response_output_tokens: T.any(Integer, Symbol),
              modalities:
                T::Array[OpenAI::Realtime::RealtimeSession::Modality::OrSymbol],
-             model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol,
+             model:
+               T.any(
+                 String,
+                 OpenAI::Realtime::RealtimeSession::Model::OrSymbol
+               ),
              object: OpenAI::Realtime::RealtimeSession::Object::OrSymbol,
              output_audio_format:
                OpenAI::Realtime::RealtimeSession::OutputAudioFormat::OrSymbol,
@@ -659,7 +671,23 @@ module OpenAI
 
        # The Realtime model used for this session.
        module Model
-         extend OpenAI::Internal::Type::Enum
+         extend OpenAI::Internal::Type::Union
+
+         Variants =
+           T.type_alias do
+             T.any(
+               String,
+               OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
+             )
+           end
+
+         sig do
+           override.returns(
+             T::Array[OpenAI::Realtime::RealtimeSession::Model::Variants]
+           )
+         end
+         def self.variants
+         end
 
          TaggedSymbol =
            T.type_alias do
@@ -717,6 +745,11 @@ module OpenAI
              :"gpt-realtime-mini-2025-10-06",
              OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
            )
+         GPT_REALTIME_MINI_2025_12_15 =
+           T.let(
+             :"gpt-realtime-mini-2025-12-15",
+             OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
+           )
          GPT_AUDIO_MINI =
            T.let(
              :"gpt-audio-mini",
@@ -727,14 +760,11 @@ module OpenAI
              :"gpt-audio-mini-2025-10-06",
              OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
            )
-
-         sig do
-           override.returns(
-             T::Array[OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol]
+         GPT_AUDIO_MINI_2025_12_15 =
+           T.let(
+             :"gpt-audio-mini-2025-12-15",
+             OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
            )
-         end
-         def self.values
-         end
        end
 
        # The object type. Always `realtime.session`.
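
With `Model` now a union, the generated reflection method is `variants` rather than the enum's `values`. A short sketch of consuming the widened field, assuming `session` is a `RealtimeSession` already fetched from the API:

# `variants` replaces the enum-style `values` reflection on Model.
OpenAI::Realtime::RealtimeSession::Model.variants

# The field itself may now surface either a known tagged Symbol or a raw
# String when the API reports a model ID this SDK version predates.
case (model = session.model)
when Symbol then puts "known model: #{model}"
when String then puts "passthrough model ID: #{model}"
end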
data/rbi/openai/models/realtime/realtime_session_create_request.rbi
@@ -550,6 +550,11 @@ module OpenAI
             :"gpt-realtime-mini-2025-10-06",
             OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
           )
+         GPT_REALTIME_MINI_2025_12_15 =
+           T.let(
+             :"gpt-realtime-mini-2025-12-15",
+             OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
+           )
          GPT_AUDIO_MINI =
            T.let(
              :"gpt-audio-mini",
@@ -560,6 +565,11 @@ module OpenAI
             :"gpt-audio-mini-2025-10-06",
             OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
           )
+         GPT_AUDIO_MINI_2025_12_15 =
+           T.let(
+             :"gpt-audio-mini-2025-12-15",
+             OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
+           )
        end
 
        module OutputModality
data/rbi/openai/models/realtime/realtime_session_create_response.rbi
@@ -1366,6 +1366,11 @@ module OpenAI
             :"gpt-realtime-mini-2025-10-06",
             OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
           )
+         GPT_REALTIME_MINI_2025_12_15 =
+           T.let(
+             :"gpt-realtime-mini-2025-12-15",
+             OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
+           )
          GPT_AUDIO_MINI =
            T.let(
              :"gpt-audio-mini",
@@ -1376,6 +1381,11 @@ module OpenAI
             :"gpt-audio-mini-2025-10-06",
             OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
           )
+         GPT_AUDIO_MINI_2025_12_15 =
+           T.let(
+             :"gpt-audio-mini-2025-12-15",
+             OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
+           )
        end
 
        module OutputModality
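
The request- and response-side unions gain the same December 2025 snapshot constants, so a session can be pinned to them. A sketch, constructing the request model standalone (looser than a full session setup, which is elided here):

# The generated constant and the bare symbol literal are interchangeable.
request = OpenAI::Realtime::RealtimeSessionCreateRequest.new(
  model:
    OpenAI::Realtime::RealtimeSessionCreateRequest::Model::GPT_REALTIME_MINI_2025_12_15
)
request.model # => :"gpt-realtime-mini-2025-12-15"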
data/rbi/openai/models/responses/tool.rbi
@@ -888,7 +888,10 @@ module OpenAI
          sig do
            returns(
              T.nilable(
-               OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
+               T.any(
+                 String,
+                 OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
+               )
              )
            )
          end
@@ -896,7 +899,11 @@ module OpenAI
 
          sig do
            params(
-             model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
+             model:
+               T.any(
+                 String,
+                 OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
+               )
            ).void
          end
          attr_writer :model
@@ -990,7 +997,7 @@ module OpenAI
          end
          attr_writer :size
 
-         # A tool that generates images using a model like `gpt-image-1`.
+         # A tool that generates images using the GPT image models.
          sig do
            params(
              background:
@@ -1001,7 +1008,11 @@ module OpenAI
              ),
              input_image_mask:
                OpenAI::Responses::Tool::ImageGeneration::InputImageMask::OrHash,
-             model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol,
+             model:
+               T.any(
+                 String,
+                 OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
+               ),
              moderation:
                OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol,
              output_compression: Integer,
@@ -1062,7 +1073,10 @@ module OpenAI
              input_image_mask:
                OpenAI::Responses::Tool::ImageGeneration::InputImageMask,
              model:
-               OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol,
+               T.any(
+                 String,
+                 OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol
+               ),
              moderation:
                OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol,
              output_compression: Integer,
@@ -1202,7 +1216,25 @@ module OpenAI
 
          # The image generation model to use. Default: `gpt-image-1`.
          module Model
-           extend OpenAI::Internal::Type::Enum
+           extend OpenAI::Internal::Type::Union
+
+           Variants =
+             T.type_alias do
+               T.any(
+                 String,
+                 OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
+               )
+             end
+
+           sig do
+             override.returns(
+               T::Array[
+                 OpenAI::Responses::Tool::ImageGeneration::Model::Variants
+               ]
+             )
+           end
+           def self.variants
+           end
 
            TaggedSymbol =
              T.type_alias do
@@ -1220,16 +1252,6 @@ module OpenAI
                :"gpt-image-1-mini",
                OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
              )
-
-           sig do
-             override.returns(
-               T::Array[
-                 OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol
-               ]
-             )
-           end
-           def self.values
-           end
          end
 
          # Moderation level for the generated image. Default: `auto`.
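
For the Responses API this means the `image_generation` tool's `model` can name an ID the SDK does not enumerate. A sketch, assuming an API key in `OPENAI_API_KEY`; the prompt and model choices are illustrative:

require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: :"gpt-4.1",
  input: "Generate an image of a lighthouse at dusk.",
  tools: [
    {
      type: :image_generation,
      model: "gpt-image-1-mini" # a plain String now satisfies the union
    }
  ]
)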
data/rbi/openai/models/video.rbi
@@ -29,7 +29,7 @@ module OpenAI
      attr_accessor :expires_at
 
      # The video generation model that produced the job.
-     sig { returns(OpenAI::VideoModel::TaggedSymbol) }
+     sig { returns(OpenAI::VideoModel::Variants) }
      attr_accessor :model
 
      # The object type, which is always `video`.
@@ -68,7 +68,7 @@ module OpenAI
          created_at: Integer,
          error: T.nilable(OpenAI::VideoCreateError::OrHash),
          expires_at: T.nilable(Integer),
-         model: OpenAI::VideoModel::OrSymbol,
+         model: T.any(String, OpenAI::VideoModel::OrSymbol),
          progress: Integer,
          prompt: T.nilable(String),
          remixed_from_video_id: T.nilable(String),
@@ -116,7 +116,7 @@ module OpenAI
          created_at: Integer,
          error: T.nilable(OpenAI::VideoCreateError),
          expires_at: T.nilable(Integer),
-         model: OpenAI::VideoModel::TaggedSymbol,
+         model: OpenAI::VideoModel::Variants,
          object: Symbol,
          progress: Integer,
          prompt: T.nilable(String),
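
Since `Video#model` is now `VideoModel::Variants` rather than `TaggedSymbol`, downstream code should tolerate both shapes. A sketch, assuming `video` is an `OpenAI::Models::Video` fetched earlier:

model = video.model
# Known IDs arrive as tagged symbols (e.g. :"sora-2"); an ID this gem
# version does not know is passed through as a plain String.
label = model.is_a?(Symbol) ? "known snapshot" : "passthrough ID"
puts "#{label}: #{model}"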
data/rbi/openai/models/video_create_params.rbi
@@ -24,10 +24,10 @@ module OpenAI
 
      # The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
      # to `sora-2`.
-     sig { returns(T.nilable(OpenAI::VideoModel::OrSymbol)) }
+     sig { returns(T.nilable(T.any(String, OpenAI::VideoModel::OrSymbol))) }
      attr_reader :model
 
-     sig { params(model: OpenAI::VideoModel::OrSymbol).void }
+     sig { params(model: T.any(String, OpenAI::VideoModel::OrSymbol)).void }
      attr_writer :model
 
      # Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
@@ -49,7 +49,7 @@ module OpenAI
        params(
          prompt: String,
          input_reference: OpenAI::Internal::FileInput,
-         model: OpenAI::VideoModel::OrSymbol,
+         model: T.any(String, OpenAI::VideoModel::OrSymbol),
          seconds: OpenAI::VideoSeconds::OrSymbol,
          size: OpenAI::VideoSize::OrSymbol,
          request_options: OpenAI::RequestOptions::OrHash
@@ -77,7 +77,7 @@ module OpenAI
        {
          prompt: String,
          input_reference: OpenAI::Internal::FileInput,
-         model: OpenAI::VideoModel::OrSymbol,
+         model: T.any(String, OpenAI::VideoModel::OrSymbol),
          seconds: OpenAI::VideoSeconds::OrSymbol,
          size: OpenAI::VideoSize::OrSymbol,
          request_options: OpenAI::RequestOptions
data/rbi/openai/models/video_model.rbi
@@ -3,7 +3,14 @@
 module OpenAI
   module Models
     module VideoModel
-      extend OpenAI::Internal::Type::Enum
+      extend OpenAI::Internal::Type::Union
+
+      Variants =
+        T.type_alias { T.any(String, OpenAI::VideoModel::TaggedSymbol) }
+
+      sig { override.returns(T::Array[OpenAI::VideoModel::Variants]) }
+      def self.variants
+      end
 
       TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::VideoModel) }
       OrSymbol = T.type_alias { T.any(Symbol, String) }
@@ -16,10 +23,6 @@ module OpenAI
         T.let(:"sora-2-pro-2025-10-06", OpenAI::VideoModel::TaggedSymbol)
       SORA_2_2025_12_08 =
         T.let(:"sora-2-2025-12-08", OpenAI::VideoModel::TaggedSymbol)
-
-      sig { override.returns(T::Array[OpenAI::VideoModel::TaggedSymbol]) }
-      def self.values
-      end
     end
   end
 end
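
At the call site the widened union looks like this, a sketch assuming an API key in `OPENAI_API_KEY`:

require "openai"

client = OpenAI::Client.new

# Known IDs can be passed as symbols or via the generated constants.
video = client.videos.create(
  prompt: "A paper boat drifting down a rain gutter",
  model: OpenAI::VideoModel::SORA_2
)

# A raw String also type-checks now, e.g. for an ID newer than the gem.
video = client.videos.create(
  prompt: "Same scene, higher fidelity",
  model: "sora-2-pro"
)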
data/rbi/openai/resources/audio/speech.rbi
@@ -24,7 +24,7 @@ module OpenAI
          # The text to generate audio for. The maximum length is 4096 characters.
          input:,
          # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-         # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+         # `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
          model:,
          # The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
          # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
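
A sketch of requesting speech from the newly documented dated TTS snapshot, assuming an API key in `OPENAI_API_KEY` and the default mp3 output format:

require "openai"

client = OpenAI::Client.new

audio = client.audio.speech.create(
  input: "The quick brown fox jumped over the lazy dog.",
  model: :"gpt-4o-mini-tts-2025-12-15",
  voice: :alloy
)

# This SDK buffers the audio bytes into an in-memory StringIO.
File.binwrite("speech.mp3", audio.string)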
data/rbi/openai/resources/audio/transcriptions.rbi
@@ -41,8 +41,9 @@ module OpenAI
          # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
          file:,
          # ID of the model to use. The options are `gpt-4o-transcribe`,
-         # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-         # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+         # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+         # (which is powered by our open source Whisper V2 model), and
+         # `gpt-4o-transcribe-diarize`.
          model:,
          # Controls how the audio is cut into chunks. When set to `"auto"`, the server
          # first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -54,9 +55,9 @@ module OpenAI
          # Additional information to include in the transcription response. `logprobs` will
          # return the log probabilities of the tokens in the response to understand the
          # model's confidence in the transcription. `logprobs` only works with
-         # response_format set to `json` and only with the models `gpt-4o-transcribe` and
-         # `gpt-4o-mini-transcribe`. This field is not supported when using
-         # `gpt-4o-transcribe-diarize`.
+         # response_format set to `json` and only with the models `gpt-4o-transcribe`,
+         # `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+         # not supported when using `gpt-4o-transcribe-diarize`.
          include: nil,
          # Optional list of speaker names that correspond to the audio samples provided in
          # `known_speaker_references[]`. Each entry should be a short identifier (for
@@ -143,8 +144,9 @@ module OpenAI
          # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
          file:,
          # ID of the model to use. The options are `gpt-4o-transcribe`,
-         # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-         # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+         # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+         # (which is powered by our open source Whisper V2 model), and
+         # `gpt-4o-transcribe-diarize`.
          model:,
          # Controls how the audio is cut into chunks. When set to `"auto"`, the server
          # first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -156,9 +158,9 @@ module OpenAI
          # Additional information to include in the transcription response. `logprobs` will
          # return the log probabilities of the tokens in the response to understand the
          # model's confidence in the transcription. `logprobs` only works with
-         # response_format set to `json` and only with the models `gpt-4o-transcribe` and
-         # `gpt-4o-mini-transcribe`. This field is not supported when using
-         # `gpt-4o-transcribe-diarize`.
+         # response_format set to `json` and only with the models `gpt-4o-transcribe`,
+         # `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+         # not supported when using `gpt-4o-transcribe-diarize`.
          include: nil,
          # Optional list of speaker names that correspond to the audio samples provided in
          # `known_speaker_references[]`. Each entry should be a short identifier (for
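
Finally, a sketch of the new dated mini-transcribe snapshot with `logprobs`, which the updated docs above say this model family supports; assumes an API key in `OPENAI_API_KEY` and a local `meeting.mp3`:

require "openai"
require "pathname"

client = OpenAI::Client.new

transcription = client.audio.transcriptions.create(
  file: Pathname("meeting.mp3"),
  model: :"gpt-4o-mini-transcribe-2025-12-15",
  response_format: :json,
  include: [:logprobs]
)
puts transcription.text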