openai 0.41.0 → 0.43.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +25 -0
  3. data/README.md +10 -16
  4. data/lib/openai/internal/util.rb +7 -2
  5. data/lib/openai/models/audio/speech_create_params.rb +12 -10
  6. data/lib/openai/models/audio/speech_model.rb +1 -0
  7. data/lib/openai/models/audio/transcription_create_params.rb +10 -8
  8. data/lib/openai/models/audio_model.rb +1 -0
  9. data/lib/openai/models/chat/chat_completion_audio_param.rb +7 -5
  10. data/lib/openai/models/conversations/conversation_item.rb +1 -1
  11. data/lib/openai/models/conversations/message.rb +1 -1
  12. data/lib/openai/models/realtime/audio_transcription.rb +33 -10
  13. data/lib/openai/models/realtime/realtime_audio_config_output.rb +9 -9
  14. data/lib/openai/models/realtime/realtime_response_create_audio_output.rb +9 -9
  15. data/lib/openai/models/realtime/realtime_session.rb +46 -6
  16. data/lib/openai/models/realtime/realtime_session_create_request.rb +6 -0
  17. data/lib/openai/models/realtime/realtime_session_create_response.rb +6 -0
  18. data/lib/openai/models/responses/input_token_count_params.rb +4 -7
  19. data/lib/openai/models/responses/response.rb +17 -8
  20. data/lib/openai/models/responses/response_compact_params.rb +1 -0
  21. data/lib/openai/models/responses/response_compaction_item.rb +4 -2
  22. data/lib/openai/models/responses/response_compaction_item_param.rb +2 -1
  23. data/lib/openai/models/responses/response_function_call_output_item.rb +1 -1
  24. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +10 -6
  25. data/lib/openai/models/responses/response_function_web_search.rb +11 -3
  26. data/lib/openai/models/responses/response_input_item.rb +1 -0
  27. data/lib/openai/models/responses/response_item.rb +1 -1
  28. data/lib/openai/models/responses/response_output_item.rb +1 -1
  29. data/lib/openai/models/responses/response_output_text.rb +1 -1
  30. data/lib/openai/models/responses/tool.rb +4 -1
  31. data/lib/openai/models/video.rb +3 -3
  32. data/lib/openai/models/video_create_error.rb +7 -2
  33. data/lib/openai/models/video_create_params.rb +3 -3
  34. data/lib/openai/models/video_model.rb +23 -3
  35. data/lib/openai/resources/audio/speech.rb +1 -1
  36. data/lib/openai/resources/images.rb +4 -2
  37. data/lib/openai/resources/responses/input_tokens.rb +1 -1
  38. data/lib/openai/resources/videos.rb +1 -1
  39. data/lib/openai/version.rb +1 -1
  40. data/rbi/openai/models/audio/speech_create_params.rbi +15 -12
  41. data/rbi/openai/models/audio/speech_model.rbi +5 -0
  42. data/rbi/openai/models/audio/transcription_create_params.rbi +15 -12
  43. data/rbi/openai/models/audio_model.rbi +5 -0
  44. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +9 -6
  45. data/rbi/openai/models/conversations/message.rbi +1 -1
  46. data/rbi/openai/models/realtime/audio_transcription.rbi +52 -21
  47. data/rbi/openai/models/realtime/realtime_audio_config_output.rbi +12 -12
  48. data/rbi/openai/models/realtime/realtime_response_create_audio_output.rbi +12 -12
  49. data/rbi/openai/models/realtime/realtime_session.rbi +42 -12
  50. data/rbi/openai/models/realtime/realtime_session_create_request.rbi +10 -0
  51. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +10 -0
  52. data/rbi/openai/models/responses/input_token_count_params.rbi +3 -9
  53. data/rbi/openai/models/responses/response.rbi +18 -8
  54. data/rbi/openai/models/responses/response_compaction_item.rbi +4 -0
  55. data/rbi/openai/models/responses/response_compaction_item_param.rbi +2 -0
  56. data/rbi/openai/models/responses/response_function_call_output_item.rbi +1 -1
  57. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +10 -2
  58. data/rbi/openai/models/responses/response_function_web_search.rbi +13 -2
  59. data/rbi/openai/models/responses/response_output_text.rbi +1 -1
  60. data/rbi/openai/models/responses/tool.rbi +3 -0
  61. data/rbi/openai/models/video.rbi +3 -3
  62. data/rbi/openai/models/video_create_error.rbi +9 -1
  63. data/rbi/openai/models/video_create_params.rbi +4 -4
  64. data/rbi/openai/models/video_model.rbi +8 -5
  65. data/rbi/openai/resources/audio/speech.rbi +5 -4
  66. data/rbi/openai/resources/audio/transcriptions.rbi +12 -10
  67. data/rbi/openai/resources/images.rbi +4 -2
  68. data/rbi/openai/resources/responses/input_tokens.rbi +1 -3
  69. data/rbi/openai/resources/videos.rbi +1 -1
  70. data/sig/openai/models/audio/speech_model.rbs +6 -1
  71. data/sig/openai/models/audio_model.rbs +2 -0
  72. data/sig/openai/models/realtime/audio_transcription.rbs +7 -4
  73. data/sig/openai/models/realtime/realtime_session.rbs +9 -4
  74. data/sig/openai/models/realtime/realtime_session_create_request.rbs +4 -0
  75. data/sig/openai/models/realtime/realtime_session_create_response.rbs +4 -0
  76. data/sig/openai/models/responses/response.rbs +5 -0
  77. data/sig/openai/models/responses/response_function_web_search.rbs +7 -0
  78. data/sig/openai/models/video_model.rbs +5 -4
  79. metadata +16 -2
@@ -13,6 +13,11 @@ module OpenAI
  T.let(:"gpt-4o-transcribe", OpenAI::AudioModel::TaggedSymbol)
  GPT_4O_MINI_TRANSCRIBE =
  T.let(:"gpt-4o-mini-transcribe", OpenAI::AudioModel::TaggedSymbol)
+ GPT_4O_MINI_TRANSCRIBE_2025_12_15 =
+ T.let(
+ :"gpt-4o-mini-transcribe-2025-12-15",
+ OpenAI::AudioModel::TaggedSymbol
+ )
  GPT_4O_TRANSCRIBE_DIARIZE =
  T.let(:"gpt-4o-transcribe-diarize", OpenAI::AudioModel::TaggedSymbol)
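The new `gpt-4o-mini-transcribe-2025-12-15` snapshot can be passed anywhere an `OpenAI::AudioModel` is accepted. A minimal sketch of a transcription call using it; the client setup and file name are illustrative assumptions, not part of this diff:

  require "openai"
  require "pathname"

  # Assumes OPENAI_API_KEY is set in the environment.
  client = OpenAI::Client.new

  # Transcribe a local audio file with the dated mini-transcribe snapshot added in this release.
  transcription = client.audio.transcriptions.create(
    model: :"gpt-4o-mini-transcribe-2025-12-15",
    file: Pathname("speech.mp3")
  )

  puts transcription.text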
@@ -21,8 +21,9 @@ module OpenAI
  end
  attr_accessor :format_

- # The voice the model uses to respond. Supported voices are `alloy`, `ash`,
- # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, `shimmer`,
+ # `marin`, and `cedar`.
  sig do
  returns(
  T.any(
@@ -50,8 +51,9 @@ module OpenAI
  # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`,
  # or `pcm16`.
  format_:,
- # The voice the model uses to respond. Supported voices are `alloy`, `ash`,
- # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, `shimmer`,
+ # `marin`, and `cedar`.
  voice:
  )
  end
@@ -124,8 +126,9 @@ module OpenAI
  end
  end

- # The voice the model uses to respond. Supported voices are `alloy`, `ash`,
- # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, `shimmer`,
+ # `marin`, and `cedar`.
  module Voice
  extend OpenAI::Internal::Type::Union
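These voice docstrings belong to the chat completions audio parameter; `marin` and `cedar` are now documented alongside the existing built-in voices. A hedged sketch of requesting spoken output with one of them, assuming the usual chat completions audio flow (the model name and message are placeholders, and passing the param object shown in this diff is one of several accepted forms):

  # `client` is an OpenAI::Client as in the earlier sketch.
  completion = client.chat.completions.create(
    model: "gpt-4o-audio-preview",
    modalities: [:text, :audio],
    audio: OpenAI::Chat::ChatCompletionAudioParam.new(format_: :wav, voice: :marin),
    messages: [{role: :user, content: "Say hello in one short sentence."}]
  )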
@@ -87,7 +87,7 @@ module OpenAI
  def to_hash
  end

- # A text input to the model.
+ # A content part that makes up an input or output item.
  module Content
  extend OpenAI::Internal::Type::Union
@@ -22,18 +22,28 @@ module OpenAI
  attr_writer :language

  # The model to use for transcription. Current options are `whisper-1`,
- # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
- # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+ # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+ # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+ # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
  sig do
  returns(
- T.nilable(OpenAI::Realtime::AudioTranscription::Model::OrSymbol)
+ T.nilable(
+ T.any(
+ String,
+ OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+ )
+ )
  )
  end
  attr_reader :model

  sig do
  params(
- model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+ model:
+ T.any(
+ String,
+ OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+ )
  ).void
  end
  attr_writer :model
@@ -52,7 +62,11 @@ module OpenAI
  sig do
  params(
  language: String,
- model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol,
+ model:
+ T.any(
+ String,
+ OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+ ),
  prompt: String
  ).returns(T.attached_class)
  end
@@ -62,8 +76,9 @@ module OpenAI
  # format will improve accuracy and latency.
  language: nil,
  # The model to use for transcription. Current options are `whisper-1`,
- # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
- # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+ # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+ # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+ # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
  model: nil,
  # An optional text to guide the model's style or continue a previous audio
  # segment. For `whisper-1`, the
@@ -78,7 +93,11 @@ module OpenAI
  override.returns(
  {
  language: String,
- model: OpenAI::Realtime::AudioTranscription::Model::OrSymbol,
+ model:
+ T.any(
+ String,
+ OpenAI::Realtime::AudioTranscription::Model::OrSymbol
+ ),
  prompt: String
  }
  )
@@ -87,10 +106,27 @@ module OpenAI
  end

  # The model to use for transcription. Current options are `whisper-1`,
- # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
- # Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
+ # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+ # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+ # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
  module Model
- extend OpenAI::Internal::Type::Enum
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ String,
+ OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Realtime::AudioTranscription::Model::Variants]
+ )
+ end
+ def self.variants
+ end

  TaggedSymbol =
  T.type_alias do
@@ -108,6 +144,11 @@ module OpenAI
  :"gpt-4o-mini-transcribe",
  OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
  )
+ GPT_4O_MINI_TRANSCRIBE_2025_12_15 =
+ T.let(
+ :"gpt-4o-mini-transcribe-2025-12-15",
+ OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
+ )
  GPT_4O_TRANSCRIBE =
  T.let(
  :"gpt-4o-transcribe",
@@ -118,16 +159,6 @@ module OpenAI
  :"gpt-4o-transcribe-diarize",
  OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
  )
-
- sig do
- override.returns(
- T::Array[
- OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
- ]
- )
- end
- def self.values
- end
  end
  end
  end
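`AudioTranscription#model` changes from an enum to a union of `String` and the tagged symbols, so arbitrary model identifiers now type-check alongside the named constants. A sketch of constructing the config object directly, based only on the constructor signature shown above (how the object is then wired into a realtime session is out of scope here):

  # Named constant still works:
  OpenAI::Realtime::AudioTranscription.new(
    model: :"gpt-4o-mini-transcribe-2025-12-15",
    language: "en"
  )

  # ...and a plain string is now accepted by the signatures as well:
  OpenAI::Realtime::AudioTranscription.new(
    model: "my-custom-transcribe-model",
    language: "en"
  )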
@@ -51,10 +51,10 @@ module OpenAI
  sig { params(speed: Float).void }
  attr_writer :speed

- # The voice the model uses to respond. Voice cannot be changed during the session
- # once the model has responded with audio at least once. Current voice options are
- # `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
- # and `cedar`. We recommend `marin` and `cedar` for best quality.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+ # `cedar`. Voice cannot be changed during the session once the model has responded
+ # with audio at least once. We recommend `marin` and `cedar` for best quality.
  sig do
  returns(
  T.nilable(
@@ -105,10 +105,10 @@ module OpenAI
  # This parameter is a post-processing adjustment to the audio after it is
  # generated, it's also possible to prompt the model to speak faster or slower.
  speed: nil,
- # The voice the model uses to respond. Voice cannot be changed during the session
- # once the model has responded with audio at least once. Current voice options are
- # `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
- # and `cedar`. We recommend `marin` and `cedar` for best quality.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+ # `cedar`. Voice cannot be changed during the session once the model has responded
+ # with audio at least once. We recommend `marin` and `cedar` for best quality.
  voice: nil
  )
  end
@@ -134,10 +134,10 @@ module OpenAI
  def to_hash
  end

- # The voice the model uses to respond. Voice cannot be changed during the session
- # once the model has responded with audio at least once. Current voice options are
- # `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
- # and `cedar`. We recommend `marin` and `cedar` for best quality.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+ # `cedar`. Voice cannot be changed during the session once the model has responded
+ # with audio at least once. We recommend `marin` and `cedar` for best quality.
  module Voice
  extend OpenAI::Internal::Type::Union
@@ -85,10 +85,10 @@ module OpenAI
  end
  attr_writer :format_

- # The voice the model uses to respond. Voice cannot be changed during the session
- # once the model has responded with audio at least once. Current voice options are
- # `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
- # and `cedar`. We recommend `marin` and `cedar` for best quality.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+ # `cedar`. Voice cannot be changed during the session once the model has responded
+ # with audio at least once.
  sig do
  returns(
  T.nilable(
@@ -130,10 +130,10 @@ module OpenAI
  def self.new(
  # The format of the output audio.
  format_: nil,
- # The voice the model uses to respond. Voice cannot be changed during the session
- # once the model has responded with audio at least once. Current voice options are
- # `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
- # and `cedar`. We recommend `marin` and `cedar` for best quality.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+ # `cedar`. Voice cannot be changed during the session once the model has responded
+ # with audio at least once.
  voice: nil
  )
  end
@@ -158,10 +158,10 @@ module OpenAI
  def to_hash
  end

- # The voice the model uses to respond. Voice cannot be changed during the session
- # once the model has responded with audio at least once. Current voice options are
- # `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
- # and `cedar`. We recommend `marin` and `cedar` for best quality.
+ # The voice the model uses to respond. Supported built-in voices are `alloy`,
+ # `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+ # `cedar`. Voice cannot be changed during the session once the model has responded
+ # with audio at least once.
  module Voice
  extend OpenAI::Internal::Type::Union
@@ -144,12 +144,19 @@ module OpenAI

  # The Realtime model used for this session.
  sig do
- returns(T.nilable(OpenAI::Realtime::RealtimeSession::Model::OrSymbol))
+ returns(
+ T.nilable(
+ T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol)
+ )
+ )
  end
  attr_reader :model

  sig do
- params(model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol).void
+ params(
+ model:
+ T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol)
+ ).void
  end
  attr_writer :model

@@ -318,7 +325,8 @@ module OpenAI
  max_response_output_tokens: T.any(Integer, Symbol),
  modalities:
  T::Array[OpenAI::Realtime::RealtimeSession::Modality::OrSymbol],
- model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol,
+ model:
+ T.any(String, OpenAI::Realtime::RealtimeSession::Model::OrSymbol),
  object: OpenAI::Realtime::RealtimeSession::Object::OrSymbol,
  output_audio_format:
  OpenAI::Realtime::RealtimeSession::OutputAudioFormat::OrSymbol,
@@ -461,7 +469,11 @@ module OpenAI
  max_response_output_tokens: T.any(Integer, Symbol),
  modalities:
  T::Array[OpenAI::Realtime::RealtimeSession::Modality::OrSymbol],
- model: OpenAI::Realtime::RealtimeSession::Model::OrSymbol,
+ model:
+ T.any(
+ String,
+ OpenAI::Realtime::RealtimeSession::Model::OrSymbol
+ ),
  object: OpenAI::Realtime::RealtimeSession::Object::OrSymbol,
  output_audio_format:
  OpenAI::Realtime::RealtimeSession::OutputAudioFormat::OrSymbol,
@@ -659,7 +671,23 @@ module OpenAI

  # The Realtime model used for this session.
  module Model
- extend OpenAI::Internal::Type::Enum
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ String,
+ OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Realtime::RealtimeSession::Model::Variants]
+ )
+ end
+ def self.variants
+ end

  TaggedSymbol =
  T.type_alias do
@@ -717,6 +745,11 @@ module OpenAI
  :"gpt-realtime-mini-2025-10-06",
  OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
  )
+ GPT_REALTIME_MINI_2025_12_15 =
+ T.let(
+ :"gpt-realtime-mini-2025-12-15",
+ OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
+ )
  GPT_AUDIO_MINI =
  T.let(
  :"gpt-audio-mini",
@@ -727,14 +760,11 @@ module OpenAI
  :"gpt-audio-mini-2025-10-06",
  OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
  )
-
- sig do
- override.returns(
- T::Array[OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol]
+ GPT_AUDIO_MINI_2025_12_15 =
+ T.let(
+ :"gpt-audio-mini-2025-12-15",
+ OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol
  )
- end
- def self.values
- end
  end

  # The object type. Always `realtime.session`.
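`RealtimeSession#model` gets the same enum-to-union treatment, and the dated mini snapshots are added as constants. A sketch of constructing the session model with either form, based only on the signatures above (all other session fields are omitted and the objects are shown in isolation):

  # Tagged symbol from the constant list:
  OpenAI::Realtime::RealtimeSession.new(model: :"gpt-realtime-mini-2025-12-15")

  # A plain string now also satisfies the union type:
  OpenAI::Realtime::RealtimeSession.new(model: "gpt-realtime-mini-2025-12-15")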
@@ -550,6 +550,11 @@ module OpenAI
  :"gpt-realtime-mini-2025-10-06",
  OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
  )
+ GPT_REALTIME_MINI_2025_12_15 =
+ T.let(
+ :"gpt-realtime-mini-2025-12-15",
+ OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
+ )
  GPT_AUDIO_MINI =
  T.let(
  :"gpt-audio-mini",
@@ -560,6 +565,11 @@ module OpenAI
  :"gpt-audio-mini-2025-10-06",
  OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
  )
+ GPT_AUDIO_MINI_2025_12_15 =
+ T.let(
+ :"gpt-audio-mini-2025-12-15",
+ OpenAI::Realtime::RealtimeSessionCreateRequest::Model::TaggedSymbol
+ )
  end

  module OutputModality
@@ -1366,6 +1366,11 @@ module OpenAI
  :"gpt-realtime-mini-2025-10-06",
  OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
  )
+ GPT_REALTIME_MINI_2025_12_15 =
+ T.let(
+ :"gpt-realtime-mini-2025-12-15",
+ OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
+ )
  GPT_AUDIO_MINI =
  T.let(
  :"gpt-audio-mini",
@@ -1376,6 +1381,11 @@ module OpenAI
  :"gpt-audio-mini-2025-10-06",
  OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
  )
+ GPT_AUDIO_MINI_2025_12_15 =
+ T.let(
+ :"gpt-audio-mini-2025-12-15",
+ OpenAI::Realtime::RealtimeSessionCreateResponse::Model::TaggedSymbol
+ )
  end

  module OutputModality
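The same snapshot constants are mirrored on the session create request and response models; each constant is simply the corresponding symbol. The constant lookup below follows directly from the definitions above, while the minimal request object is a hedged sketch that omits any fields outside this diff:

  OpenAI::Realtime::RealtimeSessionCreateRequest::Model::GPT_REALTIME_MINI_2025_12_15
  # => :"gpt-realtime-mini-2025-12-15"

  # Minimal request object using the new snapshot (other fields left at their defaults):
  OpenAI::Realtime::RealtimeSessionCreateRequest.new(
    model: :"gpt-realtime-mini-2025-12-15"
  )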
@@ -88,9 +88,7 @@ module OpenAI
  end
  attr_writer :text

- # How the model should select which tool (or tools) to use when generating a
- # response. See the `tools` parameter to see how to specify which tools the model
- # can call.
+ # Controls which tool the model should use, if any.
  sig do
  returns(
  T.nilable(
@@ -249,9 +247,7 @@ module OpenAI
  # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
  # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
  text: nil,
- # How the model should select which tool (or tools) to use when generating a
- # response. See the `tools` parameter to see how to specify which tools the model
- # can call.
+ # Controls which tool the model should use, if any.
  tool_choice: nil,
  # An array of tools the model may call while generating a response. You can
  # specify which tool to use by setting the `tool_choice` parameter.
@@ -540,9 +536,7 @@ module OpenAI
  end
  end

- # How the model should select which tool (or tools) to use when generating a
- # response. See the `tools` parameter to see how to specify which tools the model
- # can call.
+ # Controls which tool the model should use, if any.
  module ToolChoice
  extend OpenAI::Internal::Type::Union
@@ -139,8 +139,13 @@ module OpenAI
  sig { returns(T.nilable(T::Boolean)) }
  attr_accessor :background

- # The conversation that this response belongs to. Input items and output items
- # from this response are automatically added to this conversation.
+ # Unix timestamp (in seconds) of when this Response was completed. Only present
+ # when the status is `completed`.
+ sig { returns(T.nilable(Float)) }
+ attr_accessor :completed_at
+
+ # The conversation that this response belonged to. Input items and output items
+ # from this response were automatically added to this conversation.
  sig { returns(T.nilable(OpenAI::Responses::Response::Conversation)) }
  attr_reader :conversation

@@ -380,6 +385,7 @@ module OpenAI
  ],
  top_p: T.nilable(Float),
  background: T.nilable(T::Boolean),
+ completed_at: T.nilable(Float),
  conversation:
  T.nilable(OpenAI::Responses::Response::Conversation::OrHash),
  max_output_tokens: T.nilable(Integer),
@@ -481,8 +487,11 @@ module OpenAI
  # Whether to run the model response in the background.
  # [Learn more](https://platform.openai.com/docs/guides/background).
  background: nil,
- # The conversation that this response belongs to. Input items and output items
- # from this response are automatically added to this conversation.
+ # Unix timestamp (in seconds) of when this Response was completed. Only present
+ # when the status is `completed`.
+ completed_at: nil,
+ # The conversation that this response belonged to. Input items and output items
+ # from this response were automatically added to this conversation.
  conversation: nil,
  # An upper bound for the number of tokens that can be generated for a response,
  # including visible output tokens and
@@ -592,6 +601,7 @@ module OpenAI
  tools: T::Array[OpenAI::Responses::Tool::Variants],
  top_p: T.nilable(Float),
  background: T.nilable(T::Boolean),
+ completed_at: T.nilable(Float),
  conversation:
  T.nilable(OpenAI::Responses::Response::Conversation),
  max_output_tokens: T.nilable(Integer),
@@ -782,15 +792,15 @@ module OpenAI
  )
  end

- # The unique ID of the conversation.
+ # The unique ID of the conversation that this response was associated with.
  sig { returns(String) }
  attr_accessor :id

- # The conversation that this response belongs to. Input items and output items
- # from this response are automatically added to this conversation.
+ # The conversation that this response belonged to. Input items and output items
+ # from this response were automatically added to this conversation.
  sig { params(id: String).returns(T.attached_class) }
  def self.new(
- # The unique ID of the conversation.
+ # The unique ID of the conversation that this response was associated with.
  id:
  )
  end
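`Response#completed_at` is a new nullable Unix timestamp that is only populated once the response reaches `completed`. A sketch of reading it after a Responses API call; the model name and prompt are placeholders, and the presence check mirrors the docstring above:

  # `client` is an OpenAI::Client as in the earlier sketch.
  response = client.responses.create(model: "gpt-4.1", input: "Write a haiku about Ruby.")

  if response.completed_at
    # completed_at is a Unix timestamp in seconds.
    puts "Completed at #{Time.at(response.completed_at)}"
  end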
@@ -16,6 +16,7 @@ module OpenAI
  sig { returns(String) }
  attr_accessor :id

+ # The encrypted content that was produced by compaction.
  sig { returns(String) }
  attr_accessor :encrypted_content

@@ -23,6 +24,7 @@ module OpenAI
  sig { returns(Symbol) }
  attr_accessor :type

+ # The identifier of the actor that created the item.
  sig { returns(T.nilable(String)) }
  attr_reader :created_by

@@ -42,7 +44,9 @@ module OpenAI
  def self.new(
  # The unique ID of the compaction item.
  id:,
+ # The encrypted content that was produced by compaction.
  encrypted_content:,
+ # The identifier of the actor that created the item.
  created_by: nil,
  # The type of the item. Always `compaction`.
  type: :compaction
@@ -12,6 +12,7 @@ module OpenAI
  )
  end

+ # The encrypted content of the compaction summary.
  sig { returns(String) }
  attr_accessor :encrypted_content

@@ -33,6 +34,7 @@ module OpenAI
  ).returns(T.attached_class)
  end
  def self.new(
+ # The encrypted content of the compaction summary.
  encrypted_content:,
  # The ID of the compaction item.
  id: nil,
@@ -3,7 +3,7 @@
  module OpenAI
  module Models
  module Responses
- # A text input to the model.
+ # A piece of message content, such as text, an image, or a file.
  module ResponseFunctionCallOutputItem
  extend OpenAI::Internal::Type::Union
@@ -40,13 +40,14 @@ module OpenAI
  sig { returns(Symbol) }
  attr_accessor :type

+ # The identifier of the actor that created the item.
  sig { returns(T.nilable(String)) }
  attr_reader :created_by

  sig { params(created_by: String).void }
  attr_writer :created_by

- # The output of a shell tool call.
+ # The output of a shell tool call that was emitted.
  sig do
  params(
  id: String,
@@ -71,6 +72,7 @@ module OpenAI
  max_output_length:,
  # An array of shell call output contents
  output:,
+ # The identifier of the actor that created the item.
  created_by: nil,
  # The type of the shell call output. Always `shell_call_output`.
  type: :shell_call_output
@@ -113,19 +115,22 @@ module OpenAI
  end
  attr_accessor :outcome

+ # The standard error output that was captured.
  sig { returns(String) }
  attr_accessor :stderr

+ # The standard output that was captured.
  sig { returns(String) }
  attr_accessor :stdout

+ # The identifier of the actor that created the item.
  sig { returns(T.nilable(String)) }
  attr_reader :created_by

  sig { params(created_by: String).void }
  attr_writer :created_by

- # The content of a shell call output.
+ # The content of a shell tool call output that was emitted.
  sig do
  params(
  outcome:
@@ -142,8 +147,11 @@ module OpenAI
  # Represents either an exit outcome (with an exit code) or a timeout outcome for a
  # shell call output chunk.
  outcome:,
+ # The standard error output that was captured.
  stderr:,
+ # The standard output that was captured.
  stdout:,
+ # The identifier of the actor that created the item.
  created_by: nil
  )
  end