openai 0.41.0 → 0.43.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +25 -0
  3. data/README.md +10 -16
  4. data/lib/openai/internal/util.rb +7 -2
  5. data/lib/openai/models/audio/speech_create_params.rb +12 -10
  6. data/lib/openai/models/audio/speech_model.rb +1 -0
  7. data/lib/openai/models/audio/transcription_create_params.rb +10 -8
  8. data/lib/openai/models/audio_model.rb +1 -0
  9. data/lib/openai/models/chat/chat_completion_audio_param.rb +7 -5
  10. data/lib/openai/models/conversations/conversation_item.rb +1 -1
  11. data/lib/openai/models/conversations/message.rb +1 -1
  12. data/lib/openai/models/realtime/audio_transcription.rb +33 -10
  13. data/lib/openai/models/realtime/realtime_audio_config_output.rb +9 -9
  14. data/lib/openai/models/realtime/realtime_response_create_audio_output.rb +9 -9
  15. data/lib/openai/models/realtime/realtime_session.rb +46 -6
  16. data/lib/openai/models/realtime/realtime_session_create_request.rb +6 -0
  17. data/lib/openai/models/realtime/realtime_session_create_response.rb +6 -0
  18. data/lib/openai/models/responses/input_token_count_params.rb +4 -7
  19. data/lib/openai/models/responses/response.rb +17 -8
  20. data/lib/openai/models/responses/response_compact_params.rb +1 -0
  21. data/lib/openai/models/responses/response_compaction_item.rb +4 -2
  22. data/lib/openai/models/responses/response_compaction_item_param.rb +2 -1
  23. data/lib/openai/models/responses/response_function_call_output_item.rb +1 -1
  24. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +10 -6
  25. data/lib/openai/models/responses/response_function_web_search.rb +11 -3
  26. data/lib/openai/models/responses/response_input_item.rb +1 -0
  27. data/lib/openai/models/responses/response_item.rb +1 -1
  28. data/lib/openai/models/responses/response_output_item.rb +1 -1
  29. data/lib/openai/models/responses/response_output_text.rb +1 -1
  30. data/lib/openai/models/responses/tool.rb +4 -1
  31. data/lib/openai/models/video.rb +3 -3
  32. data/lib/openai/models/video_create_error.rb +7 -2
  33. data/lib/openai/models/video_create_params.rb +3 -3
  34. data/lib/openai/models/video_model.rb +23 -3
  35. data/lib/openai/resources/audio/speech.rb +1 -1
  36. data/lib/openai/resources/images.rb +4 -2
  37. data/lib/openai/resources/responses/input_tokens.rb +1 -1
  38. data/lib/openai/resources/videos.rb +1 -1
  39. data/lib/openai/version.rb +1 -1
  40. data/rbi/openai/models/audio/speech_create_params.rbi +15 -12
  41. data/rbi/openai/models/audio/speech_model.rbi +5 -0
  42. data/rbi/openai/models/audio/transcription_create_params.rbi +15 -12
  43. data/rbi/openai/models/audio_model.rbi +5 -0
  44. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +9 -6
  45. data/rbi/openai/models/conversations/message.rbi +1 -1
  46. data/rbi/openai/models/realtime/audio_transcription.rbi +52 -21
  47. data/rbi/openai/models/realtime/realtime_audio_config_output.rbi +12 -12
  48. data/rbi/openai/models/realtime/realtime_response_create_audio_output.rbi +12 -12
  49. data/rbi/openai/models/realtime/realtime_session.rbi +42 -12
  50. data/rbi/openai/models/realtime/realtime_session_create_request.rbi +10 -0
  51. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +10 -0
  52. data/rbi/openai/models/responses/input_token_count_params.rbi +3 -9
  53. data/rbi/openai/models/responses/response.rbi +18 -8
  54. data/rbi/openai/models/responses/response_compaction_item.rbi +4 -0
  55. data/rbi/openai/models/responses/response_compaction_item_param.rbi +2 -0
  56. data/rbi/openai/models/responses/response_function_call_output_item.rbi +1 -1
  57. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +10 -2
  58. data/rbi/openai/models/responses/response_function_web_search.rbi +13 -2
  59. data/rbi/openai/models/responses/response_output_text.rbi +1 -1
  60. data/rbi/openai/models/responses/tool.rbi +3 -0
  61. data/rbi/openai/models/video.rbi +3 -3
  62. data/rbi/openai/models/video_create_error.rbi +9 -1
  63. data/rbi/openai/models/video_create_params.rbi +4 -4
  64. data/rbi/openai/models/video_model.rbi +8 -5
  65. data/rbi/openai/resources/audio/speech.rbi +5 -4
  66. data/rbi/openai/resources/audio/transcriptions.rbi +12 -10
  67. data/rbi/openai/resources/images.rbi +4 -2
  68. data/rbi/openai/resources/responses/input_tokens.rbi +1 -3
  69. data/rbi/openai/resources/videos.rbi +1 -1
  70. data/sig/openai/models/audio/speech_model.rbs +6 -1
  71. data/sig/openai/models/audio_model.rbs +2 -0
  72. data/sig/openai/models/realtime/audio_transcription.rbs +7 -4
  73. data/sig/openai/models/realtime/realtime_session.rbs +9 -4
  74. data/sig/openai/models/realtime/realtime_session_create_request.rbs +4 -0
  75. data/sig/openai/models/realtime/realtime_session_create_response.rbs +4 -0
  76. data/sig/openai/models/responses/response.rbs +5 -0
  77. data/sig/openai/models/responses/response_function_web_search.rbs +7 -0
  78. data/sig/openai/models/video_model.rbs +5 -4
  79. metadata +16 -2
data/rbi/openai/models/responses/response_function_web_search.rbi CHANGED
@@ -113,7 +113,7 @@ module OpenAI
          )
        end

-        # The search query.
+        # [DEPRECATED] The search query.
        sig { returns(String) }
        attr_accessor :query

@@ -121,6 +121,13 @@ module OpenAI
        sig { returns(Symbol) }
        attr_accessor :type

+        # The search queries.
+        sig { returns(T.nilable(T::Array[String])) }
+        attr_reader :queries
+
+        sig { params(queries: T::Array[String]).void }
+        attr_writer :queries
+
        # The sources used in the search.
        sig do
          returns(
@@ -147,6 +154,7 @@ module OpenAI
        sig do
          params(
            query: String,
+            queries: T::Array[String],
            sources:
              T::Array[
                OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::Source::OrHash
@@ -155,8 +163,10 @@ module OpenAI
          ).returns(T.attached_class)
        end
        def self.new(
-          # The search query.
+          # [DEPRECATED] The search query.
          query:,
+          # The search queries.
+          queries: nil,
          # The sources used in the search.
          sources: nil,
          # The action type.
@@ -169,6 +179,7 @@ module OpenAI
          {
            query: String,
            type: Symbol,
+            queries: T::Array[String],
            sources:
              T::Array[
                OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::Source
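
With this change, `Action::Search` carries a plural `queries` array alongside the now-deprecated singular `query`. A minimal consumption sketch in Ruby, assuming `web_search_call` is a `ResponseFunctionWebSearch` item taken from a response's output:

    action = web_search_call.action            # an Action::Search
    # Prefer the new plural field; `queries` is nilable, so fall back to the
    # deprecated `query` for payloads that predate this change.
    search_terms = action.queries || [action.query]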
data/rbi/openai/models/responses/response_output_text.rbi CHANGED
@@ -104,7 +104,7 @@ module OpenAI
        def to_hash
        end

-      # A citation to a file.
+      # An annotation that applies to a span of output text.
      module Annotation
        extend OpenAI::Internal::Type::Union

data/rbi/openai/models/responses/tool.rbi CHANGED
@@ -718,6 +718,7 @@ module OpenAI
          sig { params(file_ids: T::Array[String]).void }
          attr_writer :file_ids

+          # The memory limit for the code interpreter container.
          sig do
            returns(
              T.nilable(
@@ -742,6 +743,7 @@ module OpenAI
          def self.new(
            # An optional list of uploaded files to make available to your code.
            file_ids: nil,
+            # The memory limit for the code interpreter container.
            memory_limit: nil,
            # Always `auto`.
            type: :auto
@@ -763,6 +765,7 @@ module OpenAI
          def to_hash
          end

+          # The memory limit for the code interpreter container.
          module MemoryLimit
            extend OpenAI::Internal::Type::Enum

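
The code interpreter's `auto` container gains a documented `memory_limit` field backed by a `MemoryLimit` enum. A hedged sketch of supplying it in hash form; the `"1g"` value and the file ID below are hypothetical placeholders, since the enum's allowed values are not shown in this hunk:

    # Hypothetical code interpreter tool configuration using the new field.
    tool = {
      type: :code_interpreter,
      container: {
        type: :auto,
        file_ids: ["file-abc123"],  # placeholder ID
        memory_limit: "1g"          # hypothetical value; see the MemoryLimit enum
      }
    }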
data/rbi/openai/models/video.rbi CHANGED
@@ -29,7 +29,7 @@ module OpenAI
      attr_accessor :expires_at

      # The video generation model that produced the job.
-      sig { returns(OpenAI::VideoModel::TaggedSymbol) }
+      sig { returns(OpenAI::VideoModel::Variants) }
      attr_accessor :model

      # The object type, which is always `video`.
@@ -68,7 +68,7 @@ module OpenAI
          created_at: Integer,
          error: T.nilable(OpenAI::VideoCreateError::OrHash),
          expires_at: T.nilable(Integer),
-          model: OpenAI::VideoModel::OrSymbol,
+          model: T.any(String, OpenAI::VideoModel::OrSymbol),
          progress: Integer,
          prompt: T.nilable(String),
          remixed_from_video_id: T.nilable(String),
@@ -116,7 +116,7 @@ module OpenAI
          created_at: Integer,
          error: T.nilable(OpenAI::VideoCreateError),
          expires_at: T.nilable(Integer),
-          model: OpenAI::VideoModel::TaggedSymbol,
+          model: OpenAI::VideoModel::Variants,
          object: Symbol,
          progress: Integer,
          prompt: T.nilable(String),
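
Since `Video#model` is now typed as `VideoModel::Variants`, readers get either a tagged `Symbol` (a model the SDK knows about) or a plain `String` (any other model ID). A small normalization sketch, assuming `video` is a retrieved `OpenAI::Models::Video`:

    # Known models arrive as symbols, unrecognized model IDs as strings.
    model_id = video.model.is_a?(Symbol) ? video.model.to_s : video.model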
data/rbi/openai/models/video_create_error.rbi CHANGED
@@ -8,14 +8,22 @@ module OpenAI
          T.any(OpenAI::VideoCreateError, OpenAI::Internal::AnyHash)
        end

+      # A machine-readable error code that was returned.
      sig { returns(String) }
      attr_accessor :code

+      # A human-readable description of the error that was returned.
      sig { returns(String) }
      attr_accessor :message

+      # An error that occurred while generating the response.
      sig { params(code: String, message: String).returns(T.attached_class) }
-      def self.new(code:, message:)
+      def self.new(
+        # A machine-readable error code that was returned.
+        code:,
+        # A human-readable description of the error that was returned.
+        message:
+      )
      end

      sig { override.returns({ code: String, message: String }) }
data/rbi/openai/models/video_create_params.rbi CHANGED
@@ -24,10 +24,10 @@ module OpenAI

      # The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
      # to `sora-2`.
-      sig { returns(T.nilable(OpenAI::VideoModel::OrSymbol)) }
+      sig { returns(T.nilable(T.any(String, OpenAI::VideoModel::OrSymbol))) }
      attr_reader :model

-      sig { params(model: OpenAI::VideoModel::OrSymbol).void }
+      sig { params(model: T.any(String, OpenAI::VideoModel::OrSymbol)).void }
      attr_writer :model

      # Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
@@ -49,7 +49,7 @@ module OpenAI
        params(
          prompt: String,
          input_reference: OpenAI::Internal::FileInput,
-          model: OpenAI::VideoModel::OrSymbol,
+          model: T.any(String, OpenAI::VideoModel::OrSymbol),
          seconds: OpenAI::VideoSeconds::OrSymbol,
          size: OpenAI::VideoSize::OrSymbol,
          request_options: OpenAI::RequestOptions::OrHash
@@ -77,7 +77,7 @@ module OpenAI
        {
          prompt: String,
          input_reference: OpenAI::Internal::FileInput,
-          model: OpenAI::VideoModel::OrSymbol,
+          model: T.any(String, OpenAI::VideoModel::OrSymbol),
          seconds: OpenAI::VideoSeconds::OrSymbol,
          size: OpenAI::VideoSize::OrSymbol,
          request_options: OpenAI::RequestOptions
data/rbi/openai/models/video_model.rbi CHANGED
@@ -3,7 +3,14 @@
 module OpenAI
   module Models
     module VideoModel
-      extend OpenAI::Internal::Type::Enum
+      extend OpenAI::Internal::Type::Union
+
+      Variants =
+        T.type_alias { T.any(String, OpenAI::VideoModel::TaggedSymbol) }
+
+      sig { override.returns(T::Array[OpenAI::VideoModel::Variants]) }
+      def self.variants
+      end

       TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::VideoModel) }
       OrSymbol = T.type_alias { T.any(Symbol, String) }
@@ -16,10 +23,6 @@ module OpenAI
         T.let(:"sora-2-pro-2025-10-06", OpenAI::VideoModel::TaggedSymbol)
       SORA_2_2025_12_08 =
         T.let(:"sora-2-2025-12-08", OpenAI::VideoModel::TaggedSymbol)
-
-      sig { override.returns(T::Array[OpenAI::VideoModel::TaggedSymbol]) }
-      def self.values
-      end
     end
   end
 end
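
With `VideoModel` converted from an `Enum` to a `Union`, call sites may pass either the enum symbols or an arbitrary model ID string. A sketch, assuming the gem's standard `client.videos.create` resource method:

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
    # Enum symbols still type-check as before...
    video = client.videos.create(prompt: "A calico cat on a skateboard", model: :"sora-2")
    # ...and raw model ID strings are now accepted too:
    video = client.videos.create(prompt: "A calico cat on a skateboard", model: "sora-2-pro-2025-10-06")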
data/rbi/openai/resources/audio/speech.rbi CHANGED
@@ -24,11 +24,12 @@ module OpenAI
      # The text to generate audio for. The maximum length is 4096 characters.
      input:,
      # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-      # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+      # `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
      model:,
-      # The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
-      # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
-      # `verse`. Previews of the voices are available in the
+      # The voice to use when generating the audio. Supported built-in voices are
+      # `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+      # `shimmer`, `verse`, `marin`, and `cedar`. Previews of the voices are available
+      # in the
      # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
      voice:,
      # Control the voice of your generated audio with additional instructions. Does not
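
A short usage sketch exercising the newly documented snapshot model and one of the new built-in voices (assuming the gem's `client.audio.speech.create` method; the return value's exact type is not shown in this hunk):

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
    audio = client.audio.speech.create(
      model: :"gpt-4o-mini-tts-2025-12-15",
      voice: :marin,  # one of the newly listed built-in voices
      input: "The quick brown fox jumped over the lazy dog."
    )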
data/rbi/openai/resources/audio/transcriptions.rbi CHANGED
@@ -41,8 +41,9 @@ module OpenAI
      # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
      file:,
      # ID of the model to use. The options are `gpt-4o-transcribe`,
-      # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-      # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+      # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+      # (which is powered by our open source Whisper V2 model), and
+      # `gpt-4o-transcribe-diarize`.
      model:,
      # Controls how the audio is cut into chunks. When set to `"auto"`, the server
      # first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -54,9 +55,9 @@ module OpenAI
      # Additional information to include in the transcription response. `logprobs` will
      # return the log probabilities of the tokens in the response to understand the
      # model's confidence in the transcription. `logprobs` only works with
-      # response_format set to `json` and only with the models `gpt-4o-transcribe` and
-      # `gpt-4o-mini-transcribe`. This field is not supported when using
-      # `gpt-4o-transcribe-diarize`.
+      # response_format set to `json` and only with the models `gpt-4o-transcribe`,
+      # `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+      # not supported when using `gpt-4o-transcribe-diarize`.
      include: nil,
      # Optional list of speaker names that correspond to the audio samples provided in
      # `known_speaker_references[]`. Each entry should be a short identifier (for
@@ -143,8 +144,9 @@ module OpenAI
      # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
      file:,
      # ID of the model to use. The options are `gpt-4o-transcribe`,
-      # `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source
-      # Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+      # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+      # (which is powered by our open source Whisper V2 model), and
+      # `gpt-4o-transcribe-diarize`.
      model:,
      # Controls how the audio is cut into chunks. When set to `"auto"`, the server
      # first normalizes loudness and then uses voice activity detection (VAD) to choose
@@ -156,9 +158,9 @@ module OpenAI
      # Additional information to include in the transcription response. `logprobs` will
      # return the log probabilities of the tokens in the response to understand the
      # model's confidence in the transcription. `logprobs` only works with
-      # response_format set to `json` and only with the models `gpt-4o-transcribe` and
-      # `gpt-4o-mini-transcribe`. This field is not supported when using
-      # `gpt-4o-transcribe-diarize`.
+      # response_format set to `json` and only with the models `gpt-4o-transcribe`,
+      # `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+      # not supported when using `gpt-4o-transcribe-diarize`.
      include: nil,
      # Optional list of speaker names that correspond to the audio samples provided in
      # `known_speaker_references[]`. Each entry should be a short identifier (for
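
A transcription sketch using the new dated snapshot together with `logprobs`, which per the updated docs works with the `gpt-4o` transcribe family but not `gpt-4o-transcribe-diarize` (method names and symbol coercion assumed from the gem's conventions):

    require "pathname"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
    transcription = client.audio.transcriptions.create(
      file: Pathname.new("speech.mp3"),
      model: :"gpt-4o-mini-transcribe-2025-12-15",
      response_format: :json,
      include: [:logprobs]  # unsupported with gpt-4o-transcribe-diarize
    )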
data/rbi/openai/resources/images.rbi CHANGED
@@ -45,7 +45,8 @@ module OpenAI
    # See {OpenAI::Resources::Images#edit_stream_raw} for streaming counterpart.
    #
    # Creates an edited or extended image given one or more source images and a
-    # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
+    # prompt. This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
+    # and `gpt-image-1-mini`) and `dall-e-2`.
    sig do
      params(
        image: OpenAI::ImageEditParams::Image::Variants,
@@ -148,7 +149,8 @@ module OpenAI
    # See {OpenAI::Resources::Images#edit} for non-streaming counterpart.
    #
    # Creates an edited or extended image given one or more source images and a
-    # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
+    # prompt. This endpoint supports GPT Image models (`gpt-image-1.5`, `gpt-image-1`,
+    # and `gpt-image-1-mini`) and `dall-e-2`.
    sig do
      params(
        image: OpenAI::ImageEditParams::Image::Variants,
data/rbi/openai/resources/responses/input_tokens.rbi CHANGED
@@ -97,9 +97,7 @@ module OpenAI
      # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
      # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
      text: nil,
-      # How the model should select which tool (or tools) to use when generating a
-      # response. See the `tools` parameter to see how to specify which tools the model
-      # can call.
+      # Controls which tool the model should use, if any.
      tool_choice: nil,
      # An array of tools the model may call while generating a response. You can
      # specify which tool to use by setting the `tool_choice` parameter.
data/rbi/openai/resources/videos.rbi CHANGED
@@ -8,7 +8,7 @@ module OpenAI
      params(
        prompt: String,
        input_reference: OpenAI::Internal::FileInput,
-        model: OpenAI::VideoModel::OrSymbol,
+        model: T.any(String, OpenAI::VideoModel::OrSymbol),
        seconds: OpenAI::VideoSeconds::OrSymbol,
        size: OpenAI::VideoSize::OrSymbol,
        request_options: OpenAI::RequestOptions::OrHash
data/sig/openai/models/audio/speech_model.rbs CHANGED
@@ -1,7 +1,11 @@
 module OpenAI
   module Models
     module Audio
-      type speech_model = :"tts-1" | :"tts-1-hd" | :"gpt-4o-mini-tts"
+      type speech_model =
+        :"tts-1"
+        | :"tts-1-hd"
+        | :"gpt-4o-mini-tts"
+        | :"gpt-4o-mini-tts-2025-12-15"

       module SpeechModel
         extend OpenAI::Internal::Type::Enum
@@ -9,6 +13,7 @@ module OpenAI
         TTS_1: :"tts-1"
         TTS_1_HD: :"tts-1-hd"
         GPT_4O_MINI_TTS: :"gpt-4o-mini-tts"
+        GPT_4O_MINI_TTS_2025_12_15: :"gpt-4o-mini-tts-2025-12-15"

         def self?.values: -> ::Array[OpenAI::Models::Audio::speech_model]
       end
data/sig/openai/models/audio_model.rbs CHANGED
@@ -4,6 +4,7 @@ module OpenAI
      :"whisper-1"
      | :"gpt-4o-transcribe"
      | :"gpt-4o-mini-transcribe"
+      | :"gpt-4o-mini-transcribe-2025-12-15"
      | :"gpt-4o-transcribe-diarize"

    module AudioModel
@@ -12,6 +13,7 @@ module OpenAI
      WHISPER_1: :"whisper-1"
      GPT_4O_TRANSCRIBE: :"gpt-4o-transcribe"
      GPT_4O_MINI_TRANSCRIBE: :"gpt-4o-mini-transcribe"
+      GPT_4O_MINI_TRANSCRIBE_2025_12_15: :"gpt-4o-mini-transcribe-2025-12-15"
      GPT_4O_TRANSCRIBE_DIARIZE: :"gpt-4o-transcribe-diarize"

      def self?.values: -> ::Array[OpenAI::Models::audio_model]
data/sig/openai/models/realtime/audio_transcription.rbs CHANGED
@@ -36,20 +36,23 @@ module OpenAI
      }

      type model =
-        :"whisper-1"
+        String
+        | :"whisper-1"
        | :"gpt-4o-mini-transcribe"
+        | :"gpt-4o-mini-transcribe-2025-12-15"
        | :"gpt-4o-transcribe"
        | :"gpt-4o-transcribe-diarize"

      module Model
-        extend OpenAI::Internal::Type::Enum
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Realtime::AudioTranscription::model]

        WHISPER_1: :"whisper-1"
        GPT_4O_MINI_TRANSCRIBE: :"gpt-4o-mini-transcribe"
+        GPT_4O_MINI_TRANSCRIBE_2025_12_15: :"gpt-4o-mini-transcribe-2025-12-15"
        GPT_4O_TRANSCRIBE: :"gpt-4o-transcribe"
        GPT_4O_TRANSCRIBE_DIARIZE: :"gpt-4o-transcribe-diarize"
-
-        def self?.values: -> ::Array[OpenAI::Models::Realtime::AudioTranscription::model]
      end
    end

data/sig/openai/models/realtime/realtime_session.rbs CHANGED
@@ -221,7 +221,8 @@ module OpenAI
      end

      type model =
-        :"gpt-realtime"
+        String
+        | :"gpt-realtime"
        | :"gpt-realtime-2025-08-28"
        | :"gpt-4o-realtime-preview"
        | :"gpt-4o-realtime-preview-2024-10-01"
@@ -231,11 +232,15 @@ module OpenAI
        | :"gpt-4o-mini-realtime-preview-2024-12-17"
        | :"gpt-realtime-mini"
        | :"gpt-realtime-mini-2025-10-06"
+        | :"gpt-realtime-mini-2025-12-15"
        | :"gpt-audio-mini"
        | :"gpt-audio-mini-2025-10-06"
+        | :"gpt-audio-mini-2025-12-15"

      module Model
-        extend OpenAI::Internal::Type::Enum
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::model]

        GPT_REALTIME: :"gpt-realtime"
        GPT_REALTIME_2025_08_28: :"gpt-realtime-2025-08-28"
@@ -247,10 +252,10 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17: :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI: :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06: :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15: :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI: :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06: :"gpt-audio-mini-2025-10-06"
-
-        def self?.values: -> ::Array[OpenAI::Models::Realtime::RealtimeSession::model]
+        GPT_AUDIO_MINI_2025_12_15: :"gpt-audio-mini-2025-12-15"
      end

      type object = :"realtime.session"
data/sig/openai/models/realtime/realtime_session_create_request.rbs CHANGED
@@ -136,8 +136,10 @@ module OpenAI
        | :"gpt-4o-mini-realtime-preview-2024-12-17"
        | :"gpt-realtime-mini"
        | :"gpt-realtime-mini-2025-10-06"
+        | :"gpt-realtime-mini-2025-12-15"
        | :"gpt-audio-mini"
        | :"gpt-audio-mini-2025-10-06"
+        | :"gpt-audio-mini-2025-12-15"

      module Model
        extend OpenAI::Internal::Type::Union
@@ -154,8 +156,10 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17: :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI: :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06: :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15: :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI: :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06: :"gpt-audio-mini-2025-10-06"
+        GPT_AUDIO_MINI_2025_12_15: :"gpt-audio-mini-2025-12-15"
      end

      type output_modality = :text | :audio
data/sig/openai/models/realtime/realtime_session_create_response.rbs CHANGED
@@ -423,8 +423,10 @@ module OpenAI
        | :"gpt-4o-mini-realtime-preview-2024-12-17"
        | :"gpt-realtime-mini"
        | :"gpt-realtime-mini-2025-10-06"
+        | :"gpt-realtime-mini-2025-12-15"
        | :"gpt-audio-mini"
        | :"gpt-audio-mini-2025-10-06"
+        | :"gpt-audio-mini-2025-12-15"

      module Model
        extend OpenAI::Internal::Type::Union
@@ -441,8 +443,10 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17: :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI: :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06: :"gpt-realtime-mini-2025-10-06"
+        GPT_REALTIME_MINI_2025_12_15: :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI: :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06: :"gpt-audio-mini-2025-10-06"
+        GPT_AUDIO_MINI_2025_12_15: :"gpt-audio-mini-2025-12-15"
      end

      type output_modality = :text | :audio
data/sig/openai/models/responses/response.rbs CHANGED
@@ -18,6 +18,7 @@ module OpenAI
      tools: ::Array[OpenAI::Models::Responses::tool],
      top_p: Float?,
      background: bool?,
+      completed_at: Float?,
      conversation: OpenAI::Responses::Response::Conversation?,
      max_output_tokens: Integer?,
      max_tool_calls: Integer?,
@@ -67,6 +68,8 @@ module OpenAI

    attr_accessor background: bool?

+    attr_accessor completed_at: Float?
+
    attr_accessor conversation: OpenAI::Responses::Response::Conversation?

    attr_accessor max_output_tokens: Integer?
@@ -132,6 +135,7 @@ module OpenAI
      tools: ::Array[OpenAI::Models::Responses::tool],
      top_p: Float?,
      ?background: bool?,
+      ?completed_at: Float?,
      ?conversation: OpenAI::Responses::Response::Conversation?,
      ?max_output_tokens: Integer?,
      ?max_tool_calls: Integer?,
@@ -167,6 +171,7 @@ module OpenAI
      tools: ::Array[OpenAI::Models::Responses::tool],
      top_p: Float?,
      background: bool?,
+      completed_at: Float?,
      conversation: OpenAI::Responses::Response::Conversation?,
      max_output_tokens: Integer?,
      max_tool_calls: Integer?,
data/sig/openai/models/responses/response_function_web_search.rbs CHANGED
@@ -44,6 +44,7 @@ module OpenAI
      {
        query: String,
        type: :search,
+        queries: ::Array[String],
        sources: ::Array[OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::Source]
      }

@@ -52,6 +53,10 @@ module OpenAI

      attr_accessor type: :search

+      attr_reader queries: ::Array[String]?
+
+      def queries=: (::Array[String]) -> ::Array[String]
+
      attr_reader sources: ::Array[OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::Source]?

      def sources=: (
@@ -60,6 +65,7 @@ module OpenAI

      def initialize: (
        query: String,
+        ?queries: ::Array[String],
        ?sources: ::Array[OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::Source],
        ?type: :search
      ) -> void
@@ -67,6 +73,7 @@ module OpenAI
      def to_hash: -> {
        query: String,
        type: :search,
+        queries: ::Array[String],
        sources: ::Array[OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::Source]
      }

data/sig/openai/models/video_model.rbs CHANGED
@@ -1,22 +1,23 @@
 module OpenAI
   module Models
     type video_model =
-      :"sora-2"
+      String
+      | :"sora-2"
       | :"sora-2-pro"
       | :"sora-2-2025-10-06"
       | :"sora-2-pro-2025-10-06"
       | :"sora-2-2025-12-08"

     module VideoModel
-      extend OpenAI::Internal::Type::Enum
+      extend OpenAI::Internal::Type::Union
+
+      def self?.variants: -> ::Array[OpenAI::Models::video_model]

       SORA_2: :"sora-2"
       SORA_2_PRO: :"sora-2-pro"
       SORA_2_2025_10_06: :"sora-2-2025-10-06"
       SORA_2_PRO_2025_10_06: :"sora-2-pro-2025-10-06"
       SORA_2_2025_12_08: :"sora-2-2025-12-08"
-
-      def self?.values: -> ::Array[OpenAI::Models::video_model]
     end
   end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: openai
 version: !ruby/object:Gem::Version
-  version: 0.41.0
+  version: 0.43.0
 platform: ruby
 authors:
 - OpenAI
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-12-17 00:00:00.000000000 Z
+date: 2026-01-09 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: base64
@@ -24,6 +24,20 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: cgi
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: connection_pool
   requirement: !ruby/object:Gem::Requirement