openai 0.21.0 → 0.21.1

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registries.
Files changed (51)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/eval_create_params.rb +10 -6
  5. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +10 -6
  6. data/lib/openai/models/evals/run_cancel_response.rb +10 -6
  7. data/lib/openai/models/evals/run_create_params.rb +10 -6
  8. data/lib/openai/models/evals/run_create_response.rb +10 -6
  9. data/lib/openai/models/evals/run_list_response.rb +10 -6
  10. data/lib/openai/models/evals/run_retrieve_response.rb +10 -6
  11. data/lib/openai/models/graders/label_model_grader.rb +10 -6
  12. data/lib/openai/models/graders/score_model_grader.rb +10 -6
  13. data/lib/openai/models/responses/easy_input_message.rb +3 -3
  14. data/lib/openai/models/responses/response.rb +1 -1
  15. data/lib/openai/models/responses/response_content.rb +4 -1
  16. data/lib/openai/models/responses/response_create_params.rb +1 -1
  17. data/lib/openai/models/responses/response_input_audio.rb +39 -23
  18. data/lib/openai/models/responses/response_input_content.rb +4 -1
  19. data/lib/openai/models/responses/response_input_item.rb +2 -2
  20. data/lib/openai/models/responses/response_input_message_item.rb +2 -2
  21. data/lib/openai/version.rb +1 -1
  22. data/rbi/openai/models/eval_create_params.rbi +5 -1
  23. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +5 -1
  24. data/rbi/openai/models/evals/run_cancel_response.rbi +3 -1
  25. data/rbi/openai/models/evals/run_create_params.rbi +5 -1
  26. data/rbi/openai/models/evals/run_create_response.rbi +3 -1
  27. data/rbi/openai/models/evals/run_list_response.rbi +3 -1
  28. data/rbi/openai/models/evals/run_retrieve_response.rbi +3 -1
  29. data/rbi/openai/models/graders/label_model_grader.rbi +5 -1
  30. data/rbi/openai/models/graders/score_model_grader.rbi +5 -1
  31. data/rbi/openai/models/responses/response.rbi +2 -2
  32. data/rbi/openai/models/responses/response_content.rbi +1 -0
  33. data/rbi/openai/models/responses/response_create_params.rbi +2 -2
  34. data/rbi/openai/models/responses/response_input_audio.rbi +85 -34
  35. data/rbi/openai/models/responses/response_input_content.rbi +2 -1
  36. data/rbi/openai/models/responses/response_input_item.rbi +6 -3
  37. data/rbi/openai/models/responses/response_input_message_item.rbi +2 -1
  38. data/rbi/openai/resources/responses.rbi +2 -2
  39. data/sig/openai/models/eval_create_params.rbs +2 -1
  40. data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +2 -1
  41. data/sig/openai/models/evals/run_cancel_response.rbs +2 -1
  42. data/sig/openai/models/evals/run_create_params.rbs +2 -1
  43. data/sig/openai/models/evals/run_create_response.rbs +2 -1
  44. data/sig/openai/models/evals/run_list_response.rbs +2 -1
  45. data/sig/openai/models/evals/run_retrieve_response.rbs +2 -1
  46. data/sig/openai/models/graders/label_model_grader.rbs +2 -1
  47. data/sig/openai/models/graders/score_model_grader.rbs +2 -1
  48. data/sig/openai/models/responses/response_content.rbs +1 -0
  49. data/sig/openai/models/responses/response_input_audio.rbs +32 -15
  50. data/sig/openai/models/responses/response_input_content.rbs +1 -0
  51. metadata +1 -1
@@ -12,13 +12,16 @@ module OpenAI
            )
          end
 
-        # Base64-encoded audio data.
-        sig { returns(String) }
-        attr_accessor :data
+        sig { returns(OpenAI::Responses::ResponseInputAudio::InputAudio) }
+        attr_reader :input_audio
 
-        # The format of the audio data. Currently supported formats are `mp3` and `wav`.
-        sig { returns(OpenAI::Responses::ResponseInputAudio::Format::OrSymbol) }
-        attr_accessor :format_
+        sig do
+          params(
+            input_audio:
+              OpenAI::Responses::ResponseInputAudio::InputAudio::OrHash
+          ).void
+        end
+        attr_writer :input_audio
 
         # The type of the input item. Always `input_audio`.
         sig { returns(Symbol) }
@@ -27,16 +30,13 @@ module OpenAI
         # An audio input to the model.
         sig do
           params(
-            data: String,
-            format_: OpenAI::Responses::ResponseInputAudio::Format::OrSymbol,
+            input_audio:
+              OpenAI::Responses::ResponseInputAudio::InputAudio::OrHash,
             type: Symbol
           ).returns(T.attached_class)
         end
         def self.new(
-          # Base64-encoded audio data.
-          data:,
-          # The format of the audio data. Currently supported formats are `mp3` and `wav`.
-          format_:,
+          input_audio:,
           # The type of the input item. Always `input_audio`.
           type: :input_audio
         )
@@ -45,8 +45,7 @@ module OpenAI
         sig do
           override.returns(
             {
-              data: String,
-              format_: OpenAI::Responses::ResponseInputAudio::Format::OrSymbol,
+              input_audio: OpenAI::Responses::ResponseInputAudio::InputAudio,
               type: Symbol
             }
           )
@@ -54,35 +53,87 @@ module OpenAI
         def to_hash
         end
 
-        # The format of the audio data. Currently supported formats are `mp3` and `wav`.
-        module Format
-          extend OpenAI::Internal::Type::Enum
-
-          TaggedSymbol =
+        class InputAudio < OpenAI::Internal::Type::BaseModel
+          OrHash =
             T.type_alias do
-              T.all(Symbol, OpenAI::Responses::ResponseInputAudio::Format)
+              T.any(
+                OpenAI::Responses::ResponseInputAudio::InputAudio,
+                OpenAI::Internal::AnyHash
+              )
             end
-          OrSymbol = T.type_alias { T.any(Symbol, String) }
 
-          MP3 =
-            T.let(
-              :mp3,
-              OpenAI::Responses::ResponseInputAudio::Format::TaggedSymbol
-            )
-          WAV =
-            T.let(
-              :wav,
-              OpenAI::Responses::ResponseInputAudio::Format::TaggedSymbol
+          # Base64-encoded audio data.
+          sig { returns(String) }
+          attr_accessor :data
+
+          # The format of the audio data. Currently supported formats are `mp3` and `wav`.
+          sig do
+            returns(
+              OpenAI::Responses::ResponseInputAudio::InputAudio::Format::OrSymbol
            )
+          end
+          attr_accessor :format_
+
+          sig do
+            params(
+              data: String,
+              format_:
+                OpenAI::Responses::ResponseInputAudio::InputAudio::Format::OrSymbol
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # Base64-encoded audio data.
+            data:,
+            # The format of the audio data. Currently supported formats are `mp3` and `wav`.
+            format_:
+          )
+          end
 
           sig do
             override.returns(
-              T::Array[
-                OpenAI::Responses::ResponseInputAudio::Format::TaggedSymbol
-              ]
+              {
+                data: String,
+                format_:
+                  OpenAI::Responses::ResponseInputAudio::InputAudio::Format::OrSymbol
+              }
             )
           end
-          def self.values
+          def to_hash
+          end
+
+          # The format of the audio data. Currently supported formats are `mp3` and `wav`.
+          module Format
+            extend OpenAI::Internal::Type::Enum
+
+            TaggedSymbol =
+              T.type_alias do
+                T.all(
+                  Symbol,
+                  OpenAI::Responses::ResponseInputAudio::InputAudio::Format
+                )
+              end
+            OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+            MP3 =
+              T.let(
+                :mp3,
+                OpenAI::Responses::ResponseInputAudio::InputAudio::Format::TaggedSymbol
+              )
+            WAV =
+              T.let(
+                :wav,
+                OpenAI::Responses::ResponseInputAudio::InputAudio::Format::TaggedSymbol
+              )
+
+            sig do
+              override.returns(
+                T::Array[
+                  OpenAI::Responses::ResponseInputAudio::InputAudio::Format::TaggedSymbol
+                ]
+              )
+            end
+            def self.values
+            end
           end
         end
       end
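For orientation (this example is not part of the package diff), a minimal Ruby sketch of constructing the reshaped model under the new signatures above; the audio file path is a placeholder, and Base64/File come from the Ruby standard library:

    require "openai"
    require "base64"

    # The Base64 payload and format now live on a nested InputAudio model
    # instead of directly on ResponseInputAudio (previously data:/format_:).
    part = OpenAI::Responses::ResponseInputAudio.new(
      input_audio: OpenAI::Responses::ResponseInputAudio::InputAudio.new(
        data: Base64.strict_encode64(File.binread("clip.mp3")), # Base64-encoded audio data
        format_: :mp3                                           # :mp3 or :wav
      )
      # type: defaults to :input_audio
    )

    part.input_audio.format_ # => :mp3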
@@ -12,7 +12,8 @@ module OpenAI
          T.any(
            OpenAI::Responses::ResponseInputText,
            OpenAI::Responses::ResponseInputImage,
-            OpenAI::Responses::ResponseInputFile
+            OpenAI::Responses::ResponseInputFile,
+            OpenAI::Responses::ResponseInputAudio
          )
        end
 
@@ -55,7 +55,8 @@ module OpenAI
            T.any(
              OpenAI::Responses::ResponseInputText,
              OpenAI::Responses::ResponseInputImage,
-              OpenAI::Responses::ResponseInputFile
+              OpenAI::Responses::ResponseInputFile,
+              OpenAI::Responses::ResponseInputAudio
            )
          ]
        )
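Because OpenAI::Responses::ResponseInputAudio is now a member of these content unions, code that branches over content parts can add an audio case. A rough sketch (illustrative only; the sample values are made up, and it assumes the SDK coerces the OrHash form into the nested InputAudio model as the signatures above suggest):

    require "openai"

    parts = [
      OpenAI::Responses::ResponseInputText.new(text: "What is said in this clip?"),
      OpenAI::Responses::ResponseInputAudio.new(
        input_audio: { data: "UklGRg==", format_: :wav } # hash (OrHash) form
      )
    ]

    parts.each do |part|
      case part
      when OpenAI::Responses::ResponseInputText
        puts "text:  #{part.text}"
      when OpenAI::Responses::ResponseInputAudio # new variant in 0.21.1
        puts "audio: #{part.input_audio.format_}"
      else
        puts "other: #{part.class}"
      end
    end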
@@ -117,7 +118,8 @@ module OpenAI
              T.any(
                OpenAI::Responses::ResponseInputText::OrHash,
                OpenAI::Responses::ResponseInputImage::OrHash,
-                OpenAI::Responses::ResponseInputFile::OrHash
+                OpenAI::Responses::ResponseInputFile::OrHash,
+                OpenAI::Responses::ResponseInputAudio::OrHash
              )
            ],
            role:
@@ -150,7 +152,8 @@ module OpenAI
              T.any(
                OpenAI::Responses::ResponseInputText,
                OpenAI::Responses::ResponseInputImage,
-                OpenAI::Responses::ResponseInputFile
+                OpenAI::Responses::ResponseInputFile,
+                OpenAI::Responses::ResponseInputAudio
              )
            ],
            role:
@@ -75,7 +75,8 @@ module OpenAI
            T.any(
              OpenAI::Responses::ResponseInputText::OrHash,
              OpenAI::Responses::ResponseInputImage::OrHash,
-              OpenAI::Responses::ResponseInputFile::OrHash
+              OpenAI::Responses::ResponseInputFile::OrHash,
+              OpenAI::Responses::ResponseInputAudio::OrHash
            )
          ],
          role: OpenAI::Responses::ResponseInputMessageItem::Role::OrSymbol,
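At the request level, the hash (OrHash) forms above suggest an audio part can be passed inline in a message's content array. A hedged end-to-end sketch, assuming OPENAI_API_KEY is set in the environment; the model name and file path are placeholders, not taken from this diff:

    require "openai"
    require "base64"

    client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

    response = client.responses.create(
      model: "gpt-4o-audio-preview", # placeholder; use an audio-capable model
      input: [
        {
          role: :user,
          content: [
            { type: :input_text, text: "Please summarize this recording." },
            {
              type: :input_audio,
              input_audio: {
                data: Base64.strict_encode64(File.binread("meeting.wav")),
                format_: :wav
              }
            }
          ]
        }
      ]
    )

    puts response.id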
@@ -239,7 +239,7 @@ module OpenAI
      # Learn more about
      # [built-in tools](https://platform.openai.com/docs/guides/tools).
      # - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
-      # predefined connectors such as Google Drive and Notion. Learn more about
+      # predefined connectors such as Google Drive and SharePoint. Learn more about
      # [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
      # - **Function calls (custom tools)**: Functions that are defined by you, enabling
      # the model to call your own code with strongly typed arguments and outputs.
@@ -516,7 +516,7 @@ module OpenAI
      # Learn more about
      # [built-in tools](https://platform.openai.com/docs/guides/tools).
      # - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
-      # predefined connectors such as Google Drive and Notion. Learn more about
+      # predefined connectors such as Google Drive and SharePoint. Learn more about
      # [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
      # - **Function calls (custom tools)**: Functions that are defined by you, enabling
      # the model to call your own code with strongly typed arguments and outputs.
@@ -219,6 +219,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText
          | OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -263,7 +264,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -235,6 +235,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText
          | OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -279,7 +280,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -351,6 +351,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText
          | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -402,7 +403,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -314,6 +314,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText
          | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -365,7 +366,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -351,6 +351,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText
          | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -402,7 +403,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -351,6 +351,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText
          | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -402,7 +403,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -351,6 +351,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText
          | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -402,7 +403,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -79,6 +79,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::Graders::LabelModelGrader::Input::Content::OutputText
          | OpenAI::Graders::LabelModelGrader::Input::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -123,7 +124,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::Graders::LabelModelGrader::Input::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -83,6 +83,7 @@ module OpenAI
          | OpenAI::Responses::ResponseInputText
          | OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText
          | OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage
+          | OpenAI::Responses::ResponseInputAudio
          | ::Array[top]
 
        module Content
@@ -127,7 +128,7 @@ module OpenAI
 
          def self?.variants: -> ::Array[OpenAI::Models::Graders::ScoreModelGrader::Input::content]
 
-          AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter
+          AnArrayOfInputTextInputImageAndInputAudioArray: OpenAI::Internal::Type::Converter
        end
 
        type role = :user | :assistant | :system | :developer
@@ -5,6 +5,7 @@ module OpenAI
        OpenAI::Responses::ResponseInputText
        | OpenAI::Responses::ResponseInputImage
        | OpenAI::Responses::ResponseInputFile
+        | OpenAI::Responses::ResponseInputAudio
        | OpenAI::Responses::ResponseOutputText
        | OpenAI::Responses::ResponseOutputRefusal
 
@@ -3,39 +3,56 @@ module OpenAI
    module Responses
      type response_input_audio =
        {
-          data: String,
-          format_: OpenAI::Models::Responses::ResponseInputAudio::format_,
+          input_audio: OpenAI::Responses::ResponseInputAudio::InputAudio,
          type: :input_audio
        }
 
      class ResponseInputAudio < OpenAI::Internal::Type::BaseModel
-        attr_accessor data: String
-
-        attr_accessor format_: OpenAI::Models::Responses::ResponseInputAudio::format_
+        attr_accessor input_audio: OpenAI::Responses::ResponseInputAudio::InputAudio
 
        attr_accessor type: :input_audio
 
        def initialize: (
-          data: String,
-          format_: OpenAI::Models::Responses::ResponseInputAudio::format_,
+          input_audio: OpenAI::Responses::ResponseInputAudio::InputAudio,
          ?type: :input_audio
        ) -> void
 
        def to_hash: -> {
-          data: String,
-          format_: OpenAI::Models::Responses::ResponseInputAudio::format_,
+          input_audio: OpenAI::Responses::ResponseInputAudio::InputAudio,
          type: :input_audio
        }
 
-        type format_ = :mp3 | :wav
+        type input_audio =
+          {
+            data: String,
+            format_: OpenAI::Models::Responses::ResponseInputAudio::InputAudio::format_
+          }
+
+        class InputAudio < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: String
+
+          attr_accessor format_: OpenAI::Models::Responses::ResponseInputAudio::InputAudio::format_
+
+          def initialize: (
+            data: String,
+            format_: OpenAI::Models::Responses::ResponseInputAudio::InputAudio::format_
+          ) -> void
+
+          def to_hash: -> {
+            data: String,
+            format_: OpenAI::Models::Responses::ResponseInputAudio::InputAudio::format_
+          }
+
+          type format_ = :mp3 | :wav
 
-        module Format
-          extend OpenAI::Internal::Type::Enum
+          module Format
+            extend OpenAI::Internal::Type::Enum
 
-          MP3: :mp3
-          WAV: :wav
+            MP3: :mp3
+            WAV: :wav
 
-          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputAudio::format_]
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputAudio::InputAudio::format_]
+          end
        end
      end
    end
@@ -5,6 +5,7 @@ module OpenAI
        OpenAI::Responses::ResponseInputText
        | OpenAI::Responses::ResponseInputImage
        | OpenAI::Responses::ResponseInputFile
+        | OpenAI::Responses::ResponseInputAudio
 
      module ResponseInputContent
        extend OpenAI::Internal::Type::Union
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: openai
 version: !ruby/object:Gem::Version
-  version: 0.21.0
+  version: 0.21.1
 platform: ruby
 authors:
 - OpenAI