openai 0.9.0 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/README.md +1 -1
  4. data/lib/openai/errors.rb +22 -0
  5. data/lib/openai/internal/type/array_of.rb +6 -1
  6. data/lib/openai/internal/type/base_model.rb +76 -24
  7. data/lib/openai/internal/type/boolean.rb +7 -1
  8. data/lib/openai/internal/type/converter.rb +42 -34
  9. data/lib/openai/internal/type/enum.rb +10 -2
  10. data/lib/openai/internal/type/file_input.rb +6 -1
  11. data/lib/openai/internal/type/hash_of.rb +6 -1
  12. data/lib/openai/internal/type/union.rb +12 -7
  13. data/lib/openai/internal/type/unknown.rb +7 -1
  14. data/lib/openai/models/audio/speech_create_params.rb +23 -2
  15. data/lib/openai/models/audio/transcription.rb +118 -1
  16. data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
  17. data/lib/openai/models/audio/transcription_verbose.rb +31 -1
  18. data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
  19. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
  20. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
  21. data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
  22. data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
  23. data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
  24. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
  25. data/lib/openai/models/responses/response_create_params.rb +29 -29
  26. data/lib/openai/models/responses/response_output_text.rb +18 -2
  27. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  28. data/lib/openai/resources/audio/speech.rb +3 -1
  29. data/lib/openai/resources/chat/completions.rb +8 -0
  30. data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
  31. data/lib/openai/resources/responses.rb +12 -12
  32. data/lib/openai/version.rb +1 -1
  33. data/rbi/openai/errors.rbi +16 -0
  34. data/rbi/openai/internal/type/boolean.rbi +2 -0
  35. data/rbi/openai/internal/type/converter.rbi +15 -15
  36. data/rbi/openai/internal/type/union.rbi +5 -0
  37. data/rbi/openai/internal/type/unknown.rbi +2 -0
  38. data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
  39. data/rbi/openai/models/audio/transcription.rbi +213 -3
  40. data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
  41. data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
  42. data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
  43. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
  44. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
  45. data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
  46. data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
  47. data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
  48. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
  49. data/rbi/openai/models/responses/response_create_params.rbi +83 -60
  50. data/rbi/openai/models/responses/response_output_text.rbi +26 -4
  51. data/rbi/openai/resources/audio/speech.rbi +6 -1
  52. data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
  53. data/rbi/openai/resources/responses.rbi +46 -46
  54. data/sig/openai/errors.rbs +9 -0
  55. data/sig/openai/internal/type/converter.rbs +7 -1
  56. data/sig/openai/models/audio/speech_create_params.rbs +21 -1
  57. data/sig/openai/models/audio/transcription.rbs +95 -3
  58. data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
  59. data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
  60. data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
  61. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
  62. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
  63. data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
  64. data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
  65. data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
  66. data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
  67. data/sig/openai/models/responses/response_create_params.rbs +18 -10
  68. data/sig/openai/models/responses/response_output_text.rbs +15 -1
  69. data/sig/openai/resources/audio/speech.rbs +1 -0
  70. data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
  71. data/sig/openai/resources/responses.rbs +4 -4
  72. metadata +2 -2
data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi

@@ -16,22 +16,29 @@ module OpenAI
   sig { returns(String) }
   attr_accessor :id

-  # The code to run.
-  sig { returns(String) }
+  # The code to run, or null if not available.
+  sig { returns(T.nilable(String)) }
   attr_accessor :code

-  # The results of the code interpreter tool call.
+  # The ID of the container used to run the code.
+  sig { returns(String) }
+  attr_accessor :container_id
+
+  # The outputs generated by the code interpreter, such as logs or images. Can be
+  # null if no outputs are available.
   sig do
     returns(
-      T::Array[
-        T.any(
-          OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Logs,
-          OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files
-        )
-      ]
+      T.nilable(
+        T::Array[
+          T.any(
+            OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs,
+            OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image
+          )
+        ]
+      )
     )
   end
-  attr_accessor :results
+  attr_accessor :outputs

   # The status of the code interpreter tool call.
   sig do
@@ -45,42 +52,38 @@ module OpenAI
   sig { returns(Symbol) }
   attr_accessor :type

-  # The ID of the container used to run the code.
-  sig { returns(T.nilable(String)) }
-  attr_reader :container_id
-
-  sig { params(container_id: String).void }
-  attr_writer :container_id
-
   # A tool call to run code.
   sig do
     params(
       id: String,
-      code: String,
-      results:
-        T::Array[
-          T.any(
-            OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Logs::OrHash,
-            OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files::OrHash
-          )
-        ],
+      code: T.nilable(String),
+      container_id: String,
+      outputs:
+        T.nilable(
+          T::Array[
+            T.any(
+              OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs::OrHash,
+              OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image::OrHash
+            )
+          ]
+        ),
       status:
         OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::OrSymbol,
-      container_id: String,
       type: Symbol
     ).returns(T.attached_class)
   end
   def self.new(
     # The unique ID of the code interpreter tool call.
     id:,
-    # The code to run.
+    # The code to run, or null if not available.
     code:,
-    # The results of the code interpreter tool call.
-    results:,
+    # The ID of the container used to run the code.
+    container_id:,
+    # The outputs generated by the code interpreter, such as logs or images. Can be
+    # null if no outputs are available.
+    outputs:,
     # The status of the code interpreter tool call.
     status:,
-    # The ID of the container used to run the code.
-    container_id: nil,
     # The type of the code interpreter tool call. Always `code_interpreter_call`.
     type: :code_interpreter_call
   )
@@ -90,33 +93,35 @@ module OpenAI
     override.returns(
       {
         id: String,
-        code: String,
-        results:
-          T::Array[
-            T.any(
-              OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Logs,
-              OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files
-            )
-          ],
+        code: T.nilable(String),
+        container_id: String,
+        outputs:
+          T.nilable(
+            T::Array[
+              T.any(
+                OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs,
+                OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image
+              )
+            ]
+          ),
         status:
           OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::OrSymbol,
-        type: Symbol,
-        container_id: String
+        type: Symbol
       }
     )
   end
   def to_hash
   end

-  # The output of a code interpreter tool.
-  module Result
+  # The logs output from the code interpreter.
+  module Output
     extend OpenAI::Internal::Type::Union

     Variants =
       T.type_alias do
         T.any(
-          OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Logs,
-          OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files
+          OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs,
+          OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image
         )
       end

@@ -124,25 +129,25 @@ module OpenAI
       OrHash =
        T.type_alias do
          T.any(
-            OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Logs,
+            OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs,
            OpenAI::Internal::AnyHash
          )
        end

-      # The logs of the code interpreter tool call.
+      # The logs output from the code interpreter.
      sig { returns(String) }
      attr_accessor :logs

-      # The type of the code interpreter text output. Always `logs`.
+      # The type of the output. Always 'logs'.
      sig { returns(Symbol) }
      attr_accessor :type

-      # The output of a code interpreter tool call that is text.
+      # The logs output from the code interpreter.
      sig { params(logs: String, type: Symbol).returns(T.attached_class) }
      def self.new(
-        # The logs of the code interpreter tool call.
+        # The logs output from the code interpreter.
        logs:,
-        # The type of the code interpreter text output. Always `logs`.
+        # The type of the output. Always 'logs'.
        type: :logs
      )
      end
@@ -152,99 +157,42 @@ module OpenAI
      end
    end

-    class Files < OpenAI::Internal::Type::BaseModel
+    class Image < OpenAI::Internal::Type::BaseModel
      OrHash =
        T.type_alias do
          T.any(
-            OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files,
+            OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image,
            OpenAI::Internal::AnyHash
          )
        end

-      sig do
-        returns(
-          T::Array[
-            OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files::File
-          ]
-        )
-      end
-      attr_accessor :files
-
-      # The type of the code interpreter file output. Always `files`.
+      # The type of the output. Always 'image'.
      sig { returns(Symbol) }
      attr_accessor :type

-      # The output of a code interpreter tool call that is a file.
-      sig do
-        params(
-          files:
-            T::Array[
-              OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files::File::OrHash
-            ],
-          type: Symbol
-        ).returns(T.attached_class)
-      end
+      # The URL of the image output from the code interpreter.
+      sig { returns(String) }
+      attr_accessor :url
+
+      # The image output from the code interpreter.
+      sig { params(url: String, type: Symbol).returns(T.attached_class) }
      def self.new(
-        files:,
-        # The type of the code interpreter file output. Always `files`.
-        type: :files
+        # The URL of the image output from the code interpreter.
+        url:,
+        # The type of the output. Always 'image'.
+        type: :image
      )
      end

-      sig do
-        override.returns(
-          {
-            files:
-              T::Array[
-                OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files::File
-              ],
-            type: Symbol
-          }
-        )
-      end
+      sig { override.returns({ type: Symbol, url: String }) }
      def to_hash
      end
-
-      class File < OpenAI::Internal::Type::BaseModel
-        OrHash =
-          T.type_alias do
-            T.any(
-              OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Files::File,
-              OpenAI::Internal::AnyHash
-            )
-          end
-
-        # The ID of the file.
-        sig { returns(String) }
-        attr_accessor :file_id
-
-        # The MIME type of the file.
-        sig { returns(String) }
-        attr_accessor :mime_type
-
-        sig do
-          params(file_id: String, mime_type: String).returns(
-            T.attached_class
-          )
-        end
-        def self.new(
-          # The ID of the file.
-          file_id:,
-          # The MIME type of the file.
-          mime_type:
-        )
-        end
-
-        sig { override.returns({ file_id: String, mime_type: String }) }
-        def to_hash
-        end
-      end
    end

    sig do
      override.returns(
        T::Array[
-          OpenAI::Responses::ResponseCodeInterpreterToolCall::Result::Variants
+          OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Variants
        ]
      )
    end
@@ -270,14 +218,24 @@ module OpenAI
      :in_progress,
      OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol
    )
+  COMPLETED =
+    T.let(
+      :completed,
+      OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol
+    )
+  INCOMPLETE =
+    T.let(
+      :incomplete,
+      OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol
+    )
   INTERPRETING =
     T.let(
       :interpreting,
       OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol
     )
-  COMPLETED =
+  FAILED =
     T.let(
-      :completed,
+      :failed,
       OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol
     )

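Taken together, these hunks rename the `Result` union to `Output`, drop the nested `Files`/`File` output in favour of a URL-based `Image` variant, make `code` and the renamed `outputs` list nilable, promote `container_id` to a required field, and add `incomplete` and `failed` status values. A minimal sketch of what consuming code might look like against 0.10.0 follows; the `response` object, the iteration pattern, and the constant paths are illustrative assumptions, not part of the diff.

```ruby
# Hypothetical usage sketch for the 0.10.0 shape of this model.
# Assumes `response` is a Responses API response whose `output` array may
# contain a code interpreter tool call item.
response.output.each do |item|
  next unless item.is_a?(OpenAI::Responses::ResponseCodeInterpreterToolCall)

  puts item.container_id             # required String in 0.10.0
  puts item.code                     # nilable in 0.10.0
  (item.outputs || []).each do |out| # renamed from `results`; may be nil
    case out
    when OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs
      puts out.logs
    when OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image
      puts out.url                   # the Image variant replaces the old Files/File output
    end
  end
end
```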
data/rbi/openai/models/responses/response_create_params.rbi

@@ -15,36 +15,6 @@ module OpenAI
       )
     end

-  # Text, image, or file inputs to the model, used to generate a response.
-  #
-  # Learn more:
-  #
-  # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-  # - [Image inputs](https://platform.openai.com/docs/guides/images)
-  # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-  # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-  # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-  sig do
-    returns(OpenAI::Responses::ResponseCreateParams::Input::Variants)
-  end
-  attr_accessor :input
-
-  # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-  # wide range of models with different capabilities, performance characteristics,
-  # and price points. Refer to the
-  # [model guide](https://platform.openai.com/docs/models) to browse and compare
-  # available models.
-  sig do
-    returns(
-      T.any(
-        String,
-        OpenAI::ChatModel::OrSymbol,
-        OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
-      )
-    )
-  end
-  attr_accessor :model
-
   # Whether to run the model response in the background.
   # [Learn more](https://platform.openai.com/docs/guides/background).
   sig { returns(T.nilable(T::Boolean)) }
@@ -72,6 +42,29 @@ module OpenAI
   end
   attr_accessor :include

+  # Text, image, or file inputs to the model, used to generate a response.
+  #
+  # Learn more:
+  #
+  # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+  # - [Image inputs](https://platform.openai.com/docs/guides/images)
+  # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+  # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+  # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+  sig do
+    returns(
+      T.nilable(OpenAI::Responses::ResponseCreateParams::Input::Variants)
+    )
+  end
+  attr_reader :input
+
+  sig do
+    params(
+      input: OpenAI::Responses::ResponseCreateParams::Input::Variants
+    ).void
+  end
+  attr_writer :input
+
   # A system (or developer) message inserted into the model's context.
   #
   # When using along with `previous_response_id`, the instructions from a previous
@@ -95,6 +88,36 @@ module OpenAI
   sig { returns(T.nilable(T::Hash[Symbol, String])) }
   attr_accessor :metadata

+  # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+  # wide range of models with different capabilities, performance characteristics,
+  # and price points. Refer to the
+  # [model guide](https://platform.openai.com/docs/models) to browse and compare
+  # available models.
+  sig do
+    returns(
+      T.nilable(
+        T.any(
+          String,
+          OpenAI::ChatModel::OrSymbol,
+          OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
+        )
+      )
+    )
+  end
+  attr_reader :model
+
+  sig do
+    params(
+      model:
+        T.any(
+          String,
+          OpenAI::ChatModel::OrSymbol,
+          OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
+        )
+    ).void
+  end
+  attr_writer :model
+
   # Whether to allow the model to run tool calls in parallel.
   sig { returns(T.nilable(T::Boolean)) }
   attr_accessor :parallel_tool_calls
@@ -299,21 +322,21 @@ module OpenAI

   sig do
     params(
-      input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
-      model:
-        T.any(
-          String,
-          OpenAI::ChatModel::OrSymbol,
-          OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
-        ),
       background: T.nilable(T::Boolean),
       include:
         T.nilable(
           T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
         ),
+      input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
       instructions: T.nilable(String),
       max_output_tokens: T.nilable(Integer),
       metadata: T.nilable(T::Hash[Symbol, String]),
+      model:
+        T.any(
+          String,
+          OpenAI::ChatModel::OrSymbol,
+          OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
+        ),
       parallel_tool_calls: T.nilable(T::Boolean),
       previous_response_id: T.nilable(String),
       prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
@@ -354,22 +377,6 @@ module OpenAI
     ).returns(T.attached_class)
   end
   def self.new(
-    # Text, image, or file inputs to the model, used to generate a response.
-    #
-    # Learn more:
-    #
-    # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-    # - [Image inputs](https://platform.openai.com/docs/guides/images)
-    # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-    # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-    # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-    input:,
-    # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-    # wide range of models with different capabilities, performance characteristics,
-    # and price points. Refer to the
-    # [model guide](https://platform.openai.com/docs/models) to browse and compare
-    # available models.
-    model:,
     # Whether to run the model response in the background.
     # [Learn more](https://platform.openai.com/docs/guides/background).
     background: nil,
@@ -389,6 +396,16 @@ module OpenAI
     # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
     #   in code interpreter tool call items.
     include: nil,
+    # Text, image, or file inputs to the model, used to generate a response.
+    #
+    # Learn more:
+    #
+    # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    # - [Image inputs](https://platform.openai.com/docs/guides/images)
+    # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+    # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+    # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+    input: nil,
     # A system (or developer) message inserted into the model's context.
     #
     # When using along with `previous_response_id`, the instructions from a previous
@@ -406,6 +423,12 @@ module OpenAI
     # Keys are strings with a maximum length of 64 characters. Values are strings with
     # a maximum length of 512 characters.
     metadata: nil,
+    # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+    # wide range of models with different capabilities, performance characteristics,
+    # and price points. Refer to the
+    # [model guide](https://platform.openai.com/docs/models) to browse and compare
+    # available models.
+    model: nil,
     # Whether to allow the model to run tool calls in parallel.
     parallel_tool_calls: nil,
     # The unique ID of the previous response to the model. Use this to create
@@ -495,21 +518,21 @@ module OpenAI
   sig do
     override.returns(
       {
-        input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
-        model:
-          T.any(
-            String,
-            OpenAI::ChatModel::OrSymbol,
-            OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
-          ),
         background: T.nilable(T::Boolean),
         include:
           T.nilable(
            T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
          ),
+        input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
         instructions: T.nilable(String),
         max_output_tokens: T.nilable(Integer),
         metadata: T.nilable(T::Hash[Symbol, String]),
+        model:
+          T.any(
+            String,
+            OpenAI::ChatModel::OrSymbol,
+            OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
+          ),
         parallel_tool_calls: T.nilable(T::Boolean),
         previous_response_id: T.nilable(String),
         prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
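The net effect of these hunks: `input` and `model` are no longer declared as required keywords on `ResponseCreateParams` (they become nilable `attr_reader`/`attr_writer` pairs), and the keyword lists are reordered alphabetically. Calls that already pass both keywords should be unaffected. A minimal sketch, where the keyword names come from the diff but the client setup and argument values are assumptions:

```ruby
# Hypothetical sketch of calling the Responses API against the 0.10.0 signature.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-4o",    # optional in the 0.10.0 Sorbet signature
  input: "Write a haiku about Sorbet signatures"
)
puts response.id
```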
data/rbi/openai/models/responses/response_output_text.rbi

@@ -131,6 +131,10 @@ module OpenAI
   sig { returns(String) }
   attr_accessor :file_id

+  # The filename of the file cited.
+  sig { returns(String) }
+  attr_accessor :filename
+
   # The index of the file in the list of files.
   sig { returns(Integer) }
   attr_accessor :index
@@ -141,13 +145,18 @@ module OpenAI

   # A citation to a file.
   sig do
-    params(file_id: String, index: Integer, type: Symbol).returns(
-      T.attached_class
-    )
+    params(
+      file_id: String,
+      filename: String,
+      index: Integer,
+      type: Symbol
+    ).returns(T.attached_class)
   end
   def self.new(
     # The ID of the file.
     file_id:,
+    # The filename of the file cited.
+    filename:,
     # The index of the file in the list of files.
     index:,
     # The type of the file citation. Always `file_citation`.
@@ -157,7 +166,12 @@ module OpenAI

   sig do
     override.returns(
-      { file_id: String, index: Integer, type: Symbol }
+      {
+        file_id: String,
+        filename: String,
+        index: Integer,
+        type: Symbol
+      }
     )
   end
   def to_hash
@@ -253,6 +267,10 @@ module OpenAI
   sig { returns(String) }
   attr_accessor :file_id

+  # The filename of the container file cited.
+  sig { returns(String) }
+  attr_accessor :filename
+
   # The index of the first character of the container file citation in the message.
   sig { returns(Integer) }
   attr_accessor :start_index
@@ -267,6 +285,7 @@ module OpenAI
       container_id: String,
       end_index: Integer,
       file_id: String,
+      filename: String,
       start_index: Integer,
       type: Symbol
     ).returns(T.attached_class)
@@ -278,6 +297,8 @@ module OpenAI
     end_index:,
     # The ID of the file.
     file_id:,
+    # The filename of the container file cited.
+    filename:,
     # The index of the first character of the container file citation in the message.
     start_index:,
     # The type of the container file citation. Always `container_file_citation`.
@@ -291,6 +312,7 @@ module OpenAI
       container_id: String,
       end_index: Integer,
       file_id: String,
+      filename: String,
       start_index: Integer,
       type: Symbol
     }
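Both annotation types on `ResponseOutputText` gain a required `filename` field. A minimal sketch of reading it, assuming `text_part` is an `OpenAI::Responses::ResponseOutputText` taken from a response's output message content (the surrounding objects are assumptions, only the annotation classes and fields come from the diff):

```ruby
# Hypothetical sketch: iterate the annotations on an output text part and use
# the new `filename` attribute added in 0.10.0.
text_part.annotations.each do |annotation|
  case annotation
  when OpenAI::Responses::ResponseOutputText::Annotation::FileCitation
    puts "#{annotation.filename} (#{annotation.file_id})"
  when OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation
    puts "#{annotation.filename} [#{annotation.start_index}, #{annotation.end_index}]"
  end
end
```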
data/rbi/openai/resources/audio/speech.rbi

@@ -15,6 +15,8 @@ module OpenAI
       response_format:
         OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol,
       speed: Float,
+      stream_format:
+        OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol,
       request_options: OpenAI::RequestOptions::OrHash
     ).returns(StringIO)
   end
@@ -36,8 +38,11 @@ module OpenAI
     # `wav`, and `pcm`.
     response_format: nil,
     # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
-    # the default. Does not work with `gpt-4o-mini-tts`.
+    # the default.
     speed: nil,
+    # The format to stream the audio in. Supported formats are `sse` and `audio`.
+    # `sse` is not supported for `tts-1` or `tts-1-hd`.
+    stream_format: nil,
     request_options: {}
   )
   end
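`Audio::Speech#create` gains a `stream_format` keyword, and the `speed` docstring drops the `gpt-4o-mini-tts` caveat. A minimal usage sketch; the model, voice, and input values are illustrative assumptions, while the keyword and its `sse`/`audio` values come from the docstring above:

```ruby
# Hypothetical sketch of the new keyword; `sse` is documented as unsupported
# for tts-1 / tts-1-hd, so `:audio` is used here.
audio = client.audio.speech.create(
  model: "gpt-4o-mini-tts",
  voice: "alloy",
  input: "Hello from openai-ruby 0.10.0",
  stream_format: :audio
)
File.binwrite("hello.mp3", audio.read) # create returns a StringIO per the sig above
```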
data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi

@@ -43,9 +43,7 @@ module OpenAI
       project_id: String,
       request_options: OpenAI::RequestOptions::OrHash
     ).returns(
-      OpenAI::Internal::CursorPage[
-        OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
-      ]
+      OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
     )
   end
   def retrieve(
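`Permissions#retrieve` no longer returns an `OpenAI::Internal::CursorPage`; it now returns the `PermissionRetrieveResponse` directly, so callers that enumerated the page need to read the list off the response object instead. A minimal sketch, assuming the restructured response exposes the permission records via `#data` (the checkpoint ID and keyword values are placeholders):

```ruby
# Hypothetical sketch: in 0.9.0 this call returned a CursorPage to enumerate;
# in 0.10.0 the response object is returned directly.
resp = client.fine_tuning.checkpoints.permissions.retrieve(
  "ftckpt_abc123", # placeholder checkpoint ID
  limit: 10
)
resp.data.each { |permission| puts permission.id } # assumes a `data` list on the response
```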