openai 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +26 -0
  3. data/README.md +1 -1
  4. data/lib/openai/internal/type/enum.rb +6 -3
  5. data/lib/openai/internal/type/union.rb +5 -2
  6. data/lib/openai/models/audio/transcription_text_delta_event.rb +3 -3
  7. data/lib/openai/models/audio/transcription_text_done_event.rb +3 -3
  8. data/lib/openai/models/chat/chat_completion.rb +4 -4
  9. data/lib/openai/models/chat/chat_completion_chunk.rb +4 -4
  10. data/lib/openai/models/chat/completion_create_params.rb +4 -4
  11. data/lib/openai/models/chat_model.rb +1 -0
  12. data/lib/openai/models/fine_tuning/alpha/grader_run_params.rb +17 -30
  13. data/lib/openai/models/fine_tuning/fine_tuning_job.rb +3 -5
  14. data/lib/openai/models/graders/multi_grader.rb +11 -4
  15. data/lib/openai/models/image_edit_params.rb +2 -2
  16. data/lib/openai/models/responses/response.rb +4 -4
  17. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +5 -5
  18. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +5 -5
  19. data/lib/openai/models/responses/response_create_params.rb +6 -4
  20. data/lib/openai/models/responses/response_includable.rb +3 -0
  21. data/lib/openai/models/responses/response_output_text.rb +52 -3
  22. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  23. data/lib/openai/models.rb +4 -4
  24. data/lib/openai/resources/beta/threads.rb +2 -2
  25. data/lib/openai/resources/fine_tuning/alpha/graders.rb +6 -3
  26. data/lib/openai/resources/responses.rb +2 -2
  27. data/lib/openai/version.rb +1 -1
  28. data/rbi/openai/models/audio/transcription_text_delta_event.rbi +4 -4
  29. data/rbi/openai/models/audio/transcription_text_done_event.rbi +4 -4
  30. data/rbi/openai/models/chat/chat_completion.rbi +6 -6
  31. data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -6
  32. data/rbi/openai/models/chat/completion_create_params.rbi +6 -6
  33. data/rbi/openai/models/chat_model.rbi +5 -0
  34. data/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi +24 -43
  35. data/rbi/openai/models/fine_tuning/fine_tuning_job.rbi +2 -3
  36. data/rbi/openai/models/graders/multi_grader.rbi +27 -32
  37. data/rbi/openai/models/image_edit_params.rbi +3 -3
  38. data/rbi/openai/models/responses/response.rbi +6 -6
  39. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +3 -3
  40. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +3 -3
  41. data/rbi/openai/models/responses/response_create_params.rbi +10 -6
  42. data/rbi/openai/models/responses/response_includable.rbi +7 -0
  43. data/rbi/openai/models/responses/response_output_text.rbi +72 -0
  44. data/rbi/openai/resources/chat/completions.rbi +4 -4
  45. data/rbi/openai/resources/fine_tuning/alpha/graders.rbi +10 -5
  46. data/rbi/openai/resources/images.rbi +1 -1
  47. data/rbi/openai/resources/responses.rbi +9 -5
  48. data/sig/openai/models/audio/transcription_text_delta_event.rbs +10 -5
  49. data/sig/openai/models/audio/transcription_text_done_event.rbs +10 -5
  50. data/sig/openai/models/chat_model.rbs +2 -0
  51. data/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs +6 -14
  52. data/sig/openai/models/fine_tuning/fine_tuning_job.rbs +1 -1
  53. data/sig/openai/models/graders/multi_grader.rbs +7 -7
  54. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +4 -4
  55. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +4 -4
  56. data/sig/openai/models/responses/response_includable.rbs +2 -0
  57. data/sig/openai/models/responses/response_output_text.rbs +38 -0
  58. data/sig/openai/resources/fine_tuning/alpha/graders.rbs +1 -1
  59. data/sig/openai/resources/responses.rbs +1 -1
  60. metadata +2 -2
@@ -19,6 +19,7 @@ module OpenAI
19
19
  T.any(
20
20
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
21
21
  OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
22
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
22
23
  OpenAI::Responses::ResponseOutputText::Annotation::FilePath
23
24
  )
24
25
  ]
@@ -61,6 +62,7 @@ module OpenAI
61
62
  T.any(
62
63
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation::OrHash,
63
64
  OpenAI::Responses::ResponseOutputText::Annotation::URLCitation::OrHash,
65
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation::OrHash,
64
66
  OpenAI::Responses::ResponseOutputText::Annotation::FilePath::OrHash
65
67
  )
66
68
  ],
@@ -89,6 +91,7 @@ module OpenAI
89
91
  T.any(
90
92
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
91
93
  OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
94
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
92
95
  OpenAI::Responses::ResponseOutputText::Annotation::FilePath
93
96
  )
94
97
  ],
@@ -110,6 +113,7 @@ module OpenAI
110
113
  T.any(
111
114
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
112
115
  OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
116
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
113
117
  OpenAI::Responses::ResponseOutputText::Annotation::FilePath
114
118
  )
115
119
  end
@@ -228,6 +232,74 @@ module OpenAI
228
232
  end
229
233
  end
230
234
 
235
+ class ContainerFileCitation < OpenAI::Internal::Type::BaseModel
236
+ OrHash =
237
+ T.type_alias do
238
+ T.any(
239
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
240
+ OpenAI::Internal::AnyHash
241
+ )
242
+ end
243
+
244
+ # The ID of the container file.
245
+ sig { returns(String) }
246
+ attr_accessor :container_id
247
+
248
+ # The index of the last character of the container file citation in the message.
249
+ sig { returns(Integer) }
250
+ attr_accessor :end_index
251
+
252
+ # The ID of the file.
253
+ sig { returns(String) }
254
+ attr_accessor :file_id
255
+
256
+ # The index of the first character of the container file citation in the message.
257
+ sig { returns(Integer) }
258
+ attr_accessor :start_index
259
+
260
+ # The type of the container file citation. Always `container_file_citation`.
261
+ sig { returns(Symbol) }
262
+ attr_accessor :type
263
+
264
+ # A citation for a container file used to generate a model response.
265
+ sig do
266
+ params(
267
+ container_id: String,
268
+ end_index: Integer,
269
+ file_id: String,
270
+ start_index: Integer,
271
+ type: Symbol
272
+ ).returns(T.attached_class)
273
+ end
274
+ def self.new(
275
+ # The ID of the container file.
276
+ container_id:,
277
+ # The index of the last character of the container file citation in the message.
278
+ end_index:,
279
+ # The ID of the file.
280
+ file_id:,
281
+ # The index of the first character of the container file citation in the message.
282
+ start_index:,
283
+ # The type of the container file citation. Always `container_file_citation`.
284
+ type: :container_file_citation
285
+ )
286
+ end
287
+
288
+ sig do
289
+ override.returns(
290
+ {
291
+ container_id: String,
292
+ end_index: Integer,
293
+ file_id: String,
294
+ start_index: Integer,
295
+ type: Symbol
296
+ }
297
+ )
298
+ end
299
+ def to_hash
300
+ end
301
+ end
302
+
231
303
  class FilePath < OpenAI::Internal::Type::BaseModel
232
304
  OrHash =
233
305
  T.type_alias do
@@ -235,9 +235,9 @@ module OpenAI
235
235
  # utilize scale tier credits until they are exhausted.
236
236
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
237
237
  # be processed using the default service tier with a lower uptime SLA and no
238
- # latency guarentee.
238
+ # latency guarantee.
239
239
  # - If set to 'default', the request will be processed using the default service
240
- # tier with a lower uptime SLA and no latency guarentee.
240
+ # tier with a lower uptime SLA and no latency guarantee.
241
241
  # - If set to 'flex', the request will be processed with the Flex Processing
242
242
  # service tier.
243
243
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -522,9 +522,9 @@ module OpenAI
522
522
  # utilize scale tier credits until they are exhausted.
523
523
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
524
524
  # be processed using the default service tier with a lower uptime SLA and no
525
- # latency guarentee.
525
+ # latency guarantee.
526
526
  # - If set to 'default', the request will be processed using the default service
527
- # tier with a lower uptime SLA and no latency guarentee.
527
+ # tier with a lower uptime SLA and no latency guarantee.
528
528
  # - If set to 'flex', the request will be processed with the Flex Processing
529
529
  # service tier.
530
530
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -17,18 +17,23 @@ module OpenAI
17
17
  OpenAI::Graders::MultiGrader::OrHash
18
18
  ),
19
19
  model_sample: String,
20
- reference_answer:
21
- OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants,
20
+ item: T.anything,
22
21
  request_options: OpenAI::RequestOptions::OrHash
23
22
  ).returns(OpenAI::Models::FineTuning::Alpha::GraderRunResponse)
24
23
  end
25
24
  def run(
26
25
  # The grader used for the fine-tuning job.
27
26
  grader:,
28
- # The model sample to be evaluated.
27
+ # The model sample to be evaluated. This value will be used to populate the
28
+ # `sample` namespace. See
29
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
30
+ # The `output_json` variable will be populated if the model sample is a valid JSON
31
+ # string.
29
32
  model_sample:,
30
- # The reference answer for the evaluation.
31
- reference_answer:,
33
+ # The dataset item provided to the grader. This will be used to populate the
34
+ # `item` namespace. See
35
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
36
+ item: nil,
32
37
  request_options: {}
33
38
  )
34
39
  end
@@ -64,7 +64,7 @@ module OpenAI
64
64
  # The image(s) to edit. Must be a supported image file or an array of images.
65
65
  #
66
66
  # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
67
- # 25MB. You can provide up to 16 images.
67
+ # 50MB. You can provide up to 16 images.
68
68
  #
69
69
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
70
70
  # file less than 4MB.
@@ -112,6 +112,8 @@ module OpenAI
112
112
  # multi-turn conversations when using the Responses API statelessly (like when
113
113
  # the `store` parameter is set to `false`, or when an organization is enrolled
114
114
  # in the zero data retention program).
115
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
116
+ # in code interpreter tool call items.
115
117
  include: nil,
116
118
  # Inserts a system (or developer) message as the first item in the model's
117
119
  # context.
@@ -149,9 +151,9 @@ module OpenAI
149
151
  # utilize scale tier credits until they are exhausted.
150
152
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
151
153
  # be processed using the default service tier with a lower uptime SLA and no
152
- # latency guarentee.
154
+ # latency guarantee.
153
155
  # - If set to 'default', the request will be processed using the default service
154
- # tier with a lower uptime SLA and no latency guarentee.
156
+ # tier with a lower uptime SLA and no latency guarantee.
155
157
  # - If set to 'flex', the request will be processed with the Flex Processing
156
158
  # service tier.
157
159
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -323,6 +325,8 @@ module OpenAI
323
325
  # multi-turn conversations when using the Responses API statelessly (like when
324
326
  # the `store` parameter is set to `false`, or when an organization is enrolled
325
327
  # in the zero data retention program).
328
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
329
+ # in code interpreter tool call items.
326
330
  include: nil,
327
331
  # Inserts a system (or developer) message as the first item in the model's
328
332
  # context.
@@ -360,9 +364,9 @@ module OpenAI
360
364
  # utilize scale tier credits until they are exhausted.
361
365
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
362
366
  # be processed using the default service tier with a lower uptime SLA and no
363
- # latency guarentee.
367
+ # latency guarantee.
364
368
  # - If set to 'default', the request will be processed using the default service
365
- # tier with a lower uptime SLA and no latency guarentee.
369
+ # tier with a lower uptime SLA and no latency guarantee.
366
370
  # - If set to 'flex', the request will be processed with the Flex Processing
367
371
  # service tier.
368
372
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -507,7 +511,7 @@ module OpenAI
507
511
  params(
508
512
  response_id: String,
509
513
  request_options: OpenAI::RequestOptions::OrHash
510
- ).void
514
+ ).returns(OpenAI::Responses::Response)
511
515
  end
512
516
  def cancel(
513
517
  # The ID of the response to cancel.
@@ -31,16 +31,17 @@ module OpenAI
31
31
  logprobs: ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
32
32
  }
33
33
 
34
- type logprob = { token: String, bytes: ::Array[top], logprob: Float }
34
+ type logprob =
35
+ { token: String, bytes: ::Array[Integer], logprob: Float }
35
36
 
36
37
  class Logprob < OpenAI::Internal::Type::BaseModel
37
38
  attr_reader token: String?
38
39
 
39
40
  def token=: (String) -> String
40
41
 
41
- attr_reader bytes: ::Array[top]?
42
+ attr_reader bytes: ::Array[Integer]?
42
43
 
43
- def bytes=: (::Array[top]) -> ::Array[top]
44
+ def bytes=: (::Array[Integer]) -> ::Array[Integer]
44
45
 
45
46
  attr_reader logprob: Float?
46
47
 
@@ -48,11 +49,15 @@ module OpenAI
48
49
 
49
50
  def initialize: (
50
51
  ?token: String,
51
- ?bytes: ::Array[top],
52
+ ?bytes: ::Array[Integer],
52
53
  ?logprob: Float
53
54
  ) -> void
54
55
 
55
- def to_hash: -> { token: String, bytes: ::Array[top], logprob: Float }
56
+ def to_hash: -> {
57
+ token: String,
58
+ bytes: ::Array[Integer],
59
+ logprob: Float
60
+ }
56
61
  end
57
62
  end
58
63
  end
@@ -31,16 +31,17 @@ module OpenAI
31
31
  logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
32
32
  }
33
33
 
34
- type logprob = { token: String, bytes: ::Array[top], logprob: Float }
34
+ type logprob =
35
+ { token: String, bytes: ::Array[Integer], logprob: Float }
35
36
 
36
37
  class Logprob < OpenAI::Internal::Type::BaseModel
37
38
  attr_reader token: String?
38
39
 
39
40
  def token=: (String) -> String
40
41
 
41
- attr_reader bytes: ::Array[top]?
42
+ attr_reader bytes: ::Array[Integer]?
42
43
 
43
- def bytes=: (::Array[top]) -> ::Array[top]
44
+ def bytes=: (::Array[Integer]) -> ::Array[Integer]
44
45
 
45
46
  attr_reader logprob: Float?
46
47
 
@@ -48,11 +49,15 @@ module OpenAI
48
49
 
49
50
  def initialize: (
50
51
  ?token: String,
51
- ?bytes: ::Array[top],
52
+ ?bytes: ::Array[Integer],
52
53
  ?logprob: Float
53
54
  ) -> void
54
55
 
55
- def to_hash: -> { token: String, bytes: ::Array[top], logprob: Float }
56
+ def to_hash: -> {
57
+ token: String,
58
+ bytes: ::Array[Integer],
59
+ logprob: Float
60
+ }
56
61
  end
57
62
  end
58
63
  end
@@ -26,6 +26,7 @@ module OpenAI
26
26
  | :"gpt-4o-audio-preview"
27
27
  | :"gpt-4o-audio-preview-2024-10-01"
28
28
  | :"gpt-4o-audio-preview-2024-12-17"
29
+ | :"gpt-4o-audio-preview-2025-06-03"
29
30
  | :"gpt-4o-mini-audio-preview"
30
31
  | :"gpt-4o-mini-audio-preview-2024-12-17"
31
32
  | :"gpt-4o-search-preview"
@@ -84,6 +85,7 @@ module OpenAI
84
85
  GPT_4O_AUDIO_PREVIEW: :"gpt-4o-audio-preview"
85
86
  GPT_4O_AUDIO_PREVIEW_2024_10_01: :"gpt-4o-audio-preview-2024-10-01"
86
87
  GPT_4O_AUDIO_PREVIEW_2024_12_17: :"gpt-4o-audio-preview-2024-12-17"
88
+ GPT_4O_AUDIO_PREVIEW_2025_06_03: :"gpt-4o-audio-preview-2025-06-03"
87
89
  GPT_4O_MINI_AUDIO_PREVIEW: :"gpt-4o-mini-audio-preview"
88
90
  GPT_4O_MINI_AUDIO_PREVIEW_2024_12_17: :"gpt-4o-mini-audio-preview-2024-12-17"
89
91
  GPT_4O_SEARCH_PREVIEW: :"gpt-4o-search-preview"
@@ -6,7 +6,7 @@ module OpenAI
6
6
  {
7
7
  grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
8
8
  model_sample: String,
9
- reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer
9
+ item: top
10
10
  }
11
11
  & OpenAI::Internal::Type::request_parameters
12
12
 
@@ -18,19 +18,21 @@ module OpenAI
18
18
 
19
19
  attr_accessor model_sample: String
20
20
 
21
- attr_accessor reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer
21
+ attr_reader item: top?
22
+
23
+ def item=: (top) -> top
22
24
 
23
25
  def initialize: (
24
26
  grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
25
27
  model_sample: String,
26
- reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer,
28
+ ?item: top,
27
29
  ?request_options: OpenAI::request_opts
28
30
  ) -> void
29
31
 
30
32
  def to_hash: -> {
31
33
  grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
32
34
  model_sample: String,
33
- reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer,
35
+ item: top,
34
36
  request_options: OpenAI::RequestOptions
35
37
  }
36
38
 
@@ -46,16 +48,6 @@ module OpenAI
46
48
 
47
49
  def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader]
48
50
  end
49
-
50
- type reference_answer = String | top | ::Array[top] | Float
51
-
52
- module ReferenceAnswer
53
- extend OpenAI::Internal::Type::Union
54
-
55
- def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer]
56
-
57
- UnionMember2Array: OpenAI::Internal::Type::Converter
58
- end
59
51
  end
60
52
  end
61
53
  end
@@ -165,7 +165,7 @@ module OpenAI
165
165
  n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs
166
166
  }
167
167
 
168
- type batch_size = (top | :auto | Integer)?
168
+ type batch_size = :auto | Integer
169
169
 
170
170
  module BatchSize
171
171
  extend OpenAI::Internal::Type::Union
@@ -6,7 +6,7 @@ module OpenAI
6
6
  type multi_grader =
7
7
  {
8
8
  calculate_output: String,
9
- graders: ::Hash[Symbol, OpenAI::Models::Graders::MultiGrader::grader],
9
+ graders: OpenAI::Models::Graders::MultiGrader::graders,
10
10
  name: String,
11
11
  type: :multi
12
12
  }
@@ -14,7 +14,7 @@ module OpenAI
14
14
  class MultiGrader < OpenAI::Internal::Type::BaseModel
15
15
  attr_accessor calculate_output: String
16
16
 
17
- attr_accessor graders: ::Hash[Symbol, OpenAI::Models::Graders::MultiGrader::grader]
17
+ attr_accessor graders: OpenAI::Models::Graders::MultiGrader::graders
18
18
 
19
19
  attr_accessor name: String
20
20
 
@@ -22,29 +22,29 @@ module OpenAI
22
22
 
23
23
  def initialize: (
24
24
  calculate_output: String,
25
- graders: ::Hash[Symbol, OpenAI::Models::Graders::MultiGrader::grader],
25
+ graders: OpenAI::Models::Graders::MultiGrader::graders,
26
26
  name: String,
27
27
  ?type: :multi
28
28
  ) -> void
29
29
 
30
30
  def to_hash: -> {
31
31
  calculate_output: String,
32
- graders: ::Hash[Symbol, OpenAI::Models::Graders::MultiGrader::grader],
32
+ graders: OpenAI::Models::Graders::MultiGrader::graders,
33
33
  name: String,
34
34
  type: :multi
35
35
  }
36
36
 
37
- type grader =
37
+ type graders =
38
38
  OpenAI::Graders::StringCheckGrader
39
39
  | OpenAI::Graders::TextSimilarityGrader
40
40
  | OpenAI::Graders::PythonGrader
41
41
  | OpenAI::Graders::ScoreModelGrader
42
42
  | OpenAI::Graders::LabelModelGrader
43
43
 
44
- module Grader
44
+ module Graders
45
45
  extend OpenAI::Internal::Type::Union
46
46
 
47
- def self?.variants: -> ::Array[OpenAI::Models::Graders::MultiGrader::grader]
47
+ def self?.variants: -> ::Array[OpenAI::Models::Graders::MultiGrader::graders]
48
48
  end
49
49
  end
50
50
  end
@@ -6,7 +6,7 @@ module OpenAI
6
6
  delta: String,
7
7
  output_index: Integer,
8
8
  sequence_number: Integer,
9
- type: :"response.code_interpreter_call.code.delta"
9
+ type: :"response.code_interpreter_call_code.delta"
10
10
  }
11
11
 
12
12
  class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
@@ -16,20 +16,20 @@ module OpenAI
16
16
 
17
17
  attr_accessor sequence_number: Integer
18
18
 
19
- attr_accessor type: :"response.code_interpreter_call.code.delta"
19
+ attr_accessor type: :"response.code_interpreter_call_code.delta"
20
20
 
21
21
  def initialize: (
22
22
  delta: String,
23
23
  output_index: Integer,
24
24
  sequence_number: Integer,
25
- ?type: :"response.code_interpreter_call.code.delta"
25
+ ?type: :"response.code_interpreter_call_code.delta"
26
26
  ) -> void
27
27
 
28
28
  def to_hash: -> {
29
29
  delta: String,
30
30
  output_index: Integer,
31
31
  sequence_number: Integer,
32
- type: :"response.code_interpreter_call.code.delta"
32
+ type: :"response.code_interpreter_call_code.delta"
33
33
  }
34
34
  end
35
35
  end
@@ -6,7 +6,7 @@ module OpenAI
6
6
  code: String,
7
7
  output_index: Integer,
8
8
  sequence_number: Integer,
9
- type: :"response.code_interpreter_call.code.done"
9
+ type: :"response.code_interpreter_call_code.done"
10
10
  }
11
11
 
12
12
  class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseModel
@@ -16,20 +16,20 @@ module OpenAI
16
16
 
17
17
  attr_accessor sequence_number: Integer
18
18
 
19
- attr_accessor type: :"response.code_interpreter_call.code.done"
19
+ attr_accessor type: :"response.code_interpreter_call_code.done"
20
20
 
21
21
  def initialize: (
22
22
  code: String,
23
23
  output_index: Integer,
24
24
  sequence_number: Integer,
25
- ?type: :"response.code_interpreter_call.code.done"
25
+ ?type: :"response.code_interpreter_call_code.done"
26
26
  ) -> void
27
27
 
28
28
  def to_hash: -> {
29
29
  code: String,
30
30
  output_index: Integer,
31
31
  sequence_number: Integer,
32
- type: :"response.code_interpreter_call.code.done"
32
+ type: :"response.code_interpreter_call_code.done"
33
33
  }
34
34
  end
35
35
  end
@@ -6,6 +6,7 @@ module OpenAI
6
6
  | :"message.input_image.image_url"
7
7
  | :"computer_call_output.output.image_url"
8
8
  | :"reasoning.encrypted_content"
9
+ | :"code_interpreter_call.outputs"
9
10
 
10
11
  module ResponseIncludable
11
12
  extend OpenAI::Internal::Type::Enum
@@ -14,6 +15,7 @@ module OpenAI
14
15
  MESSAGE_INPUT_IMAGE_IMAGE_URL: :"message.input_image.image_url"
15
16
  COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL: :"computer_call_output.output.image_url"
16
17
  REASONING_ENCRYPTED_CONTENT: :"reasoning.encrypted_content"
18
+ CODE_INTERPRETER_CALL_OUTPUTS: :"code_interpreter_call.outputs"
17
19
 
18
20
  def self?.values: -> ::Array[OpenAI::Models::Responses::response_includable]
19
21
  end
@@ -39,6 +39,7 @@ module OpenAI
39
39
  type annotation =
40
40
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation
41
41
  | OpenAI::Responses::ResponseOutputText::Annotation::URLCitation
42
+ | OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation
42
43
  | OpenAI::Responses::ResponseOutputText::Annotation::FilePath
43
44
 
44
45
  module Annotation
@@ -104,6 +105,43 @@ module OpenAI
104
105
  }
105
106
  end
106
107
 
108
+ type container_file_citation =
109
+ {
110
+ container_id: String,
111
+ end_index: Integer,
112
+ file_id: String,
113
+ start_index: Integer,
114
+ type: :container_file_citation
115
+ }
116
+
117
+ class ContainerFileCitation < OpenAI::Internal::Type::BaseModel
118
+ attr_accessor container_id: String
119
+
120
+ attr_accessor end_index: Integer
121
+
122
+ attr_accessor file_id: String
123
+
124
+ attr_accessor start_index: Integer
125
+
126
+ attr_accessor type: :container_file_citation
127
+
128
+ def initialize: (
129
+ container_id: String,
130
+ end_index: Integer,
131
+ file_id: String,
132
+ start_index: Integer,
133
+ ?type: :container_file_citation
134
+ ) -> void
135
+
136
+ def to_hash: -> {
137
+ container_id: String,
138
+ end_index: Integer,
139
+ file_id: String,
140
+ start_index: Integer,
141
+ type: :container_file_citation
142
+ }
143
+ end
144
+
107
145
  type file_path = { file_id: String, index: Integer, type: :file_path }
108
146
 
109
147
  class FilePath < OpenAI::Internal::Type::BaseModel
@@ -6,7 +6,7 @@ module OpenAI
6
6
  def run: (
7
7
  grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
8
8
  model_sample: String,
9
- reference_answer: OpenAI::Models::FineTuning::Alpha::GraderRunParams::reference_answer,
9
+ ?item: top,
10
10
  ?request_options: OpenAI::request_opts
11
11
  ) -> OpenAI::Models::FineTuning::Alpha::GraderRunResponse
12
12
 
@@ -71,7 +71,7 @@ module OpenAI
71
71
  def cancel: (
72
72
  String response_id,
73
73
  ?request_options: OpenAI::request_opts
74
- ) -> nil
74
+ ) -> OpenAI::Responses::Response
75
75
 
76
76
  def initialize: (client: OpenAI::Client) -> void
77
77
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: openai
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.5.0
4
+ version: 0.6.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - OpenAI
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2025-05-29 00:00:00.000000000 Z
11
+ date: 2025-06-03 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: connection_pool