openai 0.5.0 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/audio/transcription_text_delta_event.rb +3 -3
  5. data/lib/openai/models/audio/transcription_text_done_event.rb +3 -3
  6. data/lib/openai/models/chat/chat_completion.rb +4 -4
  7. data/lib/openai/models/chat/chat_completion_chunk.rb +4 -4
  8. data/lib/openai/models/chat/completion_create_params.rb +4 -4
  9. data/lib/openai/models/fine_tuning/alpha/grader_run_params.rb +17 -30
  10. data/lib/openai/models/fine_tuning/fine_tuning_job.rb +3 -5
  11. data/lib/openai/models/graders/multi_grader.rb +11 -4
  12. data/lib/openai/models/image_edit_params.rb +2 -2
  13. data/lib/openai/models/responses/response.rb +4 -4
  14. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +5 -5
  15. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +5 -5
  16. data/lib/openai/models/responses/response_create_params.rb +6 -4
  17. data/lib/openai/models/responses/response_includable.rb +3 -0
  18. data/lib/openai/models/responses/response_output_text.rb +52 -3
  19. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  20. data/lib/openai/resources/fine_tuning/alpha/graders.rb +6 -3
  21. data/lib/openai/resources/responses.rb +2 -2
  22. data/lib/openai/version.rb +1 -1
  23. data/rbi/openai/models/audio/transcription_text_delta_event.rbi +4 -4
  24. data/rbi/openai/models/audio/transcription_text_done_event.rbi +4 -4
  25. data/rbi/openai/models/chat/chat_completion.rbi +6 -6
  26. data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -6
  27. data/rbi/openai/models/chat/completion_create_params.rbi +6 -6
  28. data/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi +24 -43
  29. data/rbi/openai/models/fine_tuning/fine_tuning_job.rbi +2 -3
  30. data/rbi/openai/models/graders/multi_grader.rbi +27 -32
  31. data/rbi/openai/models/image_edit_params.rbi +3 -3
  32. data/rbi/openai/models/responses/response.rbi +6 -6
  33. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +3 -3
  34. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +3 -3
  35. data/rbi/openai/models/responses/response_create_params.rbi +10 -6
  36. data/rbi/openai/models/responses/response_includable.rbi +7 -0
  37. data/rbi/openai/models/responses/response_output_text.rbi +72 -0
  38. data/rbi/openai/resources/chat/completions.rbi +4 -4
  39. data/rbi/openai/resources/fine_tuning/alpha/graders.rbi +10 -5
  40. data/rbi/openai/resources/images.rbi +1 -1
  41. data/rbi/openai/resources/responses.rbi +9 -5
  42. data/sig/openai/models/audio/transcription_text_delta_event.rbs +10 -5
  43. data/sig/openai/models/audio/transcription_text_done_event.rbs +10 -5
  44. data/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs +6 -14
  45. data/sig/openai/models/fine_tuning/fine_tuning_job.rbs +1 -1
  46. data/sig/openai/models/graders/multi_grader.rbs +7 -7
  47. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +4 -4
  48. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +4 -4
  49. data/sig/openai/models/responses/response_includable.rbs +2 -0
  50. data/sig/openai/models/responses/response_output_text.rbs +38 -0
  51. data/sig/openai/resources/fine_tuning/alpha/graders.rbs +1 -1
  52. data/sig/openai/resources/responses.rbs +1 -1
  53. metadata +2 -2
@@ -100,10 +100,10 @@ module OpenAI
100
100
  attr_writer :token
101
101
 
102
102
  # The bytes that were used to generate the log probability.
103
- sig { returns(T.nilable(T::Array[T.anything])) }
103
+ sig { returns(T.nilable(T::Array[Integer])) }
104
104
  attr_reader :bytes
105
105
 
106
- sig { params(bytes: T::Array[T.anything]).void }
106
+ sig { params(bytes: T::Array[Integer]).void }
107
107
  attr_writer :bytes
108
108
 
109
109
  # The log probability of the token.
@@ -116,7 +116,7 @@ module OpenAI
116
116
  sig do
117
117
  params(
118
118
  token: String,
119
- bytes: T::Array[T.anything],
119
+ bytes: T::Array[Integer],
120
120
  logprob: Float
121
121
  ).returns(T.attached_class)
122
122
  end
@@ -132,7 +132,7 @@ module OpenAI
132
132
 
133
133
  sig do
134
134
  override.returns(
135
- { token: String, bytes: T::Array[T.anything], logprob: Float }
135
+ { token: String, bytes: T::Array[Integer], logprob: Float }
136
136
  )
137
137
  end
138
138
  def to_hash
@@ -39,9 +39,9 @@ module OpenAI
39
39
  # utilize scale tier credits until they are exhausted.
40
40
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
41
41
  # be processed using the default service tier with a lower uptime SLA and no
42
- # latency guarentee.
42
+ # latency guarantee.
43
43
  # - If set to 'default', the request will be processed using the default service
44
- # tier with a lower uptime SLA and no latency guarentee.
44
+ # tier with a lower uptime SLA and no latency guarantee.
45
45
  # - If set to 'flex', the request will be processed with the Flex Processing
46
46
  # service tier.
47
47
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -105,9 +105,9 @@ module OpenAI
105
105
  # utilize scale tier credits until they are exhausted.
106
106
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
107
107
  # be processed using the default service tier with a lower uptime SLA and no
108
- # latency guarentee.
108
+ # latency guarantee.
109
109
  # - If set to 'default', the request will be processed using the default service
110
- # tier with a lower uptime SLA and no latency guarentee.
110
+ # tier with a lower uptime SLA and no latency guarantee.
111
111
  # - If set to 'flex', the request will be processed with the Flex Processing
112
112
  # service tier.
113
113
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -370,9 +370,9 @@ module OpenAI
370
370
  # utilize scale tier credits until they are exhausted.
371
371
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
372
372
  # be processed using the default service tier with a lower uptime SLA and no
373
- # latency guarentee.
373
+ # latency guarantee.
374
374
  # - If set to 'default', the request will be processed using the default service
375
- # tier with a lower uptime SLA and no latency guarentee.
375
+ # tier with a lower uptime SLA and no latency guarantee.
376
376
  # - If set to 'flex', the request will be processed with the Flex Processing
377
377
  # service tier.
378
378
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -41,9 +41,9 @@ module OpenAI
41
41
  # utilize scale tier credits until they are exhausted.
42
42
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
43
43
  # be processed using the default service tier with a lower uptime SLA and no
44
- # latency guarentee.
44
+ # latency guarantee.
45
45
  # - If set to 'default', the request will be processed using the default service
46
- # tier with a lower uptime SLA and no latency guarentee.
46
+ # tier with a lower uptime SLA and no latency guarantee.
47
47
  # - If set to 'flex', the request will be processed with the Flex Processing
48
48
  # service tier.
49
49
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -120,9 +120,9 @@ module OpenAI
120
120
  # utilize scale tier credits until they are exhausted.
121
121
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
122
122
  # be processed using the default service tier with a lower uptime SLA and no
123
- # latency guarentee.
123
+ # latency guarantee.
124
124
  # - If set to 'default', the request will be processed using the default service
125
- # tier with a lower uptime SLA and no latency guarentee.
125
+ # tier with a lower uptime SLA and no latency guarantee.
126
126
  # - If set to 'flex', the request will be processed with the Flex Processing
127
127
  # service tier.
128
128
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -790,9 +790,9 @@ module OpenAI
790
790
  # utilize scale tier credits until they are exhausted.
791
791
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
792
792
  # be processed using the default service tier with a lower uptime SLA and no
793
- # latency guarentee.
793
+ # latency guarantee.
794
794
  # - If set to 'default', the request will be processed using the default service
795
- # tier with a lower uptime SLA and no latency guarentee.
795
+ # tier with a lower uptime SLA and no latency guarantee.
796
796
  # - If set to 'flex', the request will be processed with the Flex Processing
797
797
  # service tier.
798
798
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -277,9 +277,9 @@ module OpenAI
277
277
  # utilize scale tier credits until they are exhausted.
278
278
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
279
279
  # be processed using the default service tier with a lower uptime SLA and no
280
- # latency guarentee.
280
+ # latency guarantee.
281
281
  # - If set to 'default', the request will be processed using the default service
282
- # tier with a lower uptime SLA and no latency guarentee.
282
+ # tier with a lower uptime SLA and no latency guarantee.
283
283
  # - If set to 'flex', the request will be processed with the Flex Processing
284
284
  # service tier.
285
285
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -632,9 +632,9 @@ module OpenAI
632
632
  # utilize scale tier credits until they are exhausted.
633
633
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
634
634
  # be processed using the default service tier with a lower uptime SLA and no
635
- # latency guarentee.
635
+ # latency guarantee.
636
636
  # - If set to 'default', the request will be processed using the default service
637
- # tier with a lower uptime SLA and no latency guarentee.
637
+ # tier with a lower uptime SLA and no latency guarantee.
638
638
  # - If set to 'flex', the request will be processed with the Flex Processing
639
639
  # service tier.
640
640
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1015,9 +1015,9 @@ module OpenAI
1015
1015
  # utilize scale tier credits until they are exhausted.
1016
1016
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
1017
1017
  # be processed using the default service tier with a lower uptime SLA and no
1018
- # latency guarentee.
1018
+ # latency guarantee.
1019
1019
  # - If set to 'default', the request will be processed using the default service
1020
- # tier with a lower uptime SLA and no latency guarentee.
1020
+ # tier with a lower uptime SLA and no latency guarantee.
1021
1021
  # - If set to 'flex', the request will be processed with the Flex Processing
1022
1022
  # service tier.
1023
1023
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -30,17 +30,22 @@ module OpenAI
30
30
  end
31
31
  attr_accessor :grader
32
32
 
33
- # The model sample to be evaluated.
33
+ # The model sample to be evaluated. This value will be used to populate the
34
+ # `sample` namespace. See
35
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
36
+ # The `output_json` variable will be populated if the model sample is a valid JSON
37
+ # string.
34
38
  sig { returns(String) }
35
39
  attr_accessor :model_sample
36
40
 
37
- # The reference answer for the evaluation.
38
- sig do
39
- returns(
40
- OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants
41
- )
42
- end
43
- attr_accessor :reference_answer
41
+ # The dataset item provided to the grader. This will be used to populate the
42
+ # `item` namespace. See
43
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
44
+ sig { returns(T.nilable(T.anything)) }
45
+ attr_reader :item
46
+
47
+ sig { params(item: T.anything).void }
48
+ attr_writer :item
44
49
 
45
50
  sig do
46
51
  params(
@@ -53,18 +58,23 @@ module OpenAI
53
58
  OpenAI::Graders::MultiGrader::OrHash
54
59
  ),
55
60
  model_sample: String,
56
- reference_answer:
57
- OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants,
61
+ item: T.anything,
58
62
  request_options: OpenAI::RequestOptions::OrHash
59
63
  ).returns(T.attached_class)
60
64
  end
61
65
  def self.new(
62
66
  # The grader used for the fine-tuning job.
63
67
  grader:,
64
- # The model sample to be evaluated.
68
+ # The model sample to be evaluated. This value will be used to populate the
69
+ # `sample` namespace. See
70
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
71
+ # The `output_json` variable will be populated if the model sample is a valid JSON
72
+ # string.
65
73
  model_sample:,
66
- # The reference answer for the evaluation.
67
- reference_answer:,
74
+ # The dataset item provided to the grader. This will be used to populate the
75
+ # `item` namespace. See
76
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
77
+ item: nil,
68
78
  request_options: {}
69
79
  )
70
80
  end
@@ -81,8 +91,7 @@ module OpenAI
81
91
  OpenAI::Graders::MultiGrader
82
92
  ),
83
93
  model_sample: String,
84
- reference_answer:
85
- OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants,
94
+ item: T.anything,
86
95
  request_options: OpenAI::RequestOptions
87
96
  }
88
97
  )
@@ -115,34 +124,6 @@ module OpenAI
115
124
  def self.variants
116
125
  end
117
126
  end
118
-
119
- # The reference answer for the evaluation.
120
- module ReferenceAnswer
121
- extend OpenAI::Internal::Type::Union
122
-
123
- Variants =
124
- T.type_alias do
125
- T.any(String, T.anything, T::Array[T.anything], Float)
126
- end
127
-
128
- sig do
129
- override.returns(
130
- T::Array[
131
- OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::Variants
132
- ]
133
- )
134
- end
135
- def self.variants
136
- end
137
-
138
- UnionMember2Array =
139
- T.let(
140
- OpenAI::Internal::Type::ArrayOf[
141
- OpenAI::Internal::Type::Unknown
142
- ],
143
- OpenAI::Internal::Type::Converter
144
- )
145
- end
146
127
  end
147
128
  end
148
129
  end
@@ -359,7 +359,7 @@ module OpenAI
359
359
  # returned when running `supervised` jobs.
360
360
  sig do
361
361
  params(
362
- batch_size: T.nilable(T.any(T.anything, Symbol, Integer)),
362
+ batch_size: T.nilable(T.any(Symbol, Integer)),
363
363
  learning_rate_multiplier: T.any(Symbol, Float),
364
364
  n_epochs: T.any(Symbol, Integer)
365
365
  ).returns(T.attached_class)
@@ -399,8 +399,7 @@ module OpenAI
399
399
  module BatchSize
400
400
  extend OpenAI::Internal::Type::Union
401
401
 
402
- Variants =
403
- T.type_alias { T.nilable(T.any(T.anything, Symbol, Integer)) }
402
+ Variants = T.type_alias { T.any(Symbol, Integer) }
404
403
 
405
404
  sig do
406
405
  override.returns(
@@ -15,18 +15,17 @@ module OpenAI
15
15
  sig { returns(String) }
16
16
  attr_accessor :calculate_output
17
17
 
18
+ # A StringCheckGrader object that performs a string comparison between input and
19
+ # reference using a specified operation.
18
20
  sig do
19
21
  returns(
20
- T::Hash[
21
- Symbol,
22
- T.any(
23
- OpenAI::Graders::StringCheckGrader,
24
- OpenAI::Graders::TextSimilarityGrader,
25
- OpenAI::Graders::PythonGrader,
26
- OpenAI::Graders::ScoreModelGrader,
27
- OpenAI::Graders::LabelModelGrader
28
- )
29
- ]
22
+ T.any(
23
+ OpenAI::Graders::StringCheckGrader,
24
+ OpenAI::Graders::TextSimilarityGrader,
25
+ OpenAI::Graders::PythonGrader,
26
+ OpenAI::Graders::ScoreModelGrader,
27
+ OpenAI::Graders::LabelModelGrader
28
+ )
30
29
  )
31
30
  end
32
31
  attr_accessor :graders
@@ -45,16 +44,13 @@ module OpenAI
45
44
  params(
46
45
  calculate_output: String,
47
46
  graders:
48
- T::Hash[
49
- Symbol,
50
- T.any(
51
- OpenAI::Graders::StringCheckGrader::OrHash,
52
- OpenAI::Graders::TextSimilarityGrader::OrHash,
53
- OpenAI::Graders::PythonGrader::OrHash,
54
- OpenAI::Graders::ScoreModelGrader::OrHash,
55
- OpenAI::Graders::LabelModelGrader::OrHash
56
- )
57
- ],
47
+ T.any(
48
+ OpenAI::Graders::StringCheckGrader::OrHash,
49
+ OpenAI::Graders::TextSimilarityGrader::OrHash,
50
+ OpenAI::Graders::PythonGrader::OrHash,
51
+ OpenAI::Graders::ScoreModelGrader::OrHash,
52
+ OpenAI::Graders::LabelModelGrader::OrHash
53
+ ),
58
54
  name: String,
59
55
  type: Symbol
60
56
  ).returns(T.attached_class)
@@ -62,6 +58,8 @@ module OpenAI
62
58
  def self.new(
63
59
  # A formula to calculate the output based on grader results.
64
60
  calculate_output:,
61
+ # A StringCheckGrader object that performs a string comparison between input and
62
+ # reference using a specified operation.
65
63
  graders:,
66
64
  # The name of the grader.
67
65
  name:,
@@ -75,16 +73,13 @@ module OpenAI
75
73
  {
76
74
  calculate_output: String,
77
75
  graders:
78
- T::Hash[
79
- Symbol,
80
- T.any(
81
- OpenAI::Graders::StringCheckGrader,
82
- OpenAI::Graders::TextSimilarityGrader,
83
- OpenAI::Graders::PythonGrader,
84
- OpenAI::Graders::ScoreModelGrader,
85
- OpenAI::Graders::LabelModelGrader
86
- )
87
- ],
76
+ T.any(
77
+ OpenAI::Graders::StringCheckGrader,
78
+ OpenAI::Graders::TextSimilarityGrader,
79
+ OpenAI::Graders::PythonGrader,
80
+ OpenAI::Graders::ScoreModelGrader,
81
+ OpenAI::Graders::LabelModelGrader
82
+ ),
88
83
  name: String,
89
84
  type: Symbol
90
85
  }
@@ -95,7 +90,7 @@ module OpenAI
95
90
 
96
91
  # A StringCheckGrader object that performs a string comparison between input and
97
92
  # reference using a specified operation.
98
- module Grader
93
+ module Graders
99
94
  extend OpenAI::Internal::Type::Union
100
95
 
101
96
  Variants =
@@ -111,7 +106,7 @@ module OpenAI
111
106
 
112
107
  sig do
113
108
  override.returns(
114
- T::Array[OpenAI::Graders::MultiGrader::Grader::Variants]
109
+ T::Array[OpenAI::Graders::MultiGrader::Graders::Variants]
115
110
  )
116
111
  end
117
112
  def self.variants
@@ -14,7 +14,7 @@ module OpenAI
14
14
  # The image(s) to edit. Must be a supported image file or an array of images.
15
15
  #
16
16
  # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
17
- # 25MB. You can provide up to 16 images.
17
+ # 50MB. You can provide up to 16 images.
18
18
  #
19
19
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
20
20
  # file less than 4MB.
@@ -106,7 +106,7 @@ module OpenAI
106
106
  # The image(s) to edit. Must be a supported image file or an array of images.
107
107
  #
108
108
  # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
109
- # 25MB. You can provide up to 16 images.
109
+ # 50MB. You can provide up to 16 images.
110
110
  #
111
111
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
112
112
  # file less than 4MB.
@@ -179,7 +179,7 @@ module OpenAI
179
179
  # The image(s) to edit. Must be a supported image file or an array of images.
180
180
  #
181
181
  # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
182
- # 25MB. You can provide up to 16 images.
182
+ # 50MB. You can provide up to 16 images.
183
183
  #
184
184
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
185
185
  # file less than 4MB.
@@ -160,9 +160,9 @@ module OpenAI
160
160
  # utilize scale tier credits until they are exhausted.
161
161
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
162
162
  # be processed using the default service tier with a lower uptime SLA and no
163
- # latency guarentee.
163
+ # latency guarantee.
164
164
  # - If set to 'default', the request will be processed using the default service
165
- # tier with a lower uptime SLA and no latency guarentee.
165
+ # tier with a lower uptime SLA and no latency guarantee.
166
166
  # - If set to 'flex', the request will be processed with the Flex Processing
167
167
  # service tier.
168
168
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -390,9 +390,9 @@ module OpenAI
390
390
  # utilize scale tier credits until they are exhausted.
391
391
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
392
392
  # be processed using the default service tier with a lower uptime SLA and no
393
- # latency guarentee.
393
+ # latency guarantee.
394
394
  # - If set to 'default', the request will be processed using the default service
395
- # tier with a lower uptime SLA and no latency guarentee.
395
+ # tier with a lower uptime SLA and no latency guarantee.
396
396
  # - If set to 'flex', the request will be processed with the Flex Processing
397
397
  # service tier.
398
398
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -588,9 +588,9 @@ module OpenAI
588
588
  # utilize scale tier credits until they are exhausted.
589
589
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
590
590
  # be processed using the default service tier with a lower uptime SLA and no
591
- # latency guarentee.
591
+ # latency guarantee.
592
592
  # - If set to 'default', the request will be processed using the default service
593
- # tier with a lower uptime SLA and no latency guarentee.
593
+ # tier with a lower uptime SLA and no latency guarantee.
594
594
  # - If set to 'flex', the request will be processed with the Flex Processing
595
595
  # service tier.
596
596
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -24,7 +24,7 @@ module OpenAI
24
24
  sig { returns(Integer) }
25
25
  attr_accessor :sequence_number
26
26
 
27
- # The type of the event. Always `response.code_interpreter_call.code.delta`.
27
+ # The type of the event. Always `response.code_interpreter_call_code.delta`.
28
28
  sig { returns(Symbol) }
29
29
  attr_accessor :type
30
30
 
@@ -44,8 +44,8 @@ module OpenAI
44
44
  output_index:,
45
45
  # The sequence number of this event.
46
46
  sequence_number:,
47
- # The type of the event. Always `response.code_interpreter_call.code.delta`.
48
- type: :"response.code_interpreter_call.code.delta"
47
+ # The type of the event. Always `response.code_interpreter_call_code.delta`.
48
+ type: :"response.code_interpreter_call_code.delta"
49
49
  )
50
50
  end
51
51
 
@@ -24,7 +24,7 @@ module OpenAI
24
24
  sig { returns(Integer) }
25
25
  attr_accessor :sequence_number
26
26
 
27
- # The type of the event. Always `response.code_interpreter_call.code.done`.
27
+ # The type of the event. Always `response.code_interpreter_call_code.done`.
28
28
  sig { returns(Symbol) }
29
29
  attr_accessor :type
30
30
 
@@ -44,8 +44,8 @@ module OpenAI
44
44
  output_index:,
45
45
  # The sequence number of this event.
46
46
  sequence_number:,
47
- # The type of the event. Always `response.code_interpreter_call.code.done`.
48
- type: :"response.code_interpreter_call.code.done"
47
+ # The type of the event. Always `response.code_interpreter_call_code.done`.
48
+ type: :"response.code_interpreter_call_code.done"
49
49
  )
50
50
  end
51
51
 
@@ -63,6 +63,8 @@ module OpenAI
63
63
  # multi-turn conversations when using the Responses API statelessly (like when
64
64
  # the `store` parameter is set to `false`, or when an organization is enrolled
65
65
  # in the zero data retention program).
66
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
67
+ # in code interpreter tool call items.
66
68
  sig do
67
69
  returns(
68
70
  T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol])
@@ -121,9 +123,9 @@ module OpenAI
121
123
  # utilize scale tier credits until they are exhausted.
122
124
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
123
125
  # be processed using the default service tier with a lower uptime SLA and no
124
- # latency guarentee.
126
+ # latency guarantee.
125
127
  # - If set to 'default', the request will be processed using the default service
126
- # tier with a lower uptime SLA and no latency guarentee.
128
+ # tier with a lower uptime SLA and no latency guarantee.
127
129
  # - If set to 'flex', the request will be processed with the Flex Processing
128
130
  # service tier.
129
131
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -372,6 +374,8 @@ module OpenAI
372
374
  # multi-turn conversations when using the Responses API statelessly (like when
373
375
  # the `store` parameter is set to `false`, or when an organization is enrolled
374
376
  # in the zero data retention program).
377
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
378
+ # in code interpreter tool call items.
375
379
  include: nil,
376
380
  # Inserts a system (or developer) message as the first item in the model's
377
381
  # context.
@@ -409,9 +413,9 @@ module OpenAI
409
413
  # utilize scale tier credits until they are exhausted.
410
414
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
411
415
  # be processed using the default service tier with a lower uptime SLA and no
412
- # latency guarentee.
416
+ # latency guarantee.
413
417
  # - If set to 'default', the request will be processed using the default service
414
- # tier with a lower uptime SLA and no latency guarentee.
418
+ # tier with a lower uptime SLA and no latency guarantee.
415
419
  # - If set to 'flex', the request will be processed with the Flex Processing
416
420
  # service tier.
417
421
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -570,9 +574,9 @@ module OpenAI
570
574
  # utilize scale tier credits until they are exhausted.
571
575
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
572
576
  # be processed using the default service tier with a lower uptime SLA and no
573
- # latency guarentee.
577
+ # latency guarantee.
574
578
  # - If set to 'default', the request will be processed using the default service
575
- # tier with a lower uptime SLA and no latency guarentee.
579
+ # tier with a lower uptime SLA and no latency guarantee.
576
580
  # - If set to 'flex', the request will be processed with the Flex Processing
577
581
  # service tier.
578
582
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -16,6 +16,8 @@ module OpenAI
16
16
  # multi-turn conversations when using the Responses API statelessly (like when
17
17
  # the `store` parameter is set to `false`, or when an organization is enrolled
18
18
  # in the zero data retention program).
19
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
20
+ # in code interpreter tool call items.
19
21
  module ResponseIncludable
20
22
  extend OpenAI::Internal::Type::Enum
21
23
 
@@ -43,6 +45,11 @@ module OpenAI
43
45
  :"reasoning.encrypted_content",
44
46
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
45
47
  )
48
+ CODE_INTERPRETER_CALL_OUTPUTS =
49
+ T.let(
50
+ :"code_interpreter_call.outputs",
51
+ OpenAI::Responses::ResponseIncludable::TaggedSymbol
52
+ )
46
53
 
47
54
  sig do
48
55
  override.returns(
@@ -19,6 +19,7 @@ module OpenAI
19
19
  T.any(
20
20
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
21
21
  OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
22
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
22
23
  OpenAI::Responses::ResponseOutputText::Annotation::FilePath
23
24
  )
24
25
  ]
@@ -61,6 +62,7 @@ module OpenAI
61
62
  T.any(
62
63
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation::OrHash,
63
64
  OpenAI::Responses::ResponseOutputText::Annotation::URLCitation::OrHash,
65
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation::OrHash,
64
66
  OpenAI::Responses::ResponseOutputText::Annotation::FilePath::OrHash
65
67
  )
66
68
  ],
@@ -89,6 +91,7 @@ module OpenAI
89
91
  T.any(
90
92
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
91
93
  OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
94
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
92
95
  OpenAI::Responses::ResponseOutputText::Annotation::FilePath
93
96
  )
94
97
  ],
@@ -110,6 +113,7 @@ module OpenAI
110
113
  T.any(
111
114
  OpenAI::Responses::ResponseOutputText::Annotation::FileCitation,
112
115
  OpenAI::Responses::ResponseOutputText::Annotation::URLCitation,
116
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
113
117
  OpenAI::Responses::ResponseOutputText::Annotation::FilePath
114
118
  )
115
119
  end
@@ -228,6 +232,74 @@ module OpenAI
228
232
  end
229
233
  end
230
234
 
235
+ class ContainerFileCitation < OpenAI::Internal::Type::BaseModel
236
+ OrHash =
237
+ T.type_alias do
238
+ T.any(
239
+ OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation,
240
+ OpenAI::Internal::AnyHash
241
+ )
242
+ end
243
+
244
+ # The ID of the container file.
245
+ sig { returns(String) }
246
+ attr_accessor :container_id
247
+
248
+ # The index of the last character of the container file citation in the message.
249
+ sig { returns(Integer) }
250
+ attr_accessor :end_index
251
+
252
+ # The ID of the file.
253
+ sig { returns(String) }
254
+ attr_accessor :file_id
255
+
256
+ # The index of the first character of the container file citation in the message.
257
+ sig { returns(Integer) }
258
+ attr_accessor :start_index
259
+
260
+ # The type of the container file citation. Always `container_file_citation`.
261
+ sig { returns(Symbol) }
262
+ attr_accessor :type
263
+
264
+ # A citation for a container file used to generate a model response.
265
+ sig do
266
+ params(
267
+ container_id: String,
268
+ end_index: Integer,
269
+ file_id: String,
270
+ start_index: Integer,
271
+ type: Symbol
272
+ ).returns(T.attached_class)
273
+ end
274
+ def self.new(
275
+ # The ID of the container file.
276
+ container_id:,
277
+ # The index of the last character of the container file citation in the message.
278
+ end_index:,
279
+ # The ID of the file.
280
+ file_id:,
281
+ # The index of the first character of the container file citation in the message.
282
+ start_index:,
283
+ # The type of the container file citation. Always `container_file_citation`.
284
+ type: :container_file_citation
285
+ )
286
+ end
287
+
288
+ sig do
289
+ override.returns(
290
+ {
291
+ container_id: String,
292
+ end_index: Integer,
293
+ file_id: String,
294
+ start_index: Integer,
295
+ type: Symbol
296
+ }
297
+ )
298
+ end
299
+ def to_hash
300
+ end
301
+ end
302
+
231
303
  class FilePath < OpenAI::Internal::Type::BaseModel
232
304
  OrHash =
233
305
  T.type_alias do