openai 0.5.0 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/audio/transcription_text_delta_event.rb +3 -3
  5. data/lib/openai/models/audio/transcription_text_done_event.rb +3 -3
  6. data/lib/openai/models/chat/chat_completion.rb +4 -4
  7. data/lib/openai/models/chat/chat_completion_chunk.rb +4 -4
  8. data/lib/openai/models/chat/completion_create_params.rb +4 -4
  9. data/lib/openai/models/fine_tuning/alpha/grader_run_params.rb +17 -30
  10. data/lib/openai/models/fine_tuning/fine_tuning_job.rb +3 -5
  11. data/lib/openai/models/graders/multi_grader.rb +11 -4
  12. data/lib/openai/models/image_edit_params.rb +2 -2
  13. data/lib/openai/models/responses/response.rb +4 -4
  14. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +5 -5
  15. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +5 -5
  16. data/lib/openai/models/responses/response_create_params.rb +6 -4
  17. data/lib/openai/models/responses/response_includable.rb +3 -0
  18. data/lib/openai/models/responses/response_output_text.rb +52 -3
  19. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  20. data/lib/openai/resources/fine_tuning/alpha/graders.rb +6 -3
  21. data/lib/openai/resources/responses.rb +2 -2
  22. data/lib/openai/version.rb +1 -1
  23. data/rbi/openai/models/audio/transcription_text_delta_event.rbi +4 -4
  24. data/rbi/openai/models/audio/transcription_text_done_event.rbi +4 -4
  25. data/rbi/openai/models/chat/chat_completion.rbi +6 -6
  26. data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -6
  27. data/rbi/openai/models/chat/completion_create_params.rbi +6 -6
  28. data/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi +24 -43
  29. data/rbi/openai/models/fine_tuning/fine_tuning_job.rbi +2 -3
  30. data/rbi/openai/models/graders/multi_grader.rbi +27 -32
  31. data/rbi/openai/models/image_edit_params.rbi +3 -3
  32. data/rbi/openai/models/responses/response.rbi +6 -6
  33. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +3 -3
  34. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +3 -3
  35. data/rbi/openai/models/responses/response_create_params.rbi +10 -6
  36. data/rbi/openai/models/responses/response_includable.rbi +7 -0
  37. data/rbi/openai/models/responses/response_output_text.rbi +72 -0
  38. data/rbi/openai/resources/chat/completions.rbi +4 -4
  39. data/rbi/openai/resources/fine_tuning/alpha/graders.rbi +10 -5
  40. data/rbi/openai/resources/images.rbi +1 -1
  41. data/rbi/openai/resources/responses.rbi +9 -5
  42. data/sig/openai/models/audio/transcription_text_delta_event.rbs +10 -5
  43. data/sig/openai/models/audio/transcription_text_done_event.rbs +10 -5
  44. data/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs +6 -14
  45. data/sig/openai/models/fine_tuning/fine_tuning_job.rbs +1 -1
  46. data/sig/openai/models/graders/multi_grader.rbs +7 -7
  47. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +4 -4
  48. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +4 -4
  49. data/sig/openai/models/responses/response_includable.rbs +2 -0
  50. data/sig/openai/models/responses/response_output_text.rbs +38 -0
  51. data/sig/openai/resources/fine_tuning/alpha/graders.rbs +1 -1
  52. data/sig/openai/resources/responses.rbs +1 -1
  53. metadata +2 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 47fb7720e93045ef9ea453bfa010f7c371e787bbef29cd4a030b4022f9402cae
4
- data.tar.gz: f055a7952e0aef69b1b7c55ed9b388c3c96c870022bc2f24b199fce4d15cd029
3
+ metadata.gz: 6a5c45a919ed6bf4ffc290e1b6e09be3ce694b50f7956282fa3bb2346f2c88ee
4
+ data.tar.gz: 0c76fb2d6312a2fa1e215179d0ffbf96ebcf0ba7253cf3e83ac71bd0eda5f34c
5
5
  SHA512:
6
- metadata.gz: 05d0d579d341ae49f9e37afce59d8231490ba4e97c0f7015223ae66b22ea70243a3dc4ea4f58cd3214f49550bef85f71916f67056a2d0bdc22884733172414bf
7
- data.tar.gz: f808c8aa99ec25fcd98bdfc7cba0a2736e2c495a8c84c1cdeea59709b29a498c469d1e3a8611c00aa7bb6453c00ea2c1c60ee86d7cc21b574d58f31ac3bcb484
6
+ metadata.gz: ecf3726095de057b79ec004e51f3163db95f39bfde70e4735954646c1e50c18e245ed0a43c46b1fe770970fb571a704ca5767773f61d95a819929d0caac0675c
7
+ data.tar.gz: afae3a96adde0dbbb0fff1c423751641a5e97834640104ed176bb66002e4d5a89d62488c1f3a4a6d6d2bfd47bad541eeddb034992aee8449e4391e018a5aeada
data/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.5.1 (2025-06-02)
4
+
5
+ Full Changelog: [v0.5.0...v0.5.1](https://github.com/openai/openai-ruby/compare/v0.5.0...v0.5.1)
6
+
7
+ ### Bug Fixes
8
+
9
+ * **api:** Fix evals and code interpreter interfaces ([24a9100](https://github.com/openai/openai-ruby/commit/24a910015e6885fc19a2ad689fe70a148bed5787))
10
+
3
11
  ## 0.5.0 (2025-05-29)
4
12
 
5
13
  Full Changelog: [v0.4.1...v0.5.0](https://github.com/openai/openai-ruby/compare/v0.4.1...v0.5.0)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
15
15
  <!-- x-release-please-start-version -->
16
16
 
17
17
  ```ruby
18
- gem "openai", "~> 0.5.0"
18
+ gem "openai", "~> 0.5.1"
19
19
  ```
20
20
 
21
21
  <!-- x-release-please-end -->
@@ -50,8 +50,8 @@ module OpenAI
50
50
  # @!attribute bytes
51
51
  # The bytes that were used to generate the log probability.
52
52
  #
53
- # @return [Array<Object>, nil]
54
- optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
53
+ # @return [Array<Integer>, nil]
54
+ optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
55
55
 
56
56
  # @!attribute logprob
57
57
  # The log probability of the token.
@@ -65,7 +65,7 @@ module OpenAI
65
65
  #
66
66
  # @param token [String] The token that was used to generate the log probability.
67
67
  #
68
- # @param bytes [Array<Object>] The bytes that were used to generate the log probability.
68
+ # @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
69
69
  #
70
70
  # @param logprob [Float] The log probability of the token.
71
71
  end
@@ -51,8 +51,8 @@ module OpenAI
51
51
  # @!attribute bytes
52
52
  # The bytes that were used to generate the log probability.
53
53
  #
54
- # @return [Array<Object>, nil]
55
- optional :bytes, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
54
+ # @return [Array<Integer>, nil]
55
+ optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
56
56
 
57
57
  # @!attribute logprob
58
58
  # The log probability of the token.
@@ -66,7 +66,7 @@ module OpenAI
66
66
  #
67
67
  # @param token [String] The token that was used to generate the log probability.
68
68
  #
69
- # @param bytes [Array<Object>] The bytes that were used to generate the log probability.
69
+ # @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
70
70
  #
71
71
  # @param logprob [Float] The log probability of the token.
72
72
  end
@@ -46,9 +46,9 @@ module OpenAI
46
46
  # utilize scale tier credits until they are exhausted.
47
47
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
48
48
  # be processed using the default service tier with a lower uptime SLA and no
49
- # latency guarentee.
49
+ # latency guarantee.
50
50
  # - If set to 'default', the request will be processed using the default service
51
- # tier with a lower uptime SLA and no latency guarentee.
51
+ # tier with a lower uptime SLA and no latency guarantee.
52
52
  # - If set to 'flex', the request will be processed with the Flex Processing
53
53
  # service tier.
54
54
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -195,9 +195,9 @@ module OpenAI
195
195
  # utilize scale tier credits until they are exhausted.
196
196
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
197
197
  # be processed using the default service tier with a lower uptime SLA and no
198
- # latency guarentee.
198
+ # latency guarantee.
199
199
  # - If set to 'default', the request will be processed using the default service
200
- # tier with a lower uptime SLA and no latency guarentee.
200
+ # tier with a lower uptime SLA and no latency guarantee.
201
201
  # - If set to 'flex', the request will be processed with the Flex Processing
202
202
  # service tier.
203
203
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -45,9 +45,9 @@ module OpenAI
45
45
  # utilize scale tier credits until they are exhausted.
46
46
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
47
47
  # be processed using the default service tier with a lower uptime SLA and no
48
- # latency guarentee.
48
+ # latency guarantee.
49
49
  # - If set to 'default', the request will be processed using the default service
50
- # tier with a lower uptime SLA and no latency guarentee.
50
+ # tier with a lower uptime SLA and no latency guarantee.
51
51
  # - If set to 'flex', the request will be processed with the Flex Processing
52
52
  # service tier.
53
53
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -378,9 +378,9 @@ module OpenAI
378
378
  # utilize scale tier credits until they are exhausted.
379
379
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
380
380
  # be processed using the default service tier with a lower uptime SLA and no
381
- # latency guarentee.
381
+ # latency guarantee.
382
382
  # - If set to 'default', the request will be processed using the default service
383
- # tier with a lower uptime SLA and no latency guarentee.
383
+ # tier with a lower uptime SLA and no latency guarantee.
384
384
  # - If set to 'flex', the request will be processed with the Flex Processing
385
385
  # service tier.
386
386
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -226,9 +226,9 @@ module OpenAI
226
226
  # utilize scale tier credits until they are exhausted.
227
227
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
228
228
  # be processed using the default service tier with a lower uptime SLA and no
229
- # latency guarentee.
229
+ # latency guarantee.
230
230
  # - If set to 'default', the request will be processed using the default service
231
- # tier with a lower uptime SLA and no latency guarentee.
231
+ # tier with a lower uptime SLA and no latency guarantee.
232
232
  # - If set to 'flex', the request will be processed with the Flex Processing
233
233
  # service tier.
234
234
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -553,9 +553,9 @@ module OpenAI
553
553
  # utilize scale tier credits until they are exhausted.
554
554
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
555
555
  # be processed using the default service tier with a lower uptime SLA and no
556
- # latency guarentee.
556
+ # latency guarantee.
557
557
  # - If set to 'default', the request will be processed using the default service
558
- # tier with a lower uptime SLA and no latency guarentee.
558
+ # tier with a lower uptime SLA and no latency guarantee.
559
559
  # - If set to 'flex', the request will be processed with the Flex Processing
560
560
  # service tier.
561
561
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -16,26 +16,32 @@ module OpenAI
16
16
  required :grader, union: -> { OpenAI::FineTuning::Alpha::GraderRunParams::Grader }
17
17
 
18
18
  # @!attribute model_sample
19
- # The model sample to be evaluated.
19
+ # The model sample to be evaluated. This value will be used to populate the
20
+ # `sample` namespace. See
21
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
22
+ # The `output_json` variable will be populated if the model sample is a valid JSON
23
+ # string.
20
24
  #
21
25
  # @return [String]
22
26
  required :model_sample, String
23
27
 
24
- # @!attribute reference_answer
25
- # The reference answer for the evaluation.
28
+ # @!attribute item
29
+ # The dataset item provided to the grader. This will be used to populate the
30
+ # `item` namespace. See
31
+ # [the guide](https://platform.openai.com/docs/guides/graders) for more details.
26
32
  #
27
- # @return [String, Object, Array<Object>, Float]
28
- required :reference_answer,
29
- union: -> {
30
- OpenAI::FineTuning::Alpha::GraderRunParams::ReferenceAnswer
31
- }
33
+ # @return [Object, nil]
34
+ optional :item, OpenAI::Internal::Type::Unknown
32
35
 
33
- # @!method initialize(grader:, model_sample:, reference_answer:, request_options: {})
36
+ # @!method initialize(grader:, model_sample:, item: nil, request_options: {})
37
+ # Some parameter documentations has been truncated, see
38
+ # {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details.
39
+ #
34
40
  # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job.
35
41
  #
36
- # @param model_sample [String] The model sample to be evaluated.
42
+ # @param model_sample [String] The model sample to be evaluated. This value will be used to populate
37
43
  #
38
- # @param reference_answer [String, Object, Array<Object>, Float] The reference answer for the evaluation.
44
+ # @param item [Object] The dataset item provided to the grader. This will be used to populate
39
45
  #
40
46
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
41
47
 
@@ -63,25 +69,6 @@ module OpenAI
63
69
  # @!method self.variants
64
70
  # @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)]
65
71
  end
66
-
67
- # The reference answer for the evaluation.
68
- module ReferenceAnswer
69
- extend OpenAI::Internal::Type::Union
70
-
71
- variant String
72
-
73
- variant OpenAI::Internal::Type::Unknown
74
-
75
- variant -> { OpenAI::Models::FineTuning::Alpha::GraderRunParams::ReferenceAnswer::UnionMember2Array }
76
-
77
- variant Float
78
-
79
- # @!method self.variants
80
- # @return [Array(String, Object, Array<Object>, Float)]
81
-
82
- # @type [OpenAI::Internal::Type::Converter]
83
- UnionMember2Array = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
84
- end
85
72
  end
86
73
  end
87
74
  end
@@ -226,7 +226,7 @@ module OpenAI
226
226
  # Number of examples in each batch. A larger batch size means that model
227
227
  # parameters are updated less frequently, but with lower variance.
228
228
  #
229
- # @return [Object, Symbol, :auto, Integer, nil]
229
+ # @return [Symbol, :auto, Integer, nil]
230
230
  optional :batch_size,
231
231
  union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::BatchSize },
232
232
  nil?: true
@@ -253,7 +253,7 @@ module OpenAI
253
253
  # The hyperparameters used for the fine-tuning job. This value will only be
254
254
  # returned when running `supervised` jobs.
255
255
  #
256
- # @param batch_size [Object, Symbol, :auto, Integer, nil] Number of examples in each batch. A larger batch size means that model parameter
256
+ # @param batch_size [Symbol, :auto, Integer, nil] Number of examples in each batch. A larger batch size means that model parameter
257
257
  #
258
258
  # @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a
259
259
  #
@@ -266,14 +266,12 @@ module OpenAI
266
266
  module BatchSize
267
267
  extend OpenAI::Internal::Type::Union
268
268
 
269
- variant OpenAI::Internal::Type::Unknown
270
-
271
269
  variant const: :auto
272
270
 
273
271
  variant Integer
274
272
 
275
273
  # @!method self.variants
276
- # @return [Array(Object, Symbol, :auto, Integer)]
274
+ # @return [Array(Symbol, :auto, Integer)]
277
275
  end
278
276
 
279
277
  # Scaling factor for the learning rate. A smaller learning rate may be useful to
@@ -11,9 +11,11 @@ module OpenAI
11
11
  required :calculate_output, String
12
12
 
13
13
  # @!attribute graders
14
+ # A StringCheckGrader object that performs a string comparison between input and
15
+ # reference using a specified operation.
14
16
  #
15
- # @return [Hash{Symbol=>OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader}]
16
- required :graders, -> { OpenAI::Internal::Type::HashOf[union: OpenAI::Graders::MultiGrader::Grader] }
17
+ # @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader]
18
+ required :graders, union: -> { OpenAI::Graders::MultiGrader::Graders }
17
19
 
18
20
  # @!attribute name
19
21
  # The name of the grader.
@@ -28,12 +30,15 @@ module OpenAI
28
30
  required :type, const: :multi
29
31
 
30
32
  # @!method initialize(calculate_output:, graders:, name:, type: :multi)
33
+ # Some parameter documentations has been truncated, see
34
+ # {OpenAI::Models::Graders::MultiGrader} for more details.
35
+ #
31
36
  # A MultiGrader object combines the output of multiple graders to produce a single
32
37
  # score.
33
38
  #
34
39
  # @param calculate_output [String] A formula to calculate the output based on grader results.
35
40
  #
36
- # @param graders [Hash{Symbol=>OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader}]
41
+ # @param graders [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader] A StringCheckGrader object that performs a string comparison between input and r
37
42
  #
38
43
  # @param name [String] The name of the grader.
39
44
  #
@@ -41,7 +46,9 @@ module OpenAI
41
46
 
42
47
  # A StringCheckGrader object that performs a string comparison between input and
43
48
  # reference using a specified operation.
44
- module Grader
49
+ #
50
+ # @see OpenAI::Models::Graders::MultiGrader#graders
51
+ module Graders
45
52
  extend OpenAI::Internal::Type::Union
46
53
 
47
54
  # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
@@ -11,7 +11,7 @@ module OpenAI
11
11
  # The image(s) to edit. Must be a supported image file or an array of images.
12
12
  #
13
13
  # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
14
- # 25MB. You can provide up to 16 images.
14
+ # 50MB. You can provide up to 16 images.
15
15
  #
16
16
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
17
17
  # file less than 4MB.
@@ -123,7 +123,7 @@ module OpenAI
123
123
  # The image(s) to edit. Must be a supported image file or an array of images.
124
124
  #
125
125
  # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
126
- # 25MB. You can provide up to 16 images.
126
+ # 50MB. You can provide up to 16 images.
127
127
  #
128
128
  # For `dall-e-2`, you can only provide one image, and it should be a square `png`
129
129
  # file less than 4MB.
@@ -173,9 +173,9 @@ module OpenAI
173
173
  # utilize scale tier credits until they are exhausted.
174
174
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
175
175
  # be processed using the default service tier with a lower uptime SLA and no
176
- # latency guarentee.
176
+ # latency guarantee.
177
177
  # - If set to 'default', the request will be processed using the default service
178
- # tier with a lower uptime SLA and no latency guarentee.
178
+ # tier with a lower uptime SLA and no latency guarantee.
179
179
  # - If set to 'flex', the request will be processed with the Flex Processing
180
180
  # service tier.
181
181
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -346,9 +346,9 @@ module OpenAI
346
346
  # utilize scale tier credits until they are exhausted.
347
347
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
348
348
  # be processed using the default service tier with a lower uptime SLA and no
349
- # latency guarentee.
349
+ # latency guarantee.
350
350
  # - If set to 'default', the request will be processed using the default service
351
- # tier with a lower uptime SLA and no latency guarentee.
351
+ # tier with a lower uptime SLA and no latency guarantee.
352
352
  # - If set to 'flex', the request will be processed with the Flex Processing
353
353
  # service tier.
354
354
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -23,12 +23,12 @@ module OpenAI
23
23
  required :sequence_number, Integer
24
24
 
25
25
  # @!attribute type
26
- # The type of the event. Always `response.code_interpreter_call.code.delta`.
26
+ # The type of the event. Always `response.code_interpreter_call_code.delta`.
27
27
  #
28
- # @return [Symbol, :"response.code_interpreter_call.code.delta"]
29
- required :type, const: :"response.code_interpreter_call.code.delta"
28
+ # @return [Symbol, :"response.code_interpreter_call_code.delta"]
29
+ required :type, const: :"response.code_interpreter_call_code.delta"
30
30
 
31
- # @!method initialize(delta:, output_index:, sequence_number:, type: :"response.code_interpreter_call.code.delta")
31
+ # @!method initialize(delta:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.delta")
32
32
  # Some parameter documentations has been truncated, see
33
33
  # {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent} for more
34
34
  # details.
@@ -41,7 +41,7 @@ module OpenAI
41
41
  #
42
42
  # @param sequence_number [Integer] The sequence number of this event.
43
43
  #
44
- # @param type [Symbol, :"response.code_interpreter_call.code.delta"] The type of the event. Always `response.code_interpreter_call.code.delta`.
44
+ # @param type [Symbol, :"response.code_interpreter_call_code.delta"] The type of the event. Always `response.code_interpreter_call_code.delta`.
45
45
  end
46
46
  end
47
47
  end
@@ -23,12 +23,12 @@ module OpenAI
23
23
  required :sequence_number, Integer
24
24
 
25
25
  # @!attribute type
26
- # The type of the event. Always `response.code_interpreter_call.code.done`.
26
+ # The type of the event. Always `response.code_interpreter_call_code.done`.
27
27
  #
28
- # @return [Symbol, :"response.code_interpreter_call.code.done"]
29
- required :type, const: :"response.code_interpreter_call.code.done"
28
+ # @return [Symbol, :"response.code_interpreter_call_code.done"]
29
+ required :type, const: :"response.code_interpreter_call_code.done"
30
30
 
31
- # @!method initialize(code:, output_index:, sequence_number:, type: :"response.code_interpreter_call.code.done")
31
+ # @!method initialize(code:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.done")
32
32
  # Some parameter documentations has been truncated, see
33
33
  # {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent} for more
34
34
  # details.
@@ -41,7 +41,7 @@ module OpenAI
41
41
  #
42
42
  # @param sequence_number [Integer] The sequence number of this event.
43
43
  #
44
- # @param type [Symbol, :"response.code_interpreter_call.code.done"] The type of the event. Always `response.code_interpreter_call.code.done`.
44
+ # @param type [Symbol, :"response.code_interpreter_call_code.done"] The type of the event. Always `response.code_interpreter_call_code.done`.
45
45
  end
46
46
  end
47
47
  end
@@ -55,6 +55,8 @@ module OpenAI
55
55
  # multi-turn conversations when using the Responses API statelessly (like when
56
56
  # the `store` parameter is set to `false`, or when an organization is enrolled
57
57
  # in the zero data retention program).
58
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
59
+ # in code interpreter tool call items.
58
60
  #
59
61
  # @return [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil]
60
62
  optional :include,
@@ -122,9 +124,9 @@ module OpenAI
122
124
  # utilize scale tier credits until they are exhausted.
123
125
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
124
126
  # be processed using the default service tier with a lower uptime SLA and no
125
- # latency guarentee.
127
+ # latency guarantee.
126
128
  # - If set to 'default', the request will be processed using the default service
127
- # tier with a lower uptime SLA and no latency guarentee.
129
+ # tier with a lower uptime SLA and no latency guarantee.
128
130
  # - If set to 'flex', the request will be processed with the Flex Processing
129
131
  # service tier.
130
132
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -299,9 +301,9 @@ module OpenAI
299
301
  # utilize scale tier credits until they are exhausted.
300
302
  # - If set to 'auto', and the Project is not Scale tier enabled, the request will
301
303
  # be processed using the default service tier with a lower uptime SLA and no
302
- # latency guarentee.
304
+ # latency guarantee.
303
305
  # - If set to 'default', the request will be processed using the default service
304
- # tier with a lower uptime SLA and no latency guarentee.
306
+ # tier with a lower uptime SLA and no latency guarantee.
305
307
  # - If set to 'flex', the request will be processed with the Flex Processing
306
308
  # service tier.
307
309
  # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -16,6 +16,8 @@ module OpenAI
16
16
  # multi-turn conversations when using the Responses API statelessly (like when
17
17
  # the `store` parameter is set to `false`, or when an organization is enrolled
18
18
  # in the zero data retention program).
19
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
20
+ # in code interpreter tool call items.
19
21
  module ResponseIncludable
20
22
  extend OpenAI::Internal::Type::Enum
21
23
 
@@ -23,6 +25,7 @@ module OpenAI
23
25
  MESSAGE_INPUT_IMAGE_IMAGE_URL = :"message.input_image.image_url"
24
26
  COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url"
25
27
  REASONING_ENCRYPTED_CONTENT = :"reasoning.encrypted_content"
28
+ CODE_INTERPRETER_CALL_OUTPUTS = :"code_interpreter_call.outputs"
26
29
 
27
30
  # @!method self.values
28
31
  # @return [Array<Symbol>]
@@ -7,7 +7,7 @@ module OpenAI
7
7
  # @!attribute annotations
8
8
  # The annotations of the text output.
9
9
  #
10
- # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>]
10
+ # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>]
11
11
  required :annotations,
12
12
  -> {
13
13
  OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation]
@@ -42,7 +42,7 @@ module OpenAI
42
42
  # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text)
43
43
  # A text output from the model.
44
44
  #
45
- # @param annotations [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>] The annotations of the text output.
45
+ # @param annotations [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>] The annotations of the text output.
46
46
  #
47
47
  # @param text [String] The text output from the model.
48
48
  #
@@ -62,6 +62,10 @@ module OpenAI
62
62
  # A citation for a web resource used to generate a model response.
63
63
  variant :url_citation, -> { OpenAI::Responses::ResponseOutputText::Annotation::URLCitation }
64
64
 
65
+ # A citation for a container file used to generate a model response.
66
+ variant :container_file_citation,
67
+ -> { OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation }
68
+
65
69
  # A path to a file.
66
70
  variant :file_path, -> { OpenAI::Responses::ResponseOutputText::Annotation::FilePath }
67
71
 
@@ -139,6 +143,51 @@ module OpenAI
139
143
  # @param type [Symbol, :url_citation] The type of the URL citation. Always `url_citation`.
140
144
  end
141
145
 
146
+ class ContainerFileCitation < OpenAI::Internal::Type::BaseModel
147
+ # @!attribute container_id
148
+ # The ID of the container file.
149
+ #
150
+ # @return [String]
151
+ required :container_id, String
152
+
153
+ # @!attribute end_index
154
+ # The index of the last character of the container file citation in the message.
155
+ #
156
+ # @return [Integer]
157
+ required :end_index, Integer
158
+
159
+ # @!attribute file_id
160
+ # The ID of the file.
161
+ #
162
+ # @return [String]
163
+ required :file_id, String
164
+
165
+ # @!attribute start_index
166
+ # The index of the first character of the container file citation in the message.
167
+ #
168
+ # @return [Integer]
169
+ required :start_index, Integer
170
+
171
+ # @!attribute type
172
+ # The type of the container file citation. Always `container_file_citation`.
173
+ #
174
+ # @return [Symbol, :container_file_citation]
175
+ required :type, const: :container_file_citation
176
+
177
+ # @!method initialize(container_id:, end_index:, file_id:, start_index:, type: :container_file_citation)
178
+ # A citation for a container file used to generate a model response.
179
+ #
180
+ # @param container_id [String] The ID of the container file.
181
+ #
182
+ # @param end_index [Integer] The index of the last character of the container file citation in the message.
183
+ #
184
+ # @param file_id [String] The ID of the file.
185
+ #
186
+ # @param start_index [Integer] The index of the first character of the container file citation in the message.
187
+ #
188
+ # @param type [Symbol, :container_file_citation] The type of the container file citation. Always `container_file_citation`.
189
+ end
190
+
142
191
  class FilePath < OpenAI::Internal::Type::BaseModel
143
192
  # @!attribute file_id
144
193
  # The ID of the file.
@@ -173,7 +222,7 @@ module OpenAI
173
222
  end
174
223
 
175
224
  # @!method self.variants
176
- # @return [Array(OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath)]
225
+ # @return [Array(OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath)]
177
226
  end
178
227
 
179
228
  class Logprob < OpenAI::Internal::Type::BaseModel
@@ -25,11 +25,11 @@ module OpenAI
25
25
  variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent }
26
26
 
27
27
  # Emitted when a partial code snippet is added by the code interpreter.
28
- variant :"response.code_interpreter_call.code.delta",
28
+ variant :"response.code_interpreter_call_code.delta",
29
29
  -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent }
30
30
 
31
31
  # Emitted when code snippet output is finalized by the code interpreter.
32
- variant :"response.code_interpreter_call.code.done",
32
+ variant :"response.code_interpreter_call_code.done",
33
33
  -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent }
34
34
 
35
35
  # Emitted when the code interpreter call is completed.
@@ -5,15 +5,18 @@ module OpenAI
5
5
  class FineTuning
6
6
  class Alpha
7
7
  class Graders
8
+ # Some parameter documentations has been truncated, see
9
+ # {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details.
10
+ #
8
11
  # Run a grader.
9
12
  #
10
- # @overload run(grader:, model_sample:, reference_answer:, request_options: {})
13
+ # @overload run(grader:, model_sample:, item: nil, request_options: {})
11
14
  #
12
15
  # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job.
13
16
  #
14
- # @param model_sample [String] The model sample to be evaluated.
17
+ # @param model_sample [String] The model sample to be evaluated. This value will be used to populate
15
18
  #
16
- # @param reference_answer [String, Object, Array<Object>, Float] The reference answer for the evaluation.
19
+ # @param item [Object] The dataset item provided to the grader. This will be used to populate
17
20
  #
18
21
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
19
22
  #
@@ -345,14 +345,14 @@ module OpenAI
345
345
  #
346
346
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
347
347
  #
348
- # @return [nil]
348
+ # @return [OpenAI::Models::Responses::Response]
349
349
  #
350
350
  # @see OpenAI::Models::Responses::ResponseCancelParams
351
351
  def cancel(response_id, params = {})
352
352
  @client.request(
353
353
  method: :post,
354
354
  path: ["responses/%1$s/cancel", response_id],
355
- model: NilClass,
355
+ model: OpenAI::Responses::Response,
356
356
  options: params[:request_options]
357
357
  )
358
358
  end
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.5.0"
4
+ VERSION = "0.5.1"
5
5
  end
@@ -98,10 +98,10 @@ module OpenAI
98
98
  attr_writer :token
99
99
 
100
100
  # The bytes that were used to generate the log probability.
101
- sig { returns(T.nilable(T::Array[T.anything])) }
101
+ sig { returns(T.nilable(T::Array[Integer])) }
102
102
  attr_reader :bytes
103
103
 
104
- sig { params(bytes: T::Array[T.anything]).void }
104
+ sig { params(bytes: T::Array[Integer]).void }
105
105
  attr_writer :bytes
106
106
 
107
107
  # The log probability of the token.
@@ -114,7 +114,7 @@ module OpenAI
114
114
  sig do
115
115
  params(
116
116
  token: String,
117
- bytes: T::Array[T.anything],
117
+ bytes: T::Array[Integer],
118
118
  logprob: Float
119
119
  ).returns(T.attached_class)
120
120
  end
@@ -130,7 +130,7 @@ module OpenAI
130
130
 
131
131
  sig do
132
132
  override.returns(
133
- { token: String, bytes: T::Array[T.anything], logprob: Float }
133
+ { token: String, bytes: T::Array[Integer], logprob: Float }
134
134
  )
135
135
  end
136
136
  def to_hash