openai 0.17.1 → 0.18.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +30 -0
  3. data/README.md +1 -1
  4. data/lib/openai/helpers/structured_output/array_of.rb +2 -10
  5. data/lib/openai/helpers/structured_output/base_model.rb +4 -11
  6. data/lib/openai/helpers/structured_output/json_schema_converter.rb +19 -3
  7. data/lib/openai/helpers/structured_output/union_of.rb +2 -10
  8. data/lib/openai/models/batch_create_params.rb +38 -1
  9. data/lib/openai/models/beta/thread_create_and_run_params.rb +2 -2
  10. data/lib/openai/models/beta/threads/run.rb +2 -2
  11. data/lib/openai/models/beta/threads/run_create_params.rb +2 -2
  12. data/lib/openai/models/chat/chat_completion.rb +6 -6
  13. data/lib/openai/models/chat/chat_completion_chunk.rb +6 -6
  14. data/lib/openai/models/chat/completion_create_params.rb +7 -7
  15. data/lib/openai/models/file_create_params.rb +37 -1
  16. data/lib/openai/models/graders/text_similarity_grader.rb +6 -5
  17. data/lib/openai/models/reasoning.rb +1 -1
  18. data/lib/openai/models/responses/response.rb +6 -8
  19. data/lib/openai/models/responses/response_create_params.rb +6 -8
  20. data/lib/openai/models/upload_create_params.rb +37 -1
  21. data/lib/openai/resources/batches.rb +3 -1
  22. data/lib/openai/resources/files.rb +4 -2
  23. data/lib/openai/resources/responses.rb +2 -2
  24. data/lib/openai/resources/uploads.rb +3 -1
  25. data/lib/openai/version.rb +1 -1
  26. data/rbi/openai/helpers/structured_output/array_of.rbi +0 -3
  27. data/rbi/openai/helpers/structured_output/json_schema_converter.rbi +10 -0
  28. data/rbi/openai/models/batch_create_params.rbi +60 -0
  29. data/rbi/openai/models/beta/thread_create_and_run_params.rbi +3 -3
  30. data/rbi/openai/models/beta/threads/run.rbi +3 -3
  31. data/rbi/openai/models/beta/threads/run_create_params.rbi +3 -3
  32. data/rbi/openai/models/chat/chat_completion.rbi +6 -9
  33. data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -9
  34. data/rbi/openai/models/chat/completion_create_params.rbi +8 -11
  35. data/rbi/openai/models/file_create_params.rbi +56 -0
  36. data/rbi/openai/models/graders/text_similarity_grader.rbi +11 -6
  37. data/rbi/openai/models/reasoning.rbi +1 -1
  38. data/rbi/openai/models/responses/response.rbi +8 -11
  39. data/rbi/openai/models/responses/response_create_params.rbi +8 -11
  40. data/rbi/openai/models/upload_create_params.rbi +56 -0
  41. data/rbi/openai/resources/batches.rbi +5 -0
  42. data/rbi/openai/resources/beta/threads/runs.rbi +2 -2
  43. data/rbi/openai/resources/beta/threads.rbi +2 -2
  44. data/rbi/openai/resources/chat/completions.rbi +6 -8
  45. data/rbi/openai/resources/files.rbi +5 -1
  46. data/rbi/openai/resources/responses.rbi +6 -8
  47. data/rbi/openai/resources/uploads.rbi +4 -0
  48. data/sig/openai/models/batch_create_params.rbs +22 -1
  49. data/sig/openai/models/file_create_params.rbs +22 -1
  50. data/sig/openai/models/graders/text_similarity_grader.rbs +3 -1
  51. data/sig/openai/models/upload_create_params.rbs +22 -1
  52. data/sig/openai/resources/batches.rbs +1 -0
  53. data/sig/openai/resources/files.rbs +1 -0
  54. data/sig/openai/resources/uploads.rbs +1 -0
  55. metadata +2 -2
@@ -37,7 +37,14 @@ module OpenAI
37
37
  # @return [Symbol, OpenAI::Models::FilePurpose]
38
38
  required :purpose, enum: -> { OpenAI::FilePurpose }
39
39
 
40
- # @!method initialize(bytes:, filename:, mime_type:, purpose:, request_options: {})
40
+ # @!attribute expires_after
41
+ # The expiration policy for a file. By default, files with `purpose=batch` expire
42
+ # after 30 days and all other files are persisted until they are manually deleted.
43
+ #
44
+ # @return [OpenAI::Models::UploadCreateParams::ExpiresAfter, nil]
45
+ optional :expires_after, -> { OpenAI::UploadCreateParams::ExpiresAfter }
46
+
47
+ # @!method initialize(bytes:, filename:, mime_type:, purpose:, expires_after: nil, request_options: {})
41
48
  # Some parameter documentations has been truncated, see
42
49
  # {OpenAI::Models::UploadCreateParams} for more details.
43
50
  #
@@ -49,7 +56,36 @@ module OpenAI
49
56
  #
50
57
  # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file.
51
58
  #
59
+ # @param expires_after [OpenAI::Models::UploadCreateParams::ExpiresAfter] The expiration policy for a file. By default, files with `purpose=batch` expire
60
+ #
52
61
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
62
+
63
+ class ExpiresAfter < OpenAI::Internal::Type::BaseModel
64
+ # @!attribute anchor
65
+ # Anchor timestamp after which the expiration policy applies. Supported anchors:
66
+ # `created_at`.
67
+ #
68
+ # @return [Symbol, :created_at]
69
+ required :anchor, const: :created_at
70
+
71
+ # @!attribute seconds
72
+ # The number of seconds after the anchor time that the file will expire. Must be
73
+ # between 3600 (1 hour) and 2592000 (30 days).
74
+ #
75
+ # @return [Integer]
76
+ required :seconds, Integer
77
+
78
+ # @!method initialize(seconds:, anchor: :created_at)
79
+ # Some parameter documentations has been truncated, see
80
+ # {OpenAI::Models::UploadCreateParams::ExpiresAfter} for more details.
81
+ #
82
+ # The expiration policy for a file. By default, files with `purpose=batch` expire
83
+ # after 30 days and all other files are persisted until they are manually deleted.
84
+ #
85
+ # @param seconds [Integer] The number of seconds after the anchor time that the file will expire. Must be b
86
+ #
87
+ # @param anchor [Symbol, :created_at] Anchor timestamp after which the expiration policy applies. Supported anchors: `
88
+ end
53
89
  end
54
90
  end
55
91
  end
@@ -8,7 +8,7 @@ module OpenAI
8
8
  #
9
9
  # Creates and executes a batch from an uploaded file of requests
10
10
  #
11
- # @overload create(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {})
11
+ # @overload create(completion_window:, endpoint:, input_file_id:, metadata: nil, output_expires_after: nil, request_options: {})
12
12
  #
13
13
  # @param completion_window [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow] The time frame within which the batch should be processed. Currently only `24h`
14
14
  #
@@ -18,6 +18,8 @@ module OpenAI
18
18
  #
19
19
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
20
20
  #
21
+ # @param output_expires_after [OpenAI::Models::BatchCreateParams::OutputExpiresAfter] The expiration policy for the output and/or error file that are generated for a
22
+ #
21
23
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
22
24
  #
23
25
  # @return [OpenAI::Models::Batch]
@@ -8,7 +8,7 @@ module OpenAI
8
8
  #
9
9
  # Upload a file that can be used across various endpoints. Individual files can be
10
10
  # up to 512 MB, and the size of all files uploaded by one organization can be up
11
- # to 100 GB.
11
+ # to 1 TB.
12
12
  #
13
13
  # The Assistants API supports files up to 2 million tokens and of specific file
14
14
  # types. See the
@@ -28,12 +28,14 @@ module OpenAI
28
28
  # Please [contact us](https://help.openai.com/) if you need to increase these
29
29
  # storage limits.
30
30
  #
31
- # @overload create(file:, purpose:, request_options: {})
31
+ # @overload create(file:, purpose:, expires_after: nil, request_options: {})
32
32
  #
33
33
  # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The File object (not file name) to be uploaded.
34
34
  #
35
35
  # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file. One of: - `assistants`: Used in the A
36
36
  #
37
+ # @param expires_after [OpenAI::Models::FileCreateParams::ExpiresAfter] The expiration policy for a file. By default, files with `purpose=batch` expire
38
+ #
37
39
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
38
40
  #
39
41
  # @return [OpenAI::Models::FileObject]
@@ -49,7 +49,7 @@ module OpenAI
49
49
  #
50
50
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
51
51
  #
52
- # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
52
+ # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
53
53
  #
54
54
  # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
55
55
  #
@@ -264,7 +264,7 @@ module OpenAI
264
264
  #
265
265
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
266
266
  #
267
- # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
267
+ # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
268
268
  #
269
269
  # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
270
270
  #
@@ -29,7 +29,7 @@ module OpenAI
29
29
  # the documentation on
30
30
  # [creating a File](https://platform.openai.com/docs/api-reference/files/create).
31
31
  #
32
- # @overload create(bytes:, filename:, mime_type:, purpose:, request_options: {})
32
+ # @overload create(bytes:, filename:, mime_type:, purpose:, expires_after: nil, request_options: {})
33
33
  #
34
34
  # @param bytes [Integer] The number of bytes in the file you are uploading.
35
35
  #
@@ -39,6 +39,8 @@ module OpenAI
39
39
  #
40
40
  # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file.
41
41
  #
42
+ # @param expires_after [OpenAI::Models::UploadCreateParams::ExpiresAfter] The expiration policy for a file. By default, files with `purpose=batch` expire
43
+ #
42
44
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
43
45
  #
44
46
  # @return [OpenAI::Models::Upload]
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.17.1"
4
+ VERSION = "0.18.1"
5
5
  end
@@ -7,9 +7,6 @@ module OpenAI
7
7
  include OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
8
8
 
9
9
  Elem = type_member(:out)
10
-
11
- sig { returns(String) }
12
- attr_reader :description
13
10
  end
14
11
  end
15
12
  end
@@ -46,6 +46,16 @@ module OpenAI
46
46
  def to_nilable(schema)
47
47
  end
48
48
 
49
+ # @api private
50
+ sig do
51
+ params(
52
+ schema: OpenAI::Helpers::StructuredOutput::JsonSchema,
53
+ meta: OpenAI::Internal::AnyHash
54
+ ).void
55
+ end
56
+ def assoc_meta!(schema, meta:)
57
+ end
58
+
49
59
  # @api private
50
60
  sig do
51
61
  params(
@@ -44,6 +44,19 @@ module OpenAI
44
44
  sig { returns(T.nilable(T::Hash[Symbol, String])) }
45
45
  attr_accessor :metadata
46
46
 
47
+ # The expiration policy for the output and/or error file that are generated for a
48
+ # batch.
49
+ sig { returns(T.nilable(OpenAI::BatchCreateParams::OutputExpiresAfter)) }
50
+ attr_reader :output_expires_after
51
+
52
+ sig do
53
+ params(
54
+ output_expires_after:
55
+ OpenAI::BatchCreateParams::OutputExpiresAfter::OrHash
56
+ ).void
57
+ end
58
+ attr_writer :output_expires_after
59
+
47
60
  sig do
48
61
  params(
49
62
  completion_window:
@@ -51,6 +64,8 @@ module OpenAI
51
64
  endpoint: OpenAI::BatchCreateParams::Endpoint::OrSymbol,
52
65
  input_file_id: String,
53
66
  metadata: T.nilable(T::Hash[Symbol, String]),
67
+ output_expires_after:
68
+ OpenAI::BatchCreateParams::OutputExpiresAfter::OrHash,
54
69
  request_options: OpenAI::RequestOptions::OrHash
55
70
  ).returns(T.attached_class)
56
71
  end
@@ -80,6 +95,9 @@ module OpenAI
80
95
  # Keys are strings with a maximum length of 64 characters. Values are strings with
81
96
  # a maximum length of 512 characters.
82
97
  metadata: nil,
98
+ # The expiration policy for the output and/or error file that are generated for a
99
+ # batch.
100
+ output_expires_after: nil,
83
101
  request_options: {}
84
102
  )
85
103
  end
@@ -92,6 +110,7 @@ module OpenAI
92
110
  endpoint: OpenAI::BatchCreateParams::Endpoint::OrSymbol,
93
111
  input_file_id: String,
94
112
  metadata: T.nilable(T::Hash[Symbol, String]),
113
+ output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter,
95
114
  request_options: OpenAI::RequestOptions
96
115
  }
97
116
  )
@@ -165,6 +184,47 @@ module OpenAI
165
184
  def self.values
166
185
  end
167
186
  end
187
+
188
+ class OutputExpiresAfter < OpenAI::Internal::Type::BaseModel
189
+ OrHash =
190
+ T.type_alias do
191
+ T.any(
192
+ OpenAI::BatchCreateParams::OutputExpiresAfter,
193
+ OpenAI::Internal::AnyHash
194
+ )
195
+ end
196
+
197
+ # Anchor timestamp after which the expiration policy applies. Supported anchors:
198
+ # `created_at`. Note that the anchor is the file creation time, not the time the
199
+ # batch is created.
200
+ sig { returns(Symbol) }
201
+ attr_accessor :anchor
202
+
203
+ # The number of seconds after the anchor time that the file will expire. Must be
204
+ # between 3600 (1 hour) and 2592000 (30 days).
205
+ sig { returns(Integer) }
206
+ attr_accessor :seconds
207
+
208
+ # The expiration policy for the output and/or error file that are generated for a
209
+ # batch.
210
+ sig do
211
+ params(seconds: Integer, anchor: Symbol).returns(T.attached_class)
212
+ end
213
+ def self.new(
214
+ # The number of seconds after the anchor time that the file will expire. Must be
215
+ # between 3600 (1 hour) and 2592000 (30 days).
216
+ seconds:,
217
+ # Anchor timestamp after which the expiration policy applies. Supported anchors:
218
+ # `created_at`. Note that the anchor is the file creation time, not the time the
219
+ # batch is created.
220
+ anchor: :created_at
221
+ )
222
+ end
223
+
224
+ sig { override.returns({ anchor: Symbol, seconds: Integer }) }
225
+ def to_hash
226
+ end
227
+ end
168
228
  end
169
229
  end
170
230
  end
@@ -187,7 +187,7 @@ module OpenAI
187
187
  attr_accessor :top_p
188
188
 
189
189
  # Controls for how a thread will be truncated prior to the run. Use this to
190
- # control the intial context window of the run.
190
+ # control the initial context window of the run.
191
191
  sig do
192
192
  returns(
193
193
  T.nilable(
@@ -343,7 +343,7 @@ module OpenAI
343
343
  # We generally recommend altering this or temperature but not both.
344
344
  top_p: nil,
345
345
  # Controls for how a thread will be truncated prior to the run. Use this to
346
- # control the intial context window of the run.
346
+ # control the initial context window of the run.
347
347
  truncation_strategy: nil,
348
348
  request_options: {}
349
349
  )
@@ -1459,7 +1459,7 @@ module OpenAI
1459
1459
  attr_accessor :last_messages
1460
1460
 
1461
1461
  # Controls for how a thread will be truncated prior to the run. Use this to
1462
- # control the intial context window of the run.
1462
+ # control the initial context window of the run.
1463
1463
  sig do
1464
1464
  params(
1465
1465
  type:
@@ -184,7 +184,7 @@ module OpenAI
184
184
  attr_accessor :tools
185
185
 
186
186
  # Controls for how a thread will be truncated prior to the run. Use this to
187
- # control the intial context window of the run.
187
+ # control the initial context window of the run.
188
188
  sig do
189
189
  returns(T.nilable(OpenAI::Beta::Threads::Run::TruncationStrategy))
190
190
  end
@@ -375,7 +375,7 @@ module OpenAI
375
375
  # this run.
376
376
  tools:,
377
377
  # Controls for how a thread will be truncated prior to the run. Use this to
378
- # control the intial context window of the run.
378
+ # control the initial context window of the run.
379
379
  truncation_strategy:,
380
380
  # Usage statistics related to the run. This value will be `null` if the run is not
381
381
  # in a terminal state (i.e. `in_progress`, `queued`, etc.).
@@ -740,7 +740,7 @@ module OpenAI
740
740
  attr_accessor :last_messages
741
741
 
742
742
  # Controls for how a thread will be truncated prior to the run. Use this to
743
- # control the intial context window of the run.
743
+ # control the initial context window of the run.
744
744
  sig do
745
745
  params(
746
746
  type:
@@ -204,7 +204,7 @@ module OpenAI
204
204
  attr_accessor :top_p
205
205
 
206
206
  # Controls for how a thread will be truncated prior to the run. Use this to
207
- # control the intial context window of the run.
207
+ # control the initial context window of the run.
208
208
  sig do
209
209
  returns(
210
210
  T.nilable(
@@ -378,7 +378,7 @@ module OpenAI
378
378
  # We generally recommend altering this or temperature but not both.
379
379
  top_p: nil,
380
380
  # Controls for how a thread will be truncated prior to the run. Use this to
381
- # control the intial context window of the run.
381
+ # control the initial context window of the run.
382
382
  truncation_strategy: nil,
383
383
  request_options: {}
384
384
  )
@@ -803,7 +803,7 @@ module OpenAI
803
803
  attr_accessor :last_messages
804
804
 
805
805
  # Controls for how a thread will be truncated prior to the run. Use this to
806
- # control the intial context window of the run.
806
+ # control the initial context window of the run.
807
807
  sig do
808
808
  params(
809
809
  type:
@@ -40,9 +40,8 @@ module OpenAI
40
40
  # - If set to 'default', then the request will be processed with the standard
41
41
  # pricing and performance for the selected model.
42
42
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
43
- # 'priority', then the request will be processed with the corresponding service
44
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
45
- # Priority processing.
43
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
44
+ # will be processed with the corresponding service tier.
46
45
  # - When not set, the default behavior is 'auto'.
47
46
  #
48
47
  # When the `service_tier` parameter is set, the response body will include the
@@ -106,9 +105,8 @@ module OpenAI
106
105
  # - If set to 'default', then the request will be processed with the standard
107
106
  # pricing and performance for the selected model.
108
107
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
109
- # 'priority', then the request will be processed with the corresponding service
110
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
111
- # Priority processing.
108
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
109
+ # will be processed with the corresponding service tier.
112
110
  # - When not set, the default behavior is 'auto'.
113
111
  #
114
112
  # When the `service_tier` parameter is set, the response body will include the
@@ -371,9 +369,8 @@ module OpenAI
371
369
  # - If set to 'default', then the request will be processed with the standard
372
370
  # pricing and performance for the selected model.
373
371
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
374
- # 'priority', then the request will be processed with the corresponding service
375
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
376
- # Priority processing.
372
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
373
+ # will be processed with the corresponding service tier.
377
374
  # - When not set, the default behavior is 'auto'.
378
375
  #
379
376
  # When the `service_tier` parameter is set, the response body will include the
@@ -42,9 +42,8 @@ module OpenAI
42
42
  # - If set to 'default', then the request will be processed with the standard
43
43
  # pricing and performance for the selected model.
44
44
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
45
- # 'priority', then the request will be processed with the corresponding service
46
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
47
- # Priority processing.
45
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
46
+ # will be processed with the corresponding service tier.
48
47
  # - When not set, the default behavior is 'auto'.
49
48
  #
50
49
  # When the `service_tier` parameter is set, the response body will include the
@@ -121,9 +120,8 @@ module OpenAI
121
120
  # - If set to 'default', then the request will be processed with the standard
122
121
  # pricing and performance for the selected model.
123
122
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
124
- # 'priority', then the request will be processed with the corresponding service
125
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
126
- # Priority processing.
123
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
124
+ # will be processed with the corresponding service tier.
127
125
  # - When not set, the default behavior is 'auto'.
128
126
  #
129
127
  # When the `service_tier` parameter is set, the response body will include the
@@ -791,9 +789,8 @@ module OpenAI
791
789
  # - If set to 'default', then the request will be processed with the standard
792
790
  # pricing and performance for the selected model.
793
791
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
794
- # 'priority', then the request will be processed with the corresponding service
795
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
796
- # Priority processing.
792
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
793
+ # will be processed with the corresponding service tier.
797
794
  # - When not set, the default behavior is 'auto'.
798
795
  #
799
796
  # When the `service_tier` parameter is set, the response body will include the
@@ -297,9 +297,8 @@ module OpenAI
297
297
  # - If set to 'default', then the request will be processed with the standard
298
298
  # pricing and performance for the selected model.
299
299
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
300
- # 'priority', then the request will be processed with the corresponding service
301
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
302
- # Priority processing.
300
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
301
+ # will be processed with the corresponding service tier.
303
302
  # - When not set, the default behavior is 'auto'.
304
303
  #
305
304
  # When the `service_tier` parameter is set, the response body will include the
@@ -330,7 +329,7 @@ module OpenAI
330
329
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
331
330
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
332
331
  #
333
- # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
332
+ # Supports text and image inputs. Note: image inputs over 8MB will be dropped.
334
333
  sig { returns(T.nilable(T::Boolean)) }
335
334
  attr_accessor :store
336
335
 
@@ -700,9 +699,8 @@ module OpenAI
700
699
  # - If set to 'default', then the request will be processed with the standard
701
700
  # pricing and performance for the selected model.
702
701
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
703
- # 'priority', then the request will be processed with the corresponding service
704
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
705
- # Priority processing.
702
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
703
+ # will be processed with the corresponding service tier.
706
704
  # - When not set, the default behavior is 'auto'.
707
705
  #
708
706
  # When the `service_tier` parameter is set, the response body will include the
@@ -719,7 +717,7 @@ module OpenAI
719
717
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
720
718
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
721
719
  #
722
- # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
720
+ # Supports text and image inputs. Note: image inputs over 8MB will be dropped.
723
721
  store: nil,
724
722
  # Options for streaming response. Only set this when you set `stream: true`.
725
723
  stream_options: nil,
@@ -1100,9 +1098,8 @@ module OpenAI
1100
1098
  # - If set to 'default', then the request will be processed with the standard
1101
1099
  # pricing and performance for the selected model.
1102
1100
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
1103
- # 'priority', then the request will be processed with the corresponding service
1104
- # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
1105
- # Priority processing.
1101
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
1102
+ # will be processed with the corresponding service tier.
1106
1103
  # - When not set, the default behavior is 'auto'.
1107
1104
  #
1108
1105
  # When the `service_tier` parameter is set, the response body will include the
@@ -22,10 +22,23 @@ module OpenAI
22
22
  sig { returns(OpenAI::FilePurpose::OrSymbol) }
23
23
  attr_accessor :purpose
24
24
 
25
+ # The expiration policy for a file. By default, files with `purpose=batch` expire
26
+ # after 30 days and all other files are persisted until they are manually deleted.
27
+ sig { returns(T.nilable(OpenAI::FileCreateParams::ExpiresAfter)) }
28
+ attr_reader :expires_after
29
+
30
+ sig do
31
+ params(
32
+ expires_after: OpenAI::FileCreateParams::ExpiresAfter::OrHash
33
+ ).void
34
+ end
35
+ attr_writer :expires_after
36
+
25
37
  sig do
26
38
  params(
27
39
  file: OpenAI::Internal::FileInput,
28
40
  purpose: OpenAI::FilePurpose::OrSymbol,
41
+ expires_after: OpenAI::FileCreateParams::ExpiresAfter::OrHash,
29
42
  request_options: OpenAI::RequestOptions::OrHash
30
43
  ).returns(T.attached_class)
31
44
  end
@@ -37,6 +50,9 @@ module OpenAI
37
50
  # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
38
51
  # Flexible file type for any purpose - `evals`: Used for eval data sets
39
52
  purpose:,
53
+ # The expiration policy for a file. By default, files with `purpose=batch` expire
54
+ # after 30 days and all other files are persisted until they are manually deleted.
55
+ expires_after: nil,
40
56
  request_options: {}
41
57
  )
42
58
  end
@@ -46,12 +62,52 @@ module OpenAI
46
62
  {
47
63
  file: OpenAI::Internal::FileInput,
48
64
  purpose: OpenAI::FilePurpose::OrSymbol,
65
+ expires_after: OpenAI::FileCreateParams::ExpiresAfter,
49
66
  request_options: OpenAI::RequestOptions
50
67
  }
51
68
  )
52
69
  end
53
70
  def to_hash
54
71
  end
72
+
73
+ class ExpiresAfter < OpenAI::Internal::Type::BaseModel
74
+ OrHash =
75
+ T.type_alias do
76
+ T.any(
77
+ OpenAI::FileCreateParams::ExpiresAfter,
78
+ OpenAI::Internal::AnyHash
79
+ )
80
+ end
81
+
82
+ # Anchor timestamp after which the expiration policy applies. Supported anchors:
83
+ # `created_at`.
84
+ sig { returns(Symbol) }
85
+ attr_accessor :anchor
86
+
87
+ # The number of seconds after the anchor time that the file will expire. Must be
88
+ # between 3600 (1 hour) and 2592000 (30 days).
89
+ sig { returns(Integer) }
90
+ attr_accessor :seconds
91
+
92
+ # The expiration policy for a file. By default, files with `purpose=batch` expire
93
+ # after 30 days and all other files are persisted until they are manually deleted.
94
+ sig do
95
+ params(seconds: Integer, anchor: Symbol).returns(T.attached_class)
96
+ end
97
+ def self.new(
98
+ # The number of seconds after the anchor time that the file will expire. Must be
99
+ # between 3600 (1 hour) and 2592000 (30 days).
100
+ seconds:,
101
+ # Anchor timestamp after which the expiration policy applies. Supported anchors:
102
+ # `created_at`.
103
+ anchor: :created_at
104
+ )
105
+ end
106
+
107
+ sig { override.returns({ anchor: Symbol, seconds: Integer }) }
108
+ def to_hash
109
+ end
110
+ end
55
111
  end
56
112
  end
57
113
  end
@@ -14,8 +14,8 @@ module OpenAI
14
14
  )
15
15
  end
16
16
 
17
- # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,
18
- # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
17
+ # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`,
18
+ # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
19
19
  sig do
20
20
  returns(
21
21
  OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::OrSymbol
@@ -51,8 +51,8 @@ module OpenAI
51
51
  ).returns(T.attached_class)
52
52
  end
53
53
  def self.new(
54
- # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,
55
- # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
54
+ # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`,
55
+ # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
56
56
  evaluation_metric:,
57
57
  # The text being graded.
58
58
  input:,
@@ -80,8 +80,8 @@ module OpenAI
80
80
  def to_hash
81
81
  end
82
82
 
83
- # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,
84
- # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
83
+ # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`,
84
+ # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
85
85
  module EvaluationMetric
86
86
  extend OpenAI::Internal::Type::Enum
87
87
 
@@ -94,6 +94,11 @@ module OpenAI
94
94
  end
95
95
  OrSymbol = T.type_alias { T.any(Symbol, String) }
96
96
 
97
+ COSINE =
98
+ T.let(
99
+ :cosine,
100
+ OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol
101
+ )
97
102
  FUZZY_MATCH =
98
103
  T.let(
99
104
  :fuzzy_match,
@@ -28,7 +28,7 @@ module OpenAI
28
28
  sig { returns(T.nilable(OpenAI::Reasoning::Summary::OrSymbol)) }
29
29
  attr_accessor :summary
30
30
 
31
- # **o-series models only**
31
+ # **gpt-5 and o-series models only**
32
32
  #
33
33
  # Configuration options for
34
34
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).