openai 0.17.1 → 0.18.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +30 -0
- data/README.md +1 -1
- data/lib/openai/helpers/structured_output/array_of.rb +2 -10
- data/lib/openai/helpers/structured_output/base_model.rb +4 -11
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +19 -3
- data/lib/openai/helpers/structured_output/union_of.rb +2 -10
- data/lib/openai/models/batch_create_params.rb +38 -1
- data/lib/openai/models/beta/thread_create_and_run_params.rb +2 -2
- data/lib/openai/models/beta/threads/run.rb +2 -2
- data/lib/openai/models/beta/threads/run_create_params.rb +2 -2
- data/lib/openai/models/chat/chat_completion.rb +6 -6
- data/lib/openai/models/chat/chat_completion_chunk.rb +6 -6
- data/lib/openai/models/chat/completion_create_params.rb +7 -7
- data/lib/openai/models/file_create_params.rb +37 -1
- data/lib/openai/models/graders/text_similarity_grader.rb +6 -5
- data/lib/openai/models/reasoning.rb +1 -1
- data/lib/openai/models/responses/response.rb +6 -8
- data/lib/openai/models/responses/response_create_params.rb +6 -8
- data/lib/openai/models/upload_create_params.rb +37 -1
- data/lib/openai/resources/batches.rb +3 -1
- data/lib/openai/resources/files.rb +4 -2
- data/lib/openai/resources/responses.rb +2 -2
- data/lib/openai/resources/uploads.rb +3 -1
- data/lib/openai/version.rb +1 -1
- data/rbi/openai/helpers/structured_output/array_of.rbi +0 -3
- data/rbi/openai/helpers/structured_output/json_schema_converter.rbi +10 -0
- data/rbi/openai/models/batch_create_params.rbi +60 -0
- data/rbi/openai/models/beta/thread_create_and_run_params.rbi +3 -3
- data/rbi/openai/models/beta/threads/run.rbi +3 -3
- data/rbi/openai/models/beta/threads/run_create_params.rbi +3 -3
- data/rbi/openai/models/chat/chat_completion.rbi +6 -9
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -9
- data/rbi/openai/models/chat/completion_create_params.rbi +8 -11
- data/rbi/openai/models/file_create_params.rbi +56 -0
- data/rbi/openai/models/graders/text_similarity_grader.rbi +11 -6
- data/rbi/openai/models/reasoning.rbi +1 -1
- data/rbi/openai/models/responses/response.rbi +8 -11
- data/rbi/openai/models/responses/response_create_params.rbi +8 -11
- data/rbi/openai/models/upload_create_params.rbi +56 -0
- data/rbi/openai/resources/batches.rbi +5 -0
- data/rbi/openai/resources/beta/threads/runs.rbi +2 -2
- data/rbi/openai/resources/beta/threads.rbi +2 -2
- data/rbi/openai/resources/chat/completions.rbi +6 -8
- data/rbi/openai/resources/files.rbi +5 -1
- data/rbi/openai/resources/responses.rbi +6 -8
- data/rbi/openai/resources/uploads.rbi +4 -0
- data/sig/openai/models/batch_create_params.rbs +22 -1
- data/sig/openai/models/file_create_params.rbs +22 -1
- data/sig/openai/models/graders/text_similarity_grader.rbs +3 -1
- data/sig/openai/models/upload_create_params.rbs +22 -1
- data/sig/openai/resources/batches.rbs +1 -0
- data/sig/openai/resources/files.rbs +1 -0
- data/sig/openai/resources/uploads.rbs +1 -0
- metadata +2 -2
@@ -176,7 +176,7 @@ module OpenAI
 sig { params(prompt_cache_key: String).void }
 attr_writer :prompt_cache_key

-# **o-series models only**
+# **gpt-5 and o-series models only**
 #
 # Configuration options for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -205,9 +205,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -453,7 +452,7 @@ module OpenAI
 # hit rates. Replaces the `user` field.
 # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 prompt_cache_key: nil,
-# **o-series models only**
+# **gpt-5 and o-series models only**
 #
 # Configuration options for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -472,9 +471,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -717,9 +715,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -157,7 +157,7 @@ module OpenAI
 sig { params(prompt_cache_key: String).void }
 attr_writer :prompt_cache_key

-# **o-series models only**
+# **gpt-5 and o-series models only**
 #
 # Configuration options for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -186,9 +186,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -523,7 +522,7 @@ module OpenAI
 # hit rates. Replaces the `user` field.
 # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 prompt_cache_key: nil,
-# **o-series models only**
+# **gpt-5 and o-series models only**
 #
 # Configuration options for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -542,9 +541,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -725,9 +723,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
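The hunks above are documentation-only: the reasoning note now reads "gpt-5 and o-series models only", and the `service_tier` docs link the 'priority' tier directly. A minimal usage sketch of where these options are passed (the model name, reasoning effort value, and client setup are assumptions, not part of this diff):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Ask for priority processing on a reasoning-capable model.
response = client.responses.create(
  model: "gpt-5",                  # the updated docs call out gpt-5 alongside o-series
  input: "Summarize this release.",
  reasoning: { effort: "medium" }, # assumed effort value; see the reasoning guide
  service_tier: "priority"         # processed with the priority service tier
)
puts response.id
```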
@@ -33,12 +33,25 @@ module OpenAI
 sig { returns(OpenAI::FilePurpose::OrSymbol) }
 attr_accessor :purpose

+# The expiration policy for a file. By default, files with `purpose=batch` expire
+# after 30 days and all other files are persisted until they are manually deleted.
+sig { returns(T.nilable(OpenAI::UploadCreateParams::ExpiresAfter)) }
+attr_reader :expires_after
+
+sig do
+  params(
+    expires_after: OpenAI::UploadCreateParams::ExpiresAfter::OrHash
+  ).void
+end
+attr_writer :expires_after
+
 sig do
   params(
     bytes: Integer,
     filename: String,
     mime_type: String,
     purpose: OpenAI::FilePurpose::OrSymbol,
+    expires_after: OpenAI::UploadCreateParams::ExpiresAfter::OrHash,
     request_options: OpenAI::RequestOptions::OrHash
   ).returns(T.attached_class)
 end
@@ -57,6 +70,9 @@ module OpenAI
   # See the
   # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
   purpose:,
+  # The expiration policy for a file. By default, files with `purpose=batch` expire
+  # after 30 days and all other files are persisted until they are manually deleted.
+  expires_after: nil,
   request_options: {}
 )
 end
@@ -68,12 +84,52 @@ module OpenAI
     filename: String,
     mime_type: String,
     purpose: OpenAI::FilePurpose::OrSymbol,
+    expires_after: OpenAI::UploadCreateParams::ExpiresAfter,
     request_options: OpenAI::RequestOptions
   }
 )
 end
 def to_hash
 end
+
+class ExpiresAfter < OpenAI::Internal::Type::BaseModel
+  OrHash =
+    T.type_alias do
+      T.any(
+        OpenAI::UploadCreateParams::ExpiresAfter,
+        OpenAI::Internal::AnyHash
+      )
+    end
+
+  # Anchor timestamp after which the expiration policy applies. Supported anchors:
+  # `created_at`.
+  sig { returns(Symbol) }
+  attr_accessor :anchor
+
+  # The number of seconds after the anchor time that the file will expire. Must be
+  # between 3600 (1 hour) and 2592000 (30 days).
+  sig { returns(Integer) }
+  attr_accessor :seconds
+
+  # The expiration policy for a file. By default, files with `purpose=batch` expire
+  # after 30 days and all other files are persisted until they are manually deleted.
+  sig do
+    params(seconds: Integer, anchor: Symbol).returns(T.attached_class)
+  end
+  def self.new(
+    # The number of seconds after the anchor time that the file will expire. Must be
+    # between 3600 (1 hour) and 2592000 (30 days).
+    seconds:,
+    # Anchor timestamp after which the expiration policy applies. Supported anchors:
+    # `created_at`.
+    anchor: :created_at
+  )
+  end
+
+  sig { override.returns({ anchor: Symbol, seconds: Integer }) }
+  def to_hash
+  end
+end
 end
 end
 end
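The new `ExpiresAfter` helper is a plain `BaseModel`; per the signature above, `anchor` defaults to `:created_at` and `seconds` must fall between 3600 and 2592000. A small sketch of building it directly (the seconds value is just an example):

```ruby
# Builds the expiration policy shown in the RBI above.
policy = OpenAI::UploadCreateParams::ExpiresAfter.new(seconds: 604_800) # one week
policy.to_hash # => { anchor: :created_at, seconds: 604800 }
```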
@@ -11,6 +11,8 @@ module OpenAI
 endpoint: OpenAI::BatchCreateParams::Endpoint::OrSymbol,
 input_file_id: String,
 metadata: T.nilable(T::Hash[Symbol, String]),
+output_expires_after:
+  OpenAI::BatchCreateParams::OutputExpiresAfter::OrHash,
 request_options: OpenAI::RequestOptions::OrHash
 ).returns(OpenAI::Batch)
 end
@@ -40,6 +42,9 @@ module OpenAI
   # Keys are strings with a maximum length of 64 characters. Values are strings with
   # a maximum length of 512 characters.
   metadata: nil,
+  # The expiration policy for the output and/or error file that are generated for a
+  # batch.
+  output_expires_after: nil,
   request_options: {}
 )
 end
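`Batches#create` now accepts that policy as `output_expires_after`. A hedged sketch, reusing the `client` from the earlier example (the input file ID is a placeholder):

```ruby
batch = client.batches.create(
  completion_window: "24h",
  endpoint: "/v1/chat/completions",
  input_file_id: "file-abc123",                                     # placeholder ID
  output_expires_after: { anchor: :created_at, seconds: 2_592_000 } # keep outputs for 30 days
)
```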
@@ -174,7 +174,7 @@ module OpenAI
 # We generally recommend altering this or temperature but not both.
 top_p: nil,
 # Body param: Controls for how a thread will be truncated prior to the run. Use
-# this to control the
+# this to control the initial context window of the run.
 truncation_strategy: nil,
 # There is no need to provide `stream:`. Instead, use `#create_stream_raw` or
 # `#create` for streaming and non-streaming use cases, respectively.
@@ -353,7 +353,7 @@ module OpenAI
 # We generally recommend altering this or temperature but not both.
 top_p: nil,
 # Body param: Controls for how a thread will be truncated prior to the run. Use
-# this to control the
+# this to control the initial context window of the run.
 truncation_strategy: nil,
 # There is no need to provide `stream:`. Instead, use `#create_stream_raw` or
 # `#create` for streaming and non-streaming use cases, respectively.
@@ -242,7 +242,7 @@ module OpenAI
 # We generally recommend altering this or temperature but not both.
 top_p: nil,
 # Controls for how a thread will be truncated prior to the run. Use this to
-# control the
+# control the initial context window of the run.
 truncation_strategy: nil,
 # There is no need to provide `stream:`. Instead, use `#stream_raw` or
 # `#create_and_run` for streaming and non-streaming use cases, respectively.
@@ -396,7 +396,7 @@ module OpenAI
 # We generally recommend altering this or temperature but not both.
 top_p: nil,
 # Controls for how a thread will be truncated prior to the run. Use this to
-# control the
+# control the initial context window of the run.
 truncation_strategy: nil,
 # There is no need to provide `stream:`. Instead, use `#stream_raw` or
 # `#create_and_run` for streaming and non-streaming use cases, respectively.
@@ -254,9 +254,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -273,7 +272,7 @@ module OpenAI
 # our [model distillation](https://platform.openai.com/docs/guides/distillation)
 # or [evals](https://platform.openai.com/docs/guides/evals) products.
 #
-# Supports text and image inputs. Note: image inputs over
+# Supports text and image inputs. Note: image inputs over 8MB will be dropped.
 store: nil,
 # Options for streaming response. Only set this when you set `stream: true`.
 stream_options: nil,
@@ -572,9 +571,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -591,7 +589,7 @@ module OpenAI
 # our [model distillation](https://platform.openai.com/docs/guides/distillation)
 # or [evals](https://platform.openai.com/docs/guides/evals) products.
 #
-# Supports text and image inputs. Note: image inputs over
+# Supports text and image inputs. Note: image inputs over 8MB will be dropped.
 store: nil,
 # Options for streaming response. Only set this when you set `stream: true`.
 stream_options: nil,
@@ -164,7 +164,7 @@ module OpenAI
 # hit rates. Replaces the `user` field.
 # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 prompt_cache_key: nil,
-# **o-series models only**
+# **gpt-5 and o-series models only**
 #
 # Configuration options for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
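For Chat Completions the wording changes are the same, plus the note that image inputs over 8MB are dropped from stored completions. A usage sketch under the same assumptions as the earlier example:

```ruby
chat = client.chat.completions.create(
  model: "gpt-5",                                  # assumed model name
  messages: [{ role: "user", content: "Hello!" }],
  service_tier: "priority",
  store: true                                      # stored for distillation/evals use
)
puts chat.choices.first.message.content
```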
@@ -5,7 +5,7 @@ module OpenAI
 class Files
   # Upload a file that can be used across various endpoints. Individual files can be
   # up to 512 MB, and the size of all files uploaded by one organization can be up
-  # to
+  # to 1 TB.
   #
   # The Assistants API supports files up to 2 million tokens and of specific file
   # types. See the
@@ -28,6 +28,7 @@ module OpenAI
 params(
   file: OpenAI::Internal::FileInput,
   purpose: OpenAI::FilePurpose::OrSymbol,
+  expires_after: OpenAI::FileCreateParams::ExpiresAfter::OrHash,
   request_options: OpenAI::RequestOptions::OrHash
 ).returns(OpenAI::FileObject)
 end
@@ -39,6 +40,9 @@ module OpenAI
   # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
   # Flexible file type for any purpose - `evals`: Used for eval data sets
   purpose:,
+  # The expiration policy for a file. By default, files with `purpose=batch` expire
+  # after 30 days and all other files are persisted until they are manually deleted.
+  expires_after: nil,
   request_options: {}
 )
 end
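`Files#create` gains the optional `expires_after` argument shown above. A sketch (the file path is a placeholder; `Pathname`, IO objects, and similar inputs are accepted by the SDK's file input type, and `require "pathname"` is assumed):

```ruby
file = client.files.create(
  file: Pathname("batch_input.jsonl"),                      # placeholder path
  purpose: :batch,
  expires_after: { anchor: :created_at, seconds: 604_800 }  # expire one week after creation
)
```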
@@ -183,9 +183,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -423,7 +422,7 @@ module OpenAI
 # hit rates. Replaces the `user` field.
 # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 prompt_cache_key: nil,
-# **o-series models only**
+# **gpt-5 and o-series models only**
 #
 # Configuration options for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -442,9 +441,8 @@ module OpenAI
 # - If set to 'default', then the request will be processed with the standard
 # pricing and performance for the selected model.
 # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-# 'priority', then the request
-#
-# Priority processing.
+# '[priority](https://openai.com/api-priority-processing/)', then the request
+# will be processed with the corresponding service tier.
 # - When not set, the default behavior is 'auto'.
 #
 # When the `service_tier` parameter is set, the response body will include the
@@ -31,6 +31,7 @@ module OpenAI
 filename: String,
 mime_type: String,
 purpose: OpenAI::FilePurpose::OrSymbol,
+expires_after: OpenAI::UploadCreateParams::ExpiresAfter::OrHash,
 request_options: OpenAI::RequestOptions::OrHash
 ).returns(OpenAI::Upload)
 end
@@ -49,6 +50,9 @@ module OpenAI
   # See the
   # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
   purpose:,
+  # The expiration policy for a file. By default, files with `purpose=batch` expire
+  # after 30 days and all other files are persisted until they are manually deleted.
+  expires_after: nil,
   request_options: {}
 )
 end
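The multipart Uploads resource takes the same policy. A sketch with placeholder sizes and names:

```ruby
upload = client.uploads.create(
  bytes: 2_097_152,                                        # placeholder total size in bytes
  filename: "batch_input.jsonl",
  mime_type: "text/jsonl",
  purpose: :batch,
  expires_after: { anchor: :created_at, seconds: 86_400 }  # expire one day after creation
)
```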
@@ -5,7 +5,8 @@ module OpenAI
 completion_window: OpenAI::Models::BatchCreateParams::completion_window,
 endpoint: OpenAI::Models::BatchCreateParams::endpoint,
 input_file_id: String,
-metadata: OpenAI::Models::metadata
+metadata: OpenAI::Models::metadata?,
+output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter
 }
 & OpenAI::Internal::Type::request_parameters

@@ -21,11 +22,18 @@ module OpenAI

 attr_accessor metadata: OpenAI::Models::metadata?

+attr_reader output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter?
+
+def output_expires_after=: (
+  OpenAI::BatchCreateParams::OutputExpiresAfter
+) -> OpenAI::BatchCreateParams::OutputExpiresAfter
+
 def initialize: (
   completion_window: OpenAI::Models::BatchCreateParams::completion_window,
   endpoint: OpenAI::Models::BatchCreateParams::endpoint,
   input_file_id: String,
   ?metadata: OpenAI::Models::metadata?,
+  ?output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter,
   ?request_options: OpenAI::request_opts
 ) -> void

@@ -34,6 +42,7 @@ module OpenAI
   endpoint: OpenAI::Models::BatchCreateParams::endpoint,
   input_file_id: String,
   metadata: OpenAI::Models::metadata?,
+  output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter,
   request_options: OpenAI::RequestOptions
 }

@@ -63,6 +72,18 @@ module OpenAI

   def self?.values: -> ::Array[OpenAI::Models::BatchCreateParams::endpoint]
 end
+
+type output_expires_after = { anchor: :created_at, seconds: Integer }
+
+class OutputExpiresAfter < OpenAI::Internal::Type::BaseModel
+  attr_accessor anchor: :created_at
+
+  attr_accessor seconds: Integer
+
+  def initialize: (seconds: Integer, ?anchor: :created_at) -> void
+
+  def to_hash: -> { anchor: :created_at, seconds: Integer }
+end
 end
 end
 end
@@ -3,7 +3,8 @@ module OpenAI
 type file_create_params =
   {
     file: OpenAI::Internal::file_input,
-    purpose: OpenAI::Models::file_purpose
+    purpose: OpenAI::Models::file_purpose,
+    expires_after: OpenAI::FileCreateParams::ExpiresAfter
   }
   & OpenAI::Internal::Type::request_parameters

@@ -15,17 +16,37 @@ module OpenAI

 attr_accessor purpose: OpenAI::Models::file_purpose

+attr_reader expires_after: OpenAI::FileCreateParams::ExpiresAfter?
+
+def expires_after=: (
+  OpenAI::FileCreateParams::ExpiresAfter
+) -> OpenAI::FileCreateParams::ExpiresAfter
+
 def initialize: (
   file: OpenAI::Internal::file_input,
   purpose: OpenAI::Models::file_purpose,
+  ?expires_after: OpenAI::FileCreateParams::ExpiresAfter,
   ?request_options: OpenAI::request_opts
 ) -> void

 def to_hash: -> {
   file: OpenAI::Internal::file_input,
   purpose: OpenAI::Models::file_purpose,
+  expires_after: OpenAI::FileCreateParams::ExpiresAfter,
   request_options: OpenAI::RequestOptions
 }
+
+type expires_after = { anchor: :created_at, seconds: Integer }
+
+class ExpiresAfter < OpenAI::Internal::Type::BaseModel
+  attr_accessor anchor: :created_at
+
+  attr_accessor seconds: Integer
+
+  def initialize: (seconds: Integer, ?anchor: :created_at) -> void
+
+  def to_hash: -> { anchor: :created_at, seconds: Integer }
+end
 end
 end
 end
@@ -40,7 +40,8 @@ module OpenAI
 }

 type evaluation_metric =
-  :
+  :cosine
+  | :fuzzy_match
   | :bleu
   | :gleu
   | :meteor
@@ -54,6 +55,7 @@ module OpenAI
 module EvaluationMetric
   extend OpenAI::Internal::Type::Enum

+  COSINE: :cosine
   FUZZY_MATCH: :fuzzy_match
   BLEU: :bleu
   GLEU: :gleu
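The `evaluation_metric` enum gains `cosine`. A hedged sketch of a text-similarity grader configuration that uses it; the surrounding field names follow the existing grader schema and are assumptions, not part of this diff:

```ruby
grader = {
  type: "text_similarity",
  name: "reference_similarity",           # example name
  input: "{{sample.output_text}}",        # assumed template fields
  reference: "{{item.reference_answer}}",
  evaluation_metric: "cosine"             # newly added metric in this release
}
```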
@@ -5,7 +5,8 @@ module OpenAI
 bytes: Integer,
 filename: String,
 mime_type: String,
-purpose: OpenAI::Models::file_purpose
+purpose: OpenAI::Models::file_purpose,
+expires_after: OpenAI::UploadCreateParams::ExpiresAfter
 }
 & OpenAI::Internal::Type::request_parameters

@@ -21,11 +22,18 @@ module OpenAI

 attr_accessor purpose: OpenAI::Models::file_purpose

+attr_reader expires_after: OpenAI::UploadCreateParams::ExpiresAfter?
+
+def expires_after=: (
+  OpenAI::UploadCreateParams::ExpiresAfter
+) -> OpenAI::UploadCreateParams::ExpiresAfter
+
 def initialize: (
   bytes: Integer,
   filename: String,
   mime_type: String,
   purpose: OpenAI::Models::file_purpose,
+  ?expires_after: OpenAI::UploadCreateParams::ExpiresAfter,
   ?request_options: OpenAI::request_opts
 ) -> void

@@ -34,8 +42,21 @@ module OpenAI
   filename: String,
   mime_type: String,
   purpose: OpenAI::Models::file_purpose,
+  expires_after: OpenAI::UploadCreateParams::ExpiresAfter,
   request_options: OpenAI::RequestOptions
 }
+
+type expires_after = { anchor: :created_at, seconds: Integer }
+
+class ExpiresAfter < OpenAI::Internal::Type::BaseModel
+  attr_accessor anchor: :created_at
+
+  attr_accessor seconds: Integer
+
+  def initialize: (seconds: Integer, ?anchor: :created_at) -> void
+
+  def to_hash: -> { anchor: :created_at, seconds: Integer }
+end
 end
 end
 end
@@ -6,6 +6,7 @@ module OpenAI
 endpoint: OpenAI::Models::BatchCreateParams::endpoint,
 input_file_id: String,
 ?metadata: OpenAI::Models::metadata?,
+?output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter,
 ?request_options: OpenAI::request_opts
 ) -> OpenAI::Batch

metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: openai
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.18.1
 platform: ruby
 authors:
 - OpenAI
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-08-
+date: 2025-08-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: connection_pool