openai 0.59.0 → 0.60.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +28 -0
  3. data/README.md +3 -3
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/models/chat/completion_create_params.rb +1 -1
  6. data/lib/openai/models/responses/response.rb +1 -1
  7. data/lib/openai/models/responses/response_compact_params.rb +22 -1
  8. data/lib/openai/models/responses/response_create_params.rb +1 -1
  9. data/lib/openai/models/responses/responses_client_event.rb +1 -1
  10. data/lib/openai/resources/audio/transcriptions.rb +5 -1
  11. data/lib/openai/resources/beta/threads/runs.rb +2 -2
  12. data/lib/openai/resources/beta/threads.rb +1 -1
  13. data/lib/openai/resources/chat/completions.rb +1 -1
  14. data/lib/openai/resources/completions.rb +1 -1
  15. data/lib/openai/resources/files.rb +8 -1
  16. data/lib/openai/resources/images.rb +6 -2
  17. data/lib/openai/resources/responses.rb +5 -3
  18. data/lib/openai/version.rb +1 -1
  19. data/rbi/openai/models/chat/completion_create_params.rbi +1 -1
  20. data/rbi/openai/models/responses/response.rbi +1 -1
  21. data/rbi/openai/models/responses/response_compact_params.rbi +55 -0
  22. data/rbi/openai/models/responses/response_create_params.rbi +1 -1
  23. data/rbi/openai/models/responses/responses_client_event.rbi +1 -1
  24. data/rbi/openai/resources/files.rbi +8 -1
  25. data/rbi/openai/resources/responses.rbi +6 -0
  26. data/sig/openai/models/chat/completion_create_params.rbs +2 -2
  27. data/sig/openai/models/responses/response.rbs +2 -2
  28. data/sig/openai/models/responses/response_compact_params.rbs +17 -1
  29. data/sig/openai/models/responses/response_create_params.rbs +2 -2
  30. data/sig/openai/models/responses/responses_client_event.rbs +2 -2
  31. data/sig/openai/resources/responses.rbs +1 -0
  32. metadata +2 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 0474a27b4d565706e771aa87d26edda2bbed3004cf0c506c19e1c7d629f9db9d
- data.tar.gz: 8af111e9a7f8435f9ffb4acb74b93d7cc679b724ef58855b223f22b4cc844707
+ metadata.gz: 8a23e970e2907a5c12466a0fe422f2474e22f139cb1ef3236c28cbd7580b25b4
+ data.tar.gz: 56cbf7ddbb893e7df07214f85e564f8471c97117f0f0c59664b4970cc0d1177b
  SHA512:
- metadata.gz: e806a4a7a030edae725e87613b8eee4b196afcd063c144e1e92f64c9bdd6adaacbb3824eff5e7498045b6c87073df0de1c0f21edd420bee8d97bc913a989d186
- data.tar.gz: b70cb7b78621b602b0d4692c748fe8f7ba7361b704bb9521c113dec82a95d1a488af2286b5abf5398410f2889130d55e3f9f9f65752408af007aae81b4838c94
+ metadata.gz: 13460eb99f7d27be65434cc9e46d965ee6f2a07f93b970b42eab04c1377e61d042db5efee2ddb55e7b238baf607f4485308defdf96a4301c119b96364809a423
+ data.tar.gz: 11d8b7275d1beb13c65b30722d55df7aa0c6231cd9edfb74b1d053bdf3604b78157afb713e066bff1cd9a47e1940e95d88471320086a49bc467d2d4067fb345e
data/CHANGELOG.md CHANGED
@@ -1,5 +1,33 @@
  # Changelog
 
+ ## 0.60.0 (2026-04-28)
+
+ Full Changelog: [v0.59.0...v0.60.0](https://github.com/openai/openai-ruby/compare/v0.59.0...v0.60.0)
+
+ ### Features
+
+ * **api:** add prompt_cache_retention parameter to responses compact method ([a27cd24](https://github.com/openai/openai-ruby/commit/a27cd24ad266e52a3b3f3067158839ea570f15e1))
+ * support setting headers via env ([334512e](https://github.com/openai/openai-ruby/commit/334512e35ea5f4701b1617b4268718df5fa75b12))
+
+
+ ### Bug Fixes
+
+ * avoid gzip buffering during streaming ([ec1b1fa](https://github.com/openai/openai-ruby/commit/ec1b1fa435e2bc7f5a70b481081fc933d02ac253))
+ * **types:** correct PromptCacheRetention IN_MEMORY enum value in chat/responses ([e81e954](https://github.com/openai/openai-ruby/commit/e81e9543ca20634c1e756822a6285cdc0c46d06f))
+
+
+ ### Chores
+
+ * **ci:** remove release-doctor workflow ([0f371f5](https://github.com/openai/openai-ruby/commit/0f371f59552efd273d800570757ce61cd91b9709))
+ * **internal:** more robust bootstrap script ([7bcbf5a](https://github.com/openai/openai-ruby/commit/7bcbf5aa493bb7c7f310a000735ad3ec3ae0b291))
+ * **tests:** bump steady to v0.22.1 ([14594c1](https://github.com/openai/openai-ruby/commit/14594c1b3b84bf5bafc14a8c8779ea8b4507e7dc))
+
+
+ ### Documentation
+
+ * **api:** add rate limit details to files.create documentation ([c7c9967](https://github.com/openai/openai-ruby/commit/c7c99675cd0d570d44db91a66ce8f50855a57c5d))
+ * **api:** update rate limit documentation for files.create ([09e4d86](https://github.com/openai/openai-ruby/commit/09e4d86b590d0d9db32207fa9fd568bbd8025a6d))
+
  ## 0.59.0 (2026-04-14)
 
  Full Changelog: [v0.58.0...v0.59.0](https://github.com/openai/openai-ruby/compare/v0.58.0...v0.59.0)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
  <!-- x-release-please-start-version -->
 
  ```ruby
- gem "openai", "~> 0.59.0"
+ gem "openai", "~> 0.60.0"
  ```
 
  <!-- x-release-please-end -->
@@ -561,7 +561,7 @@ openai.chat.completions.create(**params)
  Since this library does not depend on `sorbet-runtime`, it cannot provide [`T::Enum`](https://sorbet.org/docs/tenum) instances. Instead, we provide "tagged symbols" instead, which is always a primitive at runtime:
 
  ```ruby
- # :"in-memory"
+ # :in_memory
  puts(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY)
 
  # Revealed type: `T.all(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention, Symbol)`
@@ -579,7 +579,7 @@ openai.chat.completions.create(
 
  # Literal values are also permissible:
  openai.chat.completions.create(
-   prompt_cache_retention: :"in-memory",
+   prompt_cache_retention: :in_memory,
    # …
  )
  ```
data/lib/openai/client.rb CHANGED
@@ -212,6 +212,17 @@ module OpenAI
    "openai-organization" => (@organization = organization&.to_s),
    "openai-project" => (@project = project&.to_s)
  }
+ custom_headers_env = ENV["OPENAI_CUSTOM_HEADERS"]
+ unless custom_headers_env.nil?
+   parsed = {}
+   custom_headers_env.split("\n").each do |line|
+     colon = line.index(":")
+     unless colon.nil?
+       parsed[line[0...colon].strip] = line[(colon + 1)..].strip
+     end
+   end
+   headers = parsed.merge(headers)
+ end
 
  @api_key = api_key.to_s
  @webhook_secret = webhook_secret&.to_s
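The new `OPENAI_CUSTOM_HEADERS` hook parses newline-separated `Name: value` pairs, and `parsed.merge(headers)` means env-derived values cannot clobber the client's own `authorization`/`openai-organization`/`openai-project` headers. A minimal sketch of how a caller might use it; the header names and values here are illustrative, not part of the gem:

```ruby
# Illustrative only: any newline-separated "Name: value" pairs work.
# Must be set before the client is constructed, since parsing happens
# in OpenAI::Client#initialize.
ENV["OPENAI_CUSTOM_HEADERS"] = "x-proxy-tenant: acme\nx-trace-id: abc123"

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
# Per the parsing logic above, requests from this client now carry
# x-proxy-tenant and x-trace-id alongside the usual auth headers.
```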
data/lib/openai/models/chat/completion_create_params.rb CHANGED
@@ -577,7 +577,7 @@ module OpenAI
  module PromptCacheRetention
    extend OpenAI::Internal::Type::Enum
 
-   IN_MEMORY = :"in-memory"
+   IN_MEMORY = :in_memory
    PROMPT_CACHE_RETENTION_24H = :"24h"
 
    # @!method self.values
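This one-line rename repeats across the Responses models and the RBI/RBS signatures below. For callers passing the literal it is a visible change; a hedged before/after sketch (model and message are illustrative):

```ruby
# 0.59.0 — the enum exposed a hyphenated symbol:
openai.chat.completions.create(
  model: "gpt-5",  # illustrative
  messages: [{role: :user, content: "Hello"}],
  prompt_cache_retention: :"in-memory"
)

# 0.60.0 — corrected to match the API's value:
openai.chat.completions.create(
  model: "gpt-5",
  messages: [{role: :user, content: "Hello"}],
  prompt_cache_retention: :in_memory
  # equivalently: OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY
)
```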
data/lib/openai/models/responses/response.rb CHANGED
@@ -515,7 +515,7 @@ module OpenAI
  module PromptCacheRetention
    extend OpenAI::Internal::Type::Enum
 
-   IN_MEMORY = :"in-memory"
+   IN_MEMORY = :in_memory
    PROMPT_CACHE_RETENTION_24H = :"24h"
 
    # @!method self.values
data/lib/openai/models/responses/response_compact_params.rb CHANGED
@@ -48,7 +48,15 @@ module OpenAI
    # @return [String, nil]
    optional :prompt_cache_key, String, nil?: true
 
-   # @!method initialize(model:, input: nil, instructions: nil, previous_response_id: nil, prompt_cache_key: nil, request_options: {})
+   # @!attribute prompt_cache_retention
+   #   How long to retain a prompt cache entry created by this request.
+   #
+   #   @return [Symbol, OpenAI::Models::Responses::ResponseCompactParams::PromptCacheRetention, nil]
+   optional :prompt_cache_retention,
+            enum: -> { OpenAI::Responses::ResponseCompactParams::PromptCacheRetention },
+            nil?: true
+
+   # @!method initialize(model:, input: nil, instructions: nil, previous_response_id: nil, prompt_cache_key: nil, prompt_cache_retention: nil, request_options: {})
    # Some parameter documentations has been truncated, see
    # {OpenAI::Models::Responses::ResponseCompactParams} for more details.
    #
@@ -62,6 +70,8 @@ module OpenAI
    #
    # @param prompt_cache_key [String, nil] A key to use when reading from or writing to the prompt cache.
    #
+   # @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::ResponseCompactParams::PromptCacheRetention, nil] How long to retain a prompt cache entry created by this request.
+   #
    # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
 
    # Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
@@ -380,6 +390,17 @@ module OpenAI
      ResponseInputItemArray =
        OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
    end
+
+   # How long to retain a prompt cache entry created by this request.
+   module PromptCacheRetention
+     extend OpenAI::Internal::Type::Enum
+
+     IN_MEMORY = :in_memory
+     PROMPT_CACHE_RETENTION_24H = :"24h"
+
+     # @!method self.values
+     #   @return [Array<Symbol>]
+   end
  end
  end
  end
data/lib/openai/models/responses/response_create_params.rb CHANGED
@@ -448,7 +448,7 @@ module OpenAI
  module PromptCacheRetention
    extend OpenAI::Internal::Type::Enum
 
-   IN_MEMORY = :"in-memory"
+   IN_MEMORY = :in_memory
    PROMPT_CACHE_RETENTION_24H = :"24h"
 
    # @!method self.values
data/lib/openai/models/responses/responses_client_event.rb CHANGED
@@ -461,7 +461,7 @@ module OpenAI
  module PromptCacheRetention
    extend OpenAI::Internal::Type::Enum
 
-   IN_MEMORY = :"in-memory"
+   IN_MEMORY = :in_memory
    PROMPT_CACHE_RETENTION_24H = :"24h"
 
    # @!method self.values
data/lib/openai/resources/audio/transcriptions.rb CHANGED
@@ -111,7 +111,11 @@ module OpenAI
  @client.request(
    method: :post,
    path: "audio/transcriptions",
-   headers: {"content-type" => "multipart/form-data", "accept" => "text/event-stream"},
+   headers: {
+     "content-type" => "multipart/form-data",
+     "accept" => "text/event-stream",
+     "accept-encoding" => "identity"
+   },
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Audio::TranscriptionStreamEvent,
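This header change, repeated across every streaming resource below, is the "avoid gzip buffering during streaming" fix from the changelog: requesting the `identity` encoding asks the server not to gzip the SSE body, so individual event chunks are no longer held back in a compression buffer before the client can parse them.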
data/lib/openai/resources/beta/threads/runs.rb CHANGED
@@ -149,7 +149,7 @@ module OpenAI
    method: :post,
    path: ["threads/%1$s/runs", thread_id],
    query: query,
-   headers: {"accept" => "text/event-stream"},
+   headers: {"accept" => "text/event-stream", "accept-encoding" => "identity"},
    body: parsed.except(*query_params),
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Beta::AssistantStreamEvent,
@@ -375,7 +375,7 @@ module OpenAI
  @client.request(
    method: :post,
    path: ["threads/%1$s/runs/%2$s/submit_tool_outputs", thread_id, run_id],
-   headers: {"accept" => "text/event-stream"},
+   headers: {"accept" => "text/event-stream", "accept-encoding" => "identity"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Beta::AssistantStreamEvent,
data/lib/openai/resources/beta/threads.rb CHANGED
@@ -243,7 +243,7 @@ module OpenAI
  @client.request(
    method: :post,
    path: "threads/runs",
-   headers: {"accept" => "text/event-stream"},
+   headers: {"accept" => "text/event-stream", "accept-encoding" => "identity"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Beta::AssistantStreamEvent,
data/lib/openai/resources/chat/completions.rb CHANGED
@@ -372,7 +372,7 @@ module OpenAI
  @client.request(
    method: :post,
    path: "chat/completions",
-   headers: {"accept" => "text/event-stream"},
+   headers: {"accept" => "text/event-stream", "accept-encoding" => "identity"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Chat::ChatCompletionChunk,
data/lib/openai/resources/completions.rb CHANGED
@@ -132,7 +132,7 @@ module OpenAI
  @client.request(
    method: :post,
    path: "completions",
-   headers: {"accept" => "text/event-stream"},
+   headers: {"accept" => "text/event-stream", "accept-encoding" => "identity"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Completion,
data/lib/openai/resources/files.rb CHANGED
@@ -10,7 +10,8 @@ module OpenAI
  #
  # Upload a file that can be used across various endpoints. Individual files can be
  # up to 512 MB, and each project can store up to 2.5 TB of files in total. There
- # is no organization-wide storage limit.
+ # is no organization-wide storage limit. Uploads to this endpoint are rate-limited
+ # to 1,000 requests per minute per authenticated user.
  #
  # - The Assistants API supports files up to 2 million tokens and of specific file
  #   types. See the
@@ -25,6 +26,12 @@ module OpenAI
  # - The Batch API only supports `.jsonl` files up to 200 MB in size. The input
  #   also has a specific required
  #   [format](https://platform.openai.com/docs/api-reference/batch/request-input).
+ # - For Retrieval or `file_search` ingestion, upload files here first. If you need
+ #   to attach multiple uploaded files to the same vector store, use
+ #   [`/vector_stores/{vector_store_id}/file_batches`](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch)
+ #   instead of attaching them one by one. Vector store attachment has separate
+ #   limits from file upload, including 2,000 attached files per minute per
+ #   organization.
  #
  # Please [contact us](https://help.openai.com/) if you need to increase these
  # storage limits.
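The new guidance maps directly onto this gem. A hedged sketch of the batch-attach flow; the vector store ID and file paths are illustrative, and the `vector_stores.file_batches.create` shape is assumed from the REST endpoint linked above rather than confirmed by this diff:

```ruby
require "openai"
require "pathname"

openai = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

# Upload each file first via this endpoint (rate-limited to 1,000
# requests/minute per user, per the updated documentation).
file_ids = ["notes.md", "faq.md"].map do |path|
  openai.files.create(file: Pathname(path), purpose: "assistants").id
end

# Then attach them to the vector store in one batch rather than one by one
# (attachment is separately limited to 2,000 files/minute per organization).
openai.vector_stores.file_batches.create("vs_123", file_ids: file_ids)
```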
data/lib/openai/resources/images.rb CHANGED
@@ -154,7 +154,11 @@ module OpenAI
  @client.request(
    method: :post,
    path: "images/edits",
-   headers: {"content-type" => "multipart/form-data", "accept" => "text/event-stream"},
+   headers: {
+     "content-type" => "multipart/form-data",
+     "accept" => "text/event-stream",
+     "accept-encoding" => "identity"
+   },
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::ImageEditStreamEvent,
@@ -269,7 +273,7 @@ module OpenAI
  @client.request(
    method: :post,
    path: "images/generations",
-   headers: {"accept" => "text/event-stream"},
+   headers: {"accept" => "text/event-stream", "accept-encoding" => "identity"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::ImageGenStreamEvent,
data/lib/openai/resources/responses.rb CHANGED
@@ -321,7 +321,7 @@ module OpenAI
  @client.request(
    method: :post,
    path: "responses",
-   headers: {"accept" => "text/event-stream"},
+   headers: {"accept" => "text/event-stream", "accept-encoding" => "identity"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
@@ -401,7 +401,7 @@ module OpenAI
    method: :get,
    path: ["responses/%1$s", response_id],
    query: query,
-   headers: {"accept" => "text/event-stream"},
+   headers: {"accept" => "text/event-stream", "accept-encoding" => "identity"},
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
    options: options
@@ -475,7 +475,7 @@ module OpenAI
  # For ZDR-compatible compaction details, see
  # [Compaction (advanced)](https://platform.openai.com/docs/guides/conversation-state#compaction-advanced).
  #
- # @overload compact(model:, input: nil, instructions: nil, previous_response_id: nil, prompt_cache_key: nil, request_options: {})
+ # @overload compact(model:, input: nil, instructions: nil, previous_response_id: nil, prompt_cache_key: nil, prompt_cache_retention: nil, request_options: {})
  #
  # @param model [Symbol, String, OpenAI::Models::Responses::ResponseCompactParams::Model, nil] Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wi
  #
@@ -487,6 +487,8 @@ module OpenAI
  #
  # @param prompt_cache_key [String, nil] A key to use when reading from or writing to the prompt cache.
  #
+ # @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::ResponseCompactParams::PromptCacheRetention, nil] How long to retain a prompt cache entry created by this request.
+ #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
  # @return [OpenAI::Models::Responses::CompactedResponse]
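With the parameter threaded through, `responses.compact` accepts it directly. A minimal usage sketch per the `@overload` above (model ID and response ID are illustrative):

```ruby
require "openai"

openai = OpenAI::Client.new

# Compact a prior conversation; the prompt cache entry created by this
# request is kept with in-memory retention (:in_memory or :"24h" per the
# new enum).
compacted = openai.responses.compact(
  model: "gpt-5",                    # illustrative
  previous_response_id: "resp_123",  # illustrative
  prompt_cache_retention: :in_memory
)
# => OpenAI::Models::Responses::CompactedResponse
```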
data/lib/openai/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module OpenAI
-   VERSION = "0.59.0"
+   VERSION = "0.60.0"
  end
data/rbi/openai/models/chat/completion_create_params.rbi CHANGED
@@ -1118,7 +1118,7 @@ module OpenAI
 
  IN_MEMORY =
    T.let(
-     :"in-memory",
+     :in_memory,
      OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::TaggedSymbol
    )
  PROMPT_CACHE_RETENTION_24H =
data/rbi/openai/models/responses/response.rbi CHANGED
@@ -837,7 +837,7 @@ module OpenAI
 
  IN_MEMORY =
    T.let(
-     :"in-memory",
+     :in_memory,
      OpenAI::Responses::Response::PromptCacheRetention::TaggedSymbol
    )
  PROMPT_CACHE_RETENTION_24H =
data/rbi/openai/models/responses/response_compact_params.rbi CHANGED
@@ -58,6 +58,16 @@ module OpenAI
  sig { returns(T.nilable(String)) }
  attr_accessor :prompt_cache_key
 
+ # How long to retain a prompt cache entry created by this request.
+ sig do
+   returns(
+     T.nilable(
+       OpenAI::Responses::ResponseCompactParams::PromptCacheRetention::OrSymbol
+     )
+   )
+ end
+ attr_accessor :prompt_cache_retention
+
  sig do
    params(
      model:
@@ -74,6 +84,10 @@ module OpenAI
      instructions: T.nilable(String),
      previous_response_id: T.nilable(String),
      prompt_cache_key: T.nilable(String),
+     prompt_cache_retention:
+       T.nilable(
+         OpenAI::Responses::ResponseCompactParams::PromptCacheRetention::OrSymbol
+       ),
      request_options: OpenAI::RequestOptions::OrHash
    ).returns(T.attached_class)
  end
@@ -98,6 +112,8 @@ module OpenAI
    previous_response_id: nil,
    # A key to use when reading from or writing to the prompt cache.
    prompt_cache_key: nil,
+   # How long to retain a prompt cache entry created by this request.
+   prompt_cache_retention: nil,
    request_options: {}
  )
  end
@@ -119,6 +135,10 @@ module OpenAI
      instructions: T.nilable(String),
      previous_response_id: T.nilable(String),
      prompt_cache_key: T.nilable(String),
+     prompt_cache_retention:
+       T.nilable(
+         OpenAI::Responses::ResponseCompactParams::PromptCacheRetention::OrSymbol
+       ),
      request_options: OpenAI::RequestOptions
    }
  )
@@ -650,6 +670,41 @@ module OpenAI
      OpenAI::Internal::Type::Converter
    )
  end
+
+ # How long to retain a prompt cache entry created by this request.
+ module PromptCacheRetention
+   extend OpenAI::Internal::Type::Enum
+
+   TaggedSymbol =
+     T.type_alias do
+       T.all(
+         Symbol,
+         OpenAI::Responses::ResponseCompactParams::PromptCacheRetention
+       )
+     end
+   OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+   IN_MEMORY =
+     T.let(
+       :in_memory,
+       OpenAI::Responses::ResponseCompactParams::PromptCacheRetention::TaggedSymbol
+     )
+   PROMPT_CACHE_RETENTION_24H =
+     T.let(
+       :"24h",
+       OpenAI::Responses::ResponseCompactParams::PromptCacheRetention::TaggedSymbol
+     )
+
+   sig do
+     override.returns(
+       T::Array[
+         OpenAI::Responses::ResponseCompactParams::PromptCacheRetention::TaggedSymbol
+       ]
+     )
+   end
+   def self.values
+   end
+ end
  end
  end
  end
data/rbi/openai/models/responses/response_create_params.rbi CHANGED
@@ -922,7 +922,7 @@ module OpenAI
 
  IN_MEMORY =
    T.let(
-     :"in-memory",
+     :in_memory,
      OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::TaggedSymbol
    )
  PROMPT_CACHE_RETENTION_24H =
data/rbi/openai/models/responses/responses_client_event.rbi CHANGED
@@ -934,7 +934,7 @@ module OpenAI
 
  IN_MEMORY =
    T.let(
-     :"in-memory",
+     :in_memory,
      OpenAI::Responses::ResponsesClientEvent::PromptCacheRetention::TaggedSymbol
    )
  PROMPT_CACHE_RETENTION_24H =
data/rbi/openai/resources/files.rbi CHANGED
@@ -7,7 +7,8 @@ module OpenAI
  class Files
    # Upload a file that can be used across various endpoints. Individual files can be
    # up to 512 MB, and each project can store up to 2.5 TB of files in total. There
-   # is no organization-wide storage limit.
+   # is no organization-wide storage limit. Uploads to this endpoint are rate-limited
+   # to 1,000 requests per minute per authenticated user.
    #
    # - The Assistants API supports files up to 2 million tokens and of specific file
    #   types. See the
@@ -22,6 +23,12 @@ module OpenAI
    # - The Batch API only supports `.jsonl` files up to 200 MB in size. The input
    #   also has a specific required
    #   [format](https://platform.openai.com/docs/api-reference/batch/request-input).
+   # - For Retrieval or `file_search` ingestion, upload files here first. If you need
+   #   to attach multiple uploaded files to the same vector store, use
+   #   [`/vector_stores/{vector_store_id}/file_batches`](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch)
+   #   instead of attaching them one by one. Vector store attachment has separate
+   #   limits from file upload, including 2,000 attached files per minute per
+   #   organization.
    #
    # Please [contact us](https://help.openai.com/) if you need to increase these
    # storage limits.
data/rbi/openai/resources/responses.rbi CHANGED
@@ -988,6 +988,10 @@ module OpenAI
      instructions: T.nilable(String),
      previous_response_id: T.nilable(String),
      prompt_cache_key: T.nilable(String),
+     prompt_cache_retention:
+       T.nilable(
+         OpenAI::Responses::ResponseCompactParams::PromptCacheRetention::OrSymbol
+       ),
      request_options: OpenAI::RequestOptions::OrHash
    ).returns(OpenAI::Responses::CompactedResponse)
  end
@@ -1012,6 +1016,8 @@ module OpenAI
    previous_response_id: nil,
    # A key to use when reading from or writing to the prompt cache.
    prompt_cache_key: nil,
+   # How long to retain a prompt cache entry created by this request.
+   prompt_cache_retention: nil,
    request_options: {}
  )
  end
data/sig/openai/models/chat/completion_create_params.rbs CHANGED
@@ -293,12 +293,12 @@ module OpenAI
    def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]
  end
 
- type prompt_cache_retention = :"in-memory" | :"24h"
+ type prompt_cache_retention = :in_memory | :"24h"
 
  module PromptCacheRetention
    extend OpenAI::Internal::Type::Enum
 
-   IN_MEMORY: :"in-memory"
+   IN_MEMORY: :in_memory
    PROMPT_CACHE_RETENTION_24H: :"24h"
 
    def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention]
data/sig/openai/models/responses/response.rbs CHANGED
@@ -259,12 +259,12 @@ module OpenAI
    def to_hash: -> { id: String }
  end
 
- type prompt_cache_retention = :"in-memory" | :"24h"
+ type prompt_cache_retention = :in_memory | :"24h"
 
  module PromptCacheRetention
    extend OpenAI::Internal::Type::Enum
 
-   IN_MEMORY: :"in-memory"
+   IN_MEMORY: :in_memory
    PROMPT_CACHE_RETENTION_24H: :"24h"
 
    def self?.values: -> ::Array[OpenAI::Models::Responses::Response::prompt_cache_retention]
data/sig/openai/models/responses/response_compact_params.rbs CHANGED
@@ -7,7 +7,8 @@ module OpenAI
    input: OpenAI::Models::Responses::ResponseCompactParams::input?,
    instructions: String?,
    previous_response_id: String?,
-   prompt_cache_key: String?
+   prompt_cache_key: String?,
+   prompt_cache_retention: OpenAI::Models::Responses::ResponseCompactParams::prompt_cache_retention?
  }
  & OpenAI::Internal::Type::request_parameters
 
@@ -25,12 +26,15 @@ module OpenAI
 
  attr_accessor prompt_cache_key: String?
 
+ attr_accessor prompt_cache_retention: OpenAI::Models::Responses::ResponseCompactParams::prompt_cache_retention?
+
  def initialize: (
    model: OpenAI::Models::Responses::ResponseCompactParams::model?,
    ?input: OpenAI::Models::Responses::ResponseCompactParams::input?,
    ?instructions: String?,
    ?previous_response_id: String?,
    ?prompt_cache_key: String?,
+   ?prompt_cache_retention: OpenAI::Models::Responses::ResponseCompactParams::prompt_cache_retention?,
    ?request_options: OpenAI::request_opts
  ) -> void
 
@@ -40,6 +44,7 @@ module OpenAI
    instructions: String?,
    previous_response_id: String?,
    prompt_cache_key: String?,
+   prompt_cache_retention: OpenAI::Models::Responses::ResponseCompactParams::prompt_cache_retention?,
    request_options: OpenAI::RequestOptions
  }
 
@@ -247,6 +252,17 @@ module OpenAI
 
    ResponseInputItemArray: OpenAI::Internal::Type::Converter
  end
+
+ type prompt_cache_retention = :in_memory | :"24h"
+
+ module PromptCacheRetention
+   extend OpenAI::Internal::Type::Enum
+
+   IN_MEMORY: :in_memory
+   PROMPT_CACHE_RETENTION_24H: :"24h"
+
+   def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCompactParams::prompt_cache_retention]
+ end
  end
  end
  end
data/sig/openai/models/responses/response_create_params.rbs CHANGED
@@ -213,12 +213,12 @@ module OpenAI
    def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::input]
  end
 
- type prompt_cache_retention = :"in-memory" | :"24h"
+ type prompt_cache_retention = :in_memory | :"24h"
 
  module PromptCacheRetention
    extend OpenAI::Internal::Type::Enum
 
-   IN_MEMORY: :"in-memory"
+   IN_MEMORY: :in_memory
    PROMPT_CACHE_RETENTION_24H: :"24h"
 
    def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::prompt_cache_retention]
data/sig/openai/models/responses/responses_client_event.rbs CHANGED
@@ -217,12 +217,12 @@ module OpenAI
    def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponsesClientEvent::input]
  end
 
- type prompt_cache_retention = :"in-memory" | :"24h"
+ type prompt_cache_retention = :in_memory | :"24h"
 
  module PromptCacheRetention
    extend OpenAI::Internal::Type::Enum
 
-   IN_MEMORY: :"in-memory"
+   IN_MEMORY: :in_memory
    PROMPT_CACHE_RETENTION_24H: :"24h"
 
    def self?.values: -> ::Array[OpenAI::Models::Responses::ResponsesClientEvent::prompt_cache_retention]
data/sig/openai/resources/responses.rbs CHANGED
@@ -129,6 +129,7 @@ module OpenAI
    ?instructions: String?,
    ?previous_response_id: String?,
    ?prompt_cache_key: String?,
+   ?prompt_cache_retention: OpenAI::Models::Responses::ResponseCompactParams::prompt_cache_retention?,
    ?request_options: OpenAI::request_opts
  ) -> OpenAI::Responses::CompactedResponse
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: openai
  version: !ruby/object:Gem::Version
-   version: 0.59.0
+   version: 0.60.0
  platform: ruby
  authors:
  - OpenAI
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2026-04-16 00:00:00.000000000 Z
+ date: 2026-04-28 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: base64