active_storage_encryption 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Appraisals +7 -0
- data/MIT-LICENSE +20 -0
- data/README.md +236 -0
- data/Rakefile +17 -0
- data/bin/rails +26 -0
- data/bin/rubocop +8 -0
- data/config/initializers/active_storage_encryption.rb +9 -0
- data/config/routes.rb +7 -0
- data/gemfiles/rails_7.gemfile +7 -0
- data/gemfiles/rails_7.gemfile.lock +276 -0
- data/gemfiles/rails_8.gemfile +7 -0
- data/gemfiles/rails_8.gemfile.lock +276 -0
- data/lib/active_storage/service/encrypted_disk_service.rb +10 -0
- data/lib/active_storage/service/encrypted_mirror_service.rb +10 -0
- data/lib/active_storage/service/encrypted_s3_service.rb +10 -0
- data/lib/active_storage_encryption/encrypted_blobs_controller.rb +163 -0
- data/lib/active_storage_encryption/encrypted_disk_service/v1_scheme.rb +28 -0
- data/lib/active_storage_encryption/encrypted_disk_service/v2_scheme.rb +51 -0
- data/lib/active_storage_encryption/encrypted_disk_service.rb +186 -0
- data/lib/active_storage_encryption/encrypted_mirror_service.rb +76 -0
- data/lib/active_storage_encryption/encrypted_s3_service.rb +236 -0
- data/lib/active_storage_encryption/engine.rb +7 -0
- data/lib/active_storage_encryption/overrides.rb +201 -0
- data/lib/active_storage_encryption/private_url_policy.rb +53 -0
- data/lib/active_storage_encryption/resumable_gcs_upload.rb +194 -0
- data/lib/active_storage_encryption/version.rb +5 -0
- data/lib/active_storage_encryption.rb +79 -0
- data/lib/tasks/active_storage_encryption_tasks.rake +6 -0
- data/test/active_storage_encryption_test.rb +9 -0
- data/test/dummy/Rakefile +8 -0
- data/test/dummy/app/assets/stylesheets/application.css +1 -0
- data/test/dummy/app/controllers/application_controller.rb +6 -0
- data/test/dummy/app/helpers/application_helper.rb +4 -0
- data/test/dummy/app/models/application_record.rb +5 -0
- data/test/dummy/app/views/layouts/application.html.erb +22 -0
- data/test/dummy/app/views/pwa/manifest.json.erb +22 -0
- data/test/dummy/app/views/pwa/service-worker.js +26 -0
- data/test/dummy/bin/rails +4 -0
- data/test/dummy/bin/rake +4 -0
- data/test/dummy/bin/setup +37 -0
- data/test/dummy/config/application.rb +43 -0
- data/test/dummy/config/boot.rb +7 -0
- data/test/dummy/config/credentials.yml.enc +1 -0
- data/test/dummy/config/database.yml +32 -0
- data/test/dummy/config/environment.rb +7 -0
- data/test/dummy/config/environments/development.rb +59 -0
- data/test/dummy/config/environments/production.rb +81 -0
- data/test/dummy/config/environments/test.rb +53 -0
- data/test/dummy/config/initializers/content_security_policy.rb +27 -0
- data/test/dummy/config/initializers/filter_parameter_logging.rb +10 -0
- data/test/dummy/config/initializers/inflections.rb +18 -0
- data/test/dummy/config/initializers/permissions_policy.rb +15 -0
- data/test/dummy/config/locales/en.yml +31 -0
- data/test/dummy/config/master.key +1 -0
- data/test/dummy/config/puma.rb +36 -0
- data/test/dummy/config/routes.rb +5 -0
- data/test/dummy/config/storage.yml +21 -0
- data/test/dummy/config.ru +8 -0
- data/test/dummy/db/migrate/20250304023851_create_active_storage_tables.active_storage.rb +60 -0
- data/test/dummy/db/migrate/20250304023853_add_blob_encryption_key_column.rb +7 -0
- data/test/dummy/db/schema.rb +47 -0
- data/test/dummy/log/test.log +1022 -0
- data/test/dummy/public/404.html +67 -0
- data/test/dummy/public/406-unsupported-browser.html +66 -0
- data/test/dummy/public/422.html +67 -0
- data/test/dummy/public/500.html +66 -0
- data/test/dummy/public/icon.png +0 -0
- data/test/dummy/public/icon.svg +3 -0
- data/test/dummy/storage/test.sqlite3 +0 -0
- data/test/dummy/storage/x6/pl/x6plznfuhrsyjn9pox2a6xgmcs3x +0 -0
- data/test/dummy/storage/yq/sv/yqsvw5a72b3fv719zq8a6yb7lv0j +0 -0
- data/test/integration/encrypted_blobs_controller_test.rb +400 -0
- data/test/lib/encrypted_disk_service_test.rb +387 -0
- data/test/lib/encrypted_mirror_service_test.rb +159 -0
- data/test/lib/encrypted_s3_service_test.rb +293 -0
- data/test/test_helper.rb +19 -0
- metadata +264 -0
# frozen_string_literal: true

require "block_cipher_kit"
require "active_storage/service/disk_service"

module ActiveStorageEncryption
  # Provides a local encrypted store for ActiveStorage blobs.
  # Configure it like so:
  #
  #    local_encrypted:
  #      service: EncryptedDisk
  #      root: <%= Rails.root.join("storage/encrypted") %>
  #      private_url_policy: stream
  class EncryptedDiskService < ::ActiveStorage::Service::DiskService
    include ActiveStorageEncryption::PrivateUrlPolicy

    # Encryption schemes are versioned; the scheme used for a stored file is encoded
    # in the filename extension (see FILENAME_EXTENSIONS_PER_SCHEME and path_for).
    autoload :V1Scheme, __dir__ + "/encrypted_disk_service/v1_scheme.rb"
    autoload :V2Scheme, __dir__ + "/encrypted_disk_service/v2_scheme.rb"

    # Maps the on-disk filename extension to the scheme class (by name) that can
    # decrypt files carrying that extension. Insertion order matters: the last
    # entry is treated as the newest scheme and is used for newly-created files.
    FILENAME_EXTENSIONS_PER_SCHEME = {
      ".encrypted-v1" => "V1Scheme",
      ".encrypted-v2" => "V2Scheme"
    }

    # This lets the Blob encryption key methods know that this
    # storage service _must_ use encryption
    def encrypted? = true

    # Encrypted blobs must never be exposed via public/CDN URLs, so the +public+
    # option accepted by the stock DiskService is rejected outright.
    def initialize(public: false, **options_for_disk_storage)
      raise ArgumentError, "encrypted files cannot be served via a public URL or a CDN" if public
      super
    end

    # Encrypts +io+ with the scheme selected for +key+ and writes the ciphertext
    # to disk. If a +checksum+ (base64 MD5 of the plaintext) is given, the written
    # file is decrypted and verified afterwards; a mismatch deletes it and raises
    # ActiveStorage::IntegrityError (see ensure_integrity_of).
    def upload(key, io, encryption_key:, checksum: nil, **)
      instrument :upload, key: key, checksum: checksum do
        scheme = create_scheme(key, encryption_key)
        File.open(make_path_for(key), "wb") do |file|
          scheme.streaming_encrypt(from_plaintext_io: io, into_ciphertext_io: file)
        end
        ensure_integrity_of(key, checksum, encryption_key) if checksum
      end
    end

    # With a block: streams decrypted chunks to the block.
    # Without a block: recurses into the streaming branch and accumulates all
    # chunks into a single binary String, which is returned.
    def download(key, encryption_key:, &block)
      if block_given?
        instrument :streaming_download, key: key do
          stream key, encryption_key, &block
        end
      else
        instrument :download, key: key do
          (+"").b.tap do |buf|
            download(key, encryption_key: encryption_key) do |data|
              buf << data
            end
          end
        end
      end
    end

    # Decrypts and returns only the requested byte +range+ of the plaintext.
    # Raises ActiveStorage::FileNotFoundError if the ciphertext file is absent.
    def download_chunk(key, range, encryption_key:)
      instrument :download_chunk, key: key, range: range do
        scheme = create_scheme(key, encryption_key)
        File.open(path_for(key), "rb") do |file|
          scheme.decrypt_range(from_ciphertext_io: file, range:)
        end
      rescue Errno::ENOENT
        raise ActiveStorage::FileNotFoundError
      end
    end

    # Generates a signed PUT URL pointing at this gem's upload controller.
    # The signed token pins the key, content type/length, checksum and a SHA256
    # digest of the encryption key, so the client cannot substitute its own key.
    def url_for_direct_upload(key, expires_in:, content_type:, content_length:, checksum:, encryption_key:, custom_metadata: {})
      instrument :url, key: key do |payload|
        upload_token = ActiveStorage.verifier.generate(
          {
            key: key,
            content_type: content_type,
            content_length: content_length,
            encryption_key_sha256: Digest::SHA256.base64digest(encryption_key),
            checksum: checksum,
            service_name: name
          },
          expires_in: expires_in,
          purpose: :encrypted_put
        )

        url_helpers = ActiveStorageEncryption::Engine.routes.url_helpers
        url_helpers.encrypted_blob_put_url(upload_token, url_options).tap do |generated_url|
          payload[:url] = generated_url
        end
      end
    end

    def path_for(key) # :nodoc:
      # The extension indicates what encryption scheme the file will be using. This method
      # gets used two ways - to get a path for a new object, and to get a path for an existing object.
      # If an existing object is found, we need to return the path for the highest version of that
      # object. If we want to create one - we always return the latest one.
      glob_pattern = File.join(root, folder_for(key), key + ".encrypted-*")
      last_existing_path = Dir.glob(glob_pattern).max
      path_for_new_file = File.join(root, folder_for(key), key + FILENAME_EXTENSIONS_PER_SCHEME.keys.last)
      last_existing_path || path_for_new_file
    end

    # True if any encrypted file (of whatever scheme version) exists for +key+.
    def exist?(key)
      File.exist?(path_for(key))
    end

    # Concatenates several source blobs into one destination blob, decrypting each
    # source with its own key and re-encrypting the combined stream with
    # +encryption_key+. Requires one source encryption key per source key.
    def compose(source_keys, destination_key, source_encryption_keys:, encryption_key:, **)
      if source_keys.length != source_encryption_keys.length
        raise ArgumentError, "With #{source_keys.length} keys to compose there should be exactly as many source_encryption_keys, but got #{source_encryption_keys.length}"
      end
      File.open(make_path_for(destination_key), "wb") do |destination_file|
        writing_scheme = create_scheme(destination_key, encryption_key)
        writing_scheme.streaming_encrypt(into_ciphertext_io: destination_file) do |writable|
          source_keys.zip(source_encryption_keys).each do |(source_key, encryption_key_for_source)|
            File.open(path_for(source_key), "rb") do |source_file|
              reading_scheme = create_scheme(source_key, encryption_key_for_source)
              reading_scheme.streaming_decrypt(from_ciphertext_io: source_file, into_plaintext_io: writable)
            end
          end
        end
      end
    end

    def headers_for_direct_upload(key, content_type:, encryption_key:, checksum:, **)
      # Both GCP and AWS require the key to be provided in the headers, together with the
      # upload PUT request. This is not needed for the encrypted disk service, but it is
      # useful to check it does get passed to the HTTP client and then to the upload -
      # our controller extension will verify that this header is present, and fail if
      # it is not in place.
      super.merge!("x-active-storage-encryption-key" => Base64.strict_encode64(encryption_key), "content-md5" => checksum)
    end

    # Headers a client must send with a private (streaming) download request;
    # the controller uses the header to obtain the decryption key.
    def headers_for_private_download(key, encryption_key:, **)
      {"x-active-storage-encryption-key" => Base64.strict_encode64(encryption_key)}
    end

    private

    # Instantiates the scheme matching the file's extension (existing file: its
    # recorded scheme; new file: the newest scheme - see path_for).
    def create_scheme(key, encryption_key_from_blob)
      # Check whether this blob already exists and which version it is.
      # path_for_key will give us the path to the existing version.
      filename_extension = File.extname(path_for(key))
      scheme_class_name = FILENAME_EXTENSIONS_PER_SCHEME.fetch(filename_extension)
      scheme_class = self.class.const_get(scheme_class_name)
      scheme_class.new(encryption_key_from_blob.b)
    end

    # Private URLs always go through the streaming controller - the disk service
    # has no way to serve decrypted bytes otherwise.
    def private_url(key, **options)
      private_url_for_streaming_via_controller(key, **options)
    end

    # Public URLs are impossible for encrypted blobs; initialize already forbids
    # public: true, so reaching this indicates a programming error.
    def public_url(key, filename:, encryption_key:, content_type: nil, disposition: :attachment, **)
      raise "This should never be called"
    end

    # Opens the ciphertext file and yields decrypted chunks to +blk+.
    # Raises ActiveStorage::FileNotFoundError if the file is absent.
    def stream(key, encryption_key, &blk)
      scheme = create_scheme(key, encryption_key)
      File.open(path_for(key), "rb") do |file|
        scheme.streaming_decrypt(from_ciphertext_io: file, &blk)
      end
    rescue Errno::ENOENT
      raise ActiveStorage::FileNotFoundError
    end

    # Re-reads (and decrypts) the just-written blob and compares its MD5 to the
    # caller-supplied checksum; on mismatch the blob is deleted and
    # ActiveStorage::IntegrityError is raised.
    def ensure_integrity_of(key, checksum, encryption_key)
      digest = OpenSSL::Digest.new("MD5")
      stream(key, encryption_key) do |decrypted_data|
        digest << decrypted_data
      end
      unless digest.base64digest == checksum
        delete key
        raise ActiveStorage::IntegrityError
      end
    end

    def service_name
      # Normally: ActiveStorage::Service::DiskService => Disk, so it does
      # a split on "::" on the class name etc. Even though this is private,
      # it does get called from the outside (or by other ActiveStorage::Service methods).
      # Oddly it does _not_ get used in the `ActiveStorage::Configurator` to resolve
      # the class to use.
      "EncryptedDisk"
    end
  end
end
# frozen_string_literal: true

require "active_storage/service/mirror_service"

# A MirrorService variant that forwards the per-blob encryption key to the
# primary and to every encrypted mirror. The encryption key is never placed in
# job arguments in the clear - it travels inside an encrypted+signed token.
class ActiveStorageEncryption::EncryptedMirrorService < ActiveStorage::Service::MirrorService
  # The mirror service has no URL policy of its own - the primary decides.
  delegate :private_url_policy, to: :primary

  # Background job that copies a blob to the mirrors, carrying the encryption
  # key as an encrypted token instead of plaintext.
  class MirrorJobWithEncryption < ActiveStorage::MirrorJob
    def perform(key, checksum:, service_name:, encryption_key_token:)
      service = lookup_service(service_name)
      service.try(:mirror_with_encryption, key, checksum: checksum, encryption_key: encryption_key_from_token(encryption_key_token))
    end

    # Verifies and decrypts the token minted by mirror_later_with_encryption,
    # returning the raw encryption key bytes.
    def encryption_key_from_token(encryption_key_token)
      decrypted_token = ActiveStorageEncryption.token_encryptor.decrypt_and_verify(encryption_key_token, purpose: :mirror)
      Base64.decode64(decrypted_token.fetch("encryption_key"))
    end

    # Resolves a configured service by its config name and ensures it is an
    # encrypting service - mirroring an encrypted blob through a non-encrypting
    # service would be a misconfiguration.
    def lookup_service(name)
      # This should be the name in the config, NOT the class name
      service = ActiveStorage::Blob.services.fetch(name) { ActiveStorage::Blob.service }
      raise ArgumentError, "#{service.name} is not providing file encryption" unless service.try(:encrypted?)
      service
    end
  end

  def private_url_policy=(_)
    raise ArgumentError, "EncryptedMirrorService uses the private_url_policy of the primary"
  end

  # Signals to the Blob extensions that this service requires encryption keys.
  def encrypted?
    true
  end

  # Uploads to the primary (passing the encryption key only if the primary is
  # itself an encrypting service), then enqueues mirroring to the other targets.
  def upload(key, io, encryption_key:, checksum: nil, **options)
    io.rewind
    if primary.try(:encrypted?)
      primary.upload(key, io, checksum: checksum, encryption_key: encryption_key, **options)
    else
      primary.upload(key, io, checksum: checksum, **options)
    end
    mirror_later_with_encryption(key, checksum: checksum, encryption_key: encryption_key, **options)
  end

  # Copies the blob from the primary to every mirror that does not yet have it,
  # passing the encryption key only to mirrors that are encrypting services.
  def mirror_with_encryption(key, checksum:, encryption_key:)
    instrument :mirror, key: key, checksum: checksum do
      mirrors_in_need_of_mirroring = mirrors.select { |service| !service.exist?(key) }
      return if mirrors_in_need_of_mirroring.empty?
      primary.open(key, checksum: checksum, verify: checksum.present?, encryption_key: encryption_key) do |io|
        mirrors_in_need_of_mirroring.each do |target|
          io.rewind
          options = target.try(:encrypted?) ? {encryption_key: encryption_key} : {}
          target.upload(key, io, checksum: checksum, **options)
        end
      end
    end
  end

  def service_name
    # ActiveStorage::Service::DiskService => Disk
    # Overridden because in Rails 8 this is "self.class.name.split("::").third.remove("Service")"
    self.class.name.split("::").last.remove("Service")
  end

  private

  # Enqueues MirrorJobWithEncryption with the encryption key wrapped in an
  # encrypted+signed token so the key never appears in the job backend in clear.
  #
  # Accepts (and ignores) any extra upload options via `**` - `upload` forwards
  # its full `**options` here, and without the splat any common upload option
  # (content_type:, disposition:, custom_metadata:, ...) raised
  # ArgumentError: unknown keyword.
  def mirror_later_with_encryption(key, checksum:, encryption_key: nil, **)
    encryption_key_token = ActiveStorageEncryption.token_encryptor.encrypt_and_sign(
      {
        encryption_key: Base64.strict_encode64(encryption_key)
      },
      purpose: :mirror
    )
    MirrorJobWithEncryption.perform_later(key, checksum: checksum, service_name:, encryption_key_token:)
  end
end
# frozen_string_literal: true

require "active_storage/service/s3_service"

# S3 service that stores every blob with SSE-C (server-side encryption with a
# customer-provided key). The per-blob key is passed to the AWS SDK as
# sse_customer_* options and, for presigned/direct operations, surfaced as the
# x-amz-server-side-encryption-customer-* headers the client must send.
class ActiveStorageEncryption::EncryptedS3Service < ActiveStorage::Service::S3Service
  include ActiveStorageEncryption::PrivateUrlPolicy

  # Signals to the Blob extensions that this service requires encryption keys.
  def encrypted? = true

  # Encrypted blobs must never be exposed via public/CDN URLs.
  def initialize(public: false, **options_for_s3_service_and_private_url_policy)
    raise ArgumentError, "encrypted files cannot be served via a public URL or a CDN" if public
    super
  end

  def service_name
    # ActiveStorage::Service::DiskService => Disk
    # Overridden because in Rails 8 this is "self.class.name.split("::").third.remove("Service")"
    self.class.name.split("::").last.remove("Service")
  end

  def headers_for_direct_upload(key, encryption_key:, **options_for_super)
    # See https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#specifying-s3-c-encryption
    # This is the same as sse_options but expressed with raw header names
    sdk_sse_options = sse_options(encryption_key)
    super(key, **options_for_super).merge!({
      "x-amz-server-side-encryption-customer-key" => Base64.strict_encode64(sdk_sse_options.fetch(:sse_customer_key)),
      "x-amz-server-side-encryption-customer-key-MD5" => Digest::MD5.base64digest(sdk_sse_options.fetch(:sse_customer_key))
    })
  end

  def exist?(key)
    # The stock S3Service uses S3::Object#exists? here. That method does
    # a HEAD request to the S3 bucket under the hood. But there is a problem
    # with that approach: to get all the metadata attributes of an object on S3
    # (which is what the HEAD request should return to you) you need the encryption key.
    # The interface of the ActiveStorage services does not provide for extra arguments
    # for `Service#exist?`, so all we would get using that SDK call would be an error.
    #
    # But we don't need the object metadata - we need to know is whether the object exists
    # at all. And this can be done with a GET request instead. We ask S3 to give us the first byte of the
    # object. S3 will then raise an exception - the exception will be different
    # depending on whether the object does not exist _or_ the object does exist, but
    # is encrypted. We can use the distinction between those exceptions to tell
    # whether the object is there or not.
    #
    # There is also a case where the object is not encrypted - in that situation
    # our single-byte GET request will actually succeed. This also means that the
    # object exists in the bucket.
    object_for(key).get(range: "bytes=0-0")
    # If we get here without an exception - the object exists in the bucket,
    # but is not encrypted. For example, it was stored using a stock S3Service.
    true
  rescue Aws::S3::Errors::InvalidRequest
    # With this exception S3 tells us that the object exists but we have to furnish
    # the encryption key (the exception will have a message with "object was stored
    # using a form of Server Side Encryption...").
    true
  rescue Aws::S3::Errors::NoSuchKey
    # And this truly means the object is not present
    false
  end

  # Header a client must send along with a presigned GET so S3 can decrypt;
  # the algorithm and key-MD5 are already baked into the presigned URL itself.
  def headers_for_private_download(key, encryption_key:, **)
    sdk_sse_options = sse_options(encryption_key)
    {
      "x-amz-server-side-encryption-customer-key" => Base64.strict_encode64(sdk_sse_options.fetch(:sse_customer_key))
    }
  end

  def url_for_direct_upload(key, encryption_key:, **options_for_super)
    # With direct upload we need to remove the encryption key itself from
    # the SDK parameters. Otherwise it does get included in the URL, but that
    # does not make S3 actually _use_ the value - _and_ it leaks the key.
    # We _do_ need the key MD5 to be in the signed header params, so that the client can't use an encryption key
    # it invents by itself - it must use the one we issue it.
    sse_options_without_key = sse_options(encryption_key).without(:sse_customer_key)
    with_upload_options_for_customer_key(sse_options_without_key) do
      super(key, **options_for_super)
    end
  end

  # Performs the stock S3 upload while the thread-local upload_options carry
  # the SSE-C parameters for this specific blob (see upload_options override).
  def upload(*args, encryption_key:, **kwargs)
    with_upload_options_for_customer_key(sse_options(encryption_key)) do
      super(*args, **kwargs)
    end
  end

  # With a block: streams the object in chunks (see #stream). Without a block:
  # fetches the whole object in one GET and returns a binary String.
  def download(key, encryption_key:, &block)
    if block_given?
      instrument :streaming_download, key: key do
        stream(key, encryption_key: encryption_key, &block)
      end
    else
      instrument :download, key: key do
        object_for(key).get(**sse_options(encryption_key)).body.string.force_encoding(Encoding::BINARY)
      rescue Aws::S3::Errors::NoSuchKey
        raise ActiveStorage::FileNotFoundError
      end
    end
  end

  # Fetches one byte range of the object. The Range header is end-inclusive,
  # hence the exclude_end? adjustment.
  def download_chunk(key, range, encryption_key:)
    instrument :download_chunk, key: key, range: range do
      object_for(key).get(range: "bytes=#{range.begin}-#{range.exclude_end? ? range.end - 1 : range.end}", **sse_options(encryption_key)).body.string.force_encoding(Encoding::BINARY)
    rescue Aws::S3::Errors::NoSuchKey
      raise ActiveStorage::FileNotFoundError
    end
  end

  # Concatenates several source objects (each with its own key) into one
  # destination object encrypted with +encryption_key+, via a multipart
  # upload_stream. Requires one source encryption key per source key.
  def compose(source_keys, destination_key, source_encryption_keys:, encryption_key:, filename: nil, content_type: nil, disposition: nil, custom_metadata: {})
    if source_keys.length != source_encryption_keys.length
      raise ArgumentError, "With #{source_keys.length} keys to compose there should be exactly as many source_encryption_keys, but got #{source_encryption_keys.length}"
    end
    content_disposition = content_disposition_with(type: disposition, filename: filename) if disposition && filename
    upload_options_for_compose = upload_options.merge(sse_options(encryption_key))
    object_for(destination_key).upload_stream(
      content_type: content_type,
      content_disposition: content_disposition,
      part_size: MINIMUM_UPLOAD_PART_SIZE,
      metadata: custom_metadata,
      **upload_options_for_compose
    ) do |s3_multipart_io|
      s3_multipart_io.binmode
      source_keys.zip(source_encryption_keys).each do |(source_key, source_encryption_key)|
        stream(source_key, encryption_key: source_encryption_key) do |chunk|
          s3_multipart_io.write(chunk)
        end
      end
    end
  end

  private

  # Reads the object for the given key in chunks, yielding each to the block.
  def stream(key, encryption_key:)
    object = object_for(key)

    chunk_size = 5.megabytes
    offset = 0

    # Doing a HEAD (what .exists? does under the hood) also requires the encryption key headers,
    # but the SDK does not send them along. Instead of doing a HEAD, you can also do a GET - but for the first byte.
    # This will give you the content-length of the object, and the SDK will pass the correct encryption headers.
    # There is an issue in the SDK here https://github.com/aws/aws-sdk-ruby/issues/1342 which is allegedly fixed
    # by https://github.com/aws/aws-sdk-ruby/pull/1343/files but it doesn't seem like it.
    # Also, we do not only call `S3::Object#exists?`, but also `S3::Object#content_length` - which does not have a way to pass
    # encryption options either.
    response = object.get(range: "bytes=0-0", **sse_options(encryption_key))
    object_content_length = response.content_range.scan(/\d+$/).first.to_i

    while offset < object_content_length
      yield object.get(range: "bytes=#{offset}-#{offset + chunk_size - 1}", **sse_options(encryption_key)).body.string.force_encoding(Encoding::BINARY)
      offset += chunk_size
    end
  rescue Aws::S3::Errors::NoSuchKey
    raise ActiveStorage::FileNotFoundError
  end

  # Builds the SSE-C option hash the AWS SDK expects. SSE-C keys must be exactly
  # 32 bytes (AES256), so the blob key is truncated to its first 32 bytes.
  def sse_options(encryption_key)
    truncated_key_bytes = encryption_key.byteslice(0, 32)
    {
      sse_customer_algorithm: "AES256",
      sse_customer_key: truncated_key_bytes,
      sse_customer_key_md5: Digest::MD5.base64digest(truncated_key_bytes)
    }
  end

  # Generates a private GET URL according to the configured policy:
  # :disable         - raise, presigned URLs are not allowed on this service;
  # :stream          - route the download through the streaming controller;
  # :require_headers - presign an S3 URL whose signature pins the key MD5, so
  #                    the client must supply the matching key header.
  def private_url(key, encryption_key:, **options)
    case private_url_policy
    when :disable
      # NOTE(review): the original had a redundant `if private_url_policy == :disable`
      # nested inside this branch - always true here, removed.
      raise ActiveStorageEncryption::StreamingDisabled, <<~EOS
        Requested a signed GET URL for #{key.inspect} on service #{name}. This service
        has disabled presigned URLs (private_url_policy: disable), you have to use `Blob#download` instead.
      EOS
    when :stream
      private_url_for_streaming_via_controller(key, encryption_key:, **options)
    when :require_headers
      sse_options_for_presigned_url = sse_options(encryption_key)

      # Remove the key itself. If we pass it to the SDK - it will leak the key (the key will be in the URL),
      # but the download will still fail.
      sse_options_for_presigned_url.delete(:sse_customer_key)

      options_for_super = options.merge(sse_options_for_presigned_url) # The "rest" kwargs for super are the `client_options`
      super(key, **options_for_super)
    end
  end

  # Public URLs are impossible for encrypted blobs; initialize already forbids
  # public: true, so reaching this indicates a programming error.
  def public_url(key, **client_opts)
    raise "This should never be called"
  end

  # Merges the thread-local SSE-C options (set by
  # with_upload_options_for_customer_key) into the stock upload options, so the
  # inherited upload machinery picks up the per-blob key without being overridden.
  def upload_options
    super.merge(Thread.current[:aws_sse_options].to_h)
  end

  # Temporarily installs per-upload SSE-C options in a thread-local for the
  # duration of the block, restoring the previous value afterwards.
  def with_upload_options_for_customer_key(overriding_upload_options)
    # Gotta be careful here, because this call can be re-entrant.
    # If one thread calls `upload_options` to do an upload, and does not
    # return for some time, we want this thread to be using the upload options
    # reserved for it - otherwise objects can get not their encryption keys, but
    # others'. If we want to have upload_options be tailored to every specific upload,
    # we would need to override way more of this Service class than is really needed.
    # Re-entrancy has actually been observed in our tests (verified by wrapping this
    # method with a mutex-guarded depth counter - nesting depths above 10 were seen),
    # which is why the previous thread-local value is saved and restored instead of
    # simply being cleared.
    previous = Thread.current[:aws_sse_options]
    Thread.current[:aws_sse_options] = overriding_upload_options
    yield
  ensure
    Thread.current[:aws_sse_options] = previous
  end
end