active_storage_encryption 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Appraisals +7 -0
- data/MIT-LICENSE +20 -0
- data/README.md +236 -0
- data/Rakefile +17 -0
- data/bin/rails +26 -0
- data/bin/rubocop +8 -0
- data/config/initializers/active_storage_encryption.rb +9 -0
- data/config/routes.rb +7 -0
- data/gemfiles/rails_7.gemfile +7 -0
- data/gemfiles/rails_7.gemfile.lock +276 -0
- data/gemfiles/rails_8.gemfile +7 -0
- data/gemfiles/rails_8.gemfile.lock +276 -0
- data/lib/active_storage/service/encrypted_disk_service.rb +10 -0
- data/lib/active_storage/service/encrypted_mirror_service.rb +10 -0
- data/lib/active_storage/service/encrypted_s3_service.rb +10 -0
- data/lib/active_storage_encryption/encrypted_blobs_controller.rb +163 -0
- data/lib/active_storage_encryption/encrypted_disk_service/v1_scheme.rb +28 -0
- data/lib/active_storage_encryption/encrypted_disk_service/v2_scheme.rb +51 -0
- data/lib/active_storage_encryption/encrypted_disk_service.rb +186 -0
- data/lib/active_storage_encryption/encrypted_mirror_service.rb +76 -0
- data/lib/active_storage_encryption/encrypted_s3_service.rb +236 -0
- data/lib/active_storage_encryption/engine.rb +7 -0
- data/lib/active_storage_encryption/overrides.rb +201 -0
- data/lib/active_storage_encryption/private_url_policy.rb +53 -0
- data/lib/active_storage_encryption/resumable_gcs_upload.rb +194 -0
- data/lib/active_storage_encryption/version.rb +5 -0
- data/lib/active_storage_encryption.rb +79 -0
- data/lib/tasks/active_storage_encryption_tasks.rake +6 -0
- data/test/active_storage_encryption_test.rb +9 -0
- data/test/dummy/Rakefile +8 -0
- data/test/dummy/app/assets/stylesheets/application.css +1 -0
- data/test/dummy/app/controllers/application_controller.rb +6 -0
- data/test/dummy/app/helpers/application_helper.rb +4 -0
- data/test/dummy/app/models/application_record.rb +5 -0
- data/test/dummy/app/views/layouts/application.html.erb +22 -0
- data/test/dummy/app/views/pwa/manifest.json.erb +22 -0
- data/test/dummy/app/views/pwa/service-worker.js +26 -0
- data/test/dummy/bin/rails +4 -0
- data/test/dummy/bin/rake +4 -0
- data/test/dummy/bin/setup +37 -0
- data/test/dummy/config/application.rb +43 -0
- data/test/dummy/config/boot.rb +7 -0
- data/test/dummy/config/credentials.yml.enc +1 -0
- data/test/dummy/config/database.yml +32 -0
- data/test/dummy/config/environment.rb +7 -0
- data/test/dummy/config/environments/development.rb +59 -0
- data/test/dummy/config/environments/production.rb +81 -0
- data/test/dummy/config/environments/test.rb +53 -0
- data/test/dummy/config/initializers/content_security_policy.rb +27 -0
- data/test/dummy/config/initializers/filter_parameter_logging.rb +10 -0
- data/test/dummy/config/initializers/inflections.rb +18 -0
- data/test/dummy/config/initializers/permissions_policy.rb +15 -0
- data/test/dummy/config/locales/en.yml +31 -0
- data/test/dummy/config/master.key +1 -0
- data/test/dummy/config/puma.rb +36 -0
- data/test/dummy/config/routes.rb +5 -0
- data/test/dummy/config/storage.yml +21 -0
- data/test/dummy/config.ru +8 -0
- data/test/dummy/db/migrate/20250304023851_create_active_storage_tables.active_storage.rb +60 -0
- data/test/dummy/db/migrate/20250304023853_add_blob_encryption_key_column.rb +7 -0
- data/test/dummy/db/schema.rb +47 -0
- data/test/dummy/log/test.log +1022 -0
- data/test/dummy/public/404.html +67 -0
- data/test/dummy/public/406-unsupported-browser.html +66 -0
- data/test/dummy/public/422.html +67 -0
- data/test/dummy/public/500.html +66 -0
- data/test/dummy/public/icon.png +0 -0
- data/test/dummy/public/icon.svg +3 -0
- data/test/dummy/storage/test.sqlite3 +0 -0
- data/test/dummy/storage/x6/pl/x6plznfuhrsyjn9pox2a6xgmcs3x +0 -0
- data/test/dummy/storage/yq/sv/yqsvw5a72b3fv719zq8a6yb7lv0j +0 -0
- data/test/integration/encrypted_blobs_controller_test.rb +400 -0
- data/test/lib/encrypted_disk_service_test.rb +387 -0
- data/test/lib/encrypted_mirror_service_test.rb +159 -0
- data/test/lib/encrypted_s3_service_test.rb +293 -0
- data/test/test_helper.rb +19 -0
- metadata +264 -0
@@ -0,0 +1,201 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module ActiveStorageEncryption
|
4
|
+
module Overrides
|
5
|
+
module EncryptedBlobClassMethods
  # Included into ActiveStorage::Blob. Adds handling for the encryption_key
  # column and class-level factory methods which mint a random key whenever
  # the target service is an encrypting one.
  def self.included base
    base.class_eval do
      # Store the per-blob key encrypted-at-rest via ActiveRecord encryption.
      encrypts :encryption_key
      validates :encryption_key, presence: {message: "must be present for this service"}, if: :service_encrypted?

      class << self
        ENCRYPTION_KEY_LENGTH_BYTES = 16 + 32 # So we have enough

        # True when the service registered under service_name (falling back to
        # the default Blob service when the name does not resolve) reports
        # itself as encrypted.
        def service_encrypted?(service_name)
          return false unless service_name

          service = ActiveStorage::Blob.services.fetch(service_name) { ActiveStorage::Blob.service }
          !!service&.try(:encrypted?)
        end

        # @return [String] a random binary key, ENCRYPTION_KEY_LENGTH_BYTES long
        def generate_random_encryption_key
          SecureRandom.bytes(ENCRYPTION_KEY_LENGTH_BYTES)
        end

        # Mints the key (when the service encrypts) before a direct upload, so the
        # presigned upload request can already reference it.
        # NOTE(review): `record:` is accepted for signature parity with upstream
        # ActiveStorage but is not used here.
        def create_before_direct_upload!(filename:, byte_size:, checksum:, content_type: nil, metadata: nil, service_name: nil, record: nil, key: nil, encryption_key: nil)
          encryption_key = service_encrypted?(service_name) ? (encryption_key || generate_random_encryption_key) : nil
          create!(key: key, filename: filename, byte_size: byte_size, checksum: checksum, content_type: content_type, metadata: metadata, service_name: service_name, encryption_key: encryption_key)
        end

        # Creates the blob record, then uploads the IO to the service.
        def create_and_upload!(io:, filename:, content_type: nil, metadata: nil, service_name: nil, identify: true, record: nil, key: nil, encryption_key: nil)
          create_after_unfurling!(key: key, io: io, filename: filename, content_type: content_type, metadata: metadata, service_name: service_name, identify: identify, encryption_key:).tap do |persisted_blob|
            persisted_blob.upload_without_unfurling(io)
          end
        end

        # Builds (does not save) a blob, unfurls the IO to fill in metadata, and
        # assigns a generated key if the service encrypts and none was given.
        def build_after_unfurling(io:, filename:, content_type: nil, metadata: nil, service_name: nil, identify: true, record: nil, key: nil, encryption_key: nil)
          new(key: key, filename: filename, content_type: content_type, metadata: metadata, service_name: service_name, encryption_key:).tap do |built_blob|
            built_blob.unfurl(io, identify: identify)
            built_blob.encryption_key ||= service_encrypted?(service_name) ? (encryption_key || generate_random_encryption_key) : nil
          end
        end

        # Same as build_after_unfurling but persists the record.
        def create_after_unfurling!(io:, filename:, content_type: nil, metadata: nil, service_name: nil, identify: true, record: nil, key: nil, encryption_key: nil)
          build_after_unfurling(key: key, io: io, filename: filename, content_type: content_type, metadata: metadata, service_name: service_name, identify: identify, encryption_key:).tap(&:save!)
        end

        # Concatenate multiple blobs into a single "composed" blob.
        def compose(blobs, filename:, content_type: nil, metadata: nil, key: nil, service_name: nil, encryption_key: nil)
          raise ActiveRecord::RecordNotSaved, "All blobs must be persisted." if blobs.any?(&:new_record?)

          content_type ||= blobs.pluck(:content_type).compact.first

          new(key: key, filename: filename, content_type: content_type, metadata: metadata, byte_size: blobs.sum(&:byte_size), service_name:, encryption_key:).tap do |composed_blob|
            composed_blob.compose(blobs.pluck(:key))
            composed_blob.save!
          end
        end
      end
    end
  end
end
|
65
|
+
|
66
|
+
module EncryptedBlobInstanceMethods
  # Instance-level overrides for ActiveStorage::Blob. Every service call
  # passes the per-blob encryption_key when the backing service encrypts;
  # otherwise stock ActiveStorage behavior runs via `super`.

  # True if the service backing this blob reports itself as encrypted.
  def service_encrypted?
    !!service&.try(:encrypted?)
  end

  def service_url_for_direct_upload(expires_in: ActiveStorage.service_urls_expire_in)
    return super unless service_encrypted?

    raise "No encryption key present" unless encryption_key
    service.url_for_direct_upload(key, expires_in: expires_in, content_type: content_type, content_length: byte_size, checksum: checksum, custom_metadata: custom_metadata, encryption_key: encryption_key)
  end

  # Passes encryption_key through unconditionally - it is simply nil for
  # services that do not encrypt.
  def open(tmpdir: nil, &block)
    service.open(
      key,
      encryption_key: encryption_key,
      checksum: checksum,
      verify: !composed, # composed blobs carry no single-source checksum to verify
      name: ["ActiveStorage-#{id}-", filename.extension_with_delimiter],
      tmpdir: tmpdir,
      &block
    )
  end

  def service_headers_for_direct_upload
    return super unless service_encrypted?

    service.headers_for_direct_upload(key, filename: filename, content_type: content_type, content_length: byte_size, checksum: checksum, custom_metadata: custom_metadata, encryption_key: encryption_key)
  end

  def upload_without_unfurling(io)
    return super unless service_encrypted?

    service.upload(key, io, checksum: checksum, encryption_key: encryption_key, **service_metadata)
  end

  # Downloads the file associated with this blob. If no block is given, the entire file is read into memory and returned.
  # That'll use a lot of RAM for very large files. If a block is given, then the download is streamed and yielded in chunks.
  def download(&block)
    return super unless service_encrypted?

    service.download(key, encryption_key: encryption_key, &block)
  end

  def download_chunk(range)
    return super unless service_encrypted?

    service.download_chunk(key, range, encryption_key: encryption_key)
  end

  def compose(keys)
    return super unless service_encrypted?

    self.composed = true
    service.compose(keys, key, encryption_key: encryption_key, **service_metadata)
  end

  def url(expires_in: ActiveStorage.service_urls_expire_in, disposition: :inline, filename: nil, **options)
    return super unless service_encrypted?

    service.url(
      key, expires_in: expires_in, filename: ActiveStorage::Filename.wrap(filename || self.filename),
      encryption_key: encryption_key,
      content_type: content_type_for_serving, disposition: forced_disposition_for_serving || disposition,
      **options
    )
  end

  # The encryption_key can be in binary and not serializable to UTF-8 by to_json, thus we always want to
  # leave it out. This is also to better mimic how native ActiveStorage handles it.
  def serializable_hash(options = nil)
    options = if options
      options.merge(except: Array.wrap(options[:except]) | [:encryption_key])
    else
      {except: [:encryption_key]}
    end
    super
  end
end
|
159
|
+
|
160
|
+
module BlobIdentifiableInstanceMethods
  private

  # Active storage attach() tries to identify the content_type of the file. For that it downloads a chunk.
  # Since we have an encrypted disk service which needs an encryption_key on everything, every call to it needs the encryption_key passed too.
  def download_identifiable_chunk
    return super unless service_encrypted?
    return "".b unless byte_size.positive? # nothing stored - identify against empty bytes

    service.download_chunk(key, 0...4.kilobytes, encryption_key: encryption_key)
  end
end
|
177
|
+
|
178
|
+
module DownloaderInstanceMethods
  # Override of ActiveStorage::Downloader#open which threads the
  # encryption_key through to the service download.
  def open(key, encryption_key: nil, checksum: nil, verify: true, name: "ActiveStorage-", tmpdir: nil, &blk)
    open_tempfile(name, tmpdir) do |tempfile|
      download(key, tempfile, encryption_key: encryption_key)
      verify_integrity_of(tempfile, checksum: checksum) if verify
      yield tempfile
    end
  end

  private

  # Streams the blob into `file`. For non-encrypted services falls back to the
  # stock implementation (which does not accept an encryption_key).
  def download(key, file, encryption_key: nil)
    return super(key, file) unless service.respond_to?(:encrypted?) && service.encrypted?

    file.binmode
    service.download(key, encryption_key: encryption_key) { |chunk| file.write(chunk) }
    file.flush
    file.rewind
  end
end
|
200
|
+
end
|
201
|
+
end
|
@@ -0,0 +1,53 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module ActiveStorageEncryption::PrivateUrlPolicy
  # Default behavior for private GET URLs: stream decrypted bytes through our controller.
  DEFAULT_POLICY = :stream

  def initialize(private_url_policy: DEFAULT_POLICY, **any_other_options_for_service)
    self.private_url_policy = private_url_policy.to_sym
    super(**any_other_options_for_service)
  end

  # Validates and stores the policy; raises ArgumentError for unknown values.
  def private_url_policy=(new_value)
    allowed = [:disable, :require_headers, :stream]
    raise ArgumentError, "private_url_policy: must be one of #{allowed.join(",")}" unless allowed.include?(new_value.to_sym)
    @private_url_policy = new_value.to_sym
  end

  attr_reader :private_url_policy

  # Generates a URL to our streaming controller. The token in the URL carries
  # the blob's encryption key (base64), encrypted with the app-level token
  # encryptor so that it is never exposed to the client in the clear.
  def private_url_for_streaming_via_controller(key, expires_in:, filename:, content_type:, disposition:, encryption_key:)
    if private_url_policy == :disable
      raise ActiveStorageEncryption::StreamingDisabled, <<~EOS
        Requested a signed GET URL for #{key.inspect} on service #{name}. This service
        has disabled presigned URLs (private_url_policy: disable), you have to use `Blob#download` instead.
      EOS
    end

    disposition_header = content_disposition_with(type: disposition, filename: filename)
    signed_token = ActiveStorageEncryption.token_encryptor.encrypt_and_sign(
      {
        key: key,
        disposition: disposition_header,
        encryption_key_sha256: Digest::SHA256.base64digest(encryption_key),
        content_type: content_type,
        service_name: name,
        encryption_key: Base64.strict_encode64(encryption_key)
      },
      expires_in: expires_in,
      purpose: :encrypted_get
    )

    # Both url_helpers and url_options are on the DiskService, but we need them here for other Services too
    url_helpers = ActiveStorageEncryption::Engine.routes.url_helpers
    url_options = ActiveStorage::Current.url_options

    if url_options.blank?
      # NOTE(review): the "#(unknown)" below reads like a templating/diff-viewer
      # artifact - the original message likely interpolated a value here;
      # confirm against the upstream source before changing it.
      raise ArgumentError, "Cannot generate URL for #(unknown) because ActiveStorage::Current.url_options is not set"
    end

    url_helpers.encrypted_blob_streaming_get_url(signed_token, filename: filename, **url_options)
  end
end
|
@@ -0,0 +1,194 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
# Unlike the AWS SDKs, the Ruby GCP SDKs do not have a built-in resumable upload feature, while that
|
4
|
+
# feature is well-supported by GCP (and has been supported for a long while). This module provides
|
5
|
+
# resumable uploads in an IO-like package, giving you an object you can write to.
|
6
|
+
#
|
7
|
+
# file = @bucket.file("upload.bin", skip_lookup: true)
|
8
|
+
# upload = ActiveStorageEncryption::ResumableGCSUpload.new(file)
|
9
|
+
# upload.stream do |io|
|
10
|
+
# io.write("Hello resumable")
|
11
|
+
# 20.times { io.write(Random.bytes(1.megabyte)) }
|
12
|
+
# end
|
13
|
+
#
|
14
|
+
# Note that to perform the resumable upload your IAM identity or machine identity must have either
|
15
|
+
# a correct key for accessing Cloud Storage, or - alternatively - run under a service account
|
16
|
+
# that is permitted to sign blobs. This maps to the "iam.serviceAccountTokenCreator" role -
|
17
|
+
# see https://github.com/googleapis/google-cloud-ruby/issues/13307 and https://cloud.google.com/iam/docs/service-account-permissions
|
18
|
+
class ActiveStorageEncryption::ResumableGCSUpload
|
19
|
+
# AWS recommend 5MB as the default part size for multipart uploads. GCP recommend doing "less requests"
|
20
|
+
# in general, and they mandate that all parts except last are a multile of 256*1024. Knowing that we will
|
21
|
+
# need to hold a buffer of that size, let's just assume that the 5MB that AWS uses is a good number for part size.
|
22
|
+
CHUNK_SIZE_FOR_UPLOADS = 5 * 1024 * 1024
|
23
|
+
|
24
|
+
# When doing GCP uploads the chunks need to be sized to 256KB increments, and the output
|
25
|
+
# that we generate is not guaranteed to be chopped up this way. Also the upload for the last
|
26
|
+
# chunk is done slightly different than the preceding chunks. It is convenient to have a
|
27
|
+
# way to "chop up" an arbitrary streaming output into evenly sized chunks.
|
28
|
+
class ByteChunker
  # Chops an arbitrary stream of writes into evenly-sized chunks. Every chunk
  # except the last has exactly `chunk_size` bytes; the last chunk (flushed by
  # #finish) may be any size, including empty.
  #
  # @param chunk_size[Integer] the chunk size that all the chunks except the last one must have
  # @delivery_proc the proc that will receive the bytes and the `is_last` boolean to indicate the last chunk
  def initialize(chunk_size: 256 * 1024, &delivery_proc)
    @chunk_size = chunk_size.to_i
    # A fixed-capacity mutable String is used instead of a StringIO: reusing
    # one mutable buffer saves allocations.
    @buf_str = String.new(encoding: Encoding::BINARY, capacity: @chunk_size * 2)
    @delivery_proc = delivery_proc.to_proc
  end

  # Appends data to the buffer, flushing complete chunks to the delivery proc.
  #
  # @param bin_str[String] string in binary encoding
  # @return self
  def <<(bin_str)
    @buf_str << bin_str.b
    deliver_buf_in_chunks
    self
  end

  # Same as #<< but returns the number of bytes consumed (IO#write contract).
  #
  # @param bin_str[String] string in binary encoding
  # @return [Integer] number of bytes appended to the buffer
  def write(bin_str)
    self << bin_str
    bin_str.bytesize
  end

  # Delivers whatever remains as the final chunk - even when empty, since the
  # last request is usually needed to close the remote file.
  #
  # @return void
  def finish
    deliver_buf_in_chunks
    @delivery_proc.call(@buf_str, true)
    nil
  end

  # Flushes full chunks; a buffer of exactly @chunk_size bytes is kept back so
  # it can still be delivered as the (differently-handled) last chunk.
  private def deliver_buf_in_chunks
    @delivery_proc.call(@buf_str.slice!(0, @chunk_size), false) while @buf_str.bytesize > @chunk_size
  end
end
|
77
|
+
|
78
|
+
# Largely inspired by https://gist.github.com/frankyn/9a5344d1b19ed50ebbf9f15f0ff92032
|
79
|
+
# Acts like a writable object that you send data into. The object will split the data
|
80
|
+
# you send into chunks and send it to GCP cloud storage, you do not need to indicate
|
81
|
+
# the size of the output in advance. You do need to close the object to deliver the
|
82
|
+
# last chunk
|
83
|
+
# Largely inspired by https://gist.github.com/frankyn/9a5344d1b19ed50ebbf9f15f0ff92032
# Writable sink that splits incoming bytes into chunks and PUTs each chunk to
# a GCP resumable-upload session URL. The total output size does not need to
# be known in advance; #finish must be called to deliver the final chunk.
class RangedPutIO
  extend Forwardable
  def_delegators :@chunker, :write, :finish, :<<

  # The chunks have to be sized in multiples of 256 kibibytes or 262,144 bytes
  CHUNK_SIZE_UNIT = 256 * 1024

  def initialize(put_url, chunk_size:, content_type: "binary/octet-stream")
    raise ArgumentError, "chunk_size of #{chunk_size} is not a multiple of #{CHUNK_SIZE_UNIT}" unless (chunk_size % CHUNK_SIZE_UNIT).zero?

    @put_uri = URI(put_url)
    @last_byte = 0
    @total_bytes = 0
    @content_type = content_type
    @chunker = ByteChunker.new(chunk_size: chunk_size) { |bytes, is_last| upload_chunk(bytes, is_last) }
  end

  private

  # PUTs one chunk with a Content-Range header; the total size is revealed
  # only on the last chunk ("/*" means "size not yet known").
  def upload_chunk(chunk, is_last)
    @total_bytes += chunk.bytesize
    range_start = @last_byte
    range_end = @last_byte + chunk.bytesize - 1
    content_range = is_last ? "bytes #{range_start}-#{range_end}/#{@total_bytes}" : "bytes #{range_start}-#{range_end}/*"
    @last_byte += chunk.bytesize

    headers = {
      "Content-Length" => chunk.bytesize.to_s,
      "Content-Range" => content_range,
      "Content-Type" => @content_type,
      "Content-MD5" => Digest::MD5.base64digest(chunk) # This is to early flag bugs like the one mentioned below with httpx
    }

    # Use plain old Net::HTTP here since currently version 1.4.0 of HTTPX (which is used by Faraday in our env) mangles up the file bytes before upload.
    # when passing a File object directly.
    # See https://cheddar-me.slack.com/archives/C01FEPX7PA9/p1739290056637849
    # and https://gitlab.com/os85/httpx/-/issues/338
    put_response = Net::HTTP.put(@put_uri, chunk, headers)

    # Per https://cloud.google.com/storage/docs/performing-resumable-uploads#resume-upload
    # the server may not have persisted all bytes sent; strictly we should check
    # the response "Range:" header equals "bytes=0-#{range_end}" (TODO).
    #
    # 308 means "intermediate chunk uploaded", 200 means "last chunk uploaded"
    status = put_response.code.to_i
    return if status == 308 || status == 200

    raise "The PUT for the resumable upload responded with status #{put_response.code}, headers #{put_response.to_hash.inspect}"
  end
end
|
137
|
+
|
138
|
+
# @param [Google::Cloud::Storage::File]
|
139
|
+
# @param file [Google::Cloud::Storage::File] the (possibly skip_lookup) file to upload to
def initialize(file, content_type: "binary/octet-stream", **signed_url_options)
  @file = file
  @content_type = content_type
  # Caller-supplied options take precedence over the auto-detected issuer/signer pair.
  @signed_url_options = url_issuer_and_signer.merge(signed_url_options)
end
|
144
|
+
|
145
|
+
# @yields writable[IO] an IO-ish object that responds to `#write`
|
146
|
+
# Starts a resumable-upload session, then yields a writable object whose
# chunks are PUT to the session URL. The final chunk is flushed after the
# block returns.
#
# @yields writable[IO] an IO-ish object that responds to `#write`
def stream(&blk)
  start_url = @file.signed_url(method: "POST", content_type: @content_type, headers: {"x-goog-resumable": "start"}, **@signed_url_options)
  start_response = Net::HTTP.post(URI(start_url), "", {"content-type" => @content_type, "x-goog-resumable" => "start"})
  raise "Expected HTTP status code to be 201, got #{start_response.code}" if start_response.code.to_i != 201

  # The session PUT URL comes back in the Location header of the 201 response
  session_put_url = start_response["location"]
  writable = RangedPutIO.new(session_put_url, content_type: @content_type, chunk_size: CHUNK_SIZE_FOR_UPLOADS)
  yield(writable)
  writable.finish
end
|
156
|
+
|
157
|
+
private
|
158
|
+
|
159
|
+
# This is gnarly. It is needed to allow service accounts (workload identity) to sign
|
160
|
+
# blobs - which is needed to sign a presigned POST URL. The presigned POST URL allows us
|
161
|
+
# to initiate a resumable upload.
|
162
|
+
#
|
163
|
+
# Comes from here:
|
164
|
+
# https://github.com/googleapis/google-cloud-ruby/issues/13307#issuecomment-1894546343
|
165
|
+
# This is gnarly. It is needed to allow service accounts (workload identity) to sign
# blobs - which is needed to sign a presigned POST URL. The presigned POST URL allows us
# to initiate a resumable upload.
#
# Comes from here:
# https://github.com/googleapis/google-cloud-ruby/issues/13307#issuecomment-1894546343
def url_issuer_and_signer
  env = Google::Cloud.env
  return {} unless env.compute_engine?

  # Issuer is the service account email that the Signed URL will be signed with
  # and any permission granted in the Signed URL must be granted to the
  # Google Service Account.
  issuer = env.lookup_metadata "instance", "service-accounts/default/email"

  # Lambda that signs an arbitrary string via the IAM Credentials API
  signer = lambda do |string_to_sign|
    iam_client = Google::Apis::IamcredentialsV1::IAMCredentialsService.new

    # Get the environment configured authorization
    scopes = ["https://www.googleapis.com/auth/iam"]
    iam_client.authorization = Google::Auth.get_application_default scopes

    request = Google::Apis::IamcredentialsV1::SignBlobRequest.new(
      payload: string_to_sign
    )
    resource = "projects/-/serviceAccounts/#{issuer}"
    response = iam_client.sign_service_account_blob(resource, request)
    response.signed_blob
  end

  {issuer:, signer:}
end
|
194
|
+
end
|
@@ -0,0 +1,79 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "active_storage_encryption/version"
|
4
|
+
require "active_storage_encryption/engine"
|
5
|
+
|
6
|
+
module ActiveStorageEncryption
  autoload :PrivateUrlPolicy, __dir__ + "/active_storage_encryption/private_url_policy.rb"
  autoload :EncryptedBlobsController, __dir__ + "/active_storage_encryption/encrypted_blobs_controller.rb"
  autoload :EncryptedDiskService, __dir__ + "/active_storage_encryption/encrypted_disk_service.rb"
  autoload :EncryptedMirrorService, __dir__ + "/active_storage_encryption/encrypted_mirror_service.rb"
  autoload :EncryptedS3Service, __dir__ + "/active_storage_encryption/encrypted_s3_service.rb"
  autoload :Overrides, __dir__ + "/active_storage_encryption/overrides.rb"

  class IncorrectEncryptionKey < ArgumentError
  end

  class StreamingDisabled < ArgumentError
  end

  class StreamingTokenInvalidOrExpired < ActiveSupport::MessageEncryptor::InvalidMessage
  end

  # Unlike MessageVerifier#verify, MessageEncryptor#decrypt_and_verify does not raise an exception if
  # the message decrypts, but has expired or was signed for a different purpose. We want an exception
  # to remove the annoying nil checks.
  class TokenEncryptor < ActiveSupport::MessageEncryptor
    def decrypt_and_verify(value, **options)
      decrypted_message = super
      raise StreamingTokenInvalidOrExpired if decrypted_message.nil?
      decrypted_message
    end
  end

  # Returns the ActiveSupport::MessageEncryptor which is used for encrypting the
  # streaming download URLs. These URLs need to contain the encryption key which
  # we do not want to reveal to the consumer. Note that this encryptor _is not_
  # used to encrypt the file data itself - ActiveSupport::MessageEncryptor is not
  # fit for streaming and not designed for file encryption use cases; it is only
  # used for the tokens embedded in URLs (which is what it is actually good at).
  #
  # Rails configures per-app message verifiers centrally
  # (`Rails.application.message_verifier('ActiveStorage')` is what
  # `ActiveStorage.verifier` resolves to), but - unlike verifiers - there is no
  # centrally configured `message_encryptor` with a sane secret/scheme setup.
  # ActiveSupport::MessageEncryptors supports the same rotation API
  # (https://api.rubyonrails.org/classes/ActiveSupport/MessageEncryptors.html)
  # but we only need a single encryptor, so we derive our own key from the
  # Rails secret_key_base, much like the Rails bootstrapping code does.
  #
  # @return [ActiveSupport::MessageEncryptor] the configured encryptor.
  def self.token_encryptor
    app_secret = Rails.application.secret_key_base
    raise ArgumentError, "secret_key_base must be present on Rails.application" unless app_secret

    key_length = TokenEncryptor.key_len
    salt = Digest::SHA2.digest("ActiveStorageEncryption")
    raise "Salt must be the same length as the key" unless salt.bytesize == key_length
    derived_key = ActiveSupport::KeyGenerator.new(app_secret).generate_key(salt, key_length)

    # We need an URL-safe serializer, since the tokens are used in a path in URLs
    TokenEncryptor.new(derived_key, url_safe: true)
  end
end
|
data/test/dummy/Rakefile
ADDED
@@ -0,0 +1,8 @@
|
|
1
|
+
# frozen_string_literal: true

# Add your own tasks in files placed in lib/tasks ending in .rake,
# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake.

require_relative "config/application"

Rails.application.load_tasks
|
@@ -0,0 +1 @@
|
|
1
|
+
/* Application styles */
|
@@ -0,0 +1,22 @@
|
|
1
|
+
<!DOCTYPE html>
|
2
|
+
<html>
|
3
|
+
<head>
|
4
|
+
<title><%= content_for(:title) || "Dummy" %></title>
|
5
|
+
<meta name="viewport" content="width=device-width,initial-scale=1">
|
6
|
+
<meta name="apple-mobile-web-app-capable" content="yes">
|
7
|
+
<%= csrf_meta_tags %>
|
8
|
+
<%= csp_meta_tag %>
|
9
|
+
|
10
|
+
<%= yield :head %>
|
11
|
+
|
12
|
+
<link rel="manifest" href="/manifest.json">
|
13
|
+
<link rel="icon" href="/icon.png" type="image/png">
|
14
|
+
<link rel="icon" href="/icon.svg" type="image/svg+xml">
|
15
|
+
<link rel="apple-touch-icon" href="/icon.png">
|
16
|
+
<%= stylesheet_link_tag "application" %>
|
17
|
+
</head>
|
18
|
+
|
19
|
+
<body>
|
20
|
+
<%= yield %>
|
21
|
+
</body>
|
22
|
+
</html>
|
@@ -0,0 +1,22 @@
|
|
1
|
+
{
|
2
|
+
"name": "Dummy",
|
3
|
+
"icons": [
|
4
|
+
{
|
5
|
+
"src": "/icon.png",
|
6
|
+
"type": "image/png",
|
7
|
+
"sizes": "512x512"
|
8
|
+
},
|
9
|
+
{
|
10
|
+
"src": "/icon.png",
|
11
|
+
"type": "image/png",
|
12
|
+
"sizes": "512x512",
|
13
|
+
"purpose": "maskable"
|
14
|
+
}
|
15
|
+
],
|
16
|
+
"start_url": "/",
|
17
|
+
"display": "standalone",
|
18
|
+
"scope": "/",
|
19
|
+
"description": "Dummy.",
|
20
|
+
"theme_color": "red",
|
21
|
+
"background_color": "red"
|
22
|
+
}
|
@@ -0,0 +1,26 @@
|
|
1
|
+
// Add a service worker for processing Web Push notifications:
|
2
|
+
//
|
3
|
+
// self.addEventListener("push", async (event) => {
|
4
|
+
// const { title, options } = await event.data.json()
|
5
|
+
// event.waitUntil(self.registration.showNotification(title, options))
|
6
|
+
// })
|
7
|
+
//
|
8
|
+
// self.addEventListener("notificationclick", function(event) {
|
9
|
+
// event.notification.close()
|
10
|
+
// event.waitUntil(
|
11
|
+
// clients.matchAll({ type: "window" }).then((clientList) => {
|
12
|
+
// for (let i = 0; i < clientList.length; i++) {
|
13
|
+
// let client = clientList[i]
|
14
|
+
// let clientPath = (new URL(client.url)).pathname
|
15
|
+
//
|
16
|
+
// if (clientPath == event.notification.data.path && "focus" in client) {
|
17
|
+
// return client.focus()
|
18
|
+
// }
|
19
|
+
// }
|
20
|
+
//
|
21
|
+
// if (clients.openWindow) {
|
22
|
+
// return clients.openWindow(event.notification.data.path)
|
23
|
+
// }
|
24
|
+
// })
|
25
|
+
// )
|
26
|
+
// })
|
data/test/dummy/bin/rake
ADDED