canistor 0.1.0 → 0.1.1
- checksums.yaml +5 -5
- data/lib/canistor.rb +201 -2
- data/lib/canistor/authorization.rb +58 -0
- data/lib/canistor/error_handler.rb +147 -0
- data/lib/canistor/handler.rb +84 -0
- data/lib/canistor/plugin.rb +18 -0
- data/lib/canistor/storage.rb +9 -0
- data/lib/canistor/storage/bucket.rb +109 -0
- data/lib/canistor/storage/object.rb +149 -0
- data/lib/canistor/subject.rb +45 -0
- data/lib/canistor/version.rb +3 -3
- metadata +39 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
-SHA1:
-  metadata.gz:
-  data.tar.gz:
+SHA256:
+  metadata.gz: 2bb2182118a1177fe7d53b2430b381d11aeadf3b7575b91e0c64639858eb85cf
+  data.tar.gz: 1f5fd555fabda31b4f24d622a2818e5df7f0292aa1724f9984b70adf821d67a8
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c5c35f8dc1732a619ed8432a924e6f3bef0e09b78ff94279dfbb6f4471538fb14785e539c265de4ce3831d9a0b3c566424c56619a38b30dc654e0824918b1f93
+  data.tar.gz: 33adf6799dc1d6643228714b20df2135a4a834935630af348928ea8aa93c10f5cd278dffa66069e7229b0b81b1e7d06cc5d501def660c718c5f2ec36dd780838
data/lib/canistor.rb
CHANGED
@@ -1,4 +1,203 @@
+# frozen_string_literal: true
+
 require_relative "canistor/version"
+require_relative "canistor/storage"
+require_relative "canistor/authorization"
+require_relative "canistor/subject"
+require_relative "canistor/error_handler"
+require_relative "canistor/handler"
+require_relative "canistor/plugin"
+
+require "thread"
+
+# Replacement for the HTTP handler in the AWS SDK that mocks all interaction
+# with S3 just above the HTTP level.
+#
+# The mock implementation is turned on by replacing the NetHttp handler that
+# comes with the library with the Canistor handler.
+#
+#   Aws::S3::Client.remove_plugin(Seahorse::Client::Plugins::NetHttp)
+#   Aws::S3::Client.add_plugin(Canistor::Plugin)
+#
+# The Canistor instance then needs to be configured with buckets and
+# credentials to be useful. It can be configured using either the
+# config method on the instance or by specifying the buckets one by one.
+#
+# In the example below Canistor will have two accounts and three buckets. It
+# also specifies which accounts can access the buckets.
+#
+#   Canistor.config(
+#     logger: Rails.logger,
+#     credentials: {
+#       'global' => {
+#         access_key_id: 'AKIAIXXXXXXXXXXXXXX1',
+#         secret_access_key: 'phRL+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1'
+#       },
+#       'accounting' => {
+#         access_key_id: 'AKIAIXXXXXXXXXXXXXX2',
+#         secret_access_key: 'phRL+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx2'
+#       }
+#     },
+#     buckets: {
+#       'us-east-1' => {
+#         'com-procore-production-images' => ['global'],
+#         'com-procore-production-books' => ['global', 'accounting']
+#       },
+#       'eu-central-1' => {
+#         'com-procore-production-sales' => ['global']
+#       }
+#     }
+#   )
+#
+# Canistor implements basic interaction with buckets and objects. It also
+# verifies authentication information. It does not implement control lists so
+# all accounts have full access to the buckets and objects.
+#
+# The mock can simulate a number of failures. These are triggered by setting
+# the operation which needs to fail on the mock. For more information see
+# [Canistor.fail].
+#
+# In most cases you should configure the suite to clear the mock before running
+# each example with [Canistor.clear].
+module Canistor
+  class << self
+    attr_accessor :logger
+    attr_accessor :store
+    attr_reader :credentials
+    attr_reader :fail
+    attr_reader :fail_mutex
+  end
+
+  @store = {}
+  @buckets = {}
+  @credentials = Set.new
+  @fail_mutex = Mutex.new
+
+  def self.find_credentials(authorization)
+    if authorization.access_key_id
+      credentials.each do |attributes|
+        if authorization.access_key_id == attributes[:access_key_id]
+          return Aws::Credentials.new(
+            attributes[:access_key_id],
+            attributes[:secret_access_key]
+          )
+        end
+      end
+    end
+    nil
+  end
+
+  def self.credentials=(accounts)
+    accounts.each do |attributes|
+      unless attributes.keys.map(&:to_s) == %w(access_key_id secret_access_key)
+        raise(
+          ArgumentError,
+          "Credentials need to specify access_key_id and secret_access_key, " \
+          "got: `#{attributes.keys.inspect}'"
+        )
+      end
+    end
+    credentials.merge(accounts)
+  end
+
+  def self.buckets=(buckets)
+    buckets.each do |region, attributes|
+      attributes.each do |bucket, access_key_ids|
+        bucket = create_bucket(region, bucket)
+        bucket.allow_access_to(access_key_ids)
+        bucket
+      end
+    end
+  end
+
+  # Configures a bucket in the mock implementation. Use #allow_access_to on
+  # the Bucket object returned by this method to configure who may access
+  # the bucket.
+  def self.create_bucket(region, bucket_name)
+    store[region] ||= {}
+    store[region][bucket_name] = Canistor::Storage::Bucket.new(
+      region: region,
+      name: bucket_name
+    )
+  end
+
+  def self.config(config)
+    config.each do |section, attributes|
+      public_send("#{section}=", attributes)
+    end
+  end
+
+  SUPPORTED_FAILURES = [
+    :internal_server_error,
+    :reset_connection,
+    :fatal,
+    :store
+  ]
+
+  # The mock can simulate a number of failures. These are triggered by setting
+  # the way we expect it to fail. Note that the AWS-SDK already helps you
+  # to recover from certain errors like :reset_connection. If you want these
+  # kinds of error to trigger a failure you have to call #fail more times than
+  # the configured retry count.
+  #
+  #   Canistor.fail(:reset_connection)
+  #   Canistor.fail(
+  #     :reset_connection,
+  #     :reset_connection,
+  #     :reset_connection,
+  #     :reset_connection
+  #   )
+  #
+  # * reset_connection: Signals the library to handle a connection error
+  #   (retryable)
+  # * internal_server_error: Returns a 500 internal server error (retryable)
+  # * fatal: Signals the library to handle a fatal error (fatal)
+  #
+  # A less common problem is when S3 reports a successful write but fails to
+  # store the file. This means the PUT to the bucket will be successful, but
+  # GET and HEAD on the object fail, because it's not there.
+  #
+  #   Canistor.fail(:store)
+  def self.fail(*operations)
+    unsupported = operations - SUPPORTED_FAILURES
+    unless unsupported.empty?
+      raise(
+        ArgumentError,
+        "Requested unsupported failure: `#{unsupported.inspect}', supported: " \
+        "#{SUPPORTED_FAILURES.inspect}."
+      )
+    end
+    fail_mutex.synchronize do
+      @fail.concat(operations)
+    end
+  end
+
+  # Returns true when Canistor should fail the operation and false otherwise.
+  def self.fail?(operation)
+    @fail.include?(operation)
+  end
+
+  # Executes the block when the operation is in the failure queue and removes
+  # one instance of the operation.
+  def self.take_fail(operation, &block)
+    fail_mutex.synchronize do
+      if index = @fail.index(operation)
+        block.call
+        @fail.delete_at(index)
+      end
+    end
+  end
+
+  # Clears the state of the mock. Leaves all the credentials and buckets but
+  # removes all objects and mocked responses.
+  def self.clear
+    @fail = []
+    @store.each do |region, buckets|
+      buckets.each do |bucket_name, bucket|
+        bucket.clear
+      end
+    end
+  end
 
-module Canistor
-end
+  clear
+end
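The module comment above documents the wiring end to end: swap the SDK's NetHttp plugin for Canistor::Plugin, configure credentials and buckets, and clear the mock between examples. Below is a minimal, hedged sketch of that setup (the key, bucket, and object names are placeholders, not part of the gem). Note that `credentials=` as implemented takes a flat list of key/secret hashes and the bucket allow-lists are matched against access key ids, so the sketch passes those rather than the named accounts shown in the comment's example.

require "aws-sdk-s3"
require "canistor"

# Route all Aws::S3::Client traffic through the in-memory mock, as described
# in the module comment.
Aws::S3::Client.remove_plugin(Seahorse::Client::Plugins::NetHttp)
Aws::S3::Client.add_plugin(Canistor::Plugin)

Canistor.config(
  credentials: [
    {
      access_key_id: 'AKIAIXXXXXXXXXXXXXX1',
      secret_access_key: 'phRL+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1'
    }
  ],
  buckets: {
    # The allow-list is checked against the access key id that signs requests.
    'us-east-1' => { 'com-example-test' => ['AKIAIXXXXXXXXXXXXXX1'] }
  }
)

client = Aws::S3::Client.new(
  region: 'us-east-1',
  credentials: Aws::Credentials.new(
    'AKIAIXXXXXXXXXXXXXX1',
    'phRL+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1'
  )
)

client.put_object(bucket: 'com-example-test', key: 'hello.txt', body: 'hi')
client.get_object(bucket: 'com-example-test', key: 'hello.txt').body.read # => "hi"

# In a test suite, call Canistor.clear in a setup/before hook: it drops objects
# and queued failures while keeping the configured buckets and credentials.
Canistor.clear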
data/lib/canistor/authorization.rb
ADDED
@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+module Canistor
+  class Authorization
+    attr_reader :protocol
+    attr_reader :region
+    attr_reader :access_key_id
+    attr_reader :signature
+    attr_reader :date
+
+    def initialize(authorization)
+      @protocol, params = authorization.split(' ', 2)
+      params.split(', ').inject({}) do |unpacked, part|
+        name, values = part.split('=')
+        case name
+        when 'Credential'
+          self.credential = values.split('/')
+        when 'Signature'
+          self.signature = values
+        end
+        unpacked
+      end
+    end
+
+    def valid_signature?(request, credentials)
+      return false if signature.to_s.strip == ''
+      signer = Aws::Sigv4::Signer.new(
+        service: 's3',
+        region: region,
+        credentials_provider: credentials,
+        uri_escape_path: false,
+        unsigned_headers: ['content-length', 'x-amzn-trace-id']
+      )
+      signed_request = signer.sign_request(
+        http_method: request.http_method,
+        url: request.endpoint.to_s,
+        headers: request.headers.to_hash,
+        body: request.body
+      )
+      signature == signer.send(
+        :signature,
+        credentials.secret_access_key,
+        date,
+        signed_request.string_to_sign
+      )
+    end
+
+    private
+
+    def signature=(signature)
+      @signature = signature
+    end
+
+    def credential=(credential)
+      @access_key_id, @date, @region = credential[0, 3]
+    end
+  end
+end
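For reference, this is the shape of header the class parses. The example below is illustrative only (the key, credential scope, and signature are made up); it exercises the parsing, not signature verification:

require "canistor/authorization"

header = 'AWS4-HMAC-SHA256 ' \
  'Credential=AKIAIXXXXXXXXXXXXXX1/20180216/us-east-1/s3/aws4_request, ' \
  'SignedHeaders=host;x-amz-content-sha256;x-amz-date, ' \
  'Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7'

authorization = Canistor::Authorization.new(header)
authorization.protocol      # => "AWS4-HMAC-SHA256"
authorization.access_key_id # => "AKIAIXXXXXXXXXXXXXX1" (from the credential scope)
authorization.date          # => "20180216"
authorization.region        # => "us-east-1"
authorization.signature     # => "5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"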
data/lib/canistor/error_handler.rb
ADDED
@@ -0,0 +1,147 @@
+# frozen_string_literal: true
+
+module Canistor
+  class ErrorHandler
+    attr_reader :context
+    attr_reader :request_id
+    attr_reader :host_id
+
+    def initialize(context)
+      @context = context
+      @request_id = SecureRandom.hex(8).upcase
+      @host_id = Base64.strict_encode64(SecureRandom.hex(16))
+    end
+
+    def request
+      context.http_request
+    end
+
+    def response
+      context.http_response
+    end
+
+    def serve_invalid_access_key(authorization)
+      serve_error(403, Nokogiri::XML::Builder.new do |xml|
+        xml.Error do
+          xml.Code 'InvalidAccessKeyId'
+          xml.Message 'The AWS Access Key Id you provided does not exist in our records.'
+          xml.AWSAccessKeyId authorization.access_key_id
+          xml.RequestId request_id
+          xml.HostId host_id
+        end
+      end.to_xml)
+    end
+
+    def serve_signature_does_not_match(authorization)
+      serve_error(403, Nokogiri::XML::Builder.new do |xml|
+        xml.Error do
+          xml.Code 'SignatureDoesNotMatch'
+          xml.Message 'The request signature we calculated does not match the signature you provided. Check your key and signing method.'
+          xml.AWSAccessKeyId authorization.access_key_id
+          xml.SignatureProvided authorization.signature
+          xml.RequestId request_id
+          xml.HostId host_id
+        end
+      end.to_xml)
+    end
+
+    def serve_no_such_bucket(subject)
+      serve_error(404, Nokogiri::XML::Builder.new do |xml|
+        xml.Error do
+          xml.Code 'NoSuchBucket'
+          xml.Message 'The specified bucket does not exist'
+          xml.BucketName subject.bucket
+          xml.RequestId request_id
+          xml.HostId host_id
+        end
+      end.to_xml)
+    end
+
+    def serve_no_such_key(subject)
+      serve_error(404, Nokogiri::XML::Builder.new do |xml|
+        xml.Error do
+          xml.Code 'NoSuchKey'
+          xml.Message 'The specified key does not exist.'
+          xml.Key subject.key
+          xml.RequestId request_id
+          xml.HostId host_id
+        end
+      end.to_xml)
+    end
+
+    def serve_access_denied(subject)
+      serve_error(403, Nokogiri::XML::Builder.new do |xml|
+        xml.Error do
+          xml.Code 'AccessDenied'
+          xml.Message 'Access Denied'
+          xml.RequestId request_id
+          xml.HostId host_id
+        end
+      end.to_xml)
+    end
+
+    def serve_internal_error
+      serve_error(500, Nokogiri::XML::Builder.new do |xml|
+        xml.Error do
+          xml.Code 'InternalError'
+          xml.Message 'We encountered an internal error. Please try again.'
+          xml.RequestId request_id
+          xml.HostId host_id
+        end
+      end.to_xml)
+    end
+
+    def serve_error(status_code, body)
+      response.signal_headers(
+        status_code,
+        'date' => Time.now.httpdate,
+        'x-amz-request-id' => request_id
+      )
+      unless request.http_method == 'HEAD'
+        response.signal_data(body)
+      end
+    end
+
+    def trigger_reset_connection
+      response.signal_error(Seahorse::Client::NetworkingError.new(
+        Errno::ECONNRESET.new, 'Remote host reset the connection request.'
+      ))
+    end
+
+    def trigger_fatal_error
+      response.signal_error(RuntimeError.new("Fatal error."))
+    end
+
+    def self.serve_invalid_access_key(context, authorization)
+      new(context).serve_invalid_access_key(authorization)
+    end
+
+    def self.serve_signature_does_not_match(context, authorization)
+      new(context).serve_signature_does_not_match(authorization)
+    end
+
+    def self.serve_no_such_bucket(context, subject)
+      new(context).serve_no_such_bucket(subject)
+    end
+
+    def self.serve_no_such_key(context, subject)
+      new(context).serve_no_such_key(subject)
+    end
+
+    def self.serve_access_denied(context, subject)
+      new(context).serve_access_denied(subject)
+    end
+
+    def self.serve_internal_error(context)
+      new(context).serve_internal_error
+    end
+
+    def self.trigger_reset_connection(context)
+      new(context).trigger_reset_connection
+    end
+
+    def self.trigger_fatal_error(context)
+      new(context).trigger_fatal_error
+    end
+  end
+end
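These documents mirror the XML error bodies S3 itself returns, so on the client side aws-sdk-s3 maps the Code element onto its usual error classes. A hedged illustration, reusing the client and configuration from the sketch after canistor.rb (the bucket name is a placeholder that was never configured):

begin
  client.get_object(bucket: 'com-example-unknown', key: 'missing.txt')
rescue Aws::S3::Errors::NoSuchBucket => error
  # Raised from the 404 body produced by serve_no_such_bucket above.
  error.code    # => "NoSuchBucket"
  error.message # => "The specified bucket does not exist"
end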
data/lib/canistor/handler.rb
ADDED
@@ -0,0 +1,84 @@
+# frozen_string_literal: true
+
+require 'set'
+require 'cgi'
+require 'uri/http'
+require 'digest/sha1'
+require 'base64'
+require 'nokogiri'
+require 'singleton'
+require 'securerandom'
+
+module Canistor
+  # AWS-SDK compatible handler to mock S3 interaction.
+  class Handler < Seahorse::Client::Handler
+    # @param [RequestContext] context
+    # @return [Response]
+    def call(context)
+      log(context.config, context.http_request)
+      handle(
+        context,
+        Canistor::Authorization.new(context.http_request.headers['Authorization']),
+        Canistor::Subject.new(context.http_request.endpoint)
+      )
+      Seahorse::Client::Response.new(context: context)
+    end
+
+    private
+
+    # Mocks interaction with S3 using the library request context, authorization
+    # from headers, and subject based on the request URI.
+    #
+    # When a bucket can be found the request will be forwarded to the
+    # Canistor::Storage::Bucket object for further handling.
+    #
+    # Stubbed error responses and error conditions are handled by rendering the
+    # correct responses or raising an exception.
+    def handle(context, authorization, subject)
+      Canistor.take_fail(:fatal) do
+        return Canistor::ErrorHandler.trigger_fatal_error(context)
+      end
+      Canistor.take_fail(:reset_connection) do
+        return Canistor::ErrorHandler.trigger_reset_connection(context)
+      end
+      Canistor.take_fail(:internal_server_error) do
+        return Canistor::ErrorHandler.serve_internal_error(context)
+      end
+      if credentials = Canistor.find_credentials(authorization)
+        if authorization.valid_signature?(context.http_request, credentials)
+          if bucket = Canistor.store.dig(subject.region, subject.bucket)
+            method = context.http_request.http_method.to_s.downcase
+            bucket.send(
+              method,
+              context,
+              authorization.access_key_id,
+              subject
+            )
+          else
+            Canistor::ErrorHandler.serve_no_such_bucket(context, subject)
+          end
+        else
+          Canistor::ErrorHandler.serve_signature_does_not_match(
+            context,
+            authorization
+          )
+        end
+      else
+        Canistor::ErrorHandler.serve_invalid_access_key(context, authorization)
+      end
+
+      context.http_response.signal_done
+    end
+
+    def log(config, request)
+      headers = request.headers.to_hash.slice('content-length', 'content-type')
+      params = CGI::parse(request.endpoint.query.to_s)
+      Canistor.logger.debug(
+        '[Canistor::S3] ' + config.region + ' ' + request.http_method + ' ' +
+        request.endpoint.path.to_s +
+        (headers.empty? ? '' : ' ' + headers.inspect) +
+        (params.empty? ? '' : ' ' + params.inspect)
+      ) if Canistor.logger
+    end
+  end
+end
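Because the queued failures are drained before authentication, failure injection composes with the SDK's normal retry behaviour. A sketch, again assuming the earlier setup and the SDK's default retry count of three:

# A single reset should be absorbed by the SDK's retry logic.
Canistor.fail(:reset_connection)
client.put_object(bucket: 'com-example-test', key: 'report.csv', body: 'a,b')

# Queue more resets than the retry count and the error reaches the caller.
Canistor.fail(:reset_connection, :reset_connection, :reset_connection, :reset_connection)
begin
  client.get_object(bucket: 'com-example-test', key: 'report.csv')
rescue Seahorse::Client::NetworkingError => error
  error.message # => "Remote host reset the connection request."
end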
data/lib/canistor/plugin.rb
ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+module Canistor
+  class Plugin < Seahorse::Client::Plugin
+    option(:http_proxy, default: nil, doc_type: String, docstring: '')
+    option(:http_open_timeout, default: 15, doc_type: Integer, docstring: '')
+    option(:http_read_timeout, default: 60, doc_type: Integer, docstring: '')
+    option(:http_idle_timeout, default: 5, doc_type: Integer, docstring: '')
+    option(:http_continue_timeout, default: 1, doc_type: Integer, docstring: '')
+    option(:http_wire_trace, default: false, doc_type: 'Boolean', docstring: '')
+    option(:ssl_verify_peer, default: true, doc_type: 'Boolean', docstring: '')
+    option(:ssl_ca_bundle, default: nil, doc_type: String, docstring: '')
+    option(:ssl_ca_directory, default: nil, doc_type: String, docstring: '')
+    option(:ssl_ca_store, default: nil, doc_type: String, docstring: '')
+    option(:logger) # for backwards compat
+    handler(Canistor::Handler, step: :send)
+  end
+end
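The option declarations exist so that clients built with the usual http_*/ssl_* options keep working after Seahorse::Client::Plugins::NetHttp has been removed; the functional piece is the handler registration at the :send step. A small sketch, assuming the plugin swap from the first example:

Aws::S3::Client.plugins.include?(Canistor::Plugin)                   # => true
Aws::S3::Client.plugins.include?(Seahorse::Client::Plugins::NetHttp) # => false

# Still accepted, because Canistor::Plugin re-declares the option.
Aws::S3::Client.new(
  region: 'us-east-1',
  http_open_timeout: 5,
  credentials: Aws::Credentials.new('AKIAIXXXXXXXXXXXXXX1', 'phRL+xxx1')
)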
data/lib/canistor/storage/bucket.rb
ADDED
@@ -0,0 +1,109 @@
+# frozen_string_literal: true
+
+require 'set'
+require 'base64'
+require 'nokogiri'
+require 'singleton'
+require 'securerandom'
+
+module Canistor
+  module Storage
+    class Bucket
+      attr_accessor :region
+      attr_accessor :name
+
+      attr_reader :access_keys
+      attr_reader :objects
+
+      def initialize(**attributes)
+        @access_keys = Set.new
+        clear
+        attributes.each do |name, value|
+          public_send("#{name}=", value)
+        end
+      end
+
+      def [](name)
+        @objects[name]
+      end
+
+      def []=(name, value)
+        @objects[name] = value
+      end
+
+      def dig(*segments)
+        @objects.dig(*segments)
+      end
+
+      def head(context, access_key_id, subject)
+        if !access_keys.include?(access_key_id)
+          Canistor::ErrorHandler.serve_access_denied(context, subject)
+        elsif object = objects[subject.key]
+          object.head(context, subject)
+        else
+          Canistor::ErrorHandler.serve_no_such_key(context, subject)
+        end
+      end
+
+      def get(context, access_key_id, subject)
+        if !access_keys.include?(access_key_id)
+          Canistor::ErrorHandler.serve_access_denied(context, subject)
+        elsif object = objects[subject.key]
+          object.get(context, subject)
+        else
+          Canistor::ErrorHandler.serve_no_such_key(context, subject)
+        end
+      end
+
+      def put(context, access_key_id, subject)
+        if access_keys.include?(access_key_id)
+          Canistor.take_fail(:store) { return }
+          object = find_or_build_object(subject, context)
+          self[subject.key] = object
+          object.put(context, subject)
+        else
+          Canistor::ErrorHandler.serve_access_denied(context, subject)
+        end
+      end
+
+      def delete(context, access_key_id, subject)
+        if !access_keys.include?(access_key_id)
+          Canistor::ErrorHandler.serve_access_denied(context, subject)
+        elsif object = objects[subject.key]
+          @objects.delete(object.key)
+          object.delete(context, subject)
+        else
+          Canistor::ErrorHandler.serve_no_such_key(context, subject)
+        end
+      end
+
+      def clear
+        @objects = {}
+      end
+
+      def to_s
+        @objects.values.map do |object|
+          ' * ' + object.label
+        end.join("\n")
+      end
+
+      def allow_access_to(access_key_ids)
+        access_keys.merge(access_key_ids)
+      end
+
+      def headers
+        {}
+      end
+
+      private
+
+      def find_or_build_object(subject, context)
+        objects[subject.key] || Canistor::Storage::Object.new(
+          region: subject.region,
+          bucket: subject.bucket,
+          key: subject.key
+        )
+      end
+    end
+  end
+end
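Buckets can also be set up one at a time instead of through Canistor.config; access is a flat allow-list matched against the access key id that signed the request. A short sketch with placeholder names:

bucket = Canistor.create_bucket('eu-central-1', 'com-example-archive')
bucket.allow_access_to(['AKIAIXXXXXXXXXXXXXX1'])

Canistor.store.dig('eu-central-1', 'com-example-archive').equal?(bucket) # => true

# Requests signed with any other key id hit the serve_access_denied branch in
# Bucket#head/#get/#put/#delete before the object store is consulted.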
data/lib/canistor/storage/object.rb
ADDED
@@ -0,0 +1,149 @@
+# frozen_string_literal: true
+
+require "digest/sha1"
+
+module Canistor
+  module Storage
+    class Object
+      attr_accessor :region
+      attr_accessor :bucket
+      attr_accessor :key
+
+      attr_reader :data
+      attr_reader :last_modified
+
+      def initialize(**attributes)
+        @headers = {}
+        @data = ''
+        attributes.each do |name, value|
+          send("#{name}=", value)
+        end
+        @digest = nil
+      end
+
+      def size
+        data&.size
+      end
+
+      def label
+        [region, bucket, key].map(&:to_s).join(':') + ' ' + headers.inspect
+      end
+
+      def write(headers, data)
+        self.headers = headers
+        self.data = data
+      end
+
+      def digest
+        @digest ||= Digest::SHA1.hexdigest(data)
+      end
+
+      def etag
+        '"' + digest + '"'
+      end
+
+      def headers
+        @headers.merge(identity_headers).merge(
+          'date' => Time.now.httpdate,
+          'content-length' => size.to_s,
+          'last-modified' => last_modified.httpdate,
+          'server' => 'Canistor'
+        )
+      end
+
+      def identity_headers
+        {
+          'x-amz-request-id' => Base64.strict_encode64(SecureRandom.hex(16)),
+          'x-amz-id' => digest[0, 16],
+          'x-amz-id-2' => Base64.strict_encode64(digest),
+          'etag' => etag
+        }
+      end
+
+      def head(context, subject)
+        context.http_response.signal_headers(200, headers)
+      end
+
+      def get(context, subject)
+        context.http_response.signal_headers(200, headers)
+        context.http_response.signal_data(data)
+      end
+
+      def put(context, subject)
+        catch(:rendered_error) do
+          source_object = source_object(context, subject)
+          self.data = object_data(context, source_object)
+          self.headers = object_headers(context, source_object)
+          context.http_response.signal_headers(200, identity_headers)
+        end
+      end
+
+      def delete(context, subject)
+        context.http_response.signal_headers(200, {})
+      end
+
+      private
+
+      def source_object(context, subject)
+        if source = context.http_request.headers['x-amz-copy-source']
+          bucket_name, key = source.split('/', 2)
+          if bucket = Canistor.store.dig(region, bucket_name)
+            if object = bucket.dig(key)
+              object
+            else
+              Canistor::ErrorHandler.serve_no_such_key(context, subject)
+              throw :rendered_error
+            end
+          else
+            Canistor::ErrorHandler.serve_no_such_bucket(context, subject)
+            throw :rendered_error
+          end
+        end
+      end
+
+      def object_data(context, source_object)
+        if source_object
+          source_object.data
+        else
+          context.http_request.body
+        end
+      end
+
+      def object_headers(context, source_object)
+        directive = context.http_request.headers['x-amz-metadata-directive']
+        case directive
+        when 'COPY'
+          source_object.headers
+        when 'REPLACE', nil
+          context.http_request.headers
+        else
+          raise ArgumentError, "Unsupported metadata directive: `#{directive}'"
+        end
+      end
+
+      META_HEADERS = %w(
+        content-disposition
+        content-type
+      )
+
+      def headers=(headers)
+        return if headers.nil?
+        headers.each do |name, value|
+          if META_HEADERS.include?(name)
+            @headers[name] = value
+          end
+        end
+      end
+
+      def data=(data)
+        @digest = nil
+        @last_modified = Time.now
+        if data.respond_to?(:read)
+          @data = data.read
+        else
+          @data = data
+        end
+      end
+    end
+  end
+end
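The copy branch in Object#put is driven by two request headers: x-amz-copy-source selects the source object and x-amz-metadata-directive decides whether its headers are copied or replaced. A sketch using aws-sdk-s3's copy_object, reusing the earlier client and bucket and assuming the header value the SDK sends is compatible with the split('/', 2) above:

client.put_object(
  bucket: 'com-example-test',
  key: 'original.txt',
  body: 'payload',
  content_type: 'text/plain'
)

client.copy_object(
  bucket: 'com-example-test',
  key: 'copy.txt',
  copy_source: 'com-example-test/original.txt', # becomes x-amz-copy-source
  metadata_directive: 'COPY'                    # keep the source object's headers
)

client.head_object(bucket: 'com-example-test', key: 'copy.txt').content_type
# => "text/plain" (content-type is in META_HEADERS, so it travels with the copy)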
data/lib/canistor/subject.rb
ADDED
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+module Canistor
+  class Subject
+    attr_reader :uri
+
+    attr_reader :region
+    attr_reader :bucket
+    attr_reader :key
+
+    HOST_RE = /\A([^\.]+\.)?(s3\.([^\.]+)|s3-([^\.]+))/
+
+    def initialize(uri)
+      @uri = uri
+
+      host_segments = HOST_RE.match(uri.host)
+      path_segments = uri.path.split('/', 3)[1..-1] || []
+
+      @region = parse_region(host_segments)
+      @bucket = parse_bucket(host_segments, path_segments)
+      @key = path_segments.empty? ? nil : path_segments.join('/')
+    end
+
+    private
+
+    def parse_region(host_segments)
+      case host_segments[3]
+      when nil
+        host_segments[4]
+      when 'amazonaws'
+        'us-east-1'
+      else
+        host_segments[3]
+      end
+    end
+
+    def parse_bucket(host_segments, path_segments)
+      if host_segments[1]
+        host_segments[1][0..-2]
+      else
+        path_segments.shift
+      end
+    end
+  end
+end
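The regular expression distinguishes virtual-hosted and path-style endpoints as well as the dashed and dotted regional host formats. Illustrative parses (bucket and key names are placeholders):

require "uri"
require "canistor/subject"

Canistor::Subject.new(URI('https://com-example-test.s3.amazonaws.com/photos/cat.jpg'))
# region "us-east-1", bucket "com-example-test", key "photos/cat.jpg"

Canistor::Subject.new(URI('https://com-example-test.s3-eu-central-1.amazonaws.com/cat.jpg'))
# region "eu-central-1", bucket "com-example-test", key "cat.jpg"

Canistor::Subject.new(URI('https://com-example-test.s3.eu-west-1.amazonaws.com/cat.jpg'))
# region "eu-west-1", bucket "com-example-test", key "cat.jpg"

Canistor::Subject.new(URI('https://s3.amazonaws.com/com-example-test/photos/cat.jpg'))
# path style: region "us-east-1", bucket "com-example-test", key "photos/cat.jpg"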
data/lib/canistor/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: canistor
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.1.1
 platform: ruby
 authors:
 - Manfred Stienstra
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-
+date: 2018-02-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -38,6 +38,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '12.3'
+- !ruby/object:Gem::Dependency
+  name: minitest-assert_errors
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: aws-sdk-s3
   requirement: !ruby/object:Gem::Requirement
@@ -52,6 +66,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '1'
+- !ruby/object:Gem::Dependency
+  name: nokogiri
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.8'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.8'
 description: |2
     Canistor allows you to register an HTTP handler with the AWS SDK and all
     interaction with S3 buckets and objects will happen in memory instead of
@@ -64,6 +92,14 @@ extra_rdoc_files: []
 files:
 - LICENSE.txt
 - lib/canistor.rb
+- lib/canistor/authorization.rb
+- lib/canistor/error_handler.rb
+- lib/canistor/handler.rb
+- lib/canistor/plugin.rb
+- lib/canistor/storage.rb
+- lib/canistor/storage/bucket.rb
+- lib/canistor/storage/object.rb
+- lib/canistor/subject.rb
 - lib/canistor/version.rb
 homepage: https://erm.im/canistor
 licenses:
@@ -85,7 +121,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.7.3
 signing_key:
 specification_version: 4
 summary: Canistor is mock for Aws::S3 defined by the AWS SDK gem.