fog-aws 3.5.0 → 3.5.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +24 -2
- data/lib/fog/aws/compute.rb +1 -1
- data/lib/fog/aws/glacier.rb +83 -21
- data/lib/fog/aws/requests/lambda/invoke.rb +7 -7
- data/lib/fog/aws/version.rb +1 -1
- data/tests/requests/glacier/tree_hash_tests.rb +29 -0
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1e5ec1bf418676b65d208530359ea42646043759948957785d93f49766803490
+  data.tar.gz: d01633914ad453baeeb3b5da1eaa21880276f4a534a3dd00d28c20cd0c9c1b4f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a1bf1567ddbcaff60e4cf8041586807f055fc28a1d010a373d7038fa1eb83ab84feb42e2595ebeef5b8508fa61ea6a76e9ad22592d0be0ba865b3908ab8633b8
+  data.tar.gz: 145c2544d2a240f66dd7e85385509812e12b475fccbbb33563321db519584ed18eb35bb9509ddaef2a397ac545578d5f9ebe6202cb4c19fdaa0122cb5267a351
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,28 @@
 # Change Log
 
+## [Unreleased](https://github.com/fog/fog-aws/tree/HEAD)
+
+[Full Changelog](https://github.com/fog/fog-aws/compare/v3.5.0...HEAD)
+
+**Merged pull requests:**
+
+- Fixed issue with InvocationType header for AWS Lambda [\#524](https://github.com/fog/fog-aws/pull/524) ([GarrisonD](https://github.com/GarrisonD))
+- Add support for generating tree hash tests by adding unaligned parts. [\#521](https://github.com/fog/fog-aws/pull/521) ([hkmaly](https://github.com/hkmaly))
+
+## [v3.5.0](https://github.com/fog/fog-aws/tree/v3.5.0) (2019-04-25)
+[Full Changelog](https://github.com/fog/fog-aws/compare/v3.4.0...v3.5.0)
+
+**Closed issues:**
+
+- Missing AWS region: EU \(Stockholm\) eu-north-1 [\#514](https://github.com/fog/fog-aws/issues/514)
+- Support for AWS fargate [\#510](https://github.com/fog/fog-aws/issues/510)
+
+**Merged pull requests:**
+
+- Add AWS Stockholm region [\#515](https://github.com/fog/fog-aws/pull/515) ([fred-secludit](https://github.com/fred-secludit))
+- Enhance/handle ELBv2 api version [\#512](https://github.com/fog/fog-aws/pull/512) ([KevinLoiseau](https://github.com/KevinLoiseau))
+- Enhance/add attribute db subnet group for db instance [\#511](https://github.com/fog/fog-aws/pull/511) ([KevinLoiseau](https://github.com/KevinLoiseau))
+
 ## [v3.4.0](https://github.com/fog/fog-aws/tree/v3.4.0) (2019-03-11)
 [Full Changelog](https://github.com/fog/fog-aws/compare/v3.3.0...v3.4.0)
 
@@ -184,7 +207,6 @@
 - Fog::AWS::IAM::Error: InvalidAction =\> Could not find operation "ReplaceIamInstanceProfileAssociation" for version 2010-05-08 [\#368](https://github.com/fog/fog-aws/issues/368)
 - Multipart upload fails on empty files [\#364](https://github.com/fog/fog-aws/issues/364)
 - The action `ModifyVolume` is not valid for this web service. [\#363](https://github.com/fog/fog-aws/issues/363)
-- Tag instances upon creation of new instance [\#359](https://github.com/fog/fog-aws/issues/359)
 - Cache/read local amazon data [\#354](https://github.com/fog/fog-aws/issues/354)
 
 **Merged pull requests:**
@@ -783,4 +805,4 @@
 ## [fog-brightbox_v0.0.1](https://github.com/fog/fog-aws/tree/fog-brightbox_v0.0.1) (2014-02-19)
 
 
-\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)*
+\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)*
data/lib/fog/aws/compute.rb
CHANGED
@@ -454,7 +454,7 @@ module Fog
 
       def tagged_resources(resources)
         Array(resources).map do |resource_id|
-          if match = resource_id.match(/^(\w+)-[a-z0-9]{8}/i)
+          if match = resource_id.match(/^(\w+)-[a-z0-9]{8,17}/i)
             id = match.captures.first
           else
             raise(Fog::Service::NotFound.new("Unknown resource id #{resource_id}"))
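The only change here widens the resource-ID suffix from exactly 8 hexadecimal characters to 8–17: newer EC2 resource IDs (instances, volumes, snapshots and so on) use 17-character suffixes, while older ones use 8. A quick standalone check of the new pattern (illustrative values, not code from the gem):

```ruby
# Illustrative only: the regex below is the new pattern from tagged_resources.
pattern = /^(\w+)-[a-z0-9]{8,17}/i

old_style = 'i-1a2b3c4d'              # 8-character suffix (older ID format)
new_style = 'vol-0123456789abcdef0'   # 17-character suffix (current ID format)

old_style.match(pattern).captures.first # => "i"
new_style.match(pattern).captures.first # => "vol"
```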
data/lib/fog/aws/glacier.rb
CHANGED
@@ -39,52 +39,114 @@ module Fog
           new.add_part(body)
         end
 
-        def
-
-
-
-
-
-
-
+        def initialize
+          @last_chunk_digest = nil      # Digest OBJECT for last chunk (Digest::SHA256)
+          @last_chunk_digest_temp = nil # Digest VALUE for last chunk
+          @last_chunk_length = 0        # Length of last chunk, always smaller than 1MB.
+          @digest_stack = []
+          # First position on stack corresponds to 1MB, second 2MB, third 4MB, fourth 8MB and so on.
+          # At any time, the size of all already added parts is equal to the sum of all existing (non-nil)
+          # positions multiplied by that number, plus last_chunk_length for the remainder smaller than
+          # one megabyte. So, if last_chunk_length is half a megabyte, stack[0] is filled, stack[1] and
+          # stack[2] empty and stack[3] filled, the size is 0.5MB + 1x1MB + 0x2MB + 0x4MB + 1x8MB = 9.5MB.
+        end
+
+        def update_digest_stack(digest, stack)
+          stack.each_with_index{|s,i|
+            if s
+              digest = Digest::SHA256.digest(s + digest)
+              stack[i] = nil
+            else
+              stack[i] = digest # Update this position with the value obtained in the previous run of the cycle.
+              digest = nil
+              break
            end
-
-
+          }
+          stack << digest if digest
         end
 
-        def
-
+        def reduce_digest_stack(digest, stack)
+          stack.each_with_index{|s,i|
+            unless digest
+              digest = stack[i]
+              next
+            end
+            if stack[i]
+              digest = Digest::SHA256.digest(stack[i] + digest)
+            end
+          }
+          digest
         end
 
        def add_part(bytes)
          part = self.digest_for_part(bytes)
-          @digests << part
          part.unpack('H*').first
        end
 
-        def
-          chunk_count = [body.bytesize / MEGABYTE + (body.bytesize % MEGABYTE > 0 ? 1 : 0), 1].max
+        def prepare_body_for_slice(body)
          if body.respond_to? :byteslice
-
+            r = yield(body, :byteslice)
          else
            if body.respond_to? :encoding
              old_encoding = body.encoding
              body.force_encoding('BINARY')
            end
-
+            r = yield(body, :slice)
            if body.respond_to? :encoding
              body.force_encoding(old_encoding)
            end
          end
-
+          r
        end
 
-        def
-
+        def digest_for_part(body)
+          part_stack = []
+          part_temp = nil
+          body_size = body.bytesize
+          prepare_body_for_slice(body) {|body, slice|
+            start_offset = 0
+            if @last_chunk_length != 0
+              start_offset = MEGABYTE - @last_chunk_length
+              @last_chunk_hash.update(body.send(slice, 0, start_offset))
+              hash = @last_chunk_hash.digest
+              @last_chunk_digest_temp = hash
+              if body_size > start_offset
+                @last_chunk_length = 0
+                @last_chunk_hash = nil
+                @last_chunk_digest_temp = nil
+                update_digest_stack(hash, @digest_stack)
+              else
+                part_temp = hash
+                @last_chunk_digest_temp = hash
+                @last_chunk_length += body_size
+                next
+              end
+            end
+            whole_chunk_count = (body_size - start_offset) / MEGABYTE
+            whole_chunk_count.times.each {|chunk_index|
+              hash = Digest::SHA256.digest(body.send(slice, start_offset + chunk_index * MEGABYTE, MEGABYTE))
+              update_digest_stack(hash, part_stack)
+              update_digest_stack(hash, @digest_stack)
+            }
+            rest_size = body_size - start_offset - whole_chunk_count * MEGABYTE
+            if rest_size > 0 || whole_chunk_count == 0
+              @last_chunk_hash = Digest::SHA256.new
+              @last_chunk_length = rest_size
+              @last_chunk_hash.update(body.send(slice, start_offset + whole_chunk_count * MEGABYTE, rest_size))
+              hash = @last_chunk_hash.digest
+              @last_chunk_digest_temp = hash
+              part_temp = hash
+            end
+          }
+          reduce_digest_stack(part_temp, part_stack)
        end
 
        def digest
-
+          reduce_digest_stack(@last_chunk_digest_temp, @digest_stack)
+        end
+
+        def hexdigest
+          digest.unpack('H*').first
        end
      end
 
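The rewritten TreeHash no longer stores one digest per added part (which silently assumed every part except the last was a multiple of 1MB); instead it keeps a digest stack that behaves like a binary counter over 1MB chunks, plus a running SHA-256 for the trailing sub-megabyte remainder, so parts of any size can be appended. For orientation, here is a minimal non-incremental sketch of the tree hash the class computes; the helper name `tree_hexdigest` is illustrative only and is not part of the gem:

```ruby
require 'digest'

MEGABYTE = 1024 * 1024

# Illustrative sketch: hash each 1MB chunk with SHA-256, then repeatedly hash
# adjacent pairs of digests (promoting an odd trailing digest unchanged) until
# a single root digest remains.
def tree_hexdigest(body)
  digests = body.bytes.each_slice(MEGABYTE).map { |chunk| Digest::SHA256.digest(chunk.pack('C*')) }
  digests = [Digest::SHA256.digest('')] if digests.empty? # an empty body still hashes one (empty) chunk
  while digests.length > 1
    digests = digests.each_slice(2).map do |pair|
      pair.length == 2 ? Digest::SHA256.digest(pair[0] + pair[1]) : pair[0]
    end
  end
  digests.first.unpack('H*').first
end
```

The incremental implementation in the diff reaches the same root: update_digest_stack carries a completed 1MB digest up the stack exactly as a binary increment carries bits, and reduce_digest_stack folds whatever is left (including the partial last chunk) into the final digest.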
data/lib/fog/aws/requests/lambda/invoke.rb
CHANGED
@@ -20,8 +20,8 @@ module Fog
          headers['X-Amz-Client-Context'] =
            Base64::encode64(Fog::JSON.encode(client_context))
        end
-        if
-          headers['X-Amz-
+        if invocation_type = params.delete('InvocationType')
+          headers['X-Amz-Invocation-Type'] = invocation_type
        end
        if log_type = params.delete('LogType')
          headers['X-Amz-Log-Type'] = log_type
@@ -51,12 +51,12 @@ module Fog
          raise Fog::AWS::Lambda::Error, message
        end
 
-        client_context
-
-        log_type
-        payload
+        client_context = params.delete('ClientContext')
+        invocation_type = params.delete('InvocationType')
+        log_type = params.delete('LogType')
+        payload = params.delete('Payload')
 
-        if (client_context ||
+        if (client_context || invocation_type || log_type)
          message = "invoke parameters handling are not yet mocked [light_black](#{caller.first})[/]"
          Fog::Logger.warning message
          Fog::Mock.not_implemented
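With this fix (PR \#524) the `'InvocationType'` parameter is forwarded as the `X-Amz-Invocation-Type` header instead of being dropped. A hedged usage sketch, assuming the usual fog-aws service constructor; the credentials and function name are placeholders, not taken from this diff:

```ruby
require 'fog/aws'

# Placeholder credentials and region for illustration.
lambda_service = Fog::AWS::Lambda.new(
  aws_access_key_id:     'AKIA...',
  aws_secret_access_key: 'SECRET',
  region:                'us-east-1'
)

# 'Event' asks Lambda to queue the call asynchronously; the default
# 'RequestResponse' waits for the function's result.
lambda_service.invoke(
  'FunctionName'   => 'my-function',      # placeholder function name
  'InvocationType' => 'Event',
  'Payload'        => { 'hello' => 'world' }
)
```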
data/lib/fog/aws/version.rb
CHANGED
-    VERSION = "3.5.0"
+    VERSION = "3.5.1"
data/tests/requests/glacier/tree_hash_tests.rb
CHANGED
@@ -59,4 +59,33 @@ Shindo.tests('AWS::Glacier | glacier tree hash calcuation', ['aws']) do
 
   end
 
+  # Aligned is used in the general sense of https://en.wikipedia.org/wiki/Data_structure_alignment
+  # except we are not dealing with data in memory, but with parts in the "virtual" space of the whole file.
+  # Tests for https://github.com/fog/fog-aws/issues/520 and https://github.com/fog/fog-aws/issues/521
+  tests('multipart with unaligned parts') do
+    tree_hash = Fog::AWS::Glacier::TreeHash.new
+    part = ('x' * 512*1024)
+    returns(Fog::AWS::Glacier::TreeHash.digest(part)) { tree_hash.add_part part }
+
+    # At this point, we have 0.5MB in tree_hash. That means the next part we add will not be aligned,
+    # because it will start at 0.5MB, which is not a 1MB boundary.
+    part2 = ('x' * 512*1024) + ('y'*1024*1024) + ('z'* 512*1024)
+    returns(Fog::AWS::Glacier::TreeHash.digest(part + part2)) { tree_hash.add_part part2 ; tree_hash.hexdigest }
+
+    # Here we are adding another 1.5MB to tree_hash, which currently holds 2.5MB. Again, 2.5MB is not
+    # a 1MB boundary, so this is another unaligned part. It exercises a different path in the code, though.
+    tree_hash.add_part('z'* 512*1024 + 't'*1024*1024)
+
+    expected = OpenSSL::Digest::SHA256.hexdigest(
+      OpenSSL::Digest::SHA256.digest(
+        OpenSSL::Digest::SHA256.digest('x' * 1024*1024) + OpenSSL::Digest::SHA256.digest('y' * 1024*1024)
+      ) +
+      OpenSSL::Digest::SHA256.digest(
+        OpenSSL::Digest::SHA256.digest('z' * 1024*1024) + OpenSSL::Digest::SHA256.digest('t' * 1024*1024)
+      )
+    )
+    returns(expected) { tree_hash.hexdigest }
+
+  end
+
 end
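The new test pins down the property the comments describe: the tree hash of a body must not depend on how that body is cut into upload parts. A small sketch of the same invariant outside the Shindo framework (assumes fog-aws is loaded; the byte values and split points are arbitrary):

```ruby
require 'fog/aws'

MB = 1024 * 1024
data = 'x' * MB + 'y' * MB + 'z' * MB + 't' * MB # 4MB of arbitrary bytes

aligned   = Fog::AWS::Glacier::TreeHash.new
unaligned = Fog::AWS::Glacier::TreeHash.new

aligned.add_part(data)                  # one 4MB part, trivially aligned
unaligned.add_part(data[0, MB + MB/2])  # 1.5MB part: ends off a 1MB boundary
unaligned.add_part(data[MB + MB/2..-1]) # remaining 2.5MB

# With the fix from #521 both splits yield the same root hash.
aligned.hexdigest == unaligned.hexdigest # => true
```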
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fog-aws
 version: !ruby/object:Gem::Version
-  version: 3.5.0
+  version: 3.5.1
 platform: ruby
 authors:
 - Josh Lane
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-
+date: 2019-06-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler