dockly 2.7.2 → 3.0.0
- data/Gemfile +0 -3
- data/dockly.gemspec +1 -2
- data/lib/dockly.rb +41 -16
- data/lib/dockly/build_cache/base.rb +12 -8
- data/lib/dockly/deb.rb +10 -5
- data/lib/dockly/docker.rb +17 -19
- data/lib/dockly/rake_task.rb +131 -136
- data/lib/dockly/s3_writer.rb +79 -0
- data/lib/dockly/util/git.rb +32 -7
- data/lib/dockly/version.rb +6 -1
- data/spec/dockly/build_cache/base_spec.rb +15 -7
- data/spec/dockly/build_cache/local_spec.rb +1 -1
- data/spec/dockly/deb_spec.rb +13 -9
- data/spec/dockly/docker_spec.rb +5 -3
- data/spec/dockly/history_spec.rb +1 -1
- data/spec/dockly/rpm_spec.rb +13 -9
- data/spec/dockly/s3_writer_spec.rb +139 -0
- data/spec/dockly/util/git_spec.rb +38 -0
- data/spec/spec_helper.rb +0 -4
- metadata +10 -27
- data/lib/dockly/aws.rb +0 -62
- data/lib/dockly/aws/s3_writer.rb +0 -59
- data/spec/dockly/aws/s3_writer_spec.rb +0 -154
- data/spec/dockly/aws_spec.rb +0 -17
data/lib/dockly/s3_writer.rb
ADDED
@@ -0,0 +1,79 @@
+module Dockly
+  class S3Writer
+    include Dockly::Util::Logger::Mixin
+    extend Forwardable
+
+    MAX_BUFFER_SIZE = 5 * 1024 * 1024
+
+    attr_reader :connection, :s3_bucket, :s3_object, :parts, :closed, :buffer
+
+    def_delegators :multipart_upload, :upload_id
+    logger_prefix '[dockly s3writer]'
+
+    def initialize(connection, s3_bucket, s3_object)
+      @connection = connection
+      @s3_bucket = s3_bucket
+      @s3_object = s3_object
+      @parts = []
+      @closed = false
+      @buffer = StringIO.new
+    end
+
+    def upload_buffer
+      num = @parts.length.succ
+      debug "Writing chunk ##{num} to s3://#{s3_bucket}/#{s3_object} with upload id: #{upload_id}"
+      res = connection.upload_part(
+        bucket: s3_bucket,
+        key: s3_object,
+        upload_id: upload_id,
+        part_number: num,
+        body: buffer.tap(&:rewind)
+      )
+      @parts << res.etag
+      @buffer = StringIO.new
+    end
+
+    def write(chunk)
+      @buffer.write(chunk)
+      upload_buffer if buffer.size > MAX_BUFFER_SIZE
+      chunk.length
+    end
+
+    def close
+      return if @closed
+      upload_buffer unless buffer.size.zero?
+      connection.complete_multipart_upload(
+        bucket: s3_bucket,
+        key: s3_object,
+        upload_id: upload_id,
+        multipart_upload: {
+          parts: @parts.each_with_index.map do |part, idx|
+            {
+              etag: part,
+              part_number: idx.succ
+            }
+          end
+        }
+      )
+      @closed = true
+    end
+
+    def abort_upload
+      connection.abort_multipart_upload(
+        bucket: s3_bucket,
+        key: s3_object,
+        upload_id: upload_id
+      )
+    end
+
+    def abort_unless_closed
+      abort_upload unless @closed
+      @closed = true
+    end
+
+    def multipart_upload
+      @multipart_upload ||=
+        connection.create_multipart_upload(bucket: s3_bucket, key: s3_object)
+    end
+  end
+end
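For context, a minimal usage sketch of the new S3Writer (illustrative, not part of the diff). It assumes Dockly.s3 returns an aws-sdk v2 Aws::S3::Client, as the specs below stub it; the bucket, key, and file names are placeholders.

    require 'dockly'

    # Hypothetical names; Dockly.s3 is assumed to be an Aws::S3::Client.
    writer = Dockly::S3Writer.new(Dockly.s3, 'my-bucket', 'exports/image.tar')
    begin
      File.open('image.tar', 'rb') do |file|
        # Each write lands in an in-memory buffer; once it passes
        # MAX_BUFFER_SIZE (5 MiB, the S3 minimum part size), upload_buffer
        # ships it as one part of the multipart upload.
        writer.write(file.read(1024 * 1024)) until file.eof?
      end
      writer.close               # flushes the final part, completes the upload
    rescue StandardError
      writer.abort_unless_closed # tells S3 to discard any uploaded parts
      raise
    end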
data/lib/dockly/util/git.rb
CHANGED
@@ -1,13 +1,38 @@
-require 'grit'
-
 module Dockly::Util::Git
-
+  module_function
+
+  def repo
+    @repo ||= Rugged::Repository.discover('.')
+  end
+
+  def sha
+    return @sha if @sha
+    @sha = repo.head.target.oid[0..6]
+  rescue
+    @sha = 'unknown'
+  end
 
-  def
-
+  def ls_files(oid)
+    target = repo.lookup(oid)
+    target = target.target until target.type == :commit
+    ary = []
+    target.tree.walk(:postorder) do |root, entry|
+      next unless entry[:type] == :blob
+      name = File.join(root, entry[:name]).gsub(/\A\//, '')
+      ary << entry.merge(name: name)
+    end
+    ary
   end
 
-  def
-
+  def archive(oid, prefix, output)
+    Gem::Package::TarWriter.new(output) do |tar|
+      ls_files(oid).each do |blob|
+        name, mode = blob.values_at(:name, :filemode)
+        prefixed = File.join(prefix, name)
+        tar.add_file(prefixed, mode) do |tar_out|
+          tar_out.write(File.read(name))
+        end
+      end
+    end
   end
 end
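A short sketch of the rewritten helpers, which swap Grit for Rugged (illustrative, not from the diff). Since archive reads blob contents with File.read(name), it is assumed to run from the repository root, and the 'app' prefix here is a placeholder.

    require 'stringio'
    require 'rubygems/package' # Gem::Package::TarWriter
    require 'dockly/util/git'

    puts Dockly::Util::Git.sha # abbreviated HEAD sha, 'unknown' outside a repo

    # Tar up HEAD with every entry prefixed by 'app/'; output can be any IO.
    tarball = StringIO.new
    head = Dockly::Util::Git.repo.head.target.oid
    Dockly::Util::Git.archive(head, 'app', tarball)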
data/spec/dockly/build_cache/base_spec.rb
CHANGED
@@ -13,15 +13,19 @@ describe Dockly::BuildCache::Base do
 
   describe '#up_to_date?' do
     context 'when the object exists in s3' do
-      before
+      before do
+        allow(subject.connection)
+          .to receive(:head_object)
+      end
 
       its(:up_to_date?) { should be_true }
     end
 
     context 'when the object does not exist in s3' do
      before do
-        subject.connection
-
+        allow(subject.connection)
+          .to receive(:head_object)
+          .and_raise(Aws::S3::Errors::NoSuchKey.new('Some Error', 500))
      end
 
      its(:up_to_date?) { should be_false }
@@ -33,8 +37,12 @@ describe Dockly::BuildCache::Base do
    let(:object) { double(:object) }
 
    before do
-      subject.connection
-
+      allow(subject.connection)
+        .to receive(:get_object)
+        .and_return(object)
+      allow(object)
+        .to receive(:body)
+        .and_return(StringIO.new('hey dad').tap(&:rewind))
    end
 
    after do
@@ -50,8 +58,8 @@ describe Dockly::BuildCache::Base do
 
  describe '#s3_object' do
    before do
-      subject.
-      subject.
+      allow(subject).to receive(:s3_object_prefix).and_return('lol')
+      allow(subject).to receive(:hash_output).and_return('lel')
    end
 
    context "without an arch_output" do
data/spec/dockly/deb_spec.rb
CHANGED
@@ -191,7 +191,9 @@ describe Dockly::Deb do
 
    context 'when the object does exist' do
      before do
-        Dockly
+        allow(Dockly.s3)
+          .to receive(:head_object)
+          .and_return({})
      end
 
      it 'is true' do
@@ -201,7 +203,9 @@ describe Dockly::Deb do
 
    context 'when the object does not exist' do
      before do
-        Dockly
+        allow(Dockly.s3)
+          .to receive(:head_object)
+          .and_raise(StandardError.new('object does not exist'))
      end
 
      it 'is true' do
@@ -224,7 +228,7 @@ describe Dockly::Deb do
 
    context 'when the s3_bucket is nil' do
      it 'does nothing' do
-        Dockly
+        expect(Dockly).to_not receive(:s3)
        subject.upload_to_s3
      end
    end
@@ -244,14 +248,14 @@ describe Dockly::Deb do
    context 'when the package has been created' do
      before { subject.create_package! }
 
-      it 'creates the s3 bucket' do
-        subject.upload_to_s3
-        Dockly::AWS.s3.get_bucket(bucket_name).body.should_not be_nil
-      end
-
      it 'inserts the deb package into that bucket' do
+        expect(Dockly.s3).to receive(:put_object) do |hash|
+          expect(hash[:bucket]).to eq(bucket_name)
+          expect(hash[:key]).to eq(subject.s3_object_name)
+          expect(hash).to have_key(:body)
+        end
+
        subject.upload_to_s3
-        Dockly::AWS.s3.get_bucket(bucket_name, subject.s3_object_name).body.should_not be_nil
      end
    end
  end
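The deb, rpm, and build-cache hunks all follow the same migration: Fog-era round-trips against a fake S3 (Dockly::AWS.s3.get_bucket) become RSpec 3 message expectations on the aws-sdk v2 client. A minimal sketch of that pattern, with illustrative bucket and key names:

    # Stub a missing object, as base_spec.rb does above...
    allow(Dockly.s3)
      .to receive(:head_object)
      .and_raise(Aws::S3::Errors::NoSuchKey.new('Some Error', 500))

    # ...and assert an upload without touching the network.
    expect(Dockly.s3).to receive(:put_object) do |hash|
      expect(hash[:bucket]).to eq('test-bucket')
    end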
data/spec/dockly/docker_spec.rb
CHANGED
@@ -135,8 +135,10 @@ describe Dockly::Docker do
    let(:data) { 'sweet, sweet data' }
 
    before do
-
-
+      allow(Dockly.s3)
+        .to receive(:get_object)
+        .with(bucket: 'bucket', key: 'object')
+        .and_yield(data)
    end
 
    it 'pulls the file from S3' do
@@ -182,7 +184,7 @@ describe Dockly::Docker do
  context "with an S3 export" do
    let(:export) { double(:export) }
    before do
-      expect(Dockly::
+      expect(Dockly::S3Writer).to receive(:new).and_return(export)
      expect(export).to receive(:write).once
      expect(export).to receive(:close).once
      subject.s3_bucket "test-bucket"
data/spec/dockly/history_spec.rb
CHANGED
@@ -68,7 +68,7 @@ describe Dockly::History do
      expect(files).to be_a(Array)
      expect(files).to include('dockly.gemspec')
      expect(files).to include('lib/dockly.rb')
-      expect(files).to include('spec/dockly/
+      expect(files).to include('spec/dockly/s3_writer_spec.rb')
    end
  end
 
data/spec/dockly/rpm_spec.rb
CHANGED
@@ -178,7 +178,9 @@ describe Dockly::Rpm do
 
    context 'when the object does exist' do
      before do
-        Dockly
+        allow(Dockly.s3)
+          .to receive(:head_object)
+          .and_return({})
      end
 
      it 'is true' do
@@ -188,7 +190,9 @@ describe Dockly::Rpm do
 
    context 'when the object does not exist' do
      before do
-        Dockly
+        allow(Dockly.s3)
+          .to receive(:head_object)
+          .and_raise(StandardError.new('object does not exist'))
      end
 
      it 'is true' do
@@ -211,7 +215,7 @@ describe Dockly::Rpm do
 
    context 'when the s3_bucket is nil' do
      it 'does nothing' do
-        Dockly
+        expect(Dockly).to_not receive(:s3)
        subject.upload_to_s3
      end
    end
@@ -231,14 +235,14 @@ describe Dockly::Rpm do
    context 'when the package has been created' do
      before { subject.create_package! }
 
-      it 'creates the s3 bucket' do
-        subject.upload_to_s3
-        Dockly::AWS.s3.get_bucket(bucket_name).body.should_not be_nil
-      end
-
      it 'inserts the rpm package into that bucket' do
+        expect(Dockly.s3).to receive(:put_object) do |hash|
+          expect(hash[:bucket]).to eq(bucket_name)
+          expect(hash[:key]).to eq(subject.s3_object_name)
+          expect(hash).to have_key(:body)
+        end
+
        subject.upload_to_s3
-        Dockly::AWS.s3.get_bucket(bucket_name, subject.s3_object_name).body.should_not be_nil
      end
    end
  end
data/spec/dockly/s3_writer_spec.rb
ADDED
@@ -0,0 +1,139 @@
+require 'spec_helper'
+
+describe Dockly::S3Writer do
+  let(:connection) { double(:connection) }
+  let(:bucket) { 'test_bucket' }
+  let(:object) { 'object_name.tar' }
+  let(:multipart_upload) { double(:multipart_upload, upload_id: upload_id) }
+  let(:upload_id) { 'test_id' }
+
+  before do
+    allow(subject)
+      .to receive(:multipart_upload)
+      .and_return(multipart_upload)
+  end
+
+  subject { described_class.new(connection, bucket, object) }
+
+  describe '.new' do
+    it 'sets the connection, s3_bucket, s3_object, and upload_id' do
+      expect(subject.connection).to eq(connection)
+      expect(subject.s3_bucket).to eq(bucket)
+      expect(subject.s3_object).to eq(object)
+    end
+  end
+
+  describe '#upload_id' do
+    it 'delegates to the multipart_upload' do
+      expect(subject.upload_id).to eq(multipart_upload.upload_id)
+    end
+  end
+
+  describe '#upload_buffer' do
+    let(:input) { 'Some String' }
+    let(:io) { StringIO.new(input) }
+    let(:upload_response) { double(:upload_response, etag: etag) }
+    let(:etag) { 'test' }
+
+    before do
+      subject.instance_variable_set(:@buffer, io)
+
+      allow(connection)
+        .to receive(:upload_part)
+        .with(bucket: bucket, key: object, upload_id: upload_id, part_number: 1, body: io)
+        .and_return(upload_response)
+    end
+
+    it 'uploads to S3' do
+      expect { subject.upload_buffer }
+        .to change { subject.parts.last }
+        .to(etag)
+    end
+
+    it 'clears the buffer' do
+      expect { subject.upload_buffer }
+        .to change { subject.buffer.tap(&:rewind).string }
+        .from(input)
+        .to('')
+    end
+  end
+
+  describe '#write' do
+    let(:message) { 'a' * chunk_length }
+
+    context 'with a buffer of less than 5 MB' do
+      let(:chunk_length) { 100 }
+
+      it 'adds it to the buffer and returns the chunk length' do
+        expect(subject).to_not receive(:upload_buffer)
+        expect(subject.write(message)).to eq(chunk_length)
+        expect(subject.buffer.tap(&:rewind).string).to eq(message)
+      end
+    end
+
+    context 'with a buffer of greater than 5 MB' do
+      let(:chunk_length) { 1 + 5 * 1024 * 1024 }
+
+      it 'adds it to the buffer, writes to S3 and returns the chunk length' do
+        expect(subject).to receive(:upload_buffer)
+        expect(subject.write(message)).to eq(chunk_length)
+      end
+    end
+  end
+
+  describe '#close' do
+    let(:complete_response) { double(:complete_response) }
+
+    before do
+      allow(connection)
+        .to receive(:complete_multipart_upload)
+        .with(bucket: bucket, key: object, upload_id: upload_id, multipart_upload: { parts: [] })
+        .and_return(complete_response)
+    end
+
+    context 'when it passes' do
+      context 'when the buffer is not empty' do
+        before { subject.instance_variable_set(:@buffer, StringIO.new('text')) }
+
+        it 'uploads the rest of the buffer and closes the connection' do
+          expect(subject).to receive(:upload_buffer)
+          expect(subject.close).to be_true
+        end
+      end
+
+      context 'when the buffer is empty' do
+        it 'closes the connection' do
+          expect(subject).to_not receive(:upload_buffer)
+          expect(subject.close).to be_true
+        end
+      end
+    end
+  end
+
+  describe '#abort_upload' do
+    it 'aborts the upload' do
+      expect(connection)
+        .to receive(:abort_multipart_upload)
+        .with(bucket: bucket, key: object, upload_id: upload_id)
+      subject.abort_upload
+    end
+  end
+
+  describe '#abort_unless_closed' do
+    context 'when the upload is closed' do
+      before { subject.instance_variable_set(:@closed, true) }
+
+      it 'does not abort' do
+        expect(subject).to_not receive(:abort_upload)
+        subject.abort_unless_closed
+      end
+    end
+
+    context 'when the upload is open' do
+      it 'aborts the upload' do
+        expect(subject).to receive(:abort_upload)
+        subject.abort_unless_closed
+      end
+    end
+  end
+end