dockly 2.7.2 → 3.0.0

@@ -0,0 +1,79 @@
+ module Dockly
+   class S3Writer
+     include Dockly::Util::Logger::Mixin
+     extend Forwardable
+
+     MAX_BUFFER_SIZE = 5 * 1024 * 1024
+
+     attr_reader :connection, :s3_bucket, :s3_object, :parts, :closed, :buffer
+
+     def_delegators :multipart_upload, :upload_id
+     logger_prefix '[dockly s3writer]'
+
+     def initialize(connection, s3_bucket, s3_object)
+       @connection = connection
+       @s3_bucket = s3_bucket
+       @s3_object = s3_object
+       @parts = []
+       @closed = false
+       @buffer = StringIO.new
+     end
+
+     def upload_buffer
+       num = @parts.length.succ
+       debug "Writing chunk ##{num} to s3://#{s3_bucket}/#{s3_object} with upload id: #{upload_id}"
+       res = connection.upload_part(
+         bucket: s3_bucket,
+         key: s3_object,
+         upload_id: upload_id,
+         part_number: num,
+         body: buffer.tap(&:rewind)
+       )
+       @parts << res.etag
+       @buffer = StringIO.new
+     end
+
+     def write(chunk)
+       @buffer.write(chunk)
+       upload_buffer if buffer.size > MAX_BUFFER_SIZE
+       chunk.length
+     end
+
+     def close
+       return if @closed
+       upload_buffer unless buffer.size.zero?
+       connection.complete_multipart_upload(
+         bucket: s3_bucket,
+         key: s3_object,
+         upload_id: upload_id,
+         multipart_upload: {
+           parts: @parts.each_with_index.map do |part, idx|
+             {
+               etag: part,
+               part_number: idx.succ
+             }
+           end
+         }
+       )
+       @closed = true
+     end
+
+     def abort_upload
+       connection.abort_multipart_upload(
+         bucket: s3_bucket,
+         key: s3_object,
+         upload_id: upload_id
+       )
+     end
+
+     def abort_unless_closed
+       abort_upload unless @closed
+       @closed = true
+     end
+
+     def multipart_upload
+       @multipart_upload ||=
+         connection.create_multipart_upload(bucket: s3_bucket, key: s3_object)
+     end
+   end
+ end
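
The new Dockly::S3Writer streams data to S3 as a multipart upload instead of holding a whole artifact in memory; MAX_BUFFER_SIZE matches S3's 5 MB minimum part size (every part except the last must be at least that large). A minimal usage sketch, assuming an AWS SDK S3 client (the client, bucket, key, and file paths below are illustrative placeholders, not part of dockly's API):

    # Hypothetical example; client, bucket, and key are placeholders.
    require 'aws-sdk' # assumed: dockly 3.0.0 moves from Excon/fog to the aws-sdk gem

    client = Aws::S3::Client.new(region: 'us-east-1')
    writer = Dockly::S3Writer.new(client, 'my-artifacts', 'exports/image.tar')

    begin
      File.open('image.tar', 'rb') do |file|
        # Each write buffers in memory; a part is flushed to S3 whenever
        # the buffer exceeds MAX_BUFFER_SIZE.
        writer.write(file.read(1024 * 1024)) until file.eof?
      end
      writer.close               # uploads any remaining buffer and completes the multipart upload
    rescue StandardError
      writer.abort_unless_closed # abandons the multipart upload if it was never completed
      raise
    end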
@@ -1,13 +1,38 @@
- require 'grit'
-
  module Dockly::Util::Git
-   extend self
+   module_function
+
+   def repo
+     @repo ||= Rugged::Repository.discover('.')
+   end
+
+   def sha
+     return @sha if @sha
+     @sha = repo.head.target.oid[0..6]
+   rescue
+     @sha = 'unknown'
+   end

-   def git_repo
-     @git_repo ||= Grit::Repo.new('.')
+   def ls_files(oid)
+     target = repo.lookup(oid)
+     target = target.target until target.type == :commit
+     ary = []
+     target.tree.walk(:postorder) do |root, entry|
+       next unless entry[:type] == :blob
+       name = File.join(root, entry[:name]).gsub(/\A\//, '')
+       ary << entry.merge(name: name)
+     end
+     ary
    end

-   def git_sha
-     @git_sha ||= git_repo.git.show.lines.first.chomp.match(/^commit ([a-f0-9]+)$/)[1][0..6] rescue 'unknown'
+   def archive(oid, prefix, output)
+     Gem::Package::TarWriter.new(output) do |tar|
+       ls_files(oid).each do |blob|
+         name, mode = blob.values_at(:name, :filemode)
+         prefixed = File.join(prefix, name)
+         tar.add_file(prefixed, mode) do |tar_out|
+           tar_out.write(File.read(name))
+         end
+       end
+     end
    end
  end
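
The Grit-based helpers are replaced with Rugged plus Gem::Package::TarWriter. A short sketch of how the new helpers fit together, assuming the process runs inside the repository being archived (the output path and the 'app' prefix are illustrative):

    # Hypothetical usage of the Rugged-based helpers above.
    oid = Dockly::Util::Git.repo.head.target.oid   # full OID of HEAD

    File.open('/tmp/dockly-export.tar', 'wb') do |output|
      # Walks the commit's tree and tars every blob under an "app/" prefix.
      Dockly::Util::Git.archive(oid, 'app', output)
    end

    Dockly::Util::Git.sha # => abbreviated commit SHA, or 'unknown' outside a repository

Note that archive reads file contents from the working directory (File.read(name)), so the tree at the given OID is expected to match the checked-out files.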
@@ -1,3 +1,8 @@
  module Dockly
-   VERSION = '2.7.2'
+   MAJOR = 3
+   MINOR = 0
+   PATCH = 0
+   RELEASE = nil
+
+   VERSION = [MAJOR, MINOR, PATCH, RELEASE].compact.join('.')
  end
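
With RELEASE set to nil, the nil entry is dropped by #compact, so the joined string comes out as plain "3.0.0"; a pre-release value would simply be appended:

    [3, 0, 0, nil].compact.join('.')    # => "3.0.0"
    [3, 0, 0, 'rc1'].compact.join('.')  # => "3.0.0.rc1" (hypothetical pre-release)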
@@ -13,15 +13,19 @@ describe Dockly::BuildCache::Base do

    describe '#up_to_date?' do
      context 'when the object exists in s3' do
-       before { subject.connection.stub(:head_object) }
+       before do
+         allow(subject.connection)
+           .to receive(:head_object)
+       end

        its(:up_to_date?) { should be_true }
      end

      context 'when the object does not exist in s3' do
        before do
-         subject.connection.stub(:head_object)
-           .and_raise(Excon::Errors::NotFound.new('help'))
+         allow(subject.connection)
+           .to receive(:head_object)
+           .and_raise(Aws::S3::Errors::NoSuchKey.new('Some Error', 500))
        end

        its(:up_to_date?) { should be_false }
@@ -33,8 +37,12 @@ describe Dockly::BuildCache::Base do
      let(:object) { double(:object) }

      before do
-       subject.connection.stub(:get_object).and_return object
-       object.stub(:body).and_return 'hey dad'
+       allow(subject.connection)
+         .to receive(:get_object)
+         .and_return(object)
+       allow(object)
+         .to receive(:body)
+         .and_return(StringIO.new('hey dad').tap(&:rewind))
      end

      after do
@@ -50,8 +58,8 @@ describe Dockly::BuildCache::Base do

    describe '#s3_object' do
      before do
-       subject.stub(:s3_object_prefix) { 'lol' }
-       subject.stub(:hash_output) { 'lel' }
+       allow(subject).to receive(:s3_object_prefix).and_return('lol')
+       allow(subject).to receive(:hash_output).and_return('lel')
      end

      context "without an arch_output" do
@@ -83,7 +83,7 @@ describe Dockly::BuildCache::Local do

    describe '#hash_output' do
      let(:output) {
-       "5ebe2fbf321fa1008833574649e986a3"
+       "f683463a09482287c33959ab71a87189"
      }

      context "when hash command returns successfully" do
@@ -191,7 +191,9 @@ describe Dockly::Deb do

      context 'when the object does exist' do
        before do
-         Dockly::AWS.s3.stub(:head_object).and_return {}
+         allow(Dockly.s3)
+           .to receive(:head_object)
+           .and_return({})
        end

        it 'is true' do
@@ -201,7 +203,9 @@ describe Dockly::Deb do

      context 'when the object does not exist' do
        before do
-         Dockly::AWS.s3.stub(:head_object).and_raise(Excon::Errors::NotFound.new "NotFound")
+         allow(Dockly.s3)
+           .to receive(:head_object)
+           .and_raise(StandardError.new('object does not exist'))
        end

        it 'is true' do
@@ -224,7 +228,7 @@ describe Dockly::Deb do

      context 'when the s3_bucket is nil' do
        it 'does nothing' do
-         Dockly::AWS.should_not_receive(:s3)
+         expect(Dockly).to_not receive(:s3)
          subject.upload_to_s3
        end
      end
@@ -244,14 +248,14 @@ describe Dockly::Deb do
      context 'when the package has been created' do
        before { subject.create_package! }

-       it 'creates the s3 bucket' do
-         subject.upload_to_s3
-         Dockly::AWS.s3.get_bucket(bucket_name).body.should_not be_nil
-       end
-
        it 'inserts the deb package into that bucket' do
+         expect(Dockly.s3).to receive(:put_object) do |hash|
+           expect(hash[:bucket]).to eq(bucket_name)
+           expect(hash[:key]).to eq(subject.s3_object_name)
+           expect(hash).to have_key(:body)
+         end
+
          subject.upload_to_s3
-         Dockly::AWS.s3.get_bucket(bucket_name, subject.s3_object_name).body.should_not be_nil
        end
      end
    end
@@ -135,8 +135,10 @@ describe Dockly::Docker do
        let(:data) { 'sweet, sweet data' }

        before do
-         subject.send(:connection).put_bucket('bucket')
-         subject.send(:connection).put_object('bucket', 'object', data)
+         allow(Dockly.s3)
+           .to receive(:get_object)
+           .with(bucket: 'bucket', key: 'object')
+           .and_yield(data)
        end

        it 'pulls the file from S3' do
@@ -182,7 +184,7 @@ describe Dockly::Docker do
    context "with an S3 export" do
      let(:export) { double(:export) }
      before do
-       expect(Dockly::AWS::S3Writer).to receive(:new).and_return(export)
+       expect(Dockly::S3Writer).to receive(:new).and_return(export)
        expect(export).to receive(:write).once
        expect(export).to receive(:close).once
        subject.s3_bucket "test-bucket"
@@ -68,7 +68,7 @@ describe Dockly::History do
      expect(files).to be_a(Array)
      expect(files).to include('dockly.gemspec')
      expect(files).to include('lib/dockly.rb')
-     expect(files).to include('spec/dockly/aws/s3_writer_spec.rb')
+     expect(files).to include('spec/dockly/s3_writer_spec.rb')
    end
  end

@@ -178,7 +178,9 @@ describe Dockly::Rpm do

      context 'when the object does exist' do
        before do
-         Dockly::AWS.s3.stub(:head_object).and_return {}
+         allow(Dockly.s3)
+           .to receive(:head_object)
+           .and_return({})
        end

        it 'is true' do
@@ -188,7 +190,9 @@ describe Dockly::Rpm do

      context 'when the object does not exist' do
        before do
-         Dockly::AWS.s3.stub(:head_object).and_raise(Excon::Errors::NotFound.new "NotFound")
+         allow(Dockly.s3)
+           .to receive(:head_object)
+           .and_raise(StandardError.new('object does not exist'))
        end

        it 'is true' do
@@ -211,7 +215,7 @@ describe Dockly::Rpm do

      context 'when the s3_bucket is nil' do
        it 'does nothing' do
-         Dockly::AWS.should_not_receive(:s3)
+         expect(Dockly).to_not receive(:s3)
          subject.upload_to_s3
        end
      end
@@ -231,14 +235,14 @@ describe Dockly::Rpm do
      context 'when the package has been created' do
        before { subject.create_package! }

-       it 'creates the s3 bucket' do
-         subject.upload_to_s3
-         Dockly::AWS.s3.get_bucket(bucket_name).body.should_not be_nil
-       end
-
        it 'inserts the rpm package into that bucket' do
+         expect(Dockly.s3).to receive(:put_object) do |hash|
+           expect(hash[:bucket]).to eq(bucket_name)
+           expect(hash[:key]).to eq(subject.s3_object_name)
+           expect(hash).to have_key(:body)
+         end
+
          subject.upload_to_s3
-         Dockly::AWS.s3.get_bucket(bucket_name, subject.s3_object_name).body.should_not be_nil
        end
      end
    end
@@ -0,0 +1,139 @@
+ require 'spec_helper'
+
+ describe Dockly::S3Writer do
+   let(:connection) { double(:connection) }
+   let(:bucket) { 'test_bucket' }
+   let(:object) { 'object_name.tar' }
+   let(:multipart_upload) { double(:multipart_upload, upload_id: upload_id) }
+   let(:upload_id) { 'test_id' }
+
+   before do
+     allow(subject)
+       .to receive(:multipart_upload)
+       .and_return(multipart_upload)
+   end
+
+   subject { described_class.new(connection, bucket, object) }
+
+   describe '.new' do
+     it 'sets the connection, s3_bucket, s3_object, and upload_id' do
+       expect(subject.connection).to eq(connection)
+       expect(subject.s3_bucket).to eq(bucket)
+       expect(subject.s3_object).to eq(object)
+     end
+   end
+
+   describe '#upload_id' do
+     it 'delegates to the multipart_upload' do
+       expect(subject.upload_id).to eq(multipart_upload.upload_id)
+     end
+   end
+
+   describe '#upload_buffer' do
+     let(:input) { 'Some String' }
+     let(:io) { StringIO.new(input) }
+     let(:upload_response) { double(:upload_response, etag: etag) }
+     let(:etag) { 'test' }
+
+     before do
+       subject.instance_variable_set(:@buffer, io)
+
+       allow(connection)
+         .to receive(:upload_part)
+         .with(bucket: bucket, key: object, upload_id: upload_id, part_number: 1, body: io)
+         .and_return(upload_response)
+     end
+
+     it 'uploads to S3' do
+       expect { subject.upload_buffer }
+         .to change { subject.parts.last }
+         .to(etag)
+     end
+
+     it 'clears the buffer' do
+       expect { subject.upload_buffer }
+         .to change { subject.buffer.tap(&:rewind).string }
+         .from(input)
+         .to('')
+     end
+   end
+
+   describe '#write' do
+     let(:message) { 'a' * chunk_length }
+
+     context 'with a buffer of less than 5 MB' do
+       let(:chunk_length) { 100 }
+
+       it 'adds it to the buffer and returns the chunk length' do
+         expect(subject).to_not receive(:upload_buffer)
+         expect(subject.write(message)).to eq(chunk_length)
+         expect(subject.buffer.tap(&:rewind).string).to eq(message)
+       end
+     end
+
+     context 'with a buffer of greater than 5 MB' do
+       let(:chunk_length) { 1 + 5 * 1024 * 1024 }
+
+       it 'adds it to the buffer, writes to S3 and returns the chunk length' do
+         expect(subject).to receive(:upload_buffer)
+         expect(subject.write(message)).to eq(chunk_length)
+       end
+     end
+   end
+
+   describe '#close' do
+     let(:complete_response) { double(:complete_response) }
+
+     before do
+       allow(connection)
+         .to receive(:complete_multipart_upload)
+         .with(bucket: bucket, key: object, upload_id: upload_id, multipart_upload: { parts: [] })
+         .and_return(complete_response)
+     end
+
+     context 'when it passes' do
+       context 'when the buffer is not empty' do
+         before { subject.instance_variable_set(:@buffer, StringIO.new('text')) }
+
+         it 'uploads the rest of the buffer and closes the connection' do
+           expect(subject).to receive(:upload_buffer)
+           expect(subject.close).to be_true
+         end
+       end
+
+       context 'when the buffer is empty' do
+         it 'closes the connection' do
+           expect(subject).to_not receive(:upload_buffer)
+           expect(subject.close).to be_true
+         end
+       end
+     end
+   end
+
+   describe '#abort_upload' do
+     it 'aborts the upload' do
+       expect(connection)
+         .to receive(:abort_multipart_upload)
+         .with(bucket: bucket, key: object, upload_id: upload_id)
+       subject.abort_upload
+     end
+   end
+
+   describe '#abort_unless_closed' do
+     context 'when the upload is closed' do
+       before { subject.instance_variable_set(:@closed, true) }
+
+       it 'does not abort' do
+         expect(subject).to_not receive(:abort_upload)
+         subject.abort_unless_closed
+       end
+     end
+
+     context 'when the upload is open' do
+       it 'aborts the upload' do
+         expect(subject).to receive(:abort_upload)
+         subject.abort_unless_closed
+       end
+     end
+   end
+ end