dockly 1.5.8 → 1.5.9

@@ -7,7 +7,7 @@ class Dockly::Docker::Registry
   logger_prefix '[dockly docker registry]'
 
   dsl_attribute :name, :server_address, :email, :username, :password,
-                :authentication_required
+                :authentication_required, :auth_config_file
 
   default_value :server_address, DEFAULT_SERVER_ADDRESS
   default_value :authentication_required, true
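
The new auth_config_file attribute rides alongside the existing credential attributes. A minimal sketch of setting it from the DSL (the surrounding docker block and the attribute's effect on where credentials are written are assumptions, not shown in this diff):

    # Hypothetical dockly DSL usage; every attribute below is declared by
    # the dsl_attribute call above, but the nesting is assumed.
    docker do
      registry do
        username 'user'
        email 'user@example.com'
        password 'secret'
        auth_config_file '/root/.dockercfg'  # assumed destination for the auth config
      end
    end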
@@ -1,8 +1,6 @@
 require 'rake'
 require 'dockly'
 
-$rake_task_logger = Dockly::Util::Logger.new('[dockly rake_task]', STDOUT, false)
-
 class Rake::DebTask < Rake::Task
   def needed?
     raise "Package does not exist" if package.nil?
@@ -14,10 +12,25 @@ class Rake::DebTask < Rake::Task
   end
 end
 
+class Rake::DockerTask < Rake::Task
+  def needed?
+    raise "Docker does not exist" if docker.nil?
+    !docker.exists?
+  end
+
+  def docker
+    Dockly::Docker[name.split(':').last.to_sym]
+  end
+end
+
 module Rake::DSL
   def deb(*args, &block)
     Rake::DebTask.define_task(*args, &block)
   end
+
+  def docker(*args, &block)
+    Rake::DockerTask.define_task(*args, &block)
+  end
 end
 
 namespace :dockly do
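
Rake consults a task's needed? hook before running it, so a DockerTask becomes a no-op once its image already exists. A sketch of the new Rakefile-level DSL method (the task name is hypothetical):

    # Hypothetical Rakefile usage of the docker DSL method defined above.
    # Rake skips the block when DockerTask#needed? returns false, i.e. when
    # Dockly::Docker[:my_image] reports exists? as true.
    docker 'my_image' do
      puts 'building my_image'
    end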
@@ -33,4 +46,20 @@ namespace :dockly do
       end
     end
   end
+
+  namespace :docker do
+    Dockly.dockers.values.each do |inst|
+      docker inst.name => 'dockly:load' do
+        Thread.current[:rake_task] = inst.name
+        inst.generate!
+      end
+
+      namespace :noexport do
+        task inst.name => 'dockly:load' do
+          Thread.current[:rake_task] = inst.name
+          inst.generate_build
+        end
+      end
+    end
+  end
 end
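
Taken together, each Dockly docker instance now gets a pair of tasks under the dockly namespace. Invocation might look like this (the :my_app name is hypothetical):

    # Hypothetical invocation from Ruby; `rake dockly:docker:my_app` on the
    # command line is equivalent.
    Rake::Task['dockly:docker:my_app'].invoke           # generate! builds and exports
    Rake::Task['dockly:docker:noexport:my_app'].invoke  # generate_build skips the export step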
@@ -0,0 +1,150 @@
+class Dockly::TarDiff
+  include Dockly::Util::Logger::Mixin
+
+  # Tar header format for a ustar tar
+  HEADER_UNPACK_FORMAT = "Z100A8A8A8A12A12A8aZ100A6A2Z32Z32A8A8Z155"
+  PAX_FILE_FORMAT_REGEX = /\d+ path=(.*)/
+
+  logger_prefix '[dockly tar_diff]'
+
+  attr_reader :base, :output, :target, :base_enum, :target_enum
+
+  def initialize(base, target, output)
+    @base, @target, @output = base, target, output
+
+    @base_enum = to_enum(:read_header, base)
+    @target_enum = to_enum(:read_header, target)
+  end
+
+  def write_tar_section(header, data, remainder)
+    output.write(header)
+    output.write(data)
+    output.write("\0" * remainder)
+  end
+
+  def quick_write(size)
+    while size > 0
+      bread = target.read([size, 4096].min)
+      output.write(bread)
+      size -= bread.to_s.size
+    end
+  end
+
+  def read_header(io)
+    loop do
+      return if io.eof?
+      # Tar header is 512 bytes large
+      data = io.read(512)
+      fields = data.unpack(HEADER_UNPACK_FORMAT)
+      name = fields[0]
+      size = fields[4].oct
+      mtime = fields[5].oct
+      typeflag = fields[7]
+      prefix = fields[15]
+
+      empty = (data == "\0" * 512)
+      remainder = (512 - (size % 512)) % 512
+
+      yield data, name, prefix, mtime, typeflag, size, remainder, empty
+
+      io.read(remainder)
+    end
+  end
+
+  def process
+    debug "Started processing tar diff"
+    target_data = nil
+    base_data = nil
+    loop do
+      begin
+
+        target_header, target_name, \
+          target_prefix, target_mtime, \
+          target_typeflag, \
+          target_size, target_remainder, \
+          target_empty = target_enum.peek
+      rescue StopIteration
+        debug "Finished target file"
+        break
+      end
+
+      if target_empty
+        debug "End of target file/Empty"
+        break
+      end
+
+      begin
+        _, base_name, base_prefix, base_mtime, base_typeflag, base_size, _, base_empty = base_enum.peek
+      rescue StopIteration
+        target_data ||= target.read(target_size)
+        write_tar_section(target_header, target_data, target_remainder)
+        target_data = nil
+        target_enum.next
+        next
+      end
+
+      if base_empty
+        target_data ||= target.read(target_size)
+        write_tar_section(target_header, target_data, target_remainder)
+        target_data = nil
+        target_enum.next
+        next
+      end
+
+      target_full_name = File.join(target_prefix, target_name)
+      base_full_name = File.join(base_prefix, base_name)
+
+      target_full_name = target_full_name[1..-1] if target_full_name[0] == '/'
+      base_full_name = base_full_name[1..-1] if base_full_name[0] == '/'
+
+      if target_typeflag == 'x'
+        target_file = File.basename(target_full_name)
+        target_dir = File.dirname(File.dirname(target_full_name))
+        target_full_name = File.join(target_dir, target_file)
+      end
+
+      if base_typeflag == 'x'
+        base_file = File.basename(base_full_name)
+        base_dir = File.dirname(File.dirname(base_full_name))
+        base_full_name = File.join(base_dir, base_file)
+      end
+
+      # Remove the PaxHeader.PID from the file
+      # Format: /base/directory/PaxHeader.1234/file.ext
+      # After:  /base/directory/file.ext
+      if (target_typeflag == 'x' && base_typeflag == 'x')
+        target_data = target.read(target_size)
+        base_data = base.read(base_size)
+
+        if (target_match = target_data.match(PAX_FILE_FORMAT_REGEX)) &&
+           (base_match = base_data.match(PAX_FILE_FORMAT_REGEX))
+          target_full_name = target_match[1]
+          base_full_name = base_match[1]
+        end
+      end
+
+      if (target_full_name < base_full_name)
+        target_data ||= target.read(target_size)
+        write_tar_section(target_header, target_data, target_remainder)
+        target_data = nil
+        target_enum.next
+      elsif (base_full_name < target_full_name)
+        base.read(base_size) unless base_data
+        base_data = nil
+        base_enum.next
+      elsif (target_mtime != base_mtime) || (target_size != base_size)
+        target_data ||= target.read(target_size)
+        write_tar_section(target_header, target_data, target_remainder)
+        target_data = nil
+        target_enum.next
+      else
+        target.read(target_size) unless target_data
+        target_data = nil
+        target_enum.next
+        base.read(base_size) unless base_data
+        base_data = nil
+        base_enum.next
+      end
+    end
+  end
+end
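
Dockly::TarDiff operates on plain IO objects, emitting only the target tar's entries that are new or changed relative to the base; both inputs need their entries in lexicographic order, since process advances whichever side sorts lower. A minimal sketch (paths are hypothetical):

    # Diff two uncompressed image tars into diff.tar; paths are made up.
    File.open('base.tar', 'rb') do |base|
      File.open('target.tar', 'rb') do |target|
        File.open('diff.tar', 'wb') do |output|
          Dockly::TarDiff.new(base, target, output).process
        end
      end
    end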
@@ -1,3 +1,3 @@
 module Dockly
-  VERSION = '1.5.8'
+  VERSION = '1.5.9'
 end
@@ -0,0 +1 @@
+docker import - <% if data[:repo] %><%= data[:repo] %>:<%= data[:tag] %><% end %>
@@ -0,0 +1,4 @@
+file_diff_docker_import_fn() {
+  <%= get_from_s3(data[:base_image]) %>
+}
+(cat "<%= data[:diff_image] %>"; file_diff_docker_import_fn) | gunzip -c | <%= docker_import(data[:repo], data[:tag]) %>
@@ -0,0 +1 @@
+cat <%= data[:path] %> | gunzip -c | <%= docker_import(data[:repo], data[:tag]) %>
@@ -0,0 +1,2 @@
+<%= get_from_s3(data[:s3_url], data[:deb_path]) %>
+<%= install_package(data[:deb_path]) %>
@@ -0,0 +1,11 @@
+log "fetch: starting to fetch deb"
+worked=1
+s3_path="<%= data[:s3_url] %>"
+output_path="<%= data[:output_path] %>"
+for attempt in {1..200}; do
+  [[ $worked != 0 ]] || break
+  log "fetch: attempt ${attempt} to get $s3_path ..."
+  s3cmd -f get $s3_path $output_path && worked=0 || (log "fetch: attempt failed, sleeping 30"; sleep 30)
+done
+[[ $worked != 0 ]] && fatal "fetch: failed to pull deb from S3"
+log "fetch: successfully fetched deb"
@@ -0,0 +1 @@
+dpkg -i "<%= data[:path] %>"
@@ -0,0 +1,12 @@
+set -e -o pipefail
+
+function log {
+  logger -t dockly:startup "$@"
+}
+
+function fatal {
+  logger -t dockly:startup "FATAL: $@"
+  exit 1
+}
+
+mkdir -p /opt/dockly
@@ -0,0 +1 @@
+docker pull <%= data[:repo] %>:<%= data[:tag] %>
@@ -0,0 +1,14 @@
+<%
+  base_image = "/opt/dockly/base_image.tar"
+%>
+
+s3_diff_docker_import_base_fn() {
+  <%= get_from_s3(data[:base_image]) %>
+}
+s3_diff_docker_import_diff_fn() {
+  <%= get_from_s3(data[:diff_image]) %>
+}
+s3_diff_docker_import_base_fn | gunzip -c > "<%= base_image %>"
+size=$(stat --format "%s" "<%= base_image %>")
+head_size=$(($size - 1024))
+(head -c $head_size "<%= base_image %>"; s3_diff_docker_import_diff_fn | gunzip -c) | <%= docker_import(data[:repo], data[:tag]) %>
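
The head_size=$(($size - 1024)) line works because a tar archive is terminated by two 512-byte zero blocks: chopping the final 1024 bytes off the base tar lets the diff's entries, and the diff's own trailer, continue the stream as one valid archive. The same splice in Ruby, for illustration (paths are hypothetical):

    # Mirror of the shell snippet above: splice the base tar (minus its
    # 1024-byte trailer) together with a TarDiff output into one tar.
    base = '/opt/dockly/base_image.tar'
    head_size = File.size(base) - 1024
    File.open('combined.tar', 'wb') do |out|
      File.open(base, 'rb') { |f| out.write(f.read(head_size)) }
      File.open('diff.tar', 'rb') { |f| IO.copy_stream(f, out) }
    end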
@@ -0,0 +1,4 @@
+s3_docker_import_fn() {
+  <%= get_from_s3(data[:s3_url]) %>
+}
+s3_docker_import_fn | gunzip -c | <%= docker_import(data[:repo], data[:tag]) %>
@@ -0,0 +1,154 @@
+require 'spec_helper'
+
+describe Dockly::AWS::S3Writer do
+  let(:connection) { double(:connection) }
+  let(:bucket) { 'test_bucket' }
+  let(:object) { 'object_name.tar' }
+  let(:initiate_response) { double(:initiate_response) }
+  let(:upload_id) { 'test_id' }
+
+  subject { described_class.new(connection, bucket, object) }
+
+  before do
+    connection.should_receive(:initiate_multipart_upload) { initiate_response }
+    initiate_response.stub(:body) { { 'UploadId' => upload_id } }
+  end
+
+  describe ".new" do
+
+    it "sets the connection, s3_bucket, s3_object, and upload_id" do
+      expect(subject.connection).to eq(connection)
+      expect(subject.s3_bucket).to eq(bucket)
+      expect(subject.s3_object).to eq(object)
+      expect(subject.upload_id).to eq(upload_id)
+    end
+  end
+
+  describe "#upload_buffer" do
+    let(:message) { "message" }
+    let(:upload_response) { double(:upload_response) }
+    let(:etag) { "test" }
+
+    before do
+      connection.should_receive(:upload_part).with(bucket, object, upload_id, 1, message) do
+        upload_response
+      end
+      upload_response.stub(:headers) { { "ETag" => etag } }
+      subject.instance_variable_set(:"@buffer", message)
+    end
+
+    it "connects to S3" do
+      subject.upload_buffer
+      expect(subject.instance_variable_get(:"@parts")).to include(etag)
+    end
+  end
+
+  describe "#write" do
+    let(:message) { "a" * chunk_length }
+
+    context "with a buffer of less than 5 MB" do
+      let(:chunk_length) { 100 }
+
+      before do
+        subject.should_not_receive(:upload_buffer)
+      end
+
+      it "adds it to the buffer and returns the chunk length" do
+        expect(subject.write(message)).to eq(chunk_length)
+        expect(subject.instance_variable_get(:"@buffer")).to eq(message)
+      end
+    end
+
+    context "with a buffer of greater than 5 MB" do
+      let(:chunk_length) { 1 + 5 * 1024 * 1024 }
+
+      before do
+        subject.should_receive(:upload_buffer)
+      end
+
+      it "adds it to the buffer, writes to S3 and returns the chunk length" do
+        expect(subject.write(message)).to eq(chunk_length)
+      end
+    end
+  end
+
+  describe "#close" do
+    let(:complete_response) { double(:complete_response) }
+
+    before do
+      connection.should_receive(:complete_multipart_upload).with(bucket, object, upload_id, []) do
+        complete_response
+      end
+    end
+
+    context "when it passes" do
+      before do
+        complete_response.stub(:body) { {} }
+      end
+
+      context "when the buffer is not empty" do
+        before do
+          subject.instance_variable_set(:"@buffer", "text")
+          subject.should_receive(:upload_buffer)
+        end
+
+        it "uploads the rest of the buffer and closes the connection" do
+          expect(subject.close).to be_true
+        end
+      end
+
+      context "when the buffer is empty" do
+        before do
+          subject.should_not_receive(:upload_buffer)
+        end
+
+        it "closes the connection" do
+          expect(subject.close).to be_true
+        end
+      end
+    end
+
+    context "when it fails" do
+      before do
+        complete_response.stub(:body) { { 'Code' => 20, 'Message' => 'Msggg' } }
+      end
+
+      it "raises an error" do
+        expect { subject.close }.to raise_error("Failed to upload to S3: 20: Msggg")
+      end
+    end
+  end
+
+  describe "#abort" do
+    before do
+      connection.should_receive(:abort_multipart_upload).with(bucket, object, upload_id)
+    end
+
+    it "aborts the upload" do
+      subject.abort
+    end
+  end
+
+  describe "#abort_unless_closed" do
+    context "when the upload is closed" do
+      before do
+        subject.should_not_receive(:abort)
+        subject.instance_variable_set(:"@closed", true)
+      end
+
+      it "does not abort" do
+        subject.abort_unless_closed
+      end
+    end
+
+    context "when the upload is open" do
+      before do
+        subject.should_receive(:abort)
+      end
+
+      it "aborts the upload" do
+        subject.abort_unless_closed
+      end
+    end
+  end
+end
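
Read end to end, the spec implies the writer's lifecycle: buffer writes, flush a part per ~5 MB, then either complete or abort the multipart upload. Roughly (the connection object is a Fog-style S3 connection, an assumption taken from the method names the spec stubs):

    # Lifecycle implied by the spec above; connection setup is assumed.
    writer = Dockly::AWS::S3Writer.new(connection, 'test_bucket', 'object_name.tar')
    begin
      writer.write('some data')    # buffered; a part uploads once ~5 MB accumulates
      writer.close                 # flushes the remainder and completes the upload
    ensure
      writer.abort_unless_closed   # aborts the multipart upload if close never ran
    end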