capistrano-ops 1.0.7 → 1.0.8
- checksums.yaml +4 -4
- data/lib/capistrano/ops/rails/lib/backup/s3.rb +83 -40
- data/lib/capistrano/ops/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cb9a8459b0906318eb8149333da576558e4f232c35ca3bd453756a1ba4031036
+  data.tar.gz: e6e1342c6a847e029583db4824b1f69e63b687db53f42ce3f6e4e933d5d7aa27
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b8e3b49f6fc1fbc179713f06ec82a06b3e6bc953b256ad1f5e6b564ff47a28c9d3705e4e4b8add425de28aea1e0d5830fa055b42c9a6bc46d2244f5e645d3fb3
+  data.tar.gz: 19516277d0a171017ad63b526f602a47056ab469defe6d6b3e5292ffe9a1a0221ed8dedd5e2012d49ea4dca5c233eb283d11fd05df6111a723a9bf13e48d5867

data/lib/capistrano/ops/rails/lib/backup/s3.rb
CHANGED
@@ -6,8 +6,10 @@ module Backup
   require 'zlib'
   require 'find'
   require 'capistrano/ops/rails/lib/backup/s3_helper'
+
   class S3
     include Backup::S3Helper
+
     attr_accessor :endpoint, :region, :access_key_id, :secret_access_key, :s3_resource, :s3_client
 
     def initialize(endpoint: ENV['S3_BACKUP_ENDPOINT'], region: ENV['S3_BACKUP_REGION'], access_key_id: ENV['S3_BACKUP_KEY'],
@@ -21,7 +23,6 @@ module Backup
         access_key_id: access_key_id,
         secret_access_key: secret_access_key,
         force_path_style: true
-
       }
       configuration[:endpoint] = endpoint unless endpoint.nil?
       self.s3_resource = Aws::S3::Resource.new(configuration)
@@ -46,6 +47,7 @@ module Backup
       end
     end
 
+    # rubocop:disable Metrics/MethodLength
     def upload_file_as_stream(file_path, key)
       bucket = ENV['S3_BACKUP_BUCKET']
       # Calculate total size of the file to be uploaded
@@ -55,31 +57,51 @@ module Backup
       uploaded_size = 0
 
       # Initiate multipart upload
-      multipart_upload = s3_client.create_multipart_upload(bucket: bucket, key: key)
 
       # Upload the tar.gz data from the file in parts
       part_number = 1
       parts = []
       last_logged_progress = 0
+      max_retry_time = 300 # 5 minutes in seconds
+      total_wait_time = 0
 
       begin
         File.open(file_path, 'rb') do |file|
           while (part = file.read(chunk_size)) # Read calculated chunk size
-
-
-
-
-
-
-
-
-
-
+            retry_count = 0
+            begin
+              # Initiate multipart upload
+              multipart_upload ||= s3_client.create_multipart_upload(bucket: bucket, key: key)
+              part_upload = s3_client.upload_part(
+                bucket: bucket,
+                key: key,
+                upload_id: multipart_upload.upload_id,
+                part_number: part_number,
+                body: part
+              )
+              parts << { part_number: part_number, etag: part_upload.etag }
+              uploaded_size += part.size
+              part_number += 1
 
-
-
-
-
+              progress = (uploaded_size.to_f / total_size * 100).round
+              if progress >= last_logged_progress + 10
+                puts "Upload progress: #{progress}% complete"
+                last_logged_progress = progress
+              end
+            rescue StandardError => e
+              retry_count += 1
+              wait_time = 2**retry_count
+              total_wait_time += wait_time
+
+              if total_wait_time > max_retry_time
+                puts "Exceeded maximum retry time of #{max_retry_time / 60} minutes. Aborting upload."
+                raise e
+              end
+              puts "Error uploading part #{part_number}: #{e.message.split("\n").first} (Attempt #{retry_count})"
+              puts "Retry in #{wait_time} seconds"
+              sleep(wait_time) # Exponential backoff
+              puts 'Retrying upload part...'
+              retry
             end
           end
         end
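
Both upload loops now wrap each upload_part call in a retry with exponential backoff, where retry_count resets per part but total_wait_time is shared across the whole upload and capped by max_retry_time. A minimal standalone sketch of that pattern, using hypothetical names (with_backoff, max_total_wait) rather than the gem's own helpers, and keeping the wait budget per call for brevity:

# Sketch only: retry a block with exponential backoff (2, 4, 8, ... seconds)
# until a total wait budget is exhausted, then re-raise the last error.
def with_backoff(max_total_wait: 300)
  attempts = 0
  total_wait = 0
  begin
    yield
  rescue StandardError => e
    attempts += 1
    wait = 2**attempts
    total_wait += wait
    raise e if total_wait > max_total_wait

    sleep(wait)
    retry
  end
end

# Hypothetical usage mirroring the diff: retry a single part upload.
# with_backoff { s3_client.upload_part(bucket: bucket, key: key, upload_id: upload_id, part_number: n, body: part) }
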
@@ -109,7 +131,6 @@ module Backup
       raise e
     end
 
-    # rubocop:disable Metrics/MethodLength
     def upload_folder_as_tar_gz_stream(folder_path, key)
       bucket = ENV['S3_BACKUP_BUCKET']
 
@@ -127,31 +148,49 @@ module Backup
       # Start a thread to write the tar.gz data to the pipe
       writer_thread = start_writer_thread(folder_path, write_io)
 
-      # Initiate multipart upload
-      multipart_upload = s3_client.create_multipart_upload(bucket: bucket, key: key)
-
       # Upload the tar.gz data from the pipe in parts
       part_number = 1
       parts = []
       last_logged_progress = 0
+      max_retry_time = 300 # 5 minutes in seconds
+      total_wait_time = 0
 
       begin
         while (part = read_io.read(chunk_size)) # Read calculated chunk size
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          retry_count = 0
+          begin
+            # Initiate multipart upload
+            multipart_upload ||= s3_client.create_multipart_upload(bucket: bucket, key: key)
+            part_upload = s3_client.upload_part(
+              bucket: bucket,
+              key: key,
+              upload_id: multipart_upload.upload_id,
+              part_number: part_number,
+              body: part
+            )
+            parts << { part_number: part_number, etag: part_upload.etag }
+            uploaded_size += part.size
+            part_number += 1
+
+            progress = (uploaded_size.to_f / total_size * 100).round
+            if progress >= last_logged_progress + 10
+              puts "Upload progress: #{progress}% complete"
+              last_logged_progress = progress
+            end
+          rescue StandardError => e
+            retry_count += 1
+            wait_time = 2**retry_count
+            total_wait_time += wait_time
+
+            if total_wait_time > max_retry_time
+              puts "Exceeded maximum retry time of #{max_retry_time / 60} minutes. Aborting upload."
+              raise e
+            end
+            puts "Error uploading part #{part_number}: #{e.message.split("\n").first} (Attempt #{retry_count})"
+            puts "Retry in #{wait_time} seconds"
+            sleep(wait_time) # Exponential backoff
+            puts 'Retrying upload part...'
+            retry
           end
         end
 
@@ -165,11 +204,13 @@ module Backup
         puts 'Completed multipart upload'
       rescue StandardError => e
         # Abort multipart upload in case of error
-
-
-
-
-
+        if multipart_upload
+          s3_client.abort_multipart_upload(
+            bucket: bucket,
+            key: key,
+            upload_id: multipart_upload.upload_id
+          )
+        end
         puts "Aborted multipart upload due to error: #{e.message}"
         raise e
       ensure
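
Because create_multipart_upload is now invoked lazily (||= on the first part), the error path only aborts when multipart_upload exists; a failure before the first part leaves nothing to abort. For context, the parts array collected above (part_number/etag pairs) is what the aws-sdk-s3 client needs to finish the upload. A minimal sketch of that completion call, assuming the same s3_client, bucket, key, and multipart_upload variables, and not taken from the gem's code:

# Sketch: finish the multipart upload using the { part_number:, etag: }
# entries gathered while streaming parts.
s3_client.complete_multipart_upload(
  bucket: bucket,
  key: key,
  upload_id: multipart_upload.upload_id,
  multipart_upload: { parts: parts }
)
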
@@ -184,6 +225,8 @@ module Backup
     end
     # rubocop:enable Metrics/MethodLength
 
+    private
+
     def start_writer_thread(folder_path, write_io)
       Thread.new do
         parent_folder = File.dirname(folder_path)
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: capistrano-ops
 version: !ruby/object:Gem::Version
-  version: 1.0.7
+  version: 1.0.8
 platform: ruby
 authors:
 - Florian Crusius
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-12-
+date: 2024-12-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk-s3