capistrano-ops 1.0.7 → 1.0.9

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 8f235992d4982cea3d663c6bd2616becfd54a4f772d199c93c76d3f27b7b359a
-  data.tar.gz: af9e2c333ff0a24fb9eff2bbc87ef38f6ac5d014b4e975ac685542ad075f8544
+  metadata.gz: 96ec2fb775bc19f6853b6194b58f544acb57957c3ccca331caea4a7de3313507
+  data.tar.gz: 320ee9acc8aaa62844ea0e693b605f4b19f0567ab1449952df4941eb986096f0
 SHA512:
-  metadata.gz: 7046680c7e931b6942785f2539215549a3bc5f20a32d4a1a2226a6bf2db684b019124cd409726bdb839295265b488abf60f340ef9311f8f794b8d576fe3b79c3
-  data.tar.gz: e0499b85de6f9bc85463433ab80fab5fccfa33489940d21d8578fa1e1a71d9ad94bcac8c7e542f4c2e8982ab5f601d986445175643646dc74769f3810c1a9157
+  metadata.gz: cdb6f282869288525433017c3ccfc48638dc0abcc22b1adca479135a43d605f2308a0e3badbccdb5820e5d223be8818b41ae6ff3adade5c621a7c56b389762b9
+  data.tar.gz: 6e42b2e1596d763b25da9578d916ad37c0feabd2bb85ba3a7a32151bb3fb49505d641a7162ccfd1f75601da73402c541ac9bdb9ddab5e36fd6362ee19613c472
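
These are the SHA256/SHA512 digests of the two archives inside the packaged gem (metadata.gz and data.tar.gz). A minimal sketch for checking the new SHA256 values locally, assuming the gem has been fetched with "gem fetch capistrano-ops -v 1.0.9" and unpacked with "tar -xf capistrano-ops-1.0.9.gem" (a .gem file is a plain tar archive containing both members):

require 'digest'

# Expected SHA256 digests taken from the checksums.yaml diff above.
EXPECTED_SHA256 = {
  'metadata.gz' => '96ec2fb775bc19f6853b6194b58f544acb57957c3ccca331caea4a7de3313507',
  'data.tar.gz' => '320ee9acc8aaa62844ea0e693b605f4b19f0567ab1449952df4941eb986096f0'
}.freeze

EXPECTED_SHA256.each do |file, expected|
  actual = Digest::SHA256.file(file).hexdigest
  puts "#{file}: #{actual == expected ? 'OK' : "MISMATCH (got #{actual})"}"
end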
@@ -6,8 +6,10 @@ module Backup
   require 'zlib'
   require 'find'
   require 'capistrano/ops/rails/lib/backup/s3_helper'
+
   class S3
     include Backup::S3Helper
+
     attr_accessor :endpoint, :region, :access_key_id, :secret_access_key, :s3_resource, :s3_client
 
     def initialize(endpoint: ENV['S3_BACKUP_ENDPOINT'], region: ENV['S3_BACKUP_REGION'], access_key_id: ENV['S3_BACKUP_KEY'],
@@ -21,7 +23,6 @@ module Backup
         access_key_id: access_key_id,
         secret_access_key: secret_access_key,
         force_path_style: true
-
       }
       configuration[:endpoint] = endpoint unless endpoint.nil?
       self.s3_resource = Aws::S3::Resource.new(configuration)
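
For context, the initializer wires these settings into an Aws::S3::Resource. A self-contained sketch of the same configuration pattern (S3_BACKUP_SECRET is an assumed variable name, since the secret key's default is not visible in this hunk; force_path_style keeps S3-compatible endpoints such as MinIO working):

require 'aws-sdk-s3'

# Build the client options the way the initializer above does:
# the custom endpoint is only added when one is configured.
configuration = {
  region: ENV['S3_BACKUP_REGION'],
  access_key_id: ENV['S3_BACKUP_KEY'],
  secret_access_key: ENV['S3_BACKUP_SECRET'], # assumed variable name
  force_path_style: true
}
configuration[:endpoint] = ENV['S3_BACKUP_ENDPOINT'] unless ENV['S3_BACKUP_ENDPOINT'].nil?

s3_resource = Aws::S3::Resource.new(configuration)
s3_client = s3_resource.client # low-level client used for the multipart calls below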
@@ -46,6 +47,7 @@ module Backup
       end
     end
 
+    # rubocop:disable Metrics/MethodLength
    def upload_file_as_stream(file_path, key)
      bucket = ENV['S3_BACKUP_BUCKET']
      # Calculate total size of the file to be uploaded
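
The lines that follow this comment are outside the hunk; a hypothetical reconstruction of the size and part-size setup, assuming Ruby's File.size and the calculate_chunk_size helper shown further down:

# The uploader needs the total size both for progress reporting and for
# picking a part size that stays under S3's 10,000-part limit.
total_size = File.size(file_path)             # bytes to upload
chunk_size = calculate_chunk_size(total_size) # helper from Backup::S3Helper (see below)
expected_parts = (total_size.to_f / chunk_size).ceil
puts "Uploading #{total_size} bytes in ~#{expected_parts} parts of #{chunk_size} bytes"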
@@ -54,32 +56,50 @@ module Backup
 
       uploaded_size = 0
 
-      # Initiate multipart upload
-      multipart_upload = s3_client.create_multipart_upload(bucket: bucket, key: key)
-
       # Upload the tar.gz data from the file in parts
       part_number = 1
       parts = []
       last_logged_progress = 0
+      max_retry_time = 300 # 5 minutes in seconds
+      total_wait_time = 0
 
       begin
+        multipart_upload ||= s3_client.create_multipart_upload(bucket: bucket, key: key)
         File.open(file_path, 'rb') do |file|
           while (part = file.read(chunk_size)) # Read calculated chunk size
-            part_upload = s3_client.upload_part(
-              bucket: bucket,
-              key: key,
-              upload_id: multipart_upload.upload_id,
-              part_number: part_number,
-              body: part
-            )
-            parts << { part_number: part_number, etag: part_upload.etag }
-            uploaded_size += part.size
-            part_number += 1
+            retry_count = 0
+            begin
+              # Initiate multipart upload
+              part_upload = s3_client.upload_part(
+                bucket: bucket,
+                key: key,
+                upload_id: multipart_upload.upload_id,
+                part_number: part_number,
+                body: part
+              )
+              parts << { part_number: part_number, etag: part_upload.etag }
+              uploaded_size += part.size
+              part_number += 1
 
-            progress = (uploaded_size.to_f / total_size * 100).round
-            if progress >= last_logged_progress + 10
-              puts "Upload progress: #{progress}% complete"
-              last_logged_progress = progress
+              progress = (uploaded_size.to_f / total_size * 100).round
+              if progress >= last_logged_progress + 10
+                puts "Upload progress: #{progress}% complete"
+                last_logged_progress = progress
+              end
+            rescue StandardError => e
+              retry_count += 1
+              wait_time = 2**retry_count
+              total_wait_time += wait_time
+
+              if total_wait_time > max_retry_time
+                puts "Exceeded maximum retry time of #{max_retry_time / 60} minutes. Aborting upload."
+                raise e
+              end
+              puts "Error uploading part #{part_number}: #{e.message.split("\n").first} (Attempt #{retry_count})"
+              puts "Retry in #{wait_time} seconds"
+              sleep(wait_time) # Exponential backoff
+              puts 'Retrying upload part...'
+              retry
             end
           end
         end
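
The retry logic added above caps exponential backoff by total elapsed wait rather than by attempt count. A minimal standalone sketch of the same policy (the 300-second budget and 2**n wait mirror the diff; the wrapped block and its locals are stand-ins):

# Retry a block with exponential backoff, giving up once the accumulated
# sleep time would exceed a fixed budget, as the upload loop above does.
def with_backoff(max_retry_time: 300)
  retry_count = 0
  total_wait_time = 0
  begin
    yield
  rescue StandardError => e
    retry_count += 1
    wait_time = 2**retry_count # 2, 4, 8, ... seconds
    total_wait_time += wait_time
    raise e if total_wait_time > max_retry_time

    puts "Attempt #{retry_count} failed (#{e.message.split("\n").first}), retrying in #{wait_time}s"
    sleep(wait_time)
    retry
  end
end

# Usage sketch, wrapping a single part upload:
# with_backoff { s3_client.upload_part(bucket: bucket, key: key, upload_id: upload_id, part_number: part_number, body: part) }

With a 300-second budget and doubling waits, seven failed attempts fit (2 + 4 + ... + 128 = 254 seconds of sleep); an eighth consecutive failure would push the total past the budget and re-raises the error.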
@@ -109,7 +129,6 @@ module Backup
       raise e
     end
 
-    # rubocop:disable Metrics/MethodLength
     def upload_folder_as_tar_gz_stream(folder_path, key)
       bucket = ENV['S3_BACKUP_BUCKET']
 
@@ -127,31 +146,49 @@ module Backup
       # Start a thread to write the tar.gz data to the pipe
       writer_thread = start_writer_thread(folder_path, write_io)
 
-      # Initiate multipart upload
-      multipart_upload = s3_client.create_multipart_upload(bucket: bucket, key: key)
-
       # Upload the tar.gz data from the pipe in parts
       part_number = 1
       parts = []
       last_logged_progress = 0
+      max_retry_time = 300 # 5 minutes in seconds
+      total_wait_time = 0
 
       begin
+        # Initiate multipart upload
+        multipart_upload ||= s3_client.create_multipart_upload(bucket: bucket, key: key)
         while (part = read_io.read(chunk_size)) # Read calculated chunk size
-          part_upload = s3_client.upload_part(
-            bucket: bucket,
-            key: key,
-            upload_id: multipart_upload.upload_id,
-            part_number: part_number,
-            body: part
-          )
-          parts << { part_number: part_number, etag: part_upload.etag }
-          uploaded_size += part.size
-          part_number += 1
+          retry_count = 0
+          begin
+            part_upload = s3_client.upload_part(
+              bucket: bucket,
+              key: key,
+              upload_id: multipart_upload.upload_id,
+              part_number: part_number,
+              body: part
+            )
+            parts << { part_number: part_number, etag: part_upload.etag }
+            uploaded_size += part.size
+            part_number += 1
+
+            progress = (uploaded_size.to_f / total_size * 100).round
+            if progress >= last_logged_progress + 10
+              puts "Upload progress: #{progress}% complete"
+              last_logged_progress = progress
+            end
+          rescue StandardError => e
+            retry_count += 1
+            wait_time = 2**retry_count
+            total_wait_time += wait_time
 
-          progress = (uploaded_size.to_f / total_size * 100).round
-          if progress >= last_logged_progress + 10
-            puts "Upload progress: #{progress}% complete"
-            last_logged_progress = progress
+            if total_wait_time > max_retry_time
+              puts "Exceeded maximum retry time of #{max_retry_time / 60} minutes. Aborting upload."
+              raise e
+            end
+            puts "Error uploading part #{part_number}: #{e.message.split("\n").first} (Attempt #{retry_count})"
+            puts "Retry in #{wait_time} seconds"
+            sleep(wait_time) # Exponential backoff
+            puts 'Retrying upload part...'
+            retry
          end
        end
 
@@ -165,11 +202,13 @@ module Backup
         puts 'Completed multipart upload'
       rescue StandardError => e
         # Abort multipart upload in case of error
-        s3_client.abort_multipart_upload(
-          bucket: bucket,
-          key: key,
-          upload_id: multipart_upload.upload_id
-        )
+        if multipart_upload
+          s3_client.abort_multipart_upload(
+            bucket: bucket,
+            key: key,
+            upload_id: multipart_upload.upload_id
+          )
+        end
         puts "Aborted multipart upload due to error: #{e.message}"
         raise e
       ensure
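
The new nil guard matters because create_multipart_upload now runs inside the begin block: if creating the upload is itself what fails, there is no upload_id to abort, and an unconditional abort_multipart_upload would raise a second error on top of the first. A reduced sketch of the pattern (bucket, key, and s3_client stand in for the method's locals):

multipart_upload = nil
begin
  multipart_upload = s3_client.create_multipart_upload(bucket: bucket, key: key)
  # ... upload parts, then complete_multipart_upload ...
rescue StandardError => e
  # Clean up on the S3 side only if the multipart upload was actually created;
  # otherwise multipart_upload is nil and there is nothing to abort.
  if multipart_upload
    s3_client.abort_multipart_upload(
      bucket: bucket,
      key: key,
      upload_id: multipart_upload.upload_id
    )
  end
  raise e
end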
@@ -32,7 +32,7 @@ module Backup
 
     def calculate_chunk_size(total_size)
       max_chunks = 10_000
-      min_chunk_size = 20 * 1024 * 1024 # 20MB
+      min_chunk_size = 50 * 1024 * 1024 # 50MB
       max_chunk_size = 105 * 1024 * 1024 # 105MB
       chunk_size = [total_size / max_chunks, min_chunk_size].max
       [chunk_size, max_chunk_size].min
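
Raising the floor from 20 MB to 50 MB means fewer, larger parts (and so fewer upload_part calls to retry) for typical backup sizes, while the 10,000-part cap and the 105 MB ceiling are unchanged. A quick worked check of the helper with the new values (the input sizes are illustrative):

def calculate_chunk_size(total_size)
  max_chunks = 10_000
  min_chunk_size = 50 * 1024 * 1024  # 50MB
  max_chunk_size = 105 * 1024 * 1024 # 105MB
  chunk_size = [total_size / max_chunks, min_chunk_size].max
  [chunk_size, max_chunk_size].min
end

gib = 1024**3
puts calculate_chunk_size(10 * gib) / (1024 * 1024)   # => 50  (10 GiB / 10,000 parts is ~1 MB, so the 50 MB floor wins)
puts calculate_chunk_size(600 * gib) / (1024 * 1024)  # => 61  (600 GiB / 10,000 parts is ~61 MB, above the floor)
puts calculate_chunk_size(2048 * gib) / (1024 * 1024) # => 105 (capped at the 105 MB ceiling)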
@@ -2,6 +2,6 @@
 
 module Capistrano
   module Ops
-    VERSION = '1.0.7'
+    VERSION = '1.0.9'
   end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: capistrano-ops
 version: !ruby/object:Gem::Version
-  version: 1.0.7
+  version: 1.0.9
 platform: ruby
 authors:
 - Florian Crusius
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-12-05 00:00:00.000000000 Z
+date: 2024-12-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk-s3