capistrano-ops 1.0.7 → 1.0.8

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 8f235992d4982cea3d663c6bd2616becfd54a4f772d199c93c76d3f27b7b359a
-  data.tar.gz: af9e2c333ff0a24fb9eff2bbc87ef38f6ac5d014b4e975ac685542ad075f8544
+  metadata.gz: cb9a8459b0906318eb8149333da576558e4f232c35ca3bd453756a1ba4031036
+  data.tar.gz: e6e1342c6a847e029583db4824b1f69e63b687db53f42ce3f6e4e933d5d7aa27
 SHA512:
-  metadata.gz: 7046680c7e931b6942785f2539215549a3bc5f20a32d4a1a2226a6bf2db684b019124cd409726bdb839295265b488abf60f340ef9311f8f794b8d576fe3b79c3
-  data.tar.gz: e0499b85de6f9bc85463433ab80fab5fccfa33489940d21d8578fa1e1a71d9ad94bcac8c7e542f4c2e8982ab5f601d986445175643646dc74769f3810c1a9157
+  metadata.gz: b8e3b49f6fc1fbc179713f06ec82a06b3e6bc953b256ad1f5e6b564ff47a28c9d3705e4e4b8add425de28aea1e0d5830fa055b42c9a6bc46d2244f5e645d3fb3
+  data.tar.gz: 19516277d0a171017ad63b526f602a47056ab469defe6d6b3e5292ffe9a1a0221ed8dedd5e2012d49ea4dca5c233eb283d11fd05df6111a723a9bf13e48d5867
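These digests cover the metadata.gz and data.tar.gz entries packed inside the published .gem archive (which is itself a plain tar file). As a hypothetical spot-check of a downloaded copy against the SHA256 values above (the local file name is assumed):

require 'rubygems/package'
require 'digest'

# Print the SHA256 of the two entries recorded in checksums.yaml.
File.open('capistrano-ops-1.0.8.gem', 'rb') do |gem_file|
  Gem::Package::TarReader.new(gem_file) do |tar|
    tar.each do |entry|
      next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)

      puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
    end
  end
end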
@@ -6,8 +6,10 @@ module Backup
   require 'zlib'
   require 'find'
   require 'capistrano/ops/rails/lib/backup/s3_helper'
+
   class S3
     include Backup::S3Helper
+
     attr_accessor :endpoint, :region, :access_key_id, :secret_access_key, :s3_resource, :s3_client
 
     def initialize(endpoint: ENV['S3_BACKUP_ENDPOINT'], region: ENV['S3_BACKUP_REGION'], access_key_id: ENV['S3_BACKUP_KEY'],
@@ -21,7 +23,6 @@ module Backup
         access_key_id: access_key_id,
         secret_access_key: secret_access_key,
         force_path_style: true
-
       }
       configuration[:endpoint] = endpoint unless endpoint.nil?
       self.s3_resource = Aws::S3::Resource.new(configuration)
@@ -46,6 +47,7 @@ module Backup
       end
     end
 
+    # rubocop:disable Metrics/MethodLength
     def upload_file_as_stream(file_path, key)
      bucket = ENV['S3_BACKUP_BUCKET']
       # Calculate total size of the file to be uploaded
@@ -55,31 +57,51 @@ module Backup
       uploaded_size = 0
 
       # Initiate multipart upload
-      multipart_upload = s3_client.create_multipart_upload(bucket: bucket, key: key)
 
       # Upload the tar.gz data from the file in parts
       part_number = 1
       parts = []
       last_logged_progress = 0
+      max_retry_time = 300 # 5 minutes in seconds
+      total_wait_time = 0
 
       begin
         File.open(file_path, 'rb') do |file|
           while (part = file.read(chunk_size)) # Read calculated chunk size
-            part_upload = s3_client.upload_part(
-              bucket: bucket,
-              key: key,
-              upload_id: multipart_upload.upload_id,
-              part_number: part_number,
-              body: part
-            )
-            parts << { part_number: part_number, etag: part_upload.etag }
-            uploaded_size += part.size
-            part_number += 1
+            retry_count = 0
+            begin
+              # Initiate multipart upload
+              multipart_upload ||= s3_client.create_multipart_upload(bucket: bucket, key: key)
+              part_upload = s3_client.upload_part(
+                bucket: bucket,
+                key: key,
+                upload_id: multipart_upload.upload_id,
+                part_number: part_number,
+                body: part
+              )
+              parts << { part_number: part_number, etag: part_upload.etag }
+              uploaded_size += part.size
+              part_number += 1
 
-            progress = (uploaded_size.to_f / total_size * 100).round
-            if progress >= last_logged_progress + 10
-              puts "Upload progress: #{progress}% complete"
-              last_logged_progress = progress
+              progress = (uploaded_size.to_f / total_size * 100).round
+              if progress >= last_logged_progress + 10
+                puts "Upload progress: #{progress}% complete"
+                last_logged_progress = progress
+              end
+            rescue StandardError => e
+              retry_count += 1
+              wait_time = 2**retry_count
+              total_wait_time += wait_time
+
+              if total_wait_time > max_retry_time
+                puts "Exceeded maximum retry time of #{max_retry_time / 60} minutes. Aborting upload."
+                raise e
+              end
+              puts "Error uploading part #{part_number}: #{e.message.split("\n").first} (Attempt #{retry_count})"
+              puts "Retry in #{wait_time} seconds"
+              sleep(wait_time) # Exponential backoff
+              puts 'Retrying upload part...'
+              retry
            end
          end
        end
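The retry logic added here wraps each upload_part call in exponential backoff: waits of 2, 4, 8, ... seconds, with retry_count reset per part but total_wait_time accumulated across the whole upload, so a persistently flaky transfer is aborted once roughly five minutes have been spent sleeping rather than retrying forever. The same pattern, extracted into a standalone sketch (the with_backoff helper is illustrative and not part of the gem; here the cumulative budget is scoped to a single call rather than shared across parts):

# Retry a block with exponential backoff; give up once the cumulative
# sleep time exceeds max_retry_time seconds (300 = 5 minutes above).
def with_backoff(max_retry_time: 300)
  retry_count = 0
  total_wait_time = 0
  begin
    yield
  rescue StandardError => e
    retry_count += 1
    wait_time = 2**retry_count
    total_wait_time += wait_time
    raise e if total_wait_time > max_retry_time

    sleep(wait_time)
    retry
  end
end

# Illustrative usage:
# with_backoff { s3_client.upload_part(bucket: bucket, key: key, upload_id: id, part_number: 1, body: part) }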
@@ -109,7 +131,6 @@ module Backup
         raise e
       end
 
-    # rubocop:disable Metrics/MethodLength
     def upload_folder_as_tar_gz_stream(folder_path, key)
       bucket = ENV['S3_BACKUP_BUCKET']
 
@@ -127,31 +148,49 @@ module Backup
       # Start a thread to write the tar.gz data to the pipe
       writer_thread = start_writer_thread(folder_path, write_io)
 
-      # Initiate multipart upload
-      multipart_upload = s3_client.create_multipart_upload(bucket: bucket, key: key)
-
       # Upload the tar.gz data from the pipe in parts
       part_number = 1
       parts = []
       last_logged_progress = 0
+      max_retry_time = 300 # 5 minutes in seconds
+      total_wait_time = 0
 
       begin
         while (part = read_io.read(chunk_size)) # Read calculated chunk size
-          part_upload = s3_client.upload_part(
-            bucket: bucket,
-            key: key,
-            upload_id: multipart_upload.upload_id,
-            part_number: part_number,
-            body: part
-          )
-          parts << { part_number: part_number, etag: part_upload.etag }
-          uploaded_size += part.size
-          part_number += 1
-
-          progress = (uploaded_size.to_f / total_size * 100).round
-          if progress >= last_logged_progress + 10
-            puts "Upload progress: #{progress}% complete"
-            last_logged_progress = progress
+          retry_count = 0
+          begin
+            # Initiate multipart upload
+            multipart_upload ||= s3_client.create_multipart_upload(bucket: bucket, key: key)
+            part_upload = s3_client.upload_part(
+              bucket: bucket,
+              key: key,
+              upload_id: multipart_upload.upload_id,
+              part_number: part_number,
+              body: part
+            )
+            parts << { part_number: part_number, etag: part_upload.etag }
+            uploaded_size += part.size
+            part_number += 1
+
+            progress = (uploaded_size.to_f / total_size * 100).round
+            if progress >= last_logged_progress + 10
+              puts "Upload progress: #{progress}% complete"
+              last_logged_progress = progress
+            end
+          rescue StandardError => e
+            retry_count += 1
+            wait_time = 2**retry_count
+            total_wait_time += wait_time
+
+            if total_wait_time > max_retry_time
+              puts "Exceeded maximum retry time of #{max_retry_time / 60} minutes. Aborting upload."
+              raise e
+            end
+            puts "Error uploading part #{part_number}: #{e.message.split("\n").first} (Attempt #{retry_count})"
+            puts "Retry in #{wait_time} seconds"
+            sleep(wait_time) # Exponential backoff
+            puts 'Retrying upload part...'
+            retry
          end
        end
 
@@ -165,11 +204,13 @@ module Backup
         puts 'Completed multipart upload'
       rescue StandardError => e
         # Abort multipart upload in case of error
-        s3_client.abort_multipart_upload(
-          bucket: bucket,
-          key: key,
-          upload_id: multipart_upload.upload_id
-        )
+        if multipart_upload
+          s3_client.abort_multipart_upload(
+            bucket: bucket,
+            key: key,
+            upload_id: multipart_upload.upload_id
+          )
+        end
         puts "Aborted multipart upload due to error: #{e.message}"
         raise e
       ensure
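This guard works together with the multipart_upload ||= change above: the multipart upload is now created lazily when the first part is ready, so a failure before any part is uploaded leaves nothing to abort, and abort_multipart_upload is only called when an upload actually exists. A condensed sketch of the combined pattern (the method name and argument handling are illustrative, not the gem's exact code):

require 'aws-sdk-s3'

# Create the multipart upload lazily; abort only if it was created.
def stream_to_s3(s3_client, io, bucket:, key:, chunk_size: 5 * 1024 * 1024)
  multipart_upload = nil
  parts = []
  part_number = 1
  while (part = io.read(chunk_size))
    # The first part triggers creation; later parts reuse the same upload.
    multipart_upload ||= s3_client.create_multipart_upload(bucket: bucket, key: key)
    part_upload = s3_client.upload_part(bucket: bucket, key: key,
                                        upload_id: multipart_upload.upload_id,
                                        part_number: part_number, body: part)
    parts << { part_number: part_number, etag: part_upload.etag }
    part_number += 1
  end
  s3_client.complete_multipart_upload(bucket: bucket, key: key,
                                      upload_id: multipart_upload.upload_id,
                                      multipart_upload: { parts: parts })
rescue StandardError => e
  # Guarded abort, as in the hunk above: skip when nothing was created.
  if multipart_upload
    s3_client.abort_multipart_upload(bucket: bucket, key: key,
                                     upload_id: multipart_upload.upload_id)
  end
  raise e
end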
@@ -184,6 +225,8 @@ module Backup
     end
     # rubocop:enable Metrics/MethodLength
 
+    private
+
     def start_writer_thread(folder_path, write_io)
       Thread.new do
         parent_folder = File.dirname(folder_path)
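For context, start_writer_thread (now private) produces the tar.gz bytes on the write end of an IO.pipe while the upload loop above consumes read_io in chunks. A stdlib-only sketch of that shape (this approximates the pattern only; the gem's actual tar-writing code may differ):

require 'rubygems/package'
require 'zlib'
require 'find'

# Write folder_path as a tar.gz stream into write_io from a background
# thread. Closing the gzip stream closes the pipe, which ends the
# reader's read loop.
def start_writer_thread(folder_path, write_io)
  Thread.new do
    gz = Zlib::GzipWriter.new(write_io)
    Gem::Package::TarWriter.new(gz) do |tar|
      Find.find(folder_path) do |path|
        next unless File.file?(path)

        data = File.binread(path)
        tar.add_file_simple(path, 0o644, data.bytesize) { |io| io.write(data) }
      end
    end
  ensure
    gz&.close
  end
end

# Illustrative usage:
# read_io, write_io = IO.pipe
# writer = start_writer_thread('db/backups', write_io)
# ... read read_io in chunks and upload ...; then writer.join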
@@ -2,6 +2,6 @@
 
 module Capistrano
   module Ops
-    VERSION = '1.0.7'
+    VERSION = '1.0.8'
   end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: capistrano-ops
 version: !ruby/object:Gem::Version
-  version: 1.0.7
+  version: 1.0.8
 platform: ruby
 authors:
 - Florian Crusius
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-12-05 00:00:00.000000000 Z
+date: 2024-12-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk-s3