envoi-mam-agent 1.2.0 → 1.3.1
Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 786d46404e4cba2dc315f28715568437da1f5b7a
|
4
|
+
data.tar.gz: ee6601a7376a73e74ab0f7278a5a766bbb9d0db5
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 22b74933cae7aa8dbaf193434d193feca691a35f3b61b5ad290df4e4babcb01bb76e51edc9e51d89195ab3fcae9f494e422541a3bdc8f8b3b57cb9e0fc856694
|
7
|
+
data.tar.gz: b35dd88fe050844c25e0da41dd28070de6b05184b8bc70c27e240be3a466ed2590bf48422bb410bcc11604d46032d51cb246a75b1f02de382a6a19c16dee65f7
|
data/envoi-mam-agent.gemspec
CHANGED
@@ -26,6 +26,7 @@ Gem::Specification.new do |spec|
|
|
26
26
|
spec.add_runtime_dependency 'asperalm', '~> 0.9'
|
27
27
|
spec.add_runtime_dependency 'aws-sdk-s3', '~> 1'
|
28
28
|
spec.add_runtime_dependency 'aws-sdk-sqs', '~> 1'
|
29
|
+
spec.add_runtime_dependency 'aws-sdk-mediaconvert', '~> 1'
|
29
30
|
# spec.add_runtime_dependency 'shoryuken' '~> 4'
|
30
31
|
spec.add_runtime_dependency 'daemons', '~> 1.3'
|
31
32
|
# spec.add_runtime_dependency 'faraday', '~> 0.15'
|
@@ -0,0 +1,72 @@
|
|
1
|
+
#!/usr/bin/env ruby
# Lists AWS MediaConvert jobs (by default those in ERROR status) and
# resubmits each one, optionally overriding the job's queue and/or role ARN.

# When run inside irb there is no real __FILE__; fall back to ./lib.
# (Dir.pwd — Ruby has no Dir.cwd; File.exist? — File.exists? was removed in Ruby 3.2.)
lib_path = __FILE__ == '(irb)' ? File.join(Dir.pwd, 'lib') : File.expand_path('../../../lib', __FILE__)
$:.unshift(lib_path) unless $:.include?(lib_path) || !File.exist?(lib_path)
require 'rubygems'
require 'optparse'
require 'time'

require 'envoi/mam/agent/cli'
require 'envoi/utils/mediaconvert_enumerator'

DEFAULT_LIST_JOBS_STATUS = 'ERROR'

args = {
  job_status: DEFAULT_LIST_JOBS_STATUS
}
op = OptionParser.new
# Let --from-date/--to-date values be coerced through Time.parse.
op.accept(Time) { |time_in| Time.parse(time_in) }
op.on('--aws-profile PROFILENAME', 'AWS credentials file profile name to use.') { |v| args[:aws_profile] = v }
op.on('--aws-access-key KEY', 'AWS access key') { |v| args[:aws_access_key_id] = v }
op.on('--aws-secret-key KEY', 'AWS secret access key') { |v| args[:aws_secret_access_key] = v }
op.on('--aws-region REGION', 'The AWS region the jobs are located.') { |v| args[:aws_region] = v }
op.on('--new-role ROLE', 'The new role arn to use when resubmitting jobs.') { |v| args[:new_role] = v }
op.on('--new-queue QUEUE', 'The new queue arn to use when resubmitting jobs.') { |v| args[:new_queue] = v }
op.on('--[no-]job-status [STATUS]', 'A job status to query for.',
      "default: '#{args[:job_status]}'") { |v| args[:job_status] = v }
op.on('--from-date DATE', Time, 'The earliest date to filter jobs to.') { |v| args[:from_date] = v }
op.on('--to-date DATE', Time, 'The latest date to filter jobs to.') { |v| args[:to_date] = v }
op.on('--limit LIMIT', 'Limit the number of jobs to process.') { |v| args[:limit] = v }
op.on('--verbose', '') { |v| args[:verbose] = v }
op.on('--[no-]preview', 'Output paths with summary and stop.',
      "default: #{args[:preview_mode] ? 'true' : 'false'}") { |v| args[:preview_only] = v }
op.on('--[no-]summary', 'Output the paths and calculate the number of assets.',
      "default: #{args[:show_summary] ? 'true' : 'false'}") { |v| args[:show_summary] = v }

op.on('-h', '--help', 'Print help (this message) and exit') { puts op; exit }
op.parse!

new_queue = args[:new_queue]
new_role = args[:new_role]

# Enumerate matching jobs (prints a summary when requested).
mc_enum = MediaConvertEnumerator.new(args)
mc_enum.run
exit if args[:preview_only]

puts "\n\n"

# Flatten the error-code grouping into a single list of jobs to resubmit.
jobs_to_retry = mc_enum.jobs_by_error_code.values.flatten

verbose = args[:verbose]

# Only these attributes may be sent back to create_job.
job_keys_to_keep = [:queue, :role, :settings, :user_metadata]
job_counter = 0
total_jobs_to_retry = jobs_to_retry.length

jobs_to_retry.each do |job|
  job_counter += 1
  # Best-effort: fall back to '' when the job has no inputs.
  input_file = job.settings.inputs.first.file_input rescue ''
  puts "Processing #{job_counter} of #{total_jobs_to_retry} #{job.arn} '#{input_file}'"
  # Rebuild the job payload from the failed job, overriding queue/role when asked.
  new_job = job.to_h
  new_job.keep_if { |k, _v| job_keys_to_keep.include?(k) }
  new_job[:queue] = new_queue if new_queue
  new_job[:role] = new_role if new_role

  resp = mc_enum.media_convert.create_job(new_job)
  puts "Response: #{resp}" if verbose
end
|
72
|
+
|
@@ -0,0 +1,167 @@
|
|
1
|
+
require 'aws-sdk-mediaconvert'
|
2
|
+
|
3
|
+
# Enumerates AWS Elemental MediaConvert jobs — optionally filtered by status
# and creation-date window — and groups them by status and by error code so
# summary tables can be printed.
class MediaConvertEnumerator

  # Suffix for a humanized number string, indexed by its comma count
  # (e.g. "1,536" has one comma => 'KB').
  # NOTE: this constant was previously missing from this class, which made
  # #human_readable_bytes_short raise NameError when called.
  HUMAN_READABLE_SHORT_SUFFIX = %w(Bytes KB MB GB TB PB EB ZB YB).freeze

  # The endpoint-resolved Aws::MediaConvert::Client.
  attr_reader :media_convert

  # Jobs collected by the most recent #retrieve_jobs call.
  attr_reader :jobs

  attr_reader :preview_only, :should_show_summary

  attr_reader :limit, :max_keys

  attr_accessor :from_date, :to_date, :job_status

  # (jobs_by_status / jobs_by_error_code are exposed through the lazy
  # methods below; the attr_readers they shadowed have been removed.)

  # @param args [Hash] options:
  #   :aws_access_key_id / :aws_secret_access_key — static credentials
  #   :aws_profile (aliases :aws_profile_name, :profile, :profile_name) —
  #     shared-credentials profile name
  #   :aws_region — client region
  #   :job_status — job status to query for (e.g. 'ERROR')
  #   :from_date / :to_date [Time] — creation-date window
  #   :limit — maximum number of jobs to collect
  #   :preview_only / :show_summary — reporting flags
  def initialize(args = {})
    aws_access_key_id = args[:aws_access_key_id]
    aws_secret_access_key = args[:aws_secret_access_key]
    aws_region = args[:aws_region]
    # Accept several aliases for the profile-name option.
    aws_profile = args.fetch(:aws_profile,
                             args.fetch(:aws_profile_name,
                                        args.fetch(:profile,
                                                   args.fetch(:profile_name, nil))))
    aws_config = {}
    # Static credentials win when either key component is supplied;
    # otherwise fall back to the shared-credentials file.
    aws_config[:credentials] = (aws_access_key_id || aws_secret_access_key) ?
                                   Aws::Credentials.new(aws_access_key_id, aws_secret_access_key) :
                                   Aws::SharedCredentials.new(profile_name: aws_profile)
    aws_config[:region] = aws_region if aws_region

    @media_convert = Aws::MediaConvert::Client.new(aws_config)

    # MediaConvert requires an account-specific endpoint; discover it and
    # rebuild the client against that endpoint.
    resp = @media_convert.describe_endpoints
    endpoint = resp.endpoints.first
    @media_convert = Aws::MediaConvert::Client.new(aws_config.merge(endpoint: endpoint.url))

    @preview_only = args[:preview_only]
    @should_show_summary = args.fetch(:show_summary, preview_only)

    @job_status = args[:job_status]

    @limit = args[:limit]
    @limit = @limit.to_i if @limit

    @from_date = args[:from_date]
    @to_date = args[:to_date]

    @jobs = []
    # Block-form default so each key gets its OWN array (Hash.new([]) would
    # share a single array across all keys).
    @jobs_by_status = Hash.new { |h, k| h[k] = [] }
    @jobs_by_error_code = Hash.new { |h, k| h[k] = [] }
  end

  # Appends new_jobs to jobs and returns the combined array.
  def concat_jobs(jobs, new_jobs)
    jobs.concat(new_jobs)
  end

  # Pages through list_jobs, accumulating into @jobs until pagination is
  # exhausted, the limit is reached, or a page ends at/before from_date.
  # Finally trims @jobs to the date window and limit.
  # NOTE(review): `options` and the block are currently unused — kept for
  # interface compatibility.
  def retrieve_jobs(options = {}, &block)
    @jobs = []

    list_jobs_args = {}
    list_jobs_args[:status] = job_status if job_status

    resp = @media_convert.list_jobs(list_jobs_args)
    loop do
      concat_jobs(jobs, resp.jobs)
      job_count = jobs.length
      puts job_count

      break unless resp.next_token && (!limit || job_count < limit)
      # Assumes list_jobs returns jobs newest-first, so once a page ends at
      # or before from_date nothing older is worth fetching — TODO confirm
      # the sort order against the API defaults.
      created_date = resp.jobs.last.created_at
      break if from_date && created_date <= from_date

      resp = @media_convert.list_jobs(list_jobs_args.merge(next_token: resp.next_token))
    end

    if from_date || to_date
      jobs.delete_if { |job| (from_date && job.created_at < from_date) || (to_date && job.created_at > to_date) }
    end
    @jobs = jobs.first(limit) if limit
  end

  # Jobs grouped by status; grouping is built lazily on first access.
  def jobs_by_status
    build_grouped_data unless @group_data_initialized
    @jobs_by_status
  end

  # ERROR jobs grouped by error code; grouping is built lazily on first access.
  def jobs_by_error_code
    build_grouped_data unless @group_data_initialized
    @jobs_by_error_code
  end

  # Populates @jobs_by_status and @jobs_by_error_code from @jobs.
  def build_grouped_data
    jobs.each do |job|
      status = job.status
      @jobs_by_status[status] << job
      # Error codes only exist for jobs in ERROR status.
      @jobs_by_error_code[job.error_code] << job if status == 'ERROR'
    end
    @group_data_initialized = true
  end

  # Prints data as an ASCII table. data is either an array of row arrays
  # (first row = header) or an array of hashes (keys become the header).
  # options[:has_totals] inserts a separator above the final (totals) row.
  def print_table(data, options = {})
    first_row = data.first
    table = first_row.is_a?(Hash) ? [first_row.keys] + data.map(&:values) : data

    # Compute the maximum width of each column.
    widths = []
    table.each do |line|
      line.each_with_index do |col, idx|
        col_len = col.to_s.length
        cur_col_width = widths[idx]
        widths[idx] = col_len if cur_col_width.nil? || col_len > cur_col_width
      end
    end

    # header separator
    separator_ary = widths.map { |n| '-' * n }
    table.insert(1, separator_ary)
    table.insert(-2, separator_ary) if options[:has_totals]

    format = widths.collect { |n| "%-#{n}s" }.join(' | ')
    table.each { |line| printf "| #{format} |\n", *line }
  end

  # Retrieves jobs and prints the summary when enabled.
  def run(&block)
    retrieve_jobs(&block)
    show_summary if @should_show_summary
  end

  # Converts a comma-grouped number string (from #humanize_number) into a
  # rough short form, e.g. "1,536" => "1 KB". Note this truncates rather
  # than rounds, and treats each comma group as a factor of 1000.
  def human_readable_bytes_short(human_readable_number)
    "#{human_readable_number.split(',').first} #{HUMAN_READABLE_SHORT_SUFFIX[human_readable_number.count(',')]}"
  end

  # Inserts thousands separators: 1234567 => "1,234,567".
  def humanize_number(number)
    number.to_s.chars.to_a.reverse.each_slice(3).map(&:join).join(',').reverse
  end

  # Builds [[key_name, 'Count'], [key, count], ...] table rows from a
  # { key => [objects] } collection.
  def grouped_data_to_table(collection, key_name)
    table_data = [[key_name, 'Count']]
    collection.each do |key, objects|
      table_data << [key, humanize_number(objects.length)]
    end
    table_data
  end

  # Prints job counts grouped by status and by error code.
  def show_summary
    jobs_by_status_table_data = grouped_data_to_table(jobs_by_status, 'Status')
    jobs_by_error_code_table_data = grouped_data_to_table(jobs_by_error_code.sort, 'Error Code')

    jobs_by_status_table_data << ['TOTAL', humanize_number(jobs.length)]

    puts "\n\n--- Jobs Count by Status ---"
    print_table(jobs_by_status_table_data, { has_totals: true })

    puts "\n\n--- Jobs Count by Error Code ---\n| https://docs.aws.amazon.com/mediaconvert/latest/ug/mediaconvert_error_codes.html\n"

    print_table(jobs_by_error_code_table_data, { has_totals: false })
  end

end
|
@@ -0,0 +1,204 @@
|
|
1
|
+
require 'aws-sdk-s3'
|
2
|
+
|
3
|
+
# Enumerates objects in an S3 bucket under an optional key prefix, bucketing
# them by file extension and storage class and accumulating size totals for
# summary reporting.
class S3Enumerator

  attr_reader :s3, :s3_bucket_name, :s3_object_key_prefix, :s3_bucket_region

  # Raw object listing from the most recent #retrieve_objects call.
  attr_reader :objects

  attr_reader :preview_only, :should_show_summary

  attr_reader :limit, :max_keys

  attr_reader :objects_by_ext, :objects_by_storage_class, :total_bytes, :directories, :files, :largest_file

  # Suffix for a humanized number string, indexed by its comma count
  # (e.g. "1,536" has one comma => 'KB').
  HUMAN_READABLE_SHORT_SUFFIX = %w(Bytes KB MB GB TB PB EB ZB YB).freeze

  # @param args [Hash] options:
  #   :aws_access_key_id / :aws_secret_access_key — static credentials
  #   :aws_profile — shared-credentials profile name
  #   :aws_region — client region
  #   :s3_bucket_name (or :bucket_name) — bucket to enumerate
  #   :s3_object_key_prefix (or :object_key_prefix) — key prefix filter
  #   :limit — maximum number of objects to collect
  #   :preview_only / :show_summary — reporting flags
  def initialize(args = {})
    aws_access_key_id = args[:aws_access_key_id]
    aws_secret_access_key = args[:aws_secret_access_key]
    aws_region = args[:aws_region]

    aws_config = {}
    # Static credentials win when either key component is supplied;
    # otherwise fall back to the shared-credentials file.
    aws_config[:credentials] = (aws_access_key_id || aws_secret_access_key) ?
                                   Aws::Credentials.new(aws_access_key_id, aws_secret_access_key) :
                                   Aws::SharedCredentials.new(profile_name: args[:aws_profile])
    aws_config[:region] = aws_region if aws_region
    Aws.config.update(aws_config) unless aws_config.empty?

    @preview_only = args[:preview_only]
    @should_show_summary = args.fetch(:show_summary, preview_only)

    @limit = args[:limit]
    @limit = @limit.to_i if @limit

    # Never request more keys per page than we intend to keep.
    @max_keys = 1000
    @max_keys = @limit if @limit && @limit < @max_keys

    @s3_bucket_name = args[:s3_bucket_name] || args[:bucket_name]
    @s3_object_key_prefix = args[:s3_object_key_prefix] || args[:object_key_prefix] || ''
    # S3 keys carry no leading slash; strip one if supplied.
    @s3_object_key_prefix = @s3_object_key_prefix[1..-1] if @s3_object_key_prefix.start_with?('/')

    @s3 = Aws::S3::Client.new
    # get_bucket_location reports an empty (or, in some SDK versions, nil)
    # location constraint for us-east-1 — normalize both cases.
    @s3_bucket_region = s3.get_bucket_location(bucket: @s3_bucket_name).location_constraint
    @s3_bucket_region = 'us-east-1' if @s3_bucket_region.nil? || @s3_bucket_region.empty?

    @objects = []
    @files = []
    @ignored = []
    @directories = []
    # Block-form default so each key gets its own array.
    @objects_by_ext = Hash.new { |h, k| h[k] = [] }
  end

  # Appends the page's contents to objects and returns the combined array.
  def concat_objects(objects, resp)
    objects.concat(resp.contents)
  end

  # Number of objects collected (directories and ignored files included).
  def total_object_count
    objects.length
  end

  # Pages through list_objects_v2 accumulating into @objects (stopping once
  # limit is reached, when set), then classifies every object into
  # directories, ignored dotfiles, and files, while tracking per-extension
  # and per-storage-class groupings, total size, and the largest file.
  # Yields (object, self) for each object when a block is given.
  def retrieve_objects(&block)
    @objects = []

    resp = s3.list_objects_v2(bucket: s3_bucket_name, prefix: s3_object_key_prefix, max_keys: max_keys)
    loop do
      concat_objects(objects, resp)
      puts objects.length
      break if !resp.next_page? || (limit && total_object_count >= limit)
      resp = resp.next_page
    end
    @objects = objects.first(limit) if limit

    @files = []
    @directories = []
    @objects_by_ext = Hash.new { |h, k| h[k] = [] }
    @objects_by_storage_class = Hash.new { |h, k| h[k] = [] }

    @total_bytes = 0
    @largest_file = nil
    objects.each do |object|
      if object.key.end_with?('/')
        # Zero-byte "folder" placeholder objects.
        @directories << object
      else
        filename = File.basename(object.key)
        # Dotfiles (e.g. .DS_Store) are tracked but excluded from totals.
        should_ignore = filename.start_with?('.')
        if should_ignore
          @ignored << object
        else
          @files << object
          @total_bytes += object.size
          @largest_file = object unless @largest_file && @largest_file.size > object.size
          filename_ext = File.extname(filename).downcase
          objects_by_ext[filename_ext] << object
          objects_by_storage_class[object.storage_class] << object
        end
      end

      yield object, self if block_given?
    end
  end

  # Prints data as an ASCII table. data is either an array of row arrays
  # (first row = header) or an array of hashes (keys become the header).
  # options[:has_totals] inserts a separator above the final (totals) row.
  def print_table(data, options = {})
    first_row = data.first
    table = first_row.is_a?(Hash) ? [first_row.keys] + data.map(&:values) : data

    # Compute the maximum width of each column.
    widths = []
    table.each do |line|
      line.each_with_index do |col, idx|
        col_len = col.to_s.length
        cur_col_width = widths[idx]
        widths[idx] = col_len if cur_col_width.nil? || col_len > cur_col_width
      end
    end

    # header separator
    separator_ary = widths.map { |n| '-' * n }
    table.insert(1, separator_ary)
    table.insert(-2, separator_ary) if options[:has_totals]

    format = widths.collect { |n| "%-#{n}s" }.join(' | ')
    table.each { |line| printf "| #{format} |\n", *line }
  end

  # Retrieves objects and prints the summary when enabled.
  def run(&block)
    retrieve_objects(&block)
    show_summary if @should_show_summary
  end

  # Converts a comma-grouped number string (from #humanize_number) into a
  # rough short form, e.g. "1,536" => "1 KB". Note this truncates rather
  # than rounds, and treats each comma group as a factor of 1000.
  def human_readable_bytes_short(human_readable_number)
    "#{human_readable_number.split(',').first} #{HUMAN_READABLE_SHORT_SUFFIX[human_readable_number.count(',')]}"
  end

  # Inserts thousands separators: 1234567 => "1,234,567".
  def humanize_number(number)
    number.to_s.chars.to_a.reverse.each_slice(3).map(&:join).join(',').reverse
  end

  # Builds [[key_name, 'Count', 'Short Bytes', 'Total Bytes'], ...] rows
  # from a { key => [objects] } collection.
  def grouped_data_to_table(collection, key_name)
    table_data = [[key_name, 'Count', 'Short Bytes', 'Total Bytes']]
    collection.each do |key, objects|
      group_total_size = objects.reduce(0) { |s, o| s + o.size }
      human_readable_group_total_size = humanize_number(group_total_size)
      human_readable_group_total_size_short = human_readable_bytes_short(human_readable_group_total_size)
      table_data << [key, humanize_number(objects.length), human_readable_group_total_size_short, human_readable_group_total_size]
    end
    table_data
  end

  # Prints bucket totals plus per-extension and per-storage-class tables.
  def show_summary
    objects_by_ext_table_data = grouped_data_to_table(objects_by_ext, 'File Ext')
    objects_by_storage_class_table_data = grouped_data_to_table(objects_by_storage_class, 'Storage Class')

    human_readable_total_bytes = humanize_number(total_bytes)
    human_readable_total_bytes_short = human_readable_bytes_short(human_readable_total_bytes)
    row_data = ['TOTAL', humanize_number(objects.length), human_readable_total_bytes_short, human_readable_total_bytes]
    objects_by_ext_table_data << row_data
    objects_by_storage_class_table_data << row_data

    puts "\n\n--- Summary ---"
    puts "Bucket Name: #{s3_bucket_name}"
    puts "Bucket Region: #{s3_bucket_region}"
    puts "Object Key Prefix: #{s3_object_key_prefix}"
    puts "Total Objects: #{humanize_number(total_object_count)}"
    puts "Total Directories: #{humanize_number(directories.length)}"
    puts "Total Files: #{humanize_number(files.length)}"
    puts "Total Size (in bytes): #{human_readable_total_bytes} (#{human_readable_total_bytes_short})"
    puts "Largest File: #{largest_file}"
    puts "\n"
    print_table(objects_by_ext_table_data, { has_totals: true })
    puts "\n"
    print_table(objects_by_storage_class_table_data, { has_totals: true })
  end

  # Processes the classified files, forwarding the caller's block.
  # FIX: previously the block was silently dropped because it was not
  # forwarded to #process_objects (block_given? was false there).
  def process_files(args = {}, &block)
    process_objects(files, { total_bytes: total_bytes }, &block)
  end

  # Iterates objects, yielding (object, self) for each. Without a block it
  # returns the objects unchanged; with args[:quiet] it returns the mapped
  # block results; otherwise it prints per-object progress with running
  # byte counts while yielding.
  def process_objects(objects, args = {})
    return objects unless block_given?
    return objects.map { |object| yield object, self } if args[:quiet]

    # Prefer a caller-supplied total so the sizes aren't re-summed.
    bytes_remaining = args[:total_bytes] || objects.reduce(0) { |s, o| s + o.size }
    _total_object_count = objects.length
    counter = 0
    objects.each do |object|
      counter += 1
      human_readable_bytes_remaining = humanize_number(bytes_remaining)
      human_readable_bytes_remaining_short = human_readable_bytes_short(human_readable_bytes_remaining)
      human_readable_object_bytes = humanize_number(object.size)
      human_readable_object_bytes_short = human_readable_bytes_short(human_readable_object_bytes)
      puts "Processing #{humanize_number(counter)} of #{humanize_number(_total_object_count)} #{human_readable_object_bytes} (#{human_readable_object_bytes_short}) of #{human_readable_bytes_remaining} (#{human_readable_bytes_remaining_short}) #{object.key}"
      yield object, self
      bytes_remaining -= object.size
    end
  end

end
|
204
|
+
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: envoi-mam-agent
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 1.
|
4
|
+
version: 1.3.1
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- John Whitson
|
8
8
|
autorequire:
|
9
9
|
bindir: exe
|
10
10
|
cert_chain: []
|
11
|
-
date: 2019-02-
|
11
|
+
date: 2019-02-12 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: asperalm
|
@@ -52,6 +52,20 @@ dependencies:
|
|
52
52
|
- - "~>"
|
53
53
|
- !ruby/object:Gem::Version
|
54
54
|
version: '1'
|
55
|
+
- !ruby/object:Gem::Dependency
|
56
|
+
name: aws-sdk-mediaconvert
|
57
|
+
requirement: !ruby/object:Gem::Requirement
|
58
|
+
requirements:
|
59
|
+
- - "~>"
|
60
|
+
- !ruby/object:Gem::Version
|
61
|
+
version: '1'
|
62
|
+
type: :runtime
|
63
|
+
prerelease: false
|
64
|
+
version_requirements: !ruby/object:Gem::Requirement
|
65
|
+
requirements:
|
66
|
+
- - "~>"
|
67
|
+
- !ruby/object:Gem::Version
|
68
|
+
version: '1'
|
55
69
|
- !ruby/object:Gem::Dependency
|
56
70
|
name: daemons
|
57
71
|
requirement: !ruby/object:Gem::Requirement
|
@@ -248,6 +262,7 @@ files:
|
|
248
262
|
- lib/envoi/mam/agent/cli.rb
|
249
263
|
- lib/envoi/mam/agent/cli/commands.rb
|
250
264
|
- lib/envoi/mam/agent/cli/commands/iconik.rb
|
265
|
+
- lib/envoi/mam/agent/cli/commands/mediaconvert-retry.rb
|
251
266
|
- lib/envoi/mam/agent/cli/commands/mediasilo.rb
|
252
267
|
- lib/envoi/mam/agent/cli/commands/restore.rb
|
253
268
|
- lib/envoi/mam/agent/cli/commands/vidispine.rb
|
@@ -265,6 +280,8 @@ files:
|
|
265
280
|
- lib/envoi/restore/agent.rb
|
266
281
|
- lib/envoi/restore/glacier-restore-event-handler.rb
|
267
282
|
- lib/envoi/restore/sqs-queue-worker.rb
|
283
|
+
- lib/envoi/utils/mediaconvert_enumerator.rb
|
284
|
+
- lib/envoi/utils/s3_enumerator.rb
|
268
285
|
homepage: http://www.github.com/XPlatform-Consulting/envoi-mam-agent
|
269
286
|
licenses:
|
270
287
|
- MIT
|