filecluster 0.5.14 → 0.5.15

@@ -0,0 +1,16 @@
+require 'autosync'
+
+class AutosyncThread < BaseThread
+  attr_accessor :start_time, :files_to_delete, :items_to_delete
+  def go(storages)
+    storages.each do |storage|
+      $log.debug("AutosyncThread: Run storage synchronization for #{storage.name}")
+      Autosync.new(storage).run
+      storage.reload
+      storage.autosync_at = Time.now.to_i
+      storage.save
+      $log.debug("AutosyncThread: Finish storage synchronization for #{storage.name}")
+      break if $exit_signal
+    end
+  end
+end
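Note: the thread stamps storage.autosync_at after every pass, but nothing in this diff shows how storages get picked for a pass. Purely as an illustration (the helper below is made up, not the gem's scheduler), the new autosync_at column and the FC::Var.get_autosync intervals added further down could be combined like this:

    # Hypothetical sketch only: pick storages whose autosync interval has elapsed.
    def autosync_due(storages)
      intervals = FC::Var.get_autosync   # per-host seconds, 'all' as fallback
      storages.select do |storage|
        interval = (intervals[storage.host] || intervals['all']).to_i
        interval > 0 && Time.now.to_i - storage.autosync_at.to_i >= interval
      end
    end
    # AutosyncThread#go would then be handed autosync_due(...) instead of all storages.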
@@ -39,8 +39,8 @@ class GlobalDaemonThread < BaseThread
         "ist.item_id = i.id AND i.copies IN (#{copies}) AND i.status = 'ready' AND ist.status <> 'delete' GROUP BY i.id LIMIT #{limit}"
       r = FC::DB.query(sql)
       r.each do |row|
-        $log.info("GlobalDaemonThread: new item_storage for item #{row['item_id']}")
         item_storages = row['storages'].split(',')
+        $log.info("GlobalDaemonThread: new item_storage for item #{row['item_id']}, exclude #{item_storages}")
         if row['item_copies'] != item_storages.count
           $log.warn("GlobalDaemonThread: ItemStorage count <> item.copies for item #{row['item_id']}")
         elsif item_storages.count >= policy.copies.to_i
@@ -9,7 +9,7 @@ class UpdateTasksThread < BaseThread
     count = 0
     limit = FC::Var.get("daemon_tasks_#{type}_group_limit", 1000).to_i
     tasks = (type == :copy ? $tasks_copy : $tasks_delete)
-    $storages.each do |storage|
+    $storages.select { |storage| storage.write_weight.to_i >= 0 }.each do |storage|
       tasks[storage.name] = [] unless tasks[storage.name]
       ids = tasks[storage.name].map(&:id) + $curr_tasks.compact.map(&:id)
       if ids.length > limit*2
@@ -147,7 +147,7 @@ module FC
        id bigint NOT NULL AUTO_INCREMENT,
        name varchar(1024) NOT NULL DEFAULT '',
        tag varchar(255) DEFAULT NULL,
-       outer_id int DEFAULT NULL,
+       outer_id bigint DEFAULT NULL,
        policy_id int NOT NULL,
        dir tinyint(1) NOT NULL DEFAULT 0,
        size bigint NOT NULL DEFAULT 0,
@@ -336,5 +336,9 @@ module FC
       FC::DB.query("ALTER TABLE #{@prefix}storages ADD COLUMN auto_size bigint(20) DEFAULT 0")
     end

+    def self.migrate_5
+      FC::DB.query("ALTER TABLE #{@prefix}storages ADD COLUMN autosync_at bigint(11) DEFAULT 0")
+    end
+
   end
 end
@@ -4,7 +4,7 @@ require 'fileutils'

 module FC
   class Storage < DbBase
-    set_table :storages, 'name, host, dc, path, url, size, size_limit, check_time, copy_storages, url_weight, write_weight, auto_size'
+    set_table :storages, 'name, host, dc, path, url, size, size_limit, check_time, copy_storages, url_weight, write_weight, auto_size, autosync_at'

     class << self
       attr_accessor :check_time_limit, :storages_cache_time, :get_copy_storages_mutex
@@ -124,7 +124,7 @@ module FC
        raise r if $?.exitstatus != 0
      else
        local_path += '/' if File.stat(local_path).directory?
-       cmd = "ionice -c 2 -n 7 rsync -e \"ssh -o StrictHostKeyChecking=no\" -a #{FC::Storage.speed_limit_to_rsync_opt(speed_limit)}--rsync-path=\"#{recreate_dirs_cmd} && ionice -c 2 -n 7 rsync\" #{local_path.shellescape} #{self.host}:\"#{dst_path.shellescape}\""
+       cmd = "ionice -c 2 -n 7 rsync -e \"ssh -o StrictHostKeyChecking=no\" -a --no-t #{FC::Storage.speed_limit_to_rsync_opt(speed_limit)}--rsync-path=\"#{recreate_dirs_cmd} && ionice -c 2 -n 7 rsync\" #{local_path.shellescape} #{self.host}:\"#{dst_path.shellescape}\""
        r = `#{cmd} 2>&1`
        raise r if $?.exitstatus != 0
      end
@@ -142,7 +142,7 @@ module FC
       r = `#{cmd} 2>&1`
       src_path += '/' if $?.exitstatus == 0

-      cmd = "ionice -c 2 -n 7 rsync -e \"ssh -o StrictHostKeyChecking=no\" -a #{FC::Storage.speed_limit_to_rsync_opt(speed_limit)}--rsync-path=\"ionice -c 2 -n 7 rsync\" #{self.host}:\"#{src_path.shellescape}\" #{local_path.shellescape}"
+      cmd = "ionice -c 2 -n 7 rsync -e \"ssh -o StrictHostKeyChecking=no\" -a --no-t #{FC::Storage.speed_limit_to_rsync_opt(speed_limit)}--rsync-path=\"ionice -c 2 -n 7 rsync\" #{self.host}:\"#{src_path.shellescape}\" #{local_path.shellescape}"
       r = `#{cmd} 2>&1`
       raise r if $?.exitstatus != 0
     end
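Note on the two rsync changes above: -a implies --times, so the added --no-t stops rsync from preserving the source mtime and copied files land on the destination with a fresh timestamp. That presumably keeps the new Autosync pass from touching just-replicated files, since (per the tests below) it only removes disk entries whose mtime is more than an hour old.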
@@ -35,7 +35,26 @@ module FC
       end
       @all_vars
     end
-
+
+    def self.get_autosync
+      sync_interval = {
+        'all' => 604_800 # default 7 days in seconds
+      }
+      sync_interval.merge! Hash[get('autosync_intervals').to_s.split(';;').map { |v| v.split('::') }]
+      sync_interval.each { |host, val| sync_interval[host] = val.to_i > 0 ? val.to_i : 0 }
+    end
+
+    def self.set_autosync(host, val)
+      current = get_autosync
+      if val.nil? || val == ''
+        current.delete(host)
+      else
+        current[host] = val
+      end
+      list = current.map { |h, v| "#{h}::#{v}" }.join(';;')
+      set('autosync_intervals', list)
+    end
+
     def self.get_speed_limits
       limits = {
         'all' => nil
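For reference, a small sketch of how the new autosync_intervals var round-trips through these helpers (the host names are made up; FC::Var.get and FC::Var.set are the existing accessors used above):

    FC::Var.set_autosync('all', 86_400)   # every host: once a day
    FC::Var.set_autosync('rec1', 3_600)   # hypothetical host rec1: hourly
    FC::Var.set_autosync('rec2', 0)       # rec2: never
    # stored 'autosync_intervals' value: "all::86400;;rec1::3600;;rec2::0"
    FC::Var.get_autosync                  # => { 'all' => 86400, 'rec1' => 3600, 'rec2' => 0 }

Passing nil or an empty string to set_autosync drops the host-specific entry, falling back to the 'all' default (7 days unless overridden).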
@@ -1,3 +1,3 @@
 module FC
-  VERSION = '0.5.14'.freeze
+  VERSION = '0.5.15'.freeze
 end
@@ -0,0 +1,48 @@
+require 'open3'
+require 'shellwords'
+
+class Iostat
+  attr_reader :w_await, :r_await, :util, :disk_name
+  def initialize(path)
+    @path = path
+    @w_await = 0
+    @r_await = 0
+    @util = 0
+    @disk_name = 'unknown'
+    run
+  end
+
+  def run
+    @run = true
+    Thread.new do
+      disk_await_monitor
+    end
+  end
+
+  def stop
+    @run = false
+  end
+
+  private
+
+  def disk_await_monitor
+    drive = `df #{@path.shellescape}`.split("\n")[1].split(' ')[0].split('/').last
+    Open3.popen3('iostat -x 1 -p') do |_, stderr, _, thread|
+      while line = stderr.gets
+        update_stats(line) if line.split(' ')[0] == drive
+        unless @run
+          Process.kill('KILL', thread.pid) rescue nil
+          break
+        end
+      end
+    end
+  end
+
+  def update_stats(line)
+    parts = line.gsub(/\s+/, ' ').split(' ')
+    @disk_name = parts.first
+    @util = parts.last.to_f
+    @w_await = parts[parts.size - 2].to_f
+    @r_await = parts[parts.size - 3].to_f
+  end
+end
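Two notes on the new Iostat class. Open3.popen3 yields stdin, stdout, stderr and the wait thread in that order, so the block argument named stderr actually receives the child's stdout, which is where iostat prints its report. The column indexing (last field taken as %util, the two before it as w_await and r_await) matches some sysstat output layouts but not all, so the reported values depend on the installed iostat version. A minimal usage sketch, with a made-up mount path:

    io = Iostat.new('/srv/storage01')  # starts sampling in a background thread
    sleep 5                            # let a few one-second iostat reports arrive
    puts "#{io.disk_name}: r_await=#{io.r_await} w_await=#{io.w_await} util=#{io.util}%"
    io.stop                            # the loop kills the iostat child on its next output line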
@@ -5,4 +5,6 @@ require "manage/show"
 require "manage/copy_rules"
 require "manage/var"
 require "manage/item"
-require "manage/copy_speed"
+require "manage/copy_speed"
+require "manage/autosync"
+require 'autosync'
@@ -0,0 +1,53 @@
+# encoding: utf-8
+require 'shellwords'
+
+def autosync_list
+  FC::Var.get_autosync.each do |name, val|
+    puts name.to_s+(val.to_i > 0 ? " - every: #{val} seconds" : " - never")
+  end
+end
+
+def autosync_add
+  hosts = ['all'] + all_hosts
+  puts 'Set autosync interval'
+  begin
+    host = stdin_read_val("Host (default #{FC::Storage.curr_host})", true).strip
+    host = FC::Storage.curr_host if host.empty?
+    puts "Host can be one of: #{hosts.join(', ')}" unless hosts.index(host)
+  end until hosts.index(host)
+  interval = stdin_read_val('Autosync interval, seconds (0 - never, empty = all)', true)
+  confirm_autosync_set(host, interval)
+end
+
+def autosync_change
+  return if (host = find_host).to_s.empty?
+  puts "Change autosync interval for host #{host}"
+  interval = FC::Var.get_autosync[host]
+  txt = interval.to_s.empty? ? 'default (=all)' : nil
+  txt = interval.to_i.zero? ? 'never' : "#{interval}" unless txt
+  interval = stdin_read_val("Autosync interval, seconds (now #{txt}, 0 - never, empty = all)", true)
+  confirm_autosync_set(host, interval)
+end
+
+private
+
+def confirm_autosync_set(host, interval)
+  txt = interval.to_s.empty? ? 'default (=all)' : nil
+  txt = interval.to_i.zero? ? 'never' : interval unless txt
+  puts %(\nAutosync interval
+    Host: #{host}
+    Interval: #{txt})
+  s = Readline.readline('Continue? (y/n) ', false).strip.downcase
+  puts ''
+  if %w[y yes].include?(s)
+    begin
+      FC::Var.set_autosync(host, interval.to_s.empty? ? interval : interval.to_i)
+    rescue Exception => e
+      puts "Error: #{e.message}"
+      exit
+    end
+    puts 'ok'
+  else
+    puts 'Canceled.'
+  end
+end
@@ -69,6 +69,6 @@ end

 def find_host
   host = ARGV[2].to_s.strip
-  puts "Storage with host #{host} not found." unless (['all'] + all_hosts).index(host)
+  puts "Storage with host \"#{host}\" not found." unless (['all'] + all_hosts).index(host)
   host
 end
@@ -203,6 +203,53 @@ def storages_change
   end
 end

 def storages_sync_info
+  if storage = find_storage
+    return puts "Storage #{storage.name} is not local." if storage.host != FC::Storage.curr_host
+    puts "Get synchronization info for (#{storage.name}) storage and file system (#{storage.path}).."
+    init_console_logger
+    manual_sync(storage, true)
+    puts 'Done.'
+  end
+end
+
+def storages_sync
+  if storage = find_storage
+    return puts "Storage #{storage.name} is not local." if storage.host != FC::Storage.curr_host
+    puts "Synchronize (#{storage.name}) storage and file system (#{storage.path}).."
+    s = Readline.readline('Continue? (y/n) ', false).strip.downcase
+    puts ''
+    if s == 'y' || s == 'yes'
+      init_console_logger
+      manual_sync(storage, false)
+      s = Readline.readline('Update storage size? (y/n) ', false).strip.downcase
+      storages_update_size if s == 'y' || s == 'yes'
+    else
+      puts "Canceled."
+    end
+  end
+end
+
+def manual_sync(storage, dry_run)
+  syncer = Autosync.new(storage, dry_run)
+  syncer.run
+  puts "Deleted #{syncer.files_to_delete.size} files"
+  puts "Deleted #{syncer.items_to_delete.size} items_storages"
+  if (ARGV[3])
+    File.open(ARGV[3], 'w') do |file|
+      syncer.files_to_delete.each { |f| file.puts f }
+    end
+    puts "Save deleted files to #{ARGV[3]}"
+  end
+
+  if (ARGV[4])
+    File.open(ARGV[4], 'w') do |file|
+      syncer.items_to_delete.each { |item_storage_id| file.puts item_storage_id }
+    end
+    puts "Save deleted items_storages to #{ARGV[4]}"
+  end
+end
+
+def storages_sync_info_old
   if storage = find_storage
     return puts "Storage #{storage.name} is not local." if storage.host != FC::Storage.curr_host
     puts "Get synchronization info for (#{storage.name}) storage and file system (#{storage.path}).."
@@ -211,7 +258,7 @@ def storages_sync_info
   end
 end

-def storages_sync
+def storages_sync_old
   if storage = find_storage
     return puts "Storage #{storage.name} is not local." if storage.host != FC::Storage.curr_host
     puts "Synchronize (#{storage.name}) storage and file system (#{storage.path}).."
@@ -230,6 +277,15 @@ end

 private

+def init_console_logger
+  require 'logger'
+  $log = Logger.new(STDOUT)
+  $log.level = Logger::DEBUG
+  $log.formatter = proc { |severity, datetime, progname, msg|
+    "[#{severity}]: #{msg}\n"
+  }
+end
+
 def find_storage
   name = ARGV[2]
   storage = FC::Storage.where('name = ?', name).first
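The reworked storages_sync_info and storages_sync both go through manual_sync, which can additionally dump the affected file paths and items_storages ids to report files taken from ARGV[3] and ARGV[4]. Assuming the manage console keeps its usual <object> <command> <storage name> argument layout (the binary name and report paths here are illustrative, not from this diff):

    fc-manage storages sync_info rec1-sda /tmp/deleted_files.txt /tmp/deleted_item_storages.txt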
@@ -0,0 +1,186 @@
+require 'helper'
+require 'autosync'
+
+class AutosyncTest < Test::Unit::TestCase
+  class << self
+    def startup
+      @@storage = FC::Storage.new(
+        name: 'rec1-sda',
+        host: 'rec1',
+        size: 0,
+        copy_storages: '',
+        size_limit: 10,
+        path: '/tmp/rec-1-sda/mediaroot/'
+      )
+      `mkdir -p #{@@storage.path}`
+      `touch #{@@storage.path}healthcheck`
+      `touch -t 9901010000 #{@@storage.path}healthcheck`
+      @@storage.save
+    end
+
+    def shutdown
+      FC::DB.query("DELETE FROM policies")
+      FC::DB.query("DELETE FROM items_storages")
+      FC::DB.query("DELETE FROM items")
+      FC::DB.query("DELETE FROM storages")
+      FC::DB.query("DELETE FROM errors")
+      `rm -rf #{@@storage.path}` if @@storage.path
+    end
+  end
+
+  def setup
+    @item_storages = {}
+    [
+      'live_hls/otr_1/01/a.ts',
+      'live_hls/otr_1/01/b.ts',
+      'live_hls/otr_1/02/c.ts'
+    ].each { |i| @item_storages[i] = create_item_storage(i) }
+  end
+
+  def teardown
+    FC::DB.query("DELETE FROM items_storages")
+    FC::DB.query("DELETE FROM items")
+    FC::DB.query("DELETE FROM errors")
+    `rm -rf #{@@storage.path}` if @@storage.path
+  end
+
+  should "fill_db with items on storage" do
+    as = Autosync.new(@@storage)
+    db_struct = as.fill_db
+    assert db_struct['live_hls']
+    assert db_struct['live_hls']['otr_1']
+    assert db_struct['live_hls']['otr_1']['01']
+    assert db_struct['live_hls']['otr_1']['01']['a.ts']
+    assert db_struct['live_hls']['otr_1']['01']['b.ts']
+    assert db_struct['live_hls']['otr_1']['02']
+    assert db_struct['live_hls']['otr_1']['02']['c.ts']
+  end
+
+  should "content synchronized, nothing to delete from DISK and DB" do
+    as = Autosync.new(@@storage)
+    db_struct = as.fill_db
+    as.scan_disk(db_struct, '')
+    assert as.files_to_delete.empty?
+    as.scan_db(db_struct, '')
+    assert as.items_to_delete.empty?
+  end
+
+  should 'select to remove disk file entry unless found in DB' do
+    as = Autosync.new(@@storage)
+    db_struct = as.fill_db
+    unstored_file = "#{@@storage.path}live_hls/otr_1/01/z.ts"
+    `touch -t 9901010000 #{unstored_file}`
+    as.scan_disk(db_struct, '')
+    assert as.files_to_delete.size == 1
+    assert as.files_to_delete.first == unstored_file
+  end
+
+  should 'physically remove disk entries older than 1 hour unless found in DB' do
+    `mkdir -p #{@@storage.path}live_hls/empty_subfolders/sub1/sub2`
+    `touch -t 9901010000 #{@@storage.path}live_hls/empty_subfolders`
+    `touch -t 9901010000 #{@@storage.path}live_hls/empty_subfolders/sub1`
+    `touch -t 9901010000 #{@@storage.path}live_hls/empty_subfolders/sub1/sub2`
+    assert File.exist?("#{@@storage.path}live_hls/empty_subfolders")
+
+    `mkdir -p #{@@storage.path}live_hls/not_empty_subfolders/sub1/sub2`
+    `touch -t 9901010000 #{@@storage.path}live_hls/not_empty_subfolders`
+    `touch -t 9901010000 #{@@storage.path}live_hls/not_empty_subfolders/sub1`
+    assert File.exist?("#{@@storage.path}live_hls/not_empty_subfolders/sub1/sub2")

+    `touch -t 9901010000 #{@@storage.path}live_hls/not_empty_subfolders/sub1/sub2/some_file`
+    assert File.exist?("#{@@storage.path}live_hls/not_empty_subfolders/sub1/sub2/some_file")
+    `touch -t 9901010000 #{@@storage.path}live_hls/not_empty_subfolders/sub1/sub2`
+
+    `mkdir -p #{@@storage.path}live_hls/new_empty_folders/sub1/sub2`
+
+    as = Autosync.new(@@storage)
+    db_struct = as.fill_db
+    as.scan_disk(db_struct, '')
+    assert as.files_to_delete.size == 2
+    as.delete_diffs
+    assert !File.exist?("#{@@storage.path}live_hls/empty_subfolders")
+    assert !File.exist?("#{@@storage.path}live_hls/not_empty_subfolders")
+    assert File.exist?("#{@@storage.path}live_hls/new_empty_folders/sub1/sub2")
+  end
+
+  should 'not remove folder from disk if some file appeared between disk scan and delete process' do
+    # make old empty folder
+    `mkdir -p #{@@storage.path}live_hls/empty_subfolders/sub1/sub2`
+    `touch -t 9901010000 #{@@storage.path}live_hls/empty_subfolders`
+    `touch -t 9901010000 #{@@storage.path}live_hls/empty_subfolders/sub1`
+    `touch -t 9901010000 #{@@storage.path}live_hls/empty_subfolders/sub1/sub2`
+    assert File.exist?("#{@@storage.path}live_hls/empty_subfolders")
+
+    as = Autosync.new(@@storage)
+    db_struct = as.fill_db
+    # scan disk
+    as.scan_disk(db_struct, '')
+    assert as.files_to_delete.size == 1
+    assert as.files_to_delete[0] == "#{@@storage.path}live_hls/empty_subfolders"
+    # new file
+    `touch #{@@storage.path}live_hls/empty_subfolders/sub1/sub2/new_appeared_file`
+    as.delete_diffs
+    assert File.exist?("#{@@storage.path}live_hls/empty_subfolders/sub1/sub2/new_appeared_file")
+  end
+
+  should 'select for delete file with mtime older than 1 hour' do
+    old_file = "#{@@storage.path}live_hls/otr_1/01/old.ts"
+    new_file = "#{@@storage.path}live_hls/otr_1/01/new.ts"
+    `touch -t #{(Time.now - 3601).strftime('%Y%m%d%H%M.%S')} #{old_file}`
+    `touch -t #{(Time.now - 3509).strftime('%Y%m%d%H%M.%S')} #{new_file}`
+    as = Autosync.new(@@storage)
+    db_struct = as.fill_db
+    as.scan_disk(db_struct, '')
+    assert as.files_to_delete.size == 1
+    assert as.files_to_delete.first == old_file
+  end
+
+  should 'not select for delete disk folder with entries which are in db' do
+    `mkdir -p #{@@storage.path}track/01`
+    create_item_storage('track/01')
+    # make it all old
+    `touch -t 9901010000 #{@@storage.path}track`
+    `touch -t 9901010000 #{@@storage.path}track/01`
+    # make some old files
+    `touch -t 9901010000 #{@@storage.path}track/01/s-0001.ts`
+    `touch -t 9901010000 #{@@storage.path}track/01/s-0002.ts`
+    `touch -t 9901010000 #{@@storage.path}track/01/s-0003.ts`
+    as = Autosync.new(@@storage)
+    db_struct = as.fill_db
+    as.scan_disk(db_struct, '')
+    assert as.files_to_delete.size.zero?
+  end
+
+  should 'select and remove DB item if it does not exist on disk' do
+    item_name = @item_storages.keys.first
+    `rm -f #{@@storage.path}#{item_name}`
+    removed_item_storage_id = @item_storages[item_name].id
+    as = Autosync.new(@@storage)
+    db_struct = as.fill_db
+    as.scan_disk(db_struct, '')
+    assert as.files_to_delete.empty?
+    as.scan_db(db_struct, '')
+    assert as.items_to_delete.size == 1
+    assert as.items_to_delete[0] == removed_item_storage_id
+    as.delete_diffs
+    @item_storages[item_name].reload
+    assert @item_storages[item_name].status == 'error'
+    assert FC::Error.where('1').to_a.size == 1
+  end
+
+  def create_item_storage(item_name)
+    item = FC::Item.new
+    item.name = item_name
+    item.size = 0
+    item.save
+    item_storage = FC::ItemStorage.new
+    item_storage.item_id = item.id
+    item_storage.storage_name = @@storage.name
+    item_storage.status = 'ready'
+    item_storage.save
+    `mkdir -p #{@@storage.path}#{File.dirname(item_name)}`
+    `touch -t 9901010000 #{@@storage.path}#{item_name}`
+    item_storage
+  end
+
+end