snapsync 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.travis.yml +4 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +21 -0
- data/README.md +44 -0
- data/Rakefile +10 -0
- data/bin/snapsync +3 -0
- data/lib/snapsync.rb +92 -0
- data/lib/snapsync/auto_sync.rb +74 -0
- data/lib/snapsync/cleanup.rb +40 -0
- data/lib/snapsync/cli.rb +125 -0
- data/lib/snapsync/default_sync_policy.rb +42 -0
- data/lib/snapsync/exceptions.rb +8 -0
- data/lib/snapsync/local_sync.rb +223 -0
- data/lib/snapsync/local_target.rb +150 -0
- data/lib/snapsync/partitions_monitor.rb +105 -0
- data/lib/snapsync/snapper_config.rb +125 -0
- data/lib/snapsync/snapshot.rb +164 -0
- data/lib/snapsync/sync_all.rb +67 -0
- data/lib/snapsync/sync_last_policy.rb +24 -0
- data/lib/snapsync/test.rb +62 -0
- data/lib/snapsync/timeline_sync_policy.rb +163 -0
- data/lib/snapsync/version.rb +3 -0
- data/snapsync.gemspec +30 -0
- metadata +216 -0
data/lib/snapsync/default_sync_policy.rb
@@ -0,0 +1,42 @@
module Snapsync
    # Exception thrown when performing sanity checks on the values returned by
    # the policy. Snapsync usually aborts in these cases, given how this is
    # critical
    class InvalidPolicy < RuntimeError; end

    # Default synchronization policy
    #
    # Synchronization policy objects are used by the synchronization passes to
    # decide which snapshots to copy and which to not copy. They have to provide
    # {#filter_snapshots_to_sync}.
    #
    # This default policy is to copy everything but the snapsync-created
    # synchronization points that do not involve the current target
    class DefaultSyncPolicy
        def self.from_config(config)
            new
        end

        def to_config
            Array.new
        end

        # Returns the snapshots that should be synchronized according to this
        # policy
        #
        # @param [#uuid] target the target object
        # @param [Array<Snapshot>] snapshots the snapshot candidates
        # @return [Array<Snapshot>] the snapshots that should be copied
        def filter_snapshots_to_sync(target, snapshots)
            # Filter out any snapsync-generated snapshot
            snapshots.find_all { |s| !s.synchronization_point? }
        end

        # Pretty prints this policy
        #
        # This is used by the CLI to give information about a target to the user
        def pretty_print(pp)
            pp.text "default policy"
        end
    end
end
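A minimal sketch of how a policy object is consumed. Only `DefaultSyncPolicy` and `#filter_snapshots_to_sync` come from the file above; `FakeSnapshot` and `FakeTarget` are made-up stand-ins, since the policy only needs `#synchronization_point?` on the candidates and `#uuid` on the target.

require 'snapsync'

# Hypothetical stand-ins for Snapsync::Snapshot and a sync target
FakeSnapshot = Struct.new(:num, :synchronization_point) do
    def synchronization_point?; synchronization_point end
end
FakeTarget = Struct.new(:uuid)

policy = Snapsync::DefaultSyncPolicy.new
candidates = [
    FakeSnapshot.new(1, false),
    FakeSnapshot.new(2, true),   # snapsync-created sync point, filtered out
    FakeSnapshot.new(3, false)
]
to_copy = policy.filter_snapshots_to_sync(FakeTarget.new("some-uuid"), candidates)
to_copy.map(&:num) # => [1, 3]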
data/lib/snapsync/exceptions.rb
@@ -0,0 +1,8 @@
module Snapsync
    # Exception raised when a snapshot directory is given to {Snapshot} that
    # does not look like a snapshot at all
    class InvalidSnapshot < RuntimeError; end
    # Exception raised when a snapshot directory is given to {Snapshot} but
    # snapshot_dir/info.xml does not look like a valid snapper info file.
    class InvalidInfoFile < InvalidSnapshot; end
end
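Since `InvalidInfoFile` inherits from `InvalidSnapshot`, callers that want to tell the two cases apart have to rescue the more specific class first. A sketch, with an illustrative directory path:

require 'snapsync'
require 'pathname'

dir = Pathname.new("/mnt/backup/42")   # illustrative path
begin
    snapshot = Snapsync::Snapshot.new(dir)
rescue Snapsync::InvalidInfoFile => e
    # info.xml is present but is not a valid snapper info file
    Snapsync.warn "#{dir} has a corrupted info.xml: #{e.message}"
rescue Snapsync::InvalidSnapshot => e
    # the directory does not look like a snapper snapshot at all
    Snapsync.warn "#{dir} is not a snapshot: #{e.message}"
end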
data/lib/snapsync/local_sync.rb
@@ -0,0 +1,223 @@
module Snapsync
    # Synchronization between local file systems
    class LocalSync
        # The snapper configuration we should synchronize
        #
        # @return [SnapperConfig]
        attr_reader :config
        # The target directory into which to synchronize
        #
        # @return [LocalTarget]
        attr_reader :target

        def initialize(config, target)
            @config, @target = config, target
        end

        def create_synchronization_point
            config.create(
                description: "synchronization snapshot for snapsync",
                user_data: Hash['important' => 'yes', 'snapsync' => target.uuid])
        end

        def remove_synchronization_points(except_last: true)
            synchronization_points = config.each_snapshot.find_all do |snapshot|
                snapshot.synchronization_point_for?(target)
            end
            if except_last
                synchronization_points = synchronization_points.sort_by(&:num)
                synchronization_points.pop
            end
            synchronization_points.each do |snapshot|
                config.delete(snapshot)
            end
        end

        def copy_stream(send_io, receive_io, chunk_length: (1 << 20), estimated_size: 0)
            longest_message_length = 0
            counter = 0
            start = Time.now
            while !send_io.eof?
                if buffer = send_io.read(chunk_length) # 1MB buffer
                    receive_io.write(buffer)

                    counter += buffer.size
                    rate = counter / (Time.now - start)
                    remaining =
                        if estimated_size > counter
                            human_readable_time((estimated_size - counter) / rate)
                        elsif counter - estimated_size < 100 * 1024**2
                            human_readable_time(0)
                        else
                            '?'
                        end

                    msg = "#{human_readable_size(counter)} (#{human_readable_size(rate)}/s), #{remaining} remaining"
                    longest_message_length = [longest_message_length, msg.length].max
                    print "\r%-#{longest_message_length}s" % [msg]
                end
            end
            print "\r#{" " * longest_message_length}\r"
            counter
        end

        def synchronize_snapshot(target_snapshot_dir, src, parent: nil)
            partial_marker_path = Snapshot.partial_marker_path(target_snapshot_dir)

            # Verify first if the snapshot is already present and/or partially
            # synchronized
            begin
                snapshot = Snapshot.new(target_snapshot_dir)
                if snapshot.partial?
                    Snapsync.warn "target snapshot directory #{target_snapshot_dir} looks like an aborted snapsync synchronization, I will attempt to refresh it"
                else
                    return true
                end
            rescue InvalidSnapshot
                if target_snapshot_dir.exist?
                    Snapsync.warn "target snapshot directory #{target_snapshot_dir} already exists, but does not seem to be a valid snapper snapshot. I will attempt to refresh it"
                else
                    target_snapshot_dir.mkdir
                end
                FileUtils.touch(partial_marker_path.to_s)
            end

            if copy_snapshot(target_snapshot_dir, src, parent: parent)
                partial_marker_path.unlink
                IO.popen(["sudo", "btrfs", "filesystem", "sync", target_snapshot_dir.to_s, err: '/dev/null']).read
                Snapsync.info "Successfully synchronized #{src.snapshot_dir}"
                true
            end
        end

        def copy_snapshot(target_snapshot_dir, src, parent: nil)
            # This variable is used in the 'ensure' block. Make sure it is
            # initialized properly
            success = false

            File.open(target_snapshot_dir + "info.xml", 'w') do |io|
                io.write((src.snapshot_dir + "info.xml").read)
            end

            if parent
                parent_opt = ['-p', parent.subvolume_dir.to_s]
                estimated_size = src.size_diff_from(parent)
            else
                parent_opt = []
                estimated_size = src.size
            end

            Snapsync.info "Estimating transfer for #{src.snapshot_dir} to be #{human_readable_size(estimated_size)}"

            start = Time.now
            bytes_transferred = nil
            receive_status, send_status = nil
            err_send_pipe_r, err_send_pipe_w = IO.pipe
            err_receive_pipe_r, err_receive_pipe_w = IO.pipe
            IO.popen(['sudo', 'btrfs', 'send', *parent_opt, src.subvolume_dir.to_s, err: err_send_pipe_w]) do |send_io|
                err_send_pipe_w.close
                IO.popen(['sudo', 'btrfs', 'receive', target_snapshot_dir.to_s, err: err_receive_pipe_w, out: '/dev/null'], 'w') do |receive_io|
                    err_receive_pipe_w.close
                    receive_io.sync = true
                    bytes_transferred = copy_stream(send_io, receive_io, estimated_size: estimated_size)
                end
                receive_status = $?
            end
            send_status = $?

            success = (receive_status.success? && send_status.success?)
            if !send_status.success?
                Snapsync.warn "btrfs send reported an error"
                err_send_pipe_r.readlines.each do |line|
                    Snapsync.warn "  #{line.chomp}"
                end
            end

            if !receive_status.success?
                Snapsync.warn "btrfs receive reported an error"
                err_receive_pipe_r.readlines.each do |line|
                    Snapsync.warn "  #{line.chomp}"
                end
            end

            if success
                Snapsync.info "Flushing data to disk"
                IO.popen(["sudo", "btrfs", "filesystem", "sync", target_snapshot_dir.to_s, err: '/dev/null']).read
                duration = Time.now - start
                rate = bytes_transferred / duration
                Snapsync.info "Transferred #{human_readable_size(bytes_transferred)} in #{human_readable_time(duration)} (#{human_readable_size(rate)}/s)"
                Snapsync.info "Successfully synchronized #{src.snapshot_dir}"
                true
            end

        ensure
            if !success
                Snapsync.warn "Failed to synchronize #{src.snapshot_dir}, deleting target directory"
                subvolume_dir = target_snapshot_dir + "snapshot"
                if subvolume_dir.directory?
                    IO.popen(["sudo", "btrfs", "subvolume", "delete", subvolume_dir.to_s, err: '/dev/null']).read
                end
                target_snapshot_dir.rmtree
            end
        end

        def sync
            STDOUT.sync = true

            # First, create a snapshot and protect it against cleanup, to use as
            # synchronization point
            #
            # We remove old synchronization points on successful synchronization
            source_snapshots = config.each_snapshot.sort_by(&:num)
            sync_snapshot = source_snapshots.reverse.find do |snapshot|
                if snapshot.synchronization_point_for?(target)
                    true
                elsif !snapshot.synchronization_point?
                    break
                end
            end
            sync_snapshot ||= create_synchronization_point

            target_snapshots = target.each_snapshot.sort_by(&:num)

            last_common_snapshot = source_snapshots.find do |s|
                target_snapshots.find { |src| src.num == s.num }
            end
            if !last_common_snapshot
                Snapsync.warn "no common snapshot found, will have to synchronize the first snapshot fully"
            end

            snapshots_to_sync = target.sync_policy.filter_snapshots_to_sync(target, source_snapshots)
            snapshots_to_sync.each do |src|
                if synchronize_snapshot(target.dir + src.num.to_s, src, parent: last_common_snapshot)
                    last_common_snapshot = src
                end
            end

            if synchronize_snapshot(target.dir + sync_snapshot.num.to_s, sync_snapshot, parent: last_common_snapshot)
                Snapsync.debug "successfully copied last synchronization point #{sync_snapshot.num}, removing old ones"
                remove_synchronization_points
            end

            last_common_snapshot
        end

        def human_readable_time(time)
            hrs = time / 3600
            min = (time / 60) % 60
            sec = time % 60
            "%02i:%02i:%02i" % [hrs, min, sec]
        end

        def human_readable_size(size, digits: 1)
            order = ['B', 'kB', 'MB', 'GB']
            magnitude =
                if size > 0
                    Integer(Math.log2(size) / 10)
                else
                    0
                end
            "%.#{digits}f#{order[magnitude]}" % [Float(size) / (1024 ** magnitude)]
        end
    end
end
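`copy_stream` is independent of btrfs: it pumps one IO into another in 1 MiB chunks while printing a progress line. A quick way to exercise it on plain files, assuming the file paths are made up and that `config`/`target` are not needed since `copy_stream` never touches them:

require 'snapsync'

sync = Snapsync::LocalSync.new(nil, nil)       # config and target are unused by copy_stream
File.open("/tmp/source.img") do |src|          # made-up input file
    File.open("/tmp/copy.img", "w") do |dst|
        bytes = sync.copy_stream(src, dst, estimated_size: src.size)
        puts "copied #{bytes} bytes"
    end
end

In the real synchronization path, the same method sits between `btrfs send` (optionally incremental via `-p <parent>`) and `btrfs receive`, which is how the progress display and the transfer statistics are obtained.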
data/lib/snapsync/local_target.rb
@@ -0,0 +1,150 @@
module Snapsync
    class LocalTarget
        # The target's UUID
        #
        # @return [String]
        attr_reader :uuid

        # This target's directory
        attr_reader :dir

        # The target sync policy
        attr_reader :sync_policy

        # The cleanup object
        attr_reader :cleanup

        # Whether this target is enabled or not
        def enabled?; @enabled end

        # Enable this target, i.e. add it to the auto synchronization and
        # cleanup commands
        def enable; @enabled = true; self end

        # Disable this target, i.e. remove it from the auto synchronization and
        # cleanup commands
        def disable; @enabled = false; self end

        # Whether the target should be autocleaned on synchronization
        #
        # Defaults to true
        def autoclean?; !!@autoclean end

        class InvalidUUIDError < RuntimeError; end
        class NoUUIDError < InvalidUUIDError; end

        def initialize(dir, create_if_needed: true)
            if !dir.directory?
                raise ArgumentError, "#{dir} does not exist"
            end
            @dir = dir

            begin
                read_config
            rescue NoUUIDError
                if !create_if_needed
                    raise
                end
                @uuid = SecureRandom.uuid
                @sync_policy = DefaultSyncPolicy.new
                @cleanup = nil
                @enabled = true
                @autoclean = true
            end
            write_config
        end

        def each_snapshot(&block)
            Snapshot.each(dir, &block)
        end

        def write_config
            config = Hash['uuid' => uuid, 'policy' => Hash.new]
            config['policy']['type'] =
                case sync_policy
                when TimelineSyncPolicy then 'timeline'
                when SyncLastPolicy then 'last'
                when DefaultSyncPolicy then 'default'
                end
            config['policy']['options'] = sync_policy.to_config
            config['enabled'] = enabled?
            config['autoclean'] = autoclean?

            File.open(config_path, 'w') do |io|
                io.write YAML.dump(config)
            end
        end

        def read_config
            begin
                raw_config = YAML.load(config_path.read)
            rescue Errno::ENOENT => e
                raise NoUUIDError, e.message, e.backtrace
            end
            parse_config(raw_config)
        end

        def parse_config(config)
            uuid = config['uuid']
            if uuid.length != 36
                raise InvalidUUIDError, "uuid in #{config_path} was expected to be 36 characters long, but is #{uuid.length}"
            end
            @uuid = uuid

            @enabled = config.fetch('enabled', true)
            @autoclean = config.fetch('autoclean', true)

            if policy_config = config['policy']
                change_policy(policy_config['type'], policy_config['options'] || Array.new)
            else
                @sync_policy = DefaultSyncPolicy.new
                @cleanup = nil
            end
        end

        # Path to the target's configuration file
        #
        # @return [Pathname]
        def config_path
            dir + "snapsync.config"
        end

        def change_policy(type, options)
            case type
            when 'default'
                sync_policy = DefaultSyncPolicy
                cleanup = nil
            when 'timeline'
                sync_policy = TimelineSyncPolicy
                cleanup = TimelineSyncPolicy
            when 'last'
                sync_policy = SyncLastPolicy
                cleanup = SyncLastPolicy
            else
                raise InvalidConfiguration, "synchronization policy #{type} does not exist"
            end
            @sync_policy = sync_policy.from_config(options)
            @cleanup =
                if cleanup
                    Cleanup.new(cleanup.from_config(options))
                end
        end

        def delete(s, dry_run: false)
            Snapsync.info "Removing snapshot #{s.num} #{s.date.to_time} at #{s.subvolume_dir}"
            return if dry_run

            IO.popen(["sudo", "btrfs", "subvolume", "delete", s.subvolume_dir.to_s, err: '/dev/null']) do |io|
                io.read
            end
            if $?.success?
                s.snapshot_dir.rmtree
                Snapsync.info "Flushing data to disk"
                IO.popen(["sudo", "btrfs", "filesystem", "sync", s.snapshot_dir.to_s, err: '/dev/null']).read
            else
                Snapsync.warn "failed to remove snapshot at #{s.subvolume_dir}, keeping the rest of the snapshot"
            end
        end
    end
end
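A sketch of the round trip that `write_config`/`read_config` go through. The target directory is made up (it must already exist), and the YAML in the comment is what `write_config` emits for a freshly created target: a random `uuid`, the `default` policy with empty options, and the two boolean flags.

require 'snapsync'
require 'pathname'

dir = Pathname.new("/mnt/backup")          # made-up directory, must exist
target = Snapsync::LocalTarget.new(dir)    # no snapsync.config yet: a UUID is generated
puts target.config_path.read
# ---
# uuid: <36-character random UUID>
# policy:
#   type: default
#   options: []
# enabled: true
# autoclean: true

Re-instantiating `LocalTarget.new(dir)` later reads this file back through `read_config` and restores the policy via `change_policy`.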
data/lib/snapsync/partitions_monitor.rb
@@ -0,0 +1,105 @@
module Snapsync
    class PartitionsMonitor
        attr_reader :udisk

        attr_reader :dirty

        attr_reader :monitored_partitions
        attr_reader :known_partitions

        def initialize
            dbus = DBus::SystemBus.instance
            @udisk = dbus.service('org.freedesktop.UDisks2')
            udisk.introspect

            @dirty = Concurrent::AtomicBoolean.new(false)
            # udisk.on_signal('InterfacesAdded') do
            #     dirty!
            # end
            # udisk.on_signal('InterfacesRemoved') do
            #     dirty!
            # end

            @monitored_partitions = Set.new
            @known_partitions = Hash.new
        end

        def monitor_for(partition_uuid)
            monitored_partitions << partition_uuid.to_str
        end

        def dirty!
            dirty.make_true
        end

        def dirty?
            dirty.true?
        end

        def partition_uuid_for_dir(dir)
            dir = dir.expand_path
            # Find the dir's mountpoint
            while !dir.mountpoint?
                dir = dir.parent
            end
            dir = dir.to_s

            each_partition_with_filesystem.find do |name, dev|
                fs = dev['org.freedesktop.UDisks2.Filesystem']
                mp = fs['MountPoints']
                # .map { |str| Pathname.new(str) }
                mp.include?(dir)
            end
        end

        def poll
            udisk.introspect
            dirty.make_false

            all = Hash.new
            each_partition_with_filesystem do |name, dev|
                partition = dev['org.freedesktop.UDisks2.Block']
                uuid = partition['IdUUID']

                if monitored_partitions.include?(uuid)
                    all[uuid] = dev['org.freedesktop.UDisks2.Filesystem']
                end
            end

            added = Hash.new
            (all.keys - known_partitions.keys).each do |uuid|
                fs = added[uuid] = all[uuid]
                emit_added(uuid, fs)
            end
            removed = (known_partitions.keys - all.keys)
            removed.each { |uuid| emit_removed(uuid) }

            @known_partitions = all
            return added, removed
        end

        def emit_added(uuid, fs)
        end

        def emit_removed(uuid)
        end

        # Yields the udev objects representing block devices that support an
        # underlying filesystem
        #
        # @yieldparam [String] the block device name (e.g. sda3)
        # @yieldparam the block device's udev object
        def each_partition_with_filesystem
            return enum_for(__method__) if !block_given?
            udisk.root['org']['freedesktop']['UDisks2']['block_devices'].each do |device_name, _|
                dev = udisk.object("/org/freedesktop/UDisks2/block_devices/#{device_name}")
                if dev['org.freedesktop.UDisks2.Partition'] && dev['org.freedesktop.UDisks2.Filesystem']
                    yield(device_name, dev)
                end
            end
        end
    end
end
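A sketch of the intended polling loop, assuming a system bus with UDisks2 is available. The filesystem UUID and the 60-second interval are made up; `poll` returns what appeared and what disappeared among the monitored partitions since the previous call.

require 'snapsync'

monitor = Snapsync::PartitionsMonitor.new
monitor.monitor_for("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")   # made-up filesystem UUID

loop do
    added, removed = monitor.poll
    added.each_key { |uuid| Snapsync.info "partition #{uuid} is now available" }
    removed.each   { |uuid| Snapsync.info "partition #{uuid} was removed" }
    sleep 60
end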