nixadm 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/README.md +26 -0
- data/src/lib/nixadm/backup.rb +267 -0
- data/src/lib/nixadm/db/postgres.rb +9 -0
- data/src/lib/nixadm/db/postgresql.rb +217 -0
- data/src/lib/nixadm/pipeline.rb +477 -0
- data/src/lib/nixadm/util.rb +210 -0
- data/src/lib/nixadm/version.rb +11 -0
- data/src/lib/nixadm/zfs.rb +564 -0
- metadata +52 -0
data/src/lib/nixadm/util.rb
@@ -0,0 +1,210 @@
require 'pg'
require 'rbconfig'
require 'securerandom'
require 'shellwords'
require 'yaml'
require 'socket'

require 'nixadm/version'

#-------------------------------------------------------------------------------
# Global parameters
#-------------------------------------------------------------------------------

# Configuration file. Config file is YAML.

$nixadm_config_file = 'nixadm.conf'
#-------------------------------------------------------------------------------

class String

  def sqlEscape()
    return self.gsub("'", "''").gsub("\\", "\\\\")
  end

end

# This extends PG::Result making it more "rubyish"
module PG

  class Result

    # Takes a row and column and returns the associated field. +r+ is the row
    # ordinal. +c+ can be either a String (column name) or an integer
    # (column ordinal).
    def [](r, c)
      idx = c

      if c.class == String
        idx = self.fnumber(c)
      end

      return self.getvalue(r, idx)
    end

    # This is meant to be used with +each()+. It takes a column and
    # returns the associated field value. The current row is the active
    # row in the +each()+ block. +c+ can be a String (column name) or an
    # integer (column ordinal).
    def [](c)
      idx = c

      if c.class == String
        idx = self.fnumber(c)
      end

      return self.getvalue(@row || 0, idx)
    end

  end # class Result
end # module PG

module NixAdm

  # Optional mixin for general utility functions

  module Util

    # Run shell command in bash with the pipefail option for full error
    # detection on pipelines.
    def bash(cmd)
      cmd = "set -o pipefail && #{cmd}"
      system("bash -c #{cmd.shellescape}")
      cmd_status = $?
      if cmd_status != 0
        raise "'#{cmd}' execution failed (exit code: #{cmd_status})"
      end
    end

    # Logging to PostgreSQL
    #
    # Initializes the logging subsystem. Connects to the 'logger' database
    # defined in the configuration file and sets up the default log fields.
    # The config file is YAML and the logger uses the following format for
    # the database connection:
    #
    # ---
    # databases:
    #   logger:
    #     host: db
    #     db: nixadm
    #     user: bob
    #     password: bobspassword
    #
    # @param config_file Optional path to the configuration file. When nil,
    #   the default location is used (see connectDb()).

    def logSystemInit(config_file = nil)
      @logdb = connectDb('logger', config_file)

      @logfields =
        {
          :host     => Socket.gethostname,
          :system   => 'backup',
          :module   => self.class.name,
          :function => 'run'
        }
    end

    # Call when the complete backup is done. Disconnects from the database.

    def logSystemfinalize()
      @logdb.close()
    end

    # Connect to a database. Uses configuration file. Config file is YAML and
    # has the following format for databases:
    #
    # ---
    # databases:
    #   logger:
    #     host: db
    #     db: nixadm
    #     user: bob
    #     password: bobspassword
    #
    # @param db_name The name of the entry in the configuration file for the
    #   database connection parameters.
    #
    # @return Database connection if successful, nil otherwise

    def connectDb(db_name, config_file = nil)

      if config_file.nil?
        config_file = $nixadm_config_file

        # Account for BSD systems that use /usr/local/etc
        prefix = RbConfig::CONFIG['prefix']
        if prefix == '/usr/local'
          config_file = File.join(prefix, 'etc', $nixadm_config_file)
        end
      end

      @config = YAML::load_file(config_file)

      params = @config['databases'][db_name]

      login    = params['user']
      password = params['password']
      db       = params['db']
      host     = params['host']
      port     = params['port'] || '5432'

      return PG::Connection.new(host, port, '', '', db, login, password)
    end

    # Called before starting a backup job. Sets the service function name based
    # on the calling method and logs a start message.

    def jobStart(host=nil)

      if not host.nil?
        @logfields[:function] = "#{caller_locations()[0].label} #{host} "
      else
        @logfields[:function] = "#{caller_locations()[0].label}"
      end

      @job = SecureRandom.uuid()

      log('Started', 0)
    end

    # Call when the job has completed. Logs a completed message and clears the
    # service function name.

    def jobFinish()
      log('Completed', 0)
      @logfields[:function] = ''
    end

    # Logs a message. Uses the service name defined in jobStart().
    #
    # @param message A log message describing a completed operation
    # @param code The status code of the operation in the log message

    def log(message, code = 0)
      fields = {}.merge(@logfields)

      fields['job']  = @job
      fields['code'] = code

      if not message.nil?
        fields['message'] = message.sqlEscape()
      end

      keys   = []
      values = []

      fields.each do |k,v|
        keys << k.to_s
        values << v
      end

      sql = %Q{insert into sys.log (#{keys.join(",")}) values ('#{values.join("','")}')}
      @logdb.exec(sql)

      $stderr.puts message
    end

  end # module Util

end # module NixAdm
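For orientation, here is a minimal sketch of how the Util mixin above might be driven by a backup task. It assumes a nixadm.conf shaped like the YAML shown in the connectDb() comments, a reachable PostgreSQL server with a sys.log table, and the pg gem installed; the BackupTask class, the config path, and the shell command are illustrative only and not part of the gem.

require 'nixadm/util'

# Hypothetical caller -- only the NixAdm::Util methods come from the gem.
class BackupTask
  include NixAdm::Util

  def run(host)
    # The path is an assumption; connectDb() falls back to $nixadm_config_file.
    logSystemInit('/usr/local/etc/nixadm.conf')
    jobStart(host)

    # bash() wraps the command with 'set -o pipefail' and raises on failure.
    bash('zfs list -Hp | wc -l')
    log('Listed datasets', 0)

    jobFinish()
    logSystemfinalize()
  end
end

BackupTask.new.run('fileserver01')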
data/src/lib/nixadm/zfs.rb
@@ -0,0 +1,564 @@
require 'nixadm/pipeline'

module NixAdm
  module ZFS

    class Host < NixAdm::Command

      attr_reader :name

      def initialize(name, port=22)
        super(name, port)

        @name = name
      end

      def exec(command)
        run resolveCommand(command)
      end

      def pools()
        exec 'zpool list -Hp'

        data = @sys.out.split("\n")

        names = []
        data.each do |rec|
          names << rec.split()[0]
        end

        return names
      end

      def pool(name)
        return Pool.new(self, name)
      end

    end

    class Object < Status

      attr_reader :name, :host

      def initialize(host, name)
        super()

        if host.is_a?(NixAdm::ZFS::Host) == false
          raise 'host must be of class Host'
        end

        @host = host
        @name = name
      end

      def to_s()
        return @name
      end

      def <=>(other)
        @name <=> other.name
      end

      def >(other)
        @name > other.name
      end

      def <(other)
        @name < other.name
      end

    end

    class Pool < Object

      def initialize(host, name)
        super
      end

      def filesystems()
        data = filesystemNames()

        filter = []
        data.each do |object|
          filter << Filesystem.new(@host, self, object[:name])
        end

        return filter.sort
      end

      def volumes()
        data = volumeNames()

        filter = []
        data.each do |object|
          filter << Volume.new(@host, self, object[:name])
        end

        return filter.sort
      end

      def objects()
        data = objectNames()

        filter = []
        data.each do |object|
          type = object[:type]

          if type == 'volume'
            filter << Volume.new(@host, self, object[:name])
          elsif type == 'filesystem'
            filter << Filesystem.new(@host, self, object[:name])
          else
            filter << ZfsEntity.new(@host, self, object[:name])
          end
        end

        return filter.sort
      end

      # Given filesystem name, return corresponding Filesystem object.
      #
      # @param name Filesystem name (excluding pool)
      def filesystem(name)
        data = filesystemNames()

        if not data.include?(name)
          return nil
        end

        return Filesystem.new(@host, self, name)
      end

      # Given volume name, return corresponding Volume object.
      #
      # @param name Volume name (excluding pool)
      def volume(name)
        data = volumeNames()

        if not data.include?(name)
          return nil
        end

        return Volume.new(@host, self, name)
      end

      # Given object name, return corresponding ZFS object.
      #
      # @param name Object name (excluding pool)
      def object(name)
        data = objectNames()

        if not data.include?(name)
          return nil
        end

        type = data[name]

        if type == 'volume'
          return Volume.new(@host, self, name)
        elsif type == 'filesystem'
          return Filesystem.new(@host, self, name)
        end

        return ZfsEntity.new(@host, self, name)
      end

      def objects(type='all')
        @host.exec("zfs list -H -o name,type -t #{type} -r #{@name}")
        data = @host.sys.out.split("\n")

        names = []
        data.each do |e|
          n, t = e.split(' ')
          names << { name: n, type: t }
        end

        return names
      end

      def createFilesystem(name)
        return @host.exec("zfs create #{@name}/#{name}")
      end

      def createVolume(name)
        return @host.exec("zfs create -V 32K #{@name}/#{name}")
      end

      private

      def zfsEntities(type)
        data = objects(type)

        filter = {}
        data.each do |entry|
          e = entry[:name].split('/')[1..-1].join('/')
          if e.size > 0
            filter[e] = entry[:type]
          end
        end

        return filter
      end

      def objectNames()
        return zfsEntities('all')
      end

      def filesystemNames()
        return zfsEntities('filesystem')
      end

      def volumeNames()
        return zfsEntities('volume')
      end

    end

    class ZfsEntity < Object

      attr_reader :pool, :host

      def initialize(host, pool, name)
        super host, name

        if pool.is_a?(Pool) == false
          raise 'Second argument must be pool'
        end

        @pool = pool
      end

      def snapshot(id)
        snap_names = snapshotNames()

        snap_names.each do |name|
          if name.split('@')[1].to_i == id
            return snapshotInstance(name)
          end
        end

        return nil
      end

      def snapshots()
        snap_names = snapshotNames()

        objects = []
        snap_names.each do |name|
          snapshot = snapshotInstance(name)

          if block_given?
            yield snapshot
          else
            objects << snapshot
          end
        end

        return objects
      end

      # Convert snapshot integers to full names
      def snapshotNames()
        return filterSnapshots(fetchSnapshots())
      end

      def snapshotIds()
        snaps = snapshotNames()

        # We assume that relevant snapshot names are numerical values (epoch
        # times). Any name containing alpha characters will be reduced to 0 by
        # to_i().
        ids = snaps.collect { |s| s.split('@')[1].to_i }

        # Filter out the zero integer value entries, if any
        ids.select! { |s| s > 0 }

        return ids.sort
      end

      def lastSnapshotId()
        return snapshotIds()[-1]
      end

      def lastSnapshot()
        id = lastSnapshotId()

        return nil if id.nil?

        return snapshot(id)
      end

      def snapshotName(ss)
        return "#{@pool.name}/#{@name}@#{ss}"
      end

      # Makes testing easier
      def snapshotInstance(name)
        return Snapshot.new(self, name)
      end

      def previousSnapshotId(id)
        ids = snapshotIds()

        prev = nil
        ids.each do |x|
          if x == id
            return prev
          end
          prev = x
        end
      end

      # Creates a snapshot using epoch time as name
      def createSnapshot(id=nil)
        name = newSnapshotName(id)

        command = @host.resolveCommand("zfs snapshot #{name}")
        if @host.run(command) == false
          raise "Failed to create snapshot #{name}"
        end

        return name.split('@')[1].to_i
      end

      def deleteSnapshot(id)
        name = snapshotName(id)
        command = @host.resolveCommand("zfs destroy #{name}")

        if @host.run(command) == false
          raise "Failed to destroy snapshot #{name}"
        end
      end

      def deleteAllSnapshots()
        snapshotIds().each do |id|
          if deleteSnapshot(id) == false
            return status()
          end
        end

        return success()
      end

      # Delete all except the most recent snapshot
      def trimSnapshots()
        snapshotIds()[0..-2].each do |id|
          deleteSnapshot(id)
        end

        return success()
      end

      # Given a set of snapshots in order, returns the most recent match if one
      # exists.
      def latestMatchingSnapshotId(ids)
        my_ids = snapshotIds()
        ids.reverse_each do |id|
          if my_ids.include?(id)
            return id
          end
        end

        return nil
      end

      private

      def fetchSnapshots()
        return @pool.objects('snapshot')
      end

      def newSnapshotName(id=nil)
        if not id.nil?
          return "#{@pool.name}/#{@name}@#{id}"
        end

        return "#{@pool.name}/#{@name}@#{Time.now.to_i}"
      end

      def filterSnapshots(data)
        filter = []
        data.each do |entry|
          fs = entry[:name].split('/')[1..-1].join('/')
          if fs.match(/^#{@name}@/) != nil
            filter << fs
          end
        end

        return filter
      end

    end

    class Filesystem < ZfsEntity

      def initialize(host, pool, name)
        super
      end

    end

    class Volume < ZfsEntity

      def initialize(host, pool, name)
        super
      end

    end

    class Snapshot < Object

      attr_reader :object, :host

      def initialize(object, name)

        if object.is_a?(ZfsEntity) == false
          raise 'First argument must be filesystem or zvol'
        end

        super object.host, name

        @object = object
      end

      def id()
        return @name.split('@')[1].to_i
      end

      def previous()
        return @object.previousSnapshotId(id())
      end

      # Push this snapshot to other object
      def send(object)

        if object.is_a?(ZfsEntity) == false
          raise 'First argument must be filesystem or zvol'
        end

        # Used for incremental if applicable
        zfs_options = ''

        # See if the remote object has a matching previous snapshot to use as
        # a reference for an incremental send
        set = @object.snapshotIds()[0..-1]
        previous_snap_id = object.latestMatchingSnapshotId(set)

        if previous_snap_id == set[-1]
          return true
        end

        if not previous_snap_id.nil?
          # We can send an incremental snapshot
          zfs_options = "-i #{previous_snap_id}"
        else
          # There are no previous reference snapshots we can use, so we have to
          # send a full snapshot. We may as well clear out all remote snapshots
          # (if any) before doing so.
          #
          # Note: if this pukes for any reason, it should throw an exception
          # because there's no way we can handle a failure here.
          object.deleteAllSnapshots()
        end

        # This sends either full or incremental depending on the value of the
        # zfs_options variable.

        return push(object, zfs_options)
      end

      private

      def push(object, zfs_options='')
        snapshot = "#{@object.pool.name}/#{@name}"
        remote_port = object.pool.host.port
        remote_host = object.pool.host.name

        command_1 = "zfs send #{zfs_options} #{snapshot}"

        command_2 = "ssh -p #{remote_port} #{remote_host} " +
                    "zfs receive -F #{object.pool.name}/#{object.name}"

        command = @host.resolveCommand [ command_1, command_2 ]

        return @host.run(command)
      end

    end

    class Admin < Status

      attr_reader :host

      def initialize(host=nil, port=22)
        super()

        @host = Host.new(host, port)
      end

      # Given the full pathname of a ZFS filesystem, return the corresponding
      # Filesystem object. Returns nil upon no match.
      def filesystem(name)
        pool_name = name.split('/')[0]
        pool = @host.pool(pool_name)

        return nil if pool.nil?

        fs_name = name.split('/')[1..-1].join('/')

        return pool.filesystem(fs_name)
      end

      # Given the full pathname of a ZFS volume, return the corresponding
      # Volume object. Returns nil upon no match.
      def volume(name)
        pool_name = name.split('/')[0]
        pool = @host.pool(pool_name)

        return nil if pool.nil?

        vol_name = name.split('/')[1..-1].join('/')

        return pool.volume(vol_name)
      end

      # Given the full pathname of a ZFS object, return the corresponding ZFS
      # object. Returns nil upon no match.
      def object(name)
        pool_name = name.split('/')[0]
        pool = @host.pool(pool_name)

        return nil if pool.nil?

        object_name = name.split('/')[1..-1].join('/')

        return pool.object(object_name)
      end

      # Replicate changes on a ZFS filesystem on one host (source_fs) to
      # another (dest_fs). The filesystems must be in sync, meaning their most
      # recent snapshots must match. If they do not, the destination's
      # snapshots are cleared and a full send is performed.
      #
      # @param source_fs The source Filesystem object. It lives on the machine
      #   that has the filesystem we want to replicate. The host can be in the
      #   form user@host or just host; whatever is acceptable to ssh is
      #   acceptable here.
      #
      # @param dest_fs The destination Filesystem object. It lives on the
      #   machine that will receive the delta from the source host and whose
      #   filesystem will be updated.

      def replicate(source_fs, dest_fs)
        snapshot = source_fs.lastSnapshot()

        if snapshot.nil?
          return failure(-1, "No snapshots on filesystem #{source_fs.name}")
        end

        return snapshot.send(dest_fs)
      end

    end

  end # module ZFS
end # module NixAdm
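As a usage illustration, here is a rough sketch of replicating one dataset to another host with the Admin class above. The hostnames, port, and dataset names are invented, both datasets are assumed to already exist (so filesystem() can resolve them), and working ssh access plus the command plumbing from nixadm/pipeline.rb are taken for granted.

require 'nixadm/zfs'

# Hypothetical hosts and datasets -- only the classes and methods come from the gem.
source = NixAdm::ZFS::Admin.new('root@nas1.example.net')
target = NixAdm::ZFS::Admin.new('root@nas2.example.net', 2222)

src_fs = source.filesystem('tank/home')
dst_fs = target.filesystem('backup/home')

# Take a fresh snapshot on the source, then push it. Snapshot#send picks an
# incremental or full send depending on whether the destination already has a
# matching snapshot id.
src_fs.createSnapshot()
source.replicate(src_fs, dst_fs)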