zfs_mgmt 0.2.4

data/bin/zfssendman ADDED
@@ -0,0 +1,252 @@
+ #! /bin/bash
+
+ export PATH=$PATH:/sbin
+
+ FILTER='.'
+ USER='root'
+ SEND='send'
+ RECV='recv -e -F'
+ FLOCK='/usr/bin/flock -w 60 -n 9'
+ PORT='1337'
+ MBUFFER='-s 128k -m 1G -4'
+ USE_MBUFFER='no'
+ LOCK_DIR='/var/run/'$(basename $0)
+ test -d $LOCK_DIR || mkdir $LOCK_DIR
+ TEST=0
+ VERB=0
+
+ test -f $HOME/.keychain/$HOSTNAME-sh && . $HOME/.keychain/$HOSTNAME-sh
+
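+ # typical invocation (hypothetical host and pool names):
+ #   zfssendman -r backuphost -u root -d backuppool/backups -f '^tank/(home|vm)'
+ # sends every dataset with zfssendman:send=true matching the filter to
+ # backuppool/backups on backuphost; add -n for a dry run or -m to use mbuffer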
+ function zfssendrecv {
+     local OPTIND OPTARG opt
+     local zfs
+     local snap
+     local dest
+     local inc=''
+     local hold=0
+     while getopts "z:s:d:i:I:h" opt; do
+         case $opt in
+             z)
+                 zfs=$OPTARG
+                 ;;
+             s)
+                 snap=$OPTARG
+                 ;;
+             d)
+                 dest=$OPTARG
+                 ;;
+             i)
+                 inc="-i ${OPTARG}"
+                 ;;
+             I)
+                 inc="-I ${OPTARG}"
+                 ;;
+             h)
+                 hold=1
+                 ;;
+         esac
+     done
+     shift $((OPTIND-1))
+     local zfs_normal=$( echo $zfs|sed 's/[\:\|\/\\ ]/_/g' )
+     local lock="${LOCK_DIR}/${zfs_normal}.lock"
+     local zfs_recv_status
+     local zfs_send_status
+     local pipe_status
+     (
+     if ! $FLOCK; then
+         $ulog "unable to lock ${lock}"
+         return 2 # bash return values must be 0-255, so no negative codes
+     fi
+     if [[ $TEST == 0 && $hold == 1 ]]; then
+         zfs hold -r zfsrecvman $snap 2>&1 | $ulog
+         local hold_status="${PIPESTATUS[0]}"
+         if [[ $hold_status != 0 ]]; then
+             $ulog "unable to place a hold on our snapshots: ${snap}"
+             return 3
+         fi
+     fi
+     $ulog "estimating size of sending ${snap}"
+     local size=$( zfs $SEND -v -n $inc $snap 2>&1 | tail -1 | cut -d" " -f 5 )
+     # could be 0 or 400 or 4K or 9.3g, etc.
+     local suf=$( echo $size | sed -E 's/[0-9]+\.?[0-9]*//' | tr '[:lower:]' '[:upper:]' )
+     size=$( echo $size | sed -E 's/[pPtTgGmMkKB]$//' ) # remove known suffixes
+     if [[ $suf != 'B' ]]; then
+         size=$( echo "${size} * 1024" | bc | sed -E 's/\.[0-9]+//' ) # use bc to multiply decimals, sed to truncate the fraction
+     fi
+     # demote the suffix one step, since the value was just multiplied by 1024
+     case $suf in
+         B)
+             suf=''
+             ;;
+         K)
+             suf=''
+             ;;
+         M)
+             suf='K'
+             ;;
+         G)
+             suf='M'
+             ;;
+         T)
+             suf='G'
+             ;;
+         P)
+             suf='T'
+             ;;
+     esac
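+     # worked example: a reported size of "9.3G" becomes 9.3 * 1024 = 9523.2,
+     # truncated to 9523 with the suffix demoted G->M, so pv is told -s 9523M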
+     $ulog "estimated size of sending ${snap} is ${size}${suf}"
+     local pv_more="-s ${size}${suf}"
+     if [[ $USE_MBUFFER == 'yes' ]]; then
+         ssh "${USER}@${REMOTE}" "mbuffer ${MBUFFER} -q -I ${PORT} | zfs ${RECV} ${dest}" 2>&1 | $ulog &
+         local recv_pid=$!
+         sleep 5
+         zfs $SEND $inc $snap 2> >($ulog) | mbuffer $MBUFFER $MBUFFER_SEND_OPTS -O ${REMOTE}:${PORT}
+         zfs_send_status="${PIPESTATUS[0]}"
+         $ulog "zfs send exited with status: ${zfs_send_status}"
+         $ulog "about to wait on the remote zfs recv (this may take a while and appear to have hung)"
+         wait $recv_pid
+         zfs_recv_status=$? # PIPESTATUS after wait only reflects wait itself; $? carries the background pipeline's status
+         $ulog "zfs recv exited with status: ${zfs_recv_status}"
+     else
+         zfs $SEND $inc $snap 2> >($ulog) | pv $PV_OPTS $pv_more | ssh "${USER}@${REMOTE}" "zfs ${RECV} ${dest}" 2>&1 | $ulog
+         pipe_status=("${PIPESTATUS[@]}")
+         zfs_send_status="${pipe_status[0]}"
+         zfs_recv_status="${pipe_status[2]}"
+         $ulog "zfs send exited with status: ${zfs_send_status}"
+         $ulog "zfs recv exited with status: ${zfs_recv_status}"
+     fi
+     if [[ $zfs_send_status != 0 ]]; then
+         return $zfs_send_status
+     elif [[ $zfs_recv_status != 0 ]]; then
+         return $zfs_recv_status
+     else
+         # both must be zero
+         return 0
+     fi
+     ) 9>$lock
+ }
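+ # e.g. zfssendrecv -z tank/home -i tank/home@snap1 -s tank/home@snap2 -d backuppool/backups/tank
+ # (hypothetical names) sends the incremental stream snap1..snap2 under a per-dataset lock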
+
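+ # choose logging/progress options based on whether stdout is a terminal:
+ # interactive runs get pv progress meters and logger copies to stderr,
+ # cron runs stay quiet and log only to syslog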
128
+ function terminal_options {
129
+ if [ -t 1 ]; then
130
+ ISTERM=1
131
+ LOGGER_EXTRA='-s' # enable logger output to stderr
132
+ PV_OPTS='-perb' # enable all the magic output from pv
133
+ MBUFFER_SEND_OPTS='' # don't do quiet mode, we have a term
134
+ ulog="logger ${LOGGER_EXTRA} -p user.notice -t "$(basename $0 2>/dev/null)"[${$}]"
135
+ else
136
+ ISTERM=0
137
+ LOGGER_EXTRA='' # don't enable stderr output
138
+ PV_OPTS='-q' # make pv quiet
139
+ MBUFFER_SEND_OPTS='-q' # enable send side -q, no terminal
140
+ ulog="logger ${LOGGER_EXTRA} -p user.notice -t "$(basename $0 2>/dev/null)"[${$}]"
141
+ fi
142
+ }
+
+ terminal_options
+
+ while getopts "p:f:L:mnvr:u:d:" opt; do
+     case $opt in
+         p)
+             PORT=$OPTARG
+             ;;
+         f)
+             FILTER=$OPTARG
+             ;;
+         L)
+             PV_OPTS="${PV_OPTS} -L ${OPTARG}"
+             ;;
+         m)
+             USE_MBUFFER='yes'
+             ;;
+         n)
+             RECV="${RECV} -n"
+             TEST=1
+             VERB=1
+             PV_OPTS='-q' # make pv quiet
+             MBUFFER_SEND_OPTS='-q' # enable send side -q, no terminal
+             ;;
+         v)
+             VERB=1
+             ;;
+         r)
+             REMOTE=$OPTARG
+             ;;
+         u)
+             USER=$OPTARG
+             ;;
+         d)
+             DEST=$OPTARG
+             ;;
+     esac
+ done
+ if [[ -z "${REMOTE}" ]]; then
+     echo 'must set remote with -r option'
+     exit 1
+ fi
+ if [[ -z "${DEST}" ]]; then
+     echo 'must set dest with -d option'
+     exit 1
+ fi
+
+ if [[ $VERB == 1 ]]; then
+     echo $RECV | grep -q -- -v || RECV="${RECV} -v"
+ fi
+
+ function set_options {
+     local zfs=$1
+     local target=$2
+     local compress=$( zfs get -H -o value compression $zfs )
+     local destroy=$( zfs get -H -o value zfssnapman:destroy $zfs )
+     ssh "${USER}@${REMOTE}" "zfs set readonly=on ${target} ; zfs set compression=${compress} ${target} ; zfs set zfssnapman:destroy=${destroy} ${target}"
+ }
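+ # set_options marks the copy readonly and carries the source's compression and
+ # zfssnapman:destroy settings over to the destination, so snapshot expiry
+ # policy follows the data to the backup host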
+
+ # zfs is the filesystem to be sent, including source pool
+ for zfs in $( zfs list -H -t filesystem,volume -o name,zfssendman:send | grep true | egrep "${FILTER}" | awk -F\\t '{if($2 == "true") {print $1;}}' | sort ); do
+     # target is the destination filesystem: DEST plus the full source path
+     target="${DEST}/${zfs}"
+     target_dir=$( dirname $target )
+     # if the target does not exist yet, send the oldest snapshot first
+     if ! ssh "${USER}@${REMOTE}" zfs get written $target >/dev/null 2>/dev/null; then
+         $ulog sending initial snapshot of $zfs to $target_dir
+         zfssendrecv -z $zfs \
+             -s $( zfs list -t snapshot -o name -s creation -d 1 -H $zfs | head -1 ) \
+             -d $target_dir
+         if [[ $TEST == 0 ]]; then
+             set_options $zfs $target
+         fi
+         sleep 5
+     fi
+
+     # recv_last is the last snapshot on the recv side of this zfs
+     recv_last=$( ssh "${USER}@${REMOTE}" zfs list -t snapshot -o name -s creation -d 1 -H $target | tail -1 ) || break
+     if ! echo $recv_last | grep -q "${zfs}@"; then
+         $ulog "no snapshot on distant end, you must destroy the filesystem: $zfs"
+         break
+     fi
+     # send is the snapshot on the recv side after stripping off the DEST
+     send=$( echo $recv_last|sed "s|$DEST/||" )
+
+     if zfs list -t snapshot -o name -d 1 -H $zfs | grep -q $send; then
+         # most recent snapshot on the send side
+         current=$( zfs list -t snapshot -o name -s creation -d 1 -H $zfs | tail -1 )
+         if [[ $send == $current ]]; then
+             $ulog "${zfs} is in sync on source and destination (${target})"
+         else
+             $ulog sending $send through $current to $target
+             zfssendrecv -z $zfs \
+                 -I $send \
+                 -s $current \
+                 -d $target_dir
+             rc=$? # capture immediately; the [[ ]] test below would overwrite $?
+             if [[ $rc == 0 ]]; then
+                 $ulog "$zfs is in sync on source and destination"
+                 if [[ $TEST == 0 ]]; then
+                     set_options $zfs $target
+                 fi
+             else
+                 $ulog zfs exited with $rc while sending $send through $current to $target
+             fi
+         fi
+     else
+         $ulog "the most recent snapshot ($recv_last) on the recv side does not exist on the send side ($send)"
+     fi
+ done
data/bin/zfssnapman ADDED
@@ -0,0 +1,69 @@
+ #! /usr/bin/perl
+
+ use strict;
+ use warnings;
+
+ use Getopt::Long;
+ use POSIX qw( strftime mktime );
+
+ my %months=(Jan => 0,
+             Feb => 1,
+             Mar => 2,
+             Apr => 3,
+             May => 4,
+             Jun => 5,
+             Jul => 6,
+             Aug => 7,
+             Sep => 8,
+             Oct => 9,
+             Nov => 10,
+             Dec => 11,
+            );
+
+ my $DESTROY=0;
+ my $SNAP=1;
+ my $VERBOSE=0;
+
+ GetOptions('snap!'     => \$SNAP,
+            'destroy!'  => \$DESTROY,
+            'v|verbose' => \$VERBOSE,
+           );
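+ # datasets opt in via user properties, e.g. (hypothetical dataset name):
+ #   zfs set zfssnapman:snap=true tank/home    # snapshot each run
+ #   zfs set zfssnapman:destroy=30 tank/home   # prune snapshots older than 30 days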
+
+ my $time=strftime('%F%T',localtime(time()));
+ $time =~ s/[\:\-]//g;
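+ # e.g. '2020-01-0203:04:05' collapses to '20200102030405', the snapshot name suffix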
+
+ foreach(`/sbin/zfs list -t filesystem,volume -o name,zfssnapman:snap,zfssnapman:destroy -H`) {
+     chomp;
+     my ($zfs,$snap,$days)=split(/\t/);
+     if($SNAP and ($snap eq 'true' or $snap eq 'on')) {
+         my $com="/sbin/zfs snapshot $zfs\@zfssnapman-$time";
+         $VERBOSE and print $com."\n";
+         system($com);
+         if($? != 0) {
+             warn "unable to create zfs snapshot for $zfs at $time";
+             warn "failed command: $com";
+         }
+     }
+     if($DESTROY and $days =~ /\d+/) {
+         foreach(`/sbin/zfs list -t snapshot -o name,creation -s creation -r -H $zfs`) {
+             chomp;
+             my ($snap,$creation)=split(/\t/);
+             next unless $snap =~ /^\Q$zfs\E\@/; # \Q quotes any regex metacharacters in the dataset name
+             unless($creation =~ /\s+(\w\w\w)\s+(\d+)\s+(\d+)\:(\d+)\s+(\d\d\d\d)/) {
+                 die "unable to parse the date: $creation";
+             }
+             my $age=mktime(0,$4,$3,$2,$months{$1},($5-1900));
+             #printf("%s\t%s\t%s\t%d\t%s\n",strftime('%F %T',localtime(time())),$creation,strftime('%F %T',localtime($age)),$days,strftime('%F %T',localtime($age + ($days * 24 * 60 * 60 ))));
+             if(time() > ($age + ($days * 24 * 60 * 60 ))) {
+                 my $comment=sprintf("removing: %s created at %s\n",$snap,strftime('%F %T',localtime($age)));
+                 my $command="/sbin/zfs destroy $snap";
+                 $VERBOSE and print $comment;
+                 system($command);
+                 if($? != 0) {
+                     warn 'failed '.$comment;
+                     warn "failed command: $command";
+                 }
+             }
+         }
+     }
+ }
data/lib/zfs_mgmt/version.rb ADDED
@@ -0,0 +1,3 @@
+ module ZfsMgmt
+   VERSION = "0.2.4"
+ end
data/lib/zfs_mgmt.rb ADDED
@@ -0,0 +1,314 @@
+ # coding: utf-8
+ require "zfs_mgmt/version"
+ require 'pp'
+ require 'date'
+ require 'logger'
+ require 'text-table'
+ require 'open3'
+ require 'filesize'
+
+ $logger = Logger.new(STDERR)
+
+ $date_patterns = {
+   'hourly' => '%F Hour %H',
+   'daily' => '%F',
+   'weekly' => '%Y Week %U', # week, starting on sunday
+   'monthly' => '%Y-%m',
+   'yearly' => '%Y',
+ }
+
+ $time_pattern_map = {}
+ $date_patterns.keys.each do |tf|
+   $time_pattern_map[tf[0]] = tf
+ end
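+ # $time_pattern_map now maps the first letter of each timeframe to its name,
+ # e.g. 'h' => 'hourly'; policy_parser below relies on these single-letter keys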
+
+ $time_specs = {
+   's' => 1,
+   'm' => 60,
+   'h' => 60*60,
+   'd' => 24*60*60,
+   'w' => 7*24*60*60,
+ }
+
+ $properties_xlate = {
+   'userrefs' => ->(x) { x.to_i },
+   'creation' => ->(x) { x.to_i },
+ }
+
+ module ZfsMgmt
+   def self.custom_properties()
+     return [
+       'policy',
+       'manage',
+       'strategy',
+       'minage',
+       'matchsnaps',
+       'ignoresnaps',
+       'snapshot',
+       'snap_prefix',
+       'snap_timestamp',
+     ].map do |p|
+       ['zfsmgmt',p].join(':')
+     end
+   end
+   def self.timespec_to_seconds(spec)
+     md = /^(\d+)([smhdw]?)/i.match(spec)
+     if md.nil? # a failed match returns nil, not a short MatchData
+       raise 'SpecParseError'
+     end
+     if md[2] and md[2].length > 0
+       return md[1].to_i * $time_specs[md[2].downcase]
+     else
+       return md[1].to_i
+     end
+   end
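+   # e.g. timespec_to_seconds('12h') #=> 43200; a bare number such as '300' is taken as seconds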
+
+   def self.zfsget(properties: ['name'],types: ['filesystem','volume'],zfs: '')
+     results={}
+     com = ['zfs', 'get', '-Hp', properties.join(','), '-t', types.join(','), zfs]
+     so,se,status = Open3.capture3(com.join(' '))
+     if status.signaled?
+       $logger.error("process was signalled \"#{com.join(' ')}\", termsig #{status.termsig}")
+       raise 'ZfsGetError'
+     end
+     unless status.success?
+       $logger.error("failed to execute \"#{com.join(' ')}\", exit status #{status.exitstatus}")
+       so.split("\n").each { |l| $logger.debug("stdout: #{l}") }
+       se.split("\n").each { |l| $logger.error("stderr: #{l}") }
+       raise 'ZfsGetError'
+     end
+     so.split("\n").each do |line|
+       params = line.split("\t")
+       unless results.has_key?(params[0])
+         results[params[0]] = {}
+       end
+       if params[2] != '-'
+         if $properties_xlate.has_key?(params[1])
+           results[params[0]][params[1]] = $properties_xlate[params[1]].call(params[2])
+         else
+           results[params[0]][params[1]] = params[2]
+         end
+       end
+       if params[3] != '-'
+         results[params[0]]["#{params[1]}@source"] = params[3]
+       end
+     end
+     return results
+   end
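+   # zfsget returns a hash keyed by dataset name; each value maps property names
+   # to values, plus "<property>@source" entries carrying the zfs source column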
+   def self.local_epoch_to_datetime(e)
+     return Time.at(e).to_datetime
+   end
+   def self.find_saved_reason(saved,snap)
+     results = {}
+     $date_patterns.each do |d,dk|
+       if saved.has_key?(d)
+         saved[d].each do |k,s|
+           if snap == s
+             results[d]=k
+             break
+           end
+         end
+       end
+     end
+     return [results['hourly'],results['daily'],results['weekly'],results['monthly'],results['yearly']]
+   end
+   def self.snapshot_destroy_policy(zfs,props,snaps)
+     minage = 0
+     if props.has_key?('zfsmgmt:minage')
+       minage = timespec_to_seconds(props['zfsmgmt:minage'])
+     end
+     strategy = 'youngest'
+     if props.has_key?('zfsmgmt:strategy') and props['zfsmgmt:strategy'] == 'oldest'
+       strategy = 'oldest'
+     end
+     sorted = snaps.keys.sort { |a,b| snaps[b]['creation'] <=> snaps[a]['creation'] }
+     # never consider the latest snapshot for anything
+     newest_snapshot_name = sorted.shift
+
+     counters = policy_parser(props['zfsmgmt:policy'])
+     $logger.debug(counters)
+     saved = {}
+
+     # set up the saved hash to track which hourly/daily/etc. snapshots are kept
+     $date_patterns.each do |d,p|
+       saved[d] = {}
+     end
+
+     sorted.each do |snap_name|
+       if props.has_key?('zfsmgmt:ignoresnaps') and /#{props['zfsmgmt:ignoresnaps']}/ =~ snap_name.split('@')[1]
+         $logger.debug("skipping #{snap_name} because it matches ignoresnaps pattern: #{props['zfsmgmt:ignoresnaps']}")
+         next
+       end
+       if props.has_key?('zfsmgmt:matchsnaps') and not /#{props['zfsmgmt:matchsnaps']}/ =~ snap_name.split('@')[1]
+         $logger.debug("skipping #{snap_name} because it does not match matchsnaps pattern: #{props['zfsmgmt:matchsnaps']}")
+         next
+       end
+       snaptime = local_epoch_to_datetime(snaps[snap_name]['creation'])
+       $date_patterns.each do |d,p|
+         pat = snaptime.strftime(p)
+         if saved[d].has_key?(pat)
+           if strategy == 'youngest'
+             # update the existing saved snapshot for this timeframe
+             $logger.debug("updating the saved snapshot for \"#{pat}\" to #{snap_name} at #{snaptime}")
+             saved[d][pat] = snap_name
+           else
+             $logger.debug("not updating the saved snapshot for \"#{pat}\" to #{snap_name} at #{snaptime}, we have an older snap")
+           end
+         elsif counters[d] > 0
+           # new pattern, and we want to save more snaps of this type
+           $logger.debug("new pattern \"#{pat}\" n#{counters[d]} #{d} snapshot, saving #{snap_name} at #{snaptime}")
+           counters[d] -= 1
+           saved[d][pat] = snap_name
+         end
+       end
+     end
+
+     # create a list of unique saved snapshots
+     saved_snaps = []
+     saved.each do |d,pats|
+       saved_snaps += pats.values()
+     end
+     saved_snaps = saved_snaps.sort.uniq
+
+     # delete everything not in the list of saved snapshots
+     deleteme = sorted - saved_snaps
+     deleteme = deleteme.select { |snap|
+       if props.has_key?('zfsmgmt:ignoresnaps') and /#{props['zfsmgmt:ignoresnaps']}/ =~ snap
+         $logger.debug("skipping #{snap} because it matches ignoresnaps pattern: #{props['zfsmgmt:ignoresnaps']}")
+         false
+       elsif minage > 0 and Time.at(snaps[snap]['creation'] + minage) > Time.now()
+         $logger.debug("skipping due to minage: #{snap} #{local_epoch_to_datetime(snaps[snap]['creation']).strftime('%F %T')}")
+         false
+       else
+         true
+       end
+     }
+     return saved,saved_snaps,deleteme
+   end
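+   # with a policy of e.g. '7d' (hypothetical), each snapshot is bucketed by the
+   # daily strftime pattern '%F'; the first 7 distinct days seen each keep one
+   # snapshot, and everything older than that lands in deleteme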
+   def self.zfs_managed_list(filter: '.+')
+     zfss = [] # array of arrays
+     zfsget(properties: custom_properties()).each do |zfs,props|
+       unless /#{filter}/ =~ zfs
+         next
+       end
+       unless props.has_key?('zfsmgmt:manage') and props['zfsmgmt:manage'] == 'true'
+         next
+       end
+       snaps = self.zfsget(properties: ['name','creation','userrefs','used','written','referenced'],types: ['snapshot'], zfs: zfs)
+       if snaps.length == 0
+         $logger.warn("unable to process this zfs, no snapshots at all: #{zfs}")
+         next
+       end
+       unless props.has_key?('zfsmgmt:policy') and policy = policy_parser(props['zfsmgmt:policy'])
+         $logger.error("zfs_mgmt is configured to manage #{zfs}, but there is no valid policy configuration, skipping")
+         next # zfs
+       end
+       zfss.push([zfs,props,snaps])
+     end
+     return zfss
+   end
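+   # a dataset is "managed" when zfsmgmt:manage=true and zfsmgmt:policy is set
+   # (note: policy_parser raises on a bad policy string rather than returning nil)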
+   def self.snapshot_policy(verbopt: false, debugopt: false, filter: '.+')
+     if debugopt
+       $logger.level = Logger::DEBUG
+     else
+       $logger.level = Logger::INFO
+     end
+     zfs_managed_list(filter: filter).each do |zdata|
+       (zfs,props,snaps) = zdata
+       # call the function that decides which snapshots to save and which to delete
+       (saved,saved_snaps,deleteme) = snapshot_destroy_policy(zfs,props,snaps)
+
+       if saved_snaps.length == 0
+         $logger.info("no snapshots marked as saved by policy for #{zfs}")
+         next
+       end
+       # print a table of saved snapshots with the reasons each is being saved
+       table = Text::Table.new
+       table.head = ['snap','creation','hourly','daily','weekly','monthly','yearly']
+       table.rows = []
+       saved_snaps.sort { |a,b| snaps[b]['creation'] <=> snaps[a]['creation'] }.each do |snap|
+         table.rows << [snap,local_epoch_to_datetime(snaps[snap]['creation'])] + find_saved_reason(saved,snap)
+       end
+       print table.to_s
+     end
+   end
+   def self.snapshot_destroy(noop: false, verbopt: false, debugopt: false, filter: '.+')
+     if debugopt
+       $logger.level = Logger::DEBUG
+     else
+       $logger.level = Logger::INFO
+     end
+     zfs_managed_list(filter: filter).each do |zdata|
+       (zfs,props,snaps) = zdata
+       # call the function that decides which snapshots to save and which to delete
+       (saved,saved_snaps,deleteme) = snapshot_destroy_policy(zfs,props,snaps)
+
+       $logger.info("deleting #{deleteme.length} snapshots for #{zfs}")
+       com_base = "zfs destroy -p"
+       if noop
+         com_base = "#{com_base}n"
+       end
+       if verbopt
+         com_base = "#{com_base}v"
+       end
+       deleteme.reverse! # oldest first for removal
+       deleteme.each do |snap_name|
+         $logger.debug("delete: #{snap_name} #{local_epoch_to_datetime(snaps[snap_name]['creation']).strftime('%F %T')}")
+       end
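+       # batch the destroys: try the whole remaining list as a single
+       # "pool/fs@snapA,snapB,..." argument, shrinking the batch until it fits
+       # under the argument-length limits checked below, then repeat for the rest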
+       while deleteme.length > 0
+         for i in 0..(deleteme.length - 1) do
+           max = deleteme.length - 1 - i
+           $logger.debug("attempting to remove snaps 0 through #{max} out of #{deleteme.length} snapshots")
+           bigarg = "#{zfs}@#{deleteme[0..max].map { |s| s.split('@')[1] }.join(',')}"
+           com = "#{com_base} #{bigarg}"
+           $logger.debug("size of bigarg: #{bigarg.length} size of com: #{com.length}")
+           if bigarg.length >= 131072 or com.length >= (2097152-10000)
+             next
+           end
+           $logger.info(com)
+           deleteme = deleteme - deleteme[0..max]
+           system(com)
+           break
+         end
+       end
+     end
+   end
+   # parse a policy string into a hash of integers
+   def self.policy_parser(str)
+     res = {}
+     $date_patterns.keys.each do |tf|
+       res[tf]=0
+     end
+     p = str.scan(/\d+[#{$time_pattern_map.keys.join('')}]/i)
+     unless p.length > 0
+       raise "unable to parse the policy configuration #{str}"
+     end
+     p.each do |pi|
+       scn = /(\d+)([#{$time_pattern_map.keys.join('')}])/i.match(pi)
+       res[$time_pattern_map[scn[2].downcase]] = scn[1].to_i
+     end
+     res
+   end
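+   # e.g. policy_parser('24h30d8w') #=> {'hourly'=>24,'daily'=>30,'weekly'=>8,'monthly'=>0,'yearly'=>0}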
+   def self.snapshot_create(noop: false, verbopt: false, debugopt: false, filter: '.+')
+     if debugopt
+       $logger.level = Logger::DEBUG
+     else
+       $logger.level = Logger::INFO
+     end
+     dt = DateTime.now
+     zfsget(properties: custom_properties()).each do |zfs,props|
+       # zfs must have snapshot set to true, or to recursive set locally
+       if props.has_key?('zfsmgmt:snapshot') and props['zfsmgmt:snapshot'] == 'true' or ( props['zfsmgmt:snapshot'] == 'recursive' and props['zfsmgmt:snapshot@source'] == 'local' )
+         prefix = ( props.has_key?('zfsmgmt:snap_prefix') ? props['zfsmgmt:snap_prefix'] : 'zfsmgmt' )
+         ts = ( props.has_key?('zfsmgmt:snap_timestamp') ? props['zfsmgmt:snap_timestamp'] : '%FT%T%z' )
+         com = ['zfs','snapshot']
+         if props['zfsmgmt:snapshot'] == 'recursive' and props['zfsmgmt:snapshot@source'] == 'local'
+           com.push('-r')
+         end
+         com.push("#{zfs}@#{[prefix,dt.strftime(ts)].join('-')}")
+         $logger.info(com)
+         system(com.join(' '))
+       end
+     end
+   end
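+   # with the default prefix and timestamp this produces names like
+   # tank/home@zfsmgmt-2020-01-02T03:04:05-0600 (hypothetical dataset)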
+ end
data/zfs_mgmt.gemspec ADDED
@@ -0,0 +1,46 @@
+
+ lib = File.expand_path("../lib", __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require "zfs_mgmt/version"
+
+ Gem::Specification.new do |spec|
+   spec.name          = "zfs_mgmt"
+   spec.version       = ZfsMgmt::VERSION
+   spec.licenses      = ['GPL-3.0-or-later']
+   spec.authors       = ["Aran Cox"]
+   spec.email         = ["arancox@gmail.com"]
+
+   spec.summary       = %q{Misc. helpers regarding snapshots and send/recv.}
+   #spec.description  = %q{TODO: Write a longer description or delete this line.}
+   spec.homepage      = 'https://github.com/aranc23/zfs_mgmt'
+
+   # Prevent pushing this gem to RubyGems.org. To allow pushes either set the 'allowed_push_host'
+   # to allow pushing to a single host or delete this section to allow pushing to any host.
+   if spec.respond_to?(:metadata)
+     #spec.metadata["allowed_push_host"] = "TODO: Set to 'http://mygemserver.com'"
+
+     spec.metadata["homepage_uri"] = spec.homepage
+     spec.metadata["source_code_uri"] = spec.homepage
+     spec.metadata["changelog_uri"] = spec.homepage + '/commits/' # + rather than <<, which would mutate spec.homepage (and the URIs above) in place
+   else
+     raise "RubyGems 2.0 or newer is required to protect against " \
+       "public gem pushes."
+   end
+
+   # Specify which files should be added to the gem when it is released.
+   # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
+   spec.files = Dir.chdir(File.expand_path('..', __FILE__)) do
+     `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
+   end
+   spec.bindir        = "bin"
+   spec.executables   = ['readsnaps','zfssendman','zfssnapman','zfsrecvman','zfs-list-snapshots','zfsmgr']
+   spec.require_paths = ["lib"]
+
+   spec.add_development_dependency "bundler", "~> 1.16"
+   spec.add_development_dependency "rake", ">= 12.3.3"
+   spec.add_development_dependency "rspec", "~> 3.0"
+   spec.add_development_dependency "thor", "~> 1.0.1"
+   spec.add_development_dependency "text-table", "~> 1.2.4"
+   spec.add_development_dependency "filesize", "~> 0.2.0"
+
+ end