zfs_mgmt 0.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/Gemfile.lock ADDED
@@ -0,0 +1,41 @@
+ PATH
+   remote: .
+   specs:
+     zfs_mgmt (0.2.4)
+
+ GEM
+   remote: https://rubygems.org/
+   specs:
+     diff-lcs (1.3)
+     filesize (0.2.0)
+     rake (13.0.1)
+     rspec (3.9.0)
+       rspec-core (~> 3.9.0)
+       rspec-expectations (~> 3.9.0)
+       rspec-mocks (~> 3.9.0)
+     rspec-core (3.9.1)
+       rspec-support (~> 3.9.1)
+     rspec-expectations (3.9.0)
+       diff-lcs (>= 1.2.0, < 2.0)
+       rspec-support (~> 3.9.0)
+     rspec-mocks (3.9.1)
+       diff-lcs (>= 1.2.0, < 2.0)
+       rspec-support (~> 3.9.0)
+     rspec-support (3.9.2)
+     text-table (1.2.4)
+     thor (1.0.1)
+
+ PLATFORMS
+   ruby
+
+ DEPENDENCIES
+   bundler (~> 1.16)
+   filesize
+   rake (>= 12.3.3)
+   rspec (~> 3.0)
+   text-table
+   thor
+   zfs_mgmt!
+
+ BUNDLED WITH
+    1.16.6
data/README.md ADDED
@@ -0,0 +1,191 @@
+ # ZfsMgmt
+
+ zfs_mgmt aims to provide some useful helpers for managing zfs snapshots, and eventually send/recv duties, via the zfsmgr script in bin/.
+
+ Currently only snapshot destruction is implemented, driven by a policy specification stored in zfs properties.
+
+ ## Installation
+
+ Currently zfs_mgmt is only useful for its zfsmgr binary, although
+ eventually the library might be useful for writing other applications
+ around managing zfs.
+
+ Therefore, building and installing the gem, or running ruby inside the src/ directory, is the most useful approach:
+
+     $ ruby -I lib bin/zfsmgr
+
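A minimal sketch of building and installing the gem locally (assuming the conventional gemspec filename in the repository root):

    $ gem build zfs_mgmt.gemspec
    $ gem install ./zfs_mgmt-0.2.4.gem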
+ ## Usage
+
+ The most common usage pattern would be to set zfs properties as explained below, then use **zfsmgr snapshot policy** to print a table of what would be kept and for what reason. Then use **zfsmgr snapshot destroy --noop** to see what would be destroyed, and finally **zfsmgr snapshot destroy** without the --noop option to actually remove snapshots. A hypothetical session is sketched after the command reference below.
+
+     Commands:
+       zfsmgr help [COMMAND]               # Describe available commands or one specific command
+       zfsmgr snapshot SUBCOMMAND ...ARGS  # manage snapshots
+       zfsmgr zfsget [ZFS]                 # execute zfs get for the given properties and types and parse the output into a nested hash
+
+       zfsmgr snapshot create              # execute zfs snapshot based on zfs properties
+       zfsmgr snapshot destroy             # apply the snapshot destroy policy to zfs
+       zfsmgr snapshot help [COMMAND]      # Describe subcommands or one specific subcommand
+       zfsmgr snapshot policy              # print the policy table for zfs
+
+     Options:
+       [--noop], [--no-noop]        # pass -n option to zfs commands
+       [--verbose], [--no-verbose]  # pass -v option to zfs commands
+       [--debug], [--no-debug]      # set logging level to debug
+       [--filter=FILTER]            # only act on zfs matching this regexp
+                                    # Default: .+
+
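Putting the recommended workflow together, a hypothetical session might look like this (the filter pattern is illustrative, and output is elided):

    $ zfsmgr snapshot policy --filter data/home          # review what would be kept and why
    $ zfsmgr snapshot destroy --noop --filter data/home  # dry run: show what would be destroyed
    $ zfsmgr snapshot destroy --filter data/home         # actually remove the unwanted snapshots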
+ ## Example output
+
+     [aranc23@beast:~/src/zfs_mgmt] (master)$ zfs get all | egrep 'zfsmgmt.+local'
+     backup zfsmgmt:manage true local
+     backup zfsmgmt:policy 10y60m104w365d168h local
+     backup zfsmgmt:minage 7D local
+     backup zfsmgmt:ignoresnaps ^syncoid_ local
+     backup/beast/data/archive zfsmgmt:policy 1h local
+     backup/beast/data/archive zfsmgmt:minage 1s local
+     backup/beast/data/archive zfsmgmt:matchsnaps archive local
+
+     [aranc23@beast:~/src/zfs_mgmt] (master)$ ruby -I lib bin/zfsmgr snapshot policy --filter pics
+     +------------------------------------------------------------+---------------------------+--------------------+------------+--------------+---------+--------+
+     | snap | creation | hourly | daily | weekly | monthly | yearly |
+     +------------------------------------------------------------+---------------------------+--------------------+------------+--------------+---------+--------+
+     | backup/beast/data/pics@autosnap-2020-02-27T12:17:01-0600 | 2020-02-27T12:17:01-06:00 | 2020-02-27 Hour 12 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T11:17:01-0600 | 2020-02-27T11:17:01-06:00 | 2020-02-27 Hour 11 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T10:17:01-0600 | 2020-02-27T10:17:01-06:00 | 2020-02-27 Hour 10 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T09:17:01-0600 | 2020-02-27T09:17:02-06:00 | 2020-02-27 Hour 09 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T08:17:01-0600 | 2020-02-27T08:17:01-06:00 | 2020-02-27 Hour 08 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T07:17:01-0600 | 2020-02-27T07:17:01-06:00 | 2020-02-27 Hour 07 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T06:17:01-0600 | 2020-02-27T06:17:01-06:00 | 2020-02-27 Hour 06 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T05:17:01-0600 | 2020-02-27T05:17:01-06:00 | 2020-02-27 Hour 05 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T04:17:01-0600 | 2020-02-27T04:17:02-06:00 | 2020-02-27 Hour 04 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T03:17:01-0600 | 2020-02-27T03:17:01-06:00 | 2020-02-27 Hour 03 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T02:17:01-0600 | 2020-02-27T02:17:01-06:00 | 2020-02-27 Hour 02 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T01:17:01-0600 | 2020-02-27T01:17:02-06:00 | 2020-02-27 Hour 01 | | | | |
+     | backup/beast/data/pics@autosnap-2020-02-27T00:17:01-0600 | 2020-02-27T00:17:01-06:00 | 2020-02-27 Hour 00 | 2020-02-27 | | | |
+     ...
+     | backup/beast/data/pics@zfssendman-20140604092215 | 2014-06-04T09:22:43-05:00 | | 2014-06-04 | 2014 Week 22 | 2014-06 | |
+     | backup/beast/data/pics@migrate3 | 2014-05-26T08:17:31-05:00 | | 2014-05-26 | | | |
+     | backup/beast/data/pics@migrate2 | 2014-05-25T21:57:28-05:00 | | 2014-05-25 | 2014 Week 21 | | |
+     | backup/beast/data/pics@migrate1 | 2014-05-24T10:31:56-05:00 | | 2014-05-24 | 2014 Week 20 | 2014-05 | 2014 |
+     | backup/beast/data/pics@20131108144154 | 2013-11-08T14:41:57-06:00 | | 2013-11-08 | 2013 Week 44 | 2013-11 | 2013 |
+     +------------------------------------------------------------+---------------------------+--------------------+------------+--------------+---------+--------+
+
+     [aranc23@beast:~/src/zfs_mgmt] (master)$ ruby -I lib bin/zfsmgr snapshot destroy --filter pics --noop
+     I, [2020-02-27T16:27:33.381645 #4914] INFO -- : deleting 21 snapshots for backup/beast/data/pics
+     I, [2020-02-27T16:27:33.381731 #4914] INFO -- : zfs destroy -pn backup/beast/data/pics@autosnap_2020-02-19_21:00:05_hourly,autosnap_2020-02-19_22:00:05_hourly,autosnap_2020-02-19_23:00:01_hourly,autosnap_2020-02-20_00:00:05_daily,autosnap_2020-02-20_01:00:04_hourly,autosnap_2020-02-20_02:00:04_hourly,autosnap_2020-02-20_03:00:04_hourly,autosnap_2020-02-20_04:00:05_hourly,autosnap_2020-02-20_05:00:05_hourly,autosnap_2020-02-20_07:00:04_hourly,autosnap_2020-02-20_08:00:01_hourly,autosnap_2020-02-20_09:00:05_hourly,autosnap_2020-02-20_10:00:05_hourly,autosnap_2020-02-20_11:00:05_hourly,autosnap_2020-02-20_12:00:05_hourly,autosnap_2020-02-20_13:00:01_hourly,autosnap_2020-02-20_14:00:05_hourly,autosnap_2020-02-20_15:00:05_hourly,autosnap_2020-02-20_16:00:05_hourly,autosnap_2020-02-20_17:00:05_hourly,autosnap_2020-02-20_18:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-19_21:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-19_22:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-19_23:00:01_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_00:00:05_daily
+     destroy backup/beast/data/pics@autosnap_2020-02-20_01:00:04_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_02:00:04_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_03:00:04_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_04:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_05:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_07:00:04_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_08:00:01_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_09:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_10:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_11:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_12:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_13:00:01_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_14:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_15:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_16:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_17:00:05_hourly
+     destroy backup/beast/data/pics@autosnap_2020-02-20_18:00:05_hourly
+     reclaim 0
+
+ ## Development
+
+ After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+ To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).
+
+ ## Contributing
+
+ Bug reports and pull requests are welcome on GitHub at https://github.com/aranc23/zfs_mgmt.
+
+ ## zfs user properties
+
+ Destruction of zfs snapshots is based on the following zfs user properties:
+
+ ### zfsmgmt:manage
+ Manage snapshots for this filesystem if this property is set to 'true' (the string literal).
+
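A minimal sketch of enabling management for a dataset (the dataset name tank/data is illustrative):

    $ zfs set zfsmgmt:manage=true tank/data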
+ ### zfsmgmt:policy
+
+ A policy specification consisting of the number of snapshots of a
+ certain time frame to keep. A zfs must have a valid policy
+ specification or zfs_mgmt will not destroy any snapshots.
+
+ Examples:
+ - 30d (30 daily snapshots)
+ - 8w15d (8 weekly and 15 daily snapshots)
+ - 1y1m1w1d1h (1 snapshot of each time frame)
+ - 72h (72 hourly snapshots)
+
+ The order in which each time frame is listed does not matter, and the supported time frame specifiers are as follows:
+
+ - h - hourly
+ - d - daily
+ - w - weekly (Sunday)
+ - m - monthly
+ - y - yearly
+
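For example, to keep 8 weekly and 15 daily snapshots (values taken from the examples above; the dataset name is illustrative):

    $ zfs set zfsmgmt:policy=8w15d tank/data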
+ ### zfsmgmt:minage
+ The minimum age of a snapshot before it will be considered for
+ deletion, as specified in seconds, or using a multiplier of:
+
+ - s (seconds, same as not specifying a multiplier)
+ - m (minutes, x60)
+ - h (hours, x60x60)
+ - d (days, x24x60x60)
+ - w (weeks, x7x24x60x60)
+
+ The intended purpose of minage is to keep recent snapshots regardless
+ of policy, possibly to ensure zfs send/recv has recent snapshots to
+ work with, or simply out of paranoia.
+
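For example, to keep any snapshot younger than 7 days regardless of policy (dataset name illustrative):

    $ zfs set zfsmgmt:minage=7d tank/data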
+ ### zfsmgmt:matchsnaps
+ If this property is set, the snapshot portion of a snapshot name
+ (right of the @) must match this property, interpreted as a regular
+ expression, in order to be counted toward the policy specified above.
+ The intended use is to match application-specific snapshots (e.g.
+ ^backup-) in an environment where automatic snapshots are still
+ created but there is no need to keep them. Snapshots matching this
+ pattern can and will still be deleted if they aren't marked to be
+ saved by the policy in place for the zfs.
+
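For example, to count only snapshots whose names start with backup- toward the policy (dataset name illustrative):

    $ zfs set zfsmgmt:matchsnaps=^backup- tank/data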
+ ### zfsmgmt:ignoresnaps
+ Ignore snapshots matching this regexp pattern. They are neither used
+ to match the specified policy for the zfs, nor will they be deleted.
+ The intended use is to match zfs send/recv snapshots or hand-created
+ snapshots, e.g. ^syncoid_.
+
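For example, to leave syncoid's send/recv snapshots untouched (dataset name illustrative):

    $ zfs set zfsmgmt:ignoresnaps=^syncoid_ tank/data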
+ ### zfsmgmt:snapshot
+ If this property is 'true', then create a snapshot named in the
+ format zfsmgmt-%FT%T%z. If this property is 'recursive', then create
+ a recursive snapshot of this zfs.
+
+ ### zfsmgmt:snap_prefix
+ Change the zfsmgmt portion of created snapshot names, e.g. 'autosnap'
+ would create snapshots named autosnap-%FT%T%z.
+
+ ### zfsmgmt:snap_timestamp
+ The strftime format string used when creating snapshot names; the
+ default is %FT%T%z.
+
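A sketch combining the snapshot-creation properties (the dataset name is illustrative, and the resulting snapshot name is what the defaults described above would produce for the prefix 'autosnap'):

    $ zfs set zfsmgmt:snapshot=recursive tank/data
    $ zfs set zfsmgmt:snap_prefix=autosnap tank/data
    $ zfsmgr snapshot create --filter tank/data
    # creates something like tank/data@autosnap-2020-02-27T12:17:01-0600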
+ ## Snapshot Management / zfs destroy
+ When destroying snapshots according to a given policy, all snapshots
+ should be considered for deletion, and all snapshots should be
+ considered as potentially satisfying the retention policy, regardless
+ of the name of the snapshot. Only the creation property really
+ matters unless the user configures zfsmgmt otherwise. If the user
+ wants to preserve a given snapshot, it should be preserved using the
+ zfs hold mechanism or excluded by the ignoresnaps property. This
+ allows zfs_mgmt to manage snapshots independently of the mechanism
+ used to create them.
+
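To pin a specific snapshot outside of the policy, the standard zfs hold mechanism mentioned above can be used (the hold tag and snapshot name are illustrative):

    $ zfs hold keep tank/data@autosnap-2020-02-27T12:17:01-0600      # place a hold
    $ zfs holds tank/data@autosnap-2020-02-27T12:17:01-0600          # list holds
    $ zfs release keep tank/data@autosnap-2020-02-27T12:17:01-0600   # remove the hold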
data/Rakefile ADDED
@@ -0,0 +1,6 @@
+ require "bundler/gem_tasks"
+ require "rspec/core/rake_task"
+
+ RSpec::Core::RakeTask.new(:spec)
+
+ task :default => :spec
data/bin/console ADDED
@@ -0,0 +1,14 @@
+ #!/usr/bin/env ruby
+
+ require "bundler/setup"
+ require "zfs_mgmt"
+
+ # You can add fixtures and/or initialization code here to make experimenting
+ # with your gem easier. You can also use a different console, if you like.
+
+ # (If you use this, don't forget to add pry to your Gemfile!)
+ # require "pry"
+ # Pry.start
+
+ require "irb"
+ IRB.start(__FILE__)
data/bin/readsnaps ADDED
@@ -0,0 +1,4 @@
+ #!/usr/bin/env ruby
+
+ require 'zfs_mgmt'
+ print ZfsMgmt.readsnaps()
data/bin/setup ADDED
@@ -0,0 +1,8 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+ IFS=$'\n\t'
+ set -vx
+
+ bundle install
+
+ # Do any other automated setup that you need to do here
@@ -0,0 +1,10 @@
+ #! /bin/bash -e
+
+ timeout=1800
+ output=/etc/zfs-list-snapshots.txt
+ lock=/etc/zfs-list-snapshots.lock
+ tempfile=$(mktemp)
+
+ flock -w $timeout $lock /usr/sbin/zfs list -Hprt snapshot -o name,creation,used,written,refer,logicalreferenced,logicalused,zfssnapman:destroy,zfssnapman:snap -s creation > $tempfile
+ mv -f $tempfile $output
+ chmod 0644 $output
data/bin/zfsfuncs ADDED
@@ -0,0 +1,126 @@
+ #-*- mode: sh ; -*-
+
+ function zfssendrecv {
+     local OPTIND OPTARG opt
+     local zfs
+     local snap
+     local dest
+     local inc=''
+     local hold=0
+     while getopts "z:s:d:i:I:h" opt; do
+         case $opt in
+             z)
+                 zfs=$OPTARG
+                 ;;
+             s)
+                 snap=$OPTARG
+                 ;;
+             d)
+                 dest=$OPTARG
+                 ;;
+             i)
+                 inc="-i ${OPTARG}"
+                 ;;
+             I)
+                 inc="-I ${OPTARG}"
+                 ;;
+             h)
+                 hold=1
+                 ;;
+         esac
+     done
+     shift $((OPTIND-1))
+     local zfs_normal=$( echo $zfs|sed 's/[\:\|\/\\ ]/_/g' )
+     local lock="${LOCK_DIR}/${zfs_normal}.lock"
+     local zfs_recv_status
+     local zfs_send_status
+     local pipe_status
+     (
+         if ! $FLOCK; then
+             $ulog "unable to lock ${lock}"
+             return -2
+         fi
+         if [[ $TEST == 0 && $hold == 1 ]]; then
+             zfs hold -r zfsrecvman $snap 2>&1 | $ulog
+             local hold_status="${PIPESTATUS[0]}"
+             if [[ $hold_status != 0 ]]; then
+                 $ulog "unable to place a hold on our snapshots: ${snap}"
+                 return -3
+             fi
+         fi
+         $ulog "estimating size of sending ${snap}"
+         local size=$( zfs $SEND -v -n $inc $snap 2>&1 | tail -1 | cut -d" " -f 5 )
+         # could be 0 or 400 or 4K or 9.3g, etc.
+         local suf=$( echo $size | sed -E 's/[0-9]+\.?[0-9]*//' | tr '[:lower:]' '[:upper:]' )
+         size=$( echo $size | sed -E 's/[pPtTgGmMkKB]$//' ) # remove known suffixes
+         if [[ $suf != 'B' ]]; then
+             size=$( echo "${size} * 1024" | bc | sed -E 's/\.[0-9]+//' ) # use bc to multiply decimals, sed to make ceil()
+         fi
+         case $suf in
+             B)
+                 suf=''
+                 ;;
+             K)
+                 suf=''
+                 ;;
+             M)
+                 suf='K'
+                 ;;
+             G)
+                 suf='M'
+                 ;;
+             T)
+                 suf='G'
+                 ;;
+             P)
+                 suf='T'
+                 ;;
+         esac
+         $ulog "estimated size of sending ${snap} is ${size}${suf}"
+         local pv_more="-s ${size}${suf}"
+         if [[ $USE_MBUFFER == 'yes' ]]; then
+             ssh "${USER}@${REMOTE}" "mbuffer ${MBUFFER} -q -I ${PORT} | zfs ${RECV} ${dest}" 2>&1 | $ulog &
+             sleep 5
+             zfs $SEND $inc $snap 2> >($ulog)|
+                 mbuffer $MBUFFER $MBUFFER_SEND_OPTS -O ${REMOTE}:${PORT}
+             zfs_send_status="${PIPESTATUS[0]}"
+             $ulog "zfs send exited with status: ${zfs_send_status}"
+             $ulog "about to wait on zfs send (this may take a while and appear to have hung)"
+             wait
+             zfs_recv_status="${PIPESTATUS[0]}"
+             $ulog "zfs recv exited with status: ${zfs_recv_status}"
+         else
+             zfs $SEND $inc $snap 2> >($ulog) | pv $PV_OPTS $pv_more | ssh "${USER}@${REMOTE}" "zfs ${RECV} ${dest}" 2>&1 | $ulog
+             pipe_status=("${PIPESTATUS[@]}")
+             zfs_send_status="${pipe_status[0]}"
+             zfs_recv_status="${pipe_status[2]}"
+             $ulog "zfs send exited with status: ${zfs_send_status}"
+             $ulog "zfs recv exited with status: ${zfs_recv_status}"
+         fi
+         if [[ $zfs_send_status != 0 ]]; then
+             return $zfs_send_status
+         elif [[ $zfs_recv_status != 0 ]]; then
+             return $zfs_recv_status
+         else
+             # both must be zero
+             return 0
+         fi
+     ) 9>$lock
+ }
+
+ function terminal_options {
+     if [ -t 1 ]; then
+         ISTERM=1
+         LOGGER_EXTRA='-s'       # enable logger output to stderr
+         PV_OPTS='-perb'         # enable all the magic output from pv
+         MBUFFER_SEND_OPTS=''    # don't do quiet mode, we have a term
+         ulog="logger ${LOGGER_EXTRA} -p user.notice -t "$(basename $0 2>/dev/null)"[${$}]"
+     else
+         ISTERM=0
+         LOGGER_EXTRA=''         # don't enable stderr output
+         PV_OPTS='-q'            # make pv quiet
+         MBUFFER_SEND_OPTS='-q'  # enable send side -q, no terminal
+         ulog="logger ${LOGGER_EXTRA} -p user.notice -t "$(basename $0 2>/dev/null)"[${$}]"
+     fi
+ }
+
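For reference, a hypothetical call to the zfssendrecv helper above (the dataset, snapshot, and destination names are made up; the function also expects variables such as LOCK_DIR, FLOCK, SEND, RECV, REMOTE, USER, and ulog to be set by the calling script, as zfsrecvman below does):

    # incremental send of tank/data up to its newest snapshot, holding it on success
    zfssendrecv -z tank/data \
                -I tank/data@zfssnapman-20200101000000 \
                -s tank/data@zfssnapman-20200201000000 \
                -d blob/backuphost \
                -h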
data/bin/zfsmgr ADDED
@@ -0,0 +1,40 @@
+ require "thor"
+ require "zfs_mgmt"
+
+ class Snapshot < Thor
+   class_option :noop, :type => :boolean, :default => false,
+                :desc => 'pass -n option to zfs commands'
+   class_option :verbose, :type => :boolean, :default => false,
+                :desc => 'pass -v option to zfs commands'
+   class_option :debug, :type => :boolean, :default => false,
+                :desc => 'set logging level to debug'
+   class_option :filter, :type => :string, :default => '.+',
+                :desc => 'only act on zfs matching this regexp'
+   desc "destroy", "apply the snapshot destroy policy to zfs"
+   def destroy()
+     ZfsMgmt.snapshot_destroy(noop: options[:noop], verbopt: options[:verbose], debugopt: options[:debug], filter: options[:filter])
+   end
+   desc "policy", "print the policy table for zfs"
+   def policy()
+     ZfsMgmt.snapshot_policy(verbopt: options[:verbose], debugopt: options[:debug], filter: options[:filter])
+   end
+   desc "create", "execute zfs snapshot based on zfs properties"
+   def create()
+     ZfsMgmt.snapshot_create(verbopt: options[:verbose], debugopt: options[:debug], filter: options[:filter])
+   end
+ end
+
+ class ZfsMgr < Thor
+   desc "zfsget [ZFS]", "execute zfs get for the given properties and types and parse the output into a nested hash"
+   method_option :properties, :type => :array, :default => ['name'], :desc => "List of properties passed to zfs get"
+   method_option :types, :type => :array, :default => ['filesystem','volume'], enum: ['filesystem','volume','snapshot'], :desc => "list of types"
+   def zfsget(zfs)
+     pp ZfsMgmt.zfsget(properties: options[:properties],
+                       types: options[:types],
+                       zfs: zfs)
+   end
+   desc "snapshot SUBCOMMAND ...ARGS", "manage snapshots"
+   subcommand "snapshot", Snapshot
+ end
+
+ ZfsMgr.start(ARGV)
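A hypothetical invocation of the zfsget subcommand defined above (the property list and dataset name are illustrative):

    $ ruby -I lib bin/zfsmgr zfsget backup/beast/data --properties name used refer --types filesystem volume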
data/bin/zfsrecvman ADDED
@@ -0,0 +1,154 @@
+ #! /bin/bash
+
+ export PATH=$PATH:/sbin
+
+ FILTER='.'
+ SENDER='beast'
+ REMOTE='blob'
+ RECVER='blob'
+ USER='root'
+ DEST="blob"/$(hostname -s)
+ SEND='send -R'
+ RECV='recv -u -e -F'
+ FLOCK='/usr/bin/flock -w 60 -n 9'
+ PORT='1337'
+ MBUFFER='-s 128k -m 1G -4'
+ USE_MBUFFER='no'
+ LOCK_DIR='/var/run/'$(basename $0)
+ TEST=0
+ VERB=0
+
+ test -f $HOME/.keychain/$HOSTNAME-sh && . $HOME/.keychain/$HOSTNAME-sh
+
+ . /usr/bin/zfsfuncs
+
+ terminal_options
+
+ if [[ `hostname -s` == $SENDER ]]; then
+     SENDER_PREF=""
+     RECVER_PREF="ssh ${USER}@${SENDER}"
+ elif [[ `hostname -s` == $RECVER ]]; then
+     SENDER_PREF="ssh ${USER}@${SENDER}"
+     RECVER_PREF=""
+ else
+     $ulog "can only be run on ${SENDER} or ${RECVER}"
+     exit -1
+ fi
+
+ while getopts ":p:f:L:mnv" opt; do
+     case $opt in
+         p)
+             PORT=$OPTARG
+             ;;
+         f)
+             FILTER=$OPTARG
+             ;;
+         L)
+             PV_OPTS="${PV_OPTS} -L ${OPTARG}"
+             ;;
+         m)
+             USE_MBUFFER='yes'
+             ;;
+         n)
+             RECV="${RECV} -n"
+             TEST=1
+             VERB=1
+             PV_OPTS='-q' # make pv quiet
+             MBUFFER_SEND_OPTS='-q' # enable send side -q, no terminal
+             ;;
+         v)
+             VERB=1
+             ;;
+     esac
+ done
+
+ if [[ $VERB == 1 ]]; then
+     echo $RECV | grep -q -- -v || RECV="${RECV} -v"
+ fi
+
+ for zpool in $( zpool list -H -o name | egrep "$FILTER" | sort ); do
+     target="${DEST}/${zpool}"
+     target_dir=$( dirname $target )
+     # recv_last is the last snapshot on the recv side of this zfs
+     if ! ssh "${USER}@${REMOTE}" zfs get written $target >/dev/null 2>/dev/null; then
+         $ulog sending initial snapshot of $zpool to $target_dir on $REMOTE
+         snap=$( zfs list -t snapshot -o name -s creation -d 1 -H $zpool | grep @zfssnapman- | tail -1 )
+         result='-1'
+         zfssendrecv -z $zpool \
+                     -s $snap \
+                     -d $target_dir \
+                     -h # create hold
+         result=$?
+         if [[ $TEST == 0 ]]; then
+             echo "${zpool}:${REMOTE}:${snap}:${result}" >> ~/.zfssendrecv.log
+             if [[ $result != 0 ]]; then
+                 zfs release -r zfsrecvman $snap || $ulog "unable to remove hold on our source snapshot: ${snap}"
+             fi
+         fi
+     fi
+
+     # last known good snapshot sent
+     pattern="^${zpool}:${REMOTE}:.+:0$"
+     if ! egrep -q "${pattern}" ~/.zfssendrecv.log; then
+         $ulog "no known good snapshot logged for ${zpool} on ${REMOTE}, unable to continue"
+         continue;
+     fi
+     last_snap=$( egrep "${pattern}" ~/.zfssendrecv.log | tail -1 | cut -d: -f 3)
+
+     remote_snaps=$( mktemp )
+     ssh "${USER}@${REMOTE}" zfs list -t snapshot -o name -s creation -d 1 -H $target > $remote_snaps
+     if [[ $? != 0 ]]; then
+         $ulog "unable to retrieve list of remote snapshots for ${zpool} on ${REMOTE}"
+         continue;
+     fi
+
+     if ! egrep -q "^${target_dir}/${last_snap}" $remote_snaps; then
+         $ulog "${last_snap} does not exist on ${REMOTE}, you must destroy the filesystem: ${target}"
+         continue;
+     fi
+     if ! tail -1 $remote_snaps | egrep -q "^${target_dir}/${last_snap}"; then
+         $ulog "${last_snap} is not the most recent snapshot on ${REMOTE}, rollback will occur on ${target}"
+     fi
+
+     rm -f $remote_snaps
+     # grab the most recent local recursive snapshot
+     current=$( zfs list -t snapshot -o name -s creation -d 1 -H $zpool | grep @zfssnapman- | tail -1 )
+     if [[ "${last_snap}" == "${current}" ]]; then
+         $ulog "${zpool} is in sync on source and destination (${target})"
+         continue
+     fi
+     $ulog sending $last_snap through $current to $target
+     result='-1'
+     zfssendrecv -z $zpool \
+                 -I $last_snap \
+                 -s $current \
+                 -d $target_dir \
+                 -h
+     result=$?
+     if [[ $TEST == 1 ]]; then
+         continue
+     fi
+     echo "${zpool}:${REMOTE}:${current}:${result}" >> ~/.zfssendrecv.log
+     if [[ $result == 0 ]]; then
+         $ulog "${zpool} is in sync on source and destination (${target})"
+         zfs get -t filesystem -H -r all $zpool | ssh "${USER}@${REMOTE}" "cat > ~/.zpool-properties-${zpool}" ||
+             $ulog "unable to write zpool properties backup for ${zpool}"
+         zfs release -r zfsrecvman $last_snap ||
+             $ulog "unable to release old snapshot: ${last_snap}"
+         ssh "${USER}@${REMOTE}" "zfs release -r zfsrecvman ${target_dir}/${last_snap}" ||
+             $ulog "unable to release old snapshot on remote side: ${target_dir}/${last_snap}"
+         ssh "${USER}@${REMOTE}" "zfs hold -r zfsrecvman ${target_dir}/${current}" ||
+             $ulog "unable to hold snapshot on remote side: ${target_dir}/${current}"
+         com=''
+         for zfs in $(ssh "${USER}@${REMOTE}" "zfs list -H -o name -t filesystem -r ${target}" | sort -r ); do
+             for prop in 'canmount=off' 'sharenfs=off' 'sharesmb=off' 'mountpoint=none'; do
+                 com="${com}zfs set ${prop} ${zfs};"
+             done
+         done
+         $ulog "fixing zfs properties for ${zpool} (this may take a while)"
+         ssh "${USER}@${REMOTE}" "${com}" 2>&1 | $ulog
+     else
+         zfs release -r zfsrecvman $current || $ulog "unable to release current snapshot: ${current}"
+         $ulog zfs exited with $result while sending $send through $current to $target
+     fi
+ done
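Finally, a hypothetical dry run of the zfsrecvman script above, limited to pools matching a pattern and using mbuffer (the flags come from its getopts block; the pool name is made up):

    # -n: no-op receive (also sets TEST=1), -m: use mbuffer, -v: verbose recv, -f: pool filter regexp
    zfsrecvman -n -m -v -f '^tank$'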