vpsadmin-client 3.0.0.master.202211181.pre.0.ac358990 → 4.0.0
- checksums.yaml +4 -4
- data/.rubocop.yml +5 -0
- data/Gemfile +4 -0
- data/Rakefile +0 -1
- data/lib/{terminal-size.rb → terminal_size.rb} +23 -17
- data/lib/vpsadmin/cli/commands/backup_dataset.rb +94 -96
- data/lib/vpsadmin/cli/commands/backup_vps.rb +1 -1
- data/lib/vpsadmin/cli/commands/base_download.rb +6 -6
- data/lib/vpsadmin/cli/commands/network_top.rb +64 -58
- data/lib/vpsadmin/cli/commands/snapshot_download.rb +20 -21
- data/lib/vpsadmin/cli/commands/snapshot_send.rb +6 -7
- data/lib/vpsadmin/cli/commands/vps_migrate_many.rb +10 -12
- data/lib/vpsadmin/cli/commands/vps_remote_console.rb +29 -30
- data/lib/vpsadmin/cli/stream_downloader.rb +36 -43
- data/lib/vpsadmin/cli.rb +2 -2
- data/lib/vpsadmin/client/version.rb +1 -1
- data/lib/vpsadmin/client.rb +0 -1
- data/shell.nix +6 -6
- data/vpsadmin-client.gemspec +8 -11
- metadata +11 -38
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 73f9447cec0e8278700d4d5e56fdc50bd7b88ceb6debac252d3ab9bb5322260e
+  data.tar.gz: 45c1ef064847694f9432da3872baa7f67844b1fbd69500caa9369f4187f04bc5
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: df1defede9d7b25d5279d0cfb75f84025ac503851737bda802acd084cb4cf3423fd2390d7083a8597b40f0bef86391fe6f74de67305505256cde04591a6082ee
+  data.tar.gz: e712ebdfc1f68faba266f8e117cf19403d705bf1f9f96bdf31824223362171506ac8de647eccd26ad239975179941f9747784da8ab347a0b5dca87d0f0849ac0
data/.rubocop.yml
ADDED
data/Gemfile
CHANGED
data/Rakefile
CHANGED
data/lib/{terminal-size.rb → terminal_size.rb}
CHANGED
@@ -1,55 +1,61 @@
 class Terminal
-  class Size; VERSION = '0.0.6' end
+  class Size; VERSION = '0.0.6'.freeze end
   class << self
     def size
       size_via_low_level_ioctl or size_via_stty or nil
     end
-
+
+    def size!
+      size or _height_width_hash_from 25, 80
+    end
 
     # These are experimental
-    def resize
+    def resize(direction, magnitude)
       tmux 'resize-pane', "-#{direction}", magnitude
     end
 
     def tmux *cmd
-      system 'tmux', *
+      system 'tmux', *cmd.map(&:to_s)
     end
 
-    IOCTL_INPUT_BUF = "\x00"*8
+    IOCTL_INPUT_BUF = "\x00" * 8
     def size_via_low_level_ioctl
       # Thanks to runpaint for the general approach to this
       return unless $stdin.respond_to? :ioctl
+
       code = tiocgwinsz_value_for RUBY_PLATFORM
       return unless code
+
       buf = IOCTL_INPUT_BUF.dup
-      return
-      return if
+      return if $stdout.ioctl(code, buf) != 0
+      return if buf == IOCTL_INPUT_BUF
+
       got = buf.unpack('S4')[0..1]
-      _height_width_hash_from
-    rescue
+      _height_width_hash_from(*got)
+    rescue StandardError
       nil
     end
 
-    def tiocgwinsz_value_for
+    def tiocgwinsz_value_for(platform)
       # This is as reported by <sys/ioctl.h>
       # Hard-coding because it seems like overkll to acutally involve C for this.
       {
         /linux/ => 0x5413,
-        /darwin/ => 0x40087468
-      }.find{|k,
+        /darwin/ => 0x40087468 # thanks to brandon@brandon.io for the lookup!
+      }.find { |k, _v| platform[k] }
     end
 
     def size_via_stty
-      ints = `stty size`.scan(/\d+/).map
-      _height_width_hash_from
-    rescue
+      ints = `stty size`.scan(/\d+/).map(&:to_i)
+      _height_width_hash_from(*ints)
+    rescue StandardError
       nil
    end
 
     private
+
     def _height_width_hash_from *dimensions
-      { :
+      { height: dimensions[0], width: dimensions[1] }
     end
-
   end
 end
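After the rename, the vendored helper keeps the same small API. A minimal usage sketch, assuming the file above has been loaded; the require path and the tmux call are illustrative, not taken from the gem:

require_relative 'terminal_size'  # hypothetical load path for the vendored file

dims = Terminal.size              # => { height: ..., width: ... }, or nil when detection fails
dims = Terminal.size!             # same, but falls back to { height: 25, width: 80 }
puts "#{dims[:width]}x#{dims[:height]}"

Terminal.resize(:D, 5)            # experimental: runs `tmux resize-pane -D 5`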
data/lib/vpsadmin/cli/commands/backup_dataset.rb
CHANGED
@@ -21,7 +21,7 @@ module VpsAdmin::CLI::Commands
       attempts: 10,
       checksum: true,
       delete_after: true,
-      sudo: true
+      sudo: true
     }
 
     opts.on('-p', '--pretend', 'Print what would the program do') do
@@ -93,14 +93,14 @@ module VpsAdmin::CLI::Commands
 
       ds_id = read_dataset_id(fs)
 
-      if ds_id
-
-
-
-
+      ds = if ds_id
+             @api.dataset.show(ds_id)
+           else
+             dataset_chooser
+           end
 
     elsif args.size != 2
-      warn
+      warn 'Provide DATASET_ID and FILESYSTEM arguments'
       exit(false)
 
     else
@@ -144,15 +144,15 @@ module VpsAdmin::CLI::Commands
       # This is the first run within this history id, no local snapshots are
       # present
       if !latest_local_snapshot && @opts[:init_snapshots]
-        remote_state[ds.current_history_id] =
+        remote_state[ds.current_history_id] =
           remote_state[ds.current_history_id].last(@opts[:init_snapshots])
       end
 
       remote_state[ds.current_history_id].each do |snap|
         found = false
 
-        local_state.
-        found =
+        local_state.each_value do |local_snapshots|
+          found = local_snapshots.detect { |s| s.name == snap.name }
           break if found
         end
 
@@ -170,33 +170,33 @@ module VpsAdmin::CLI::Commands
       if for_transfer.empty?
         if found_latest
           exit_msg(
-
+            'Nothing to transfer: all snapshots with history id ' \
             "#{ds.current_history_id} are already present locally",
             error: @opts[:no_snapshots_error]
           )
 
         else
-          exit_msg(
-Unable to transfer: the common snapshot has not been found
+          exit_msg(<<~END
+            Unable to transfer: the common snapshot has not been found
 
-This can happen when the latest local snapshot was deleted from the server,
-i.e. you have not backed up this dataset for quite some time.
+            This can happen when the latest local snapshot was deleted from the server,
+            i.e. you have not backed up this dataset for quite some time.
 
-You can either rename or destroy the whole current history id:
+            You can either rename or destroy the whole current history id:
 
-
+              zfs rename #{fs}/#{ds.current_history_id} #{fs}/#{ds.current_history_id}.old
 
-or
+            or
 
-
-
+              zfs list -r -t all #{fs}/#{ds.current_history_id}
+              zfs destroy -r #{fs}/#{ds.current_history_id}
 
-which will destroy all snapshots with this history id.
+            which will destroy all snapshots with this history id.
 
-You can also destroy the local backup completely or backup to another dataset
-and start anew.
-END
-
+            You can also destroy the local backup completely or backup to another dataset
+            and start anew.
+          END
+          )
         end
       end
 
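The error message above moves from a flush-left heredoc to a squiggly heredoc (<<~END), which is why every body line gains indentation: Ruby strips the common leading whitespace before the string is built. A minimal illustration, not taken from the gem (the dataset name is made up):

message = <<~END
  Unable to transfer: the common snapshot has not been found

    zfs destroy -r tank/backups/1
END
puts message
# Prints the text flush left; only the two extra spaces before the zfs command survive.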
@@ -226,9 +226,7 @@ END
       if shared_name
         shared = remote_state[ds.current_history_id].detect { |s| s.name == shared_name }
 
-        if shared && !for_transfer.detect { |s| s.id == shared.id }
-          for_transfer.insert(0, shared)
-        end
+        for_transfer.insert(0, shared) if shared && !for_transfer.detect { |s| s.id == shared.id }
       end
 
       write_dataset_id!(ds, fs) unless written_dataset_id?
@@ -238,13 +236,12 @@ END
     end
 
     protected
+
     def transfer(local_state, snapshots, hist_id, fs)
       ds = "#{fs}/#{hist_id}"
       no_local_snapshots = local_state[hist_id].nil? || local_state[hist_id].empty?
 
-      if local_state[hist_id].nil?
-        zfs(:create, nil, ds)
-      end
+      zfs(:create, nil, ds) if local_state[hist_id].nil?
 
       if no_local_snapshots
         msg "Performing a full receive of @#{snapshots.first.name} to #{ds}"
@@ -255,60 +252,59 @@ END
         else
           run_piped(zfs_cmd(:recv, '-F', ds)) do
             SnapshotSend.new({}, @api).do_exec({
-
-
-
-
-
-
-
+              snapshot: snapshots.first.id,
+              send_mail: false,
+              delete_after: @opts[:delete_after],
+              max_rate: @opts[:max_rate],
+              checksum: @opts[:checksum],
+              quiet: @opts[:quiet]
+            })
           end || exit_msg('Receive failed')
         end
       end
 
-
-      msg "Performing an incremental receive of "+
-          "@#{snapshots.first.name} - @#{snapshots.last.name} to #{ds}"
+      return unless !no_local_snapshots || snapshots.size > 1
 
-
-
+      msg 'Performing an incremental receive of ' \
+          "@#{snapshots.first.name} - @#{snapshots.last.name} to #{ds}"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+      if @opts[:safe]
+        safe_download(ds, snapshots.last, snapshots.first)
+
+      else
+        run_piped(zfs_cmd(:recv, '-F', ds)) do
+          SnapshotSend.new({}, @api).do_exec({
+            snapshot: snapshots.last.id,
+            from_snapshot: snapshots.first.id,
+            send_mail: false,
+            delete_after: @opts[:delete_after],
+            max_rate: @opts[:max_rate],
+            checksum: @opts[:checksum],
+            quiet: @opts[:quiet]
+          })
+        end || exit_msg('Receive failed')
       end
     end
 
     def safe_download(ds, snapshot, from_snapshot = nil)
       part, full = snapshot_tmp_file(snapshot, from_snapshot)
 
-
+      unless File.exist?(full)
         attempts = 0
 
         begin
           SnapshotDownload.new({}, @api).do_exec({
-
-
-
-
-
-
-
-
-
-
-
-
+            snapshot: snapshot.id,
+            from_snapshot: from_snapshot && from_snapshot.id,
+            format: from_snapshot ? :incremental_stream : :stream,
+            file: part,
+            max_rate: @opts[:max_rate],
+            checksum: @opts[:checksum],
+            quiet: @opts[:quiet],
+            resume: true,
+            delete_after: @opts[:delete_after],
+            send_mail: false
+          })
         rescue Errno::ECONNREFUSED,
                Errno::ETIMEDOUT,
                Errno::EHOSTUNREACH,
@@ -318,11 +314,11 @@ END
           attempts += 1
 
           if attempts >= @opts[:attempts]
-            warn
+            warn 'Run out of attempts'
             exit(false)
 
           else
-            warn
+            warn 'Retry in 60 seconds'
             sleep(60)
             retry
           end
@@ -339,15 +335,15 @@ END
     end
 
     def rotate(fs, pretend: false)
-      msg
-      local_state = pretend
+      msg 'Rotating snapshots'
+      local_state = pretend || parse_tree(fs)
 
       # Order snapshots by date of creation
       snapshots = local_state.values.flatten.sort do |a, b|
         a.creation <=> b.creation
       end
 
-      cnt = local_state.values.inject(0) { |sum,
+      cnt = local_state.values.inject(0) { |sum, local_snapshots| sum + local_snapshots.count }
       deleted = 0
       oldest = Time.now.to_i - (@opts[:max_age] * 60 * 60 * 24)
 
@@ -366,8 +362,8 @@ END
         zfs(:destroy, nil, "#{ds}@#{s.name}", pretend: pretend)
       end
 
-      local_state.each do |hist_id,
-        next unless
+      local_state.each do |hist_id, local_snapshots|
+        next unless local_snapshots.empty?
 
         ds = "#{fs}/#{hist_id}"
 
@@ -382,15 +378,15 @@ END
       # This is intentionally done by two zfs commands, because -d2 would include
       # nested subdatasets, which should not be there, but the user might create
       # them and it could confuse the program.
-      zfs(:list, '-r -d1 -tfilesystem -H -oname', fs).split("\n")[1
+      zfs(:list, '-r -d1 -tfilesystem -H -oname', fs).split("\n")[1..].each do |name|
         last_name = name.split('/').last
         ret[last_name.to_i] = [] if dataset?(last_name)
       end
 
       zfs(
-
-
-
+        :get,
+        '-Hrp -d2 -tsnapshot -oname,property,value name,creation',
+        fs
       ).split("\n").each do |line|
         name, property, value = line.split
         ds, snap_name = name.split('@')
@@ -399,7 +395,7 @@ END
 
         hist_id = ds_name.to_i
 
-        if snap = ret[hist_id].detect { |s| s.name == snap_name }
+        if (snap = ret[hist_id].detect { |s| s.name == snap_name })
           snap.send("#{property}=", value)
 
         else
@@ -418,14 +414,15 @@ END
     def read_dataset_id(fs)
       ds_id = zfs(:get, '-H -ovalue cz.vpsfree.vpsadmin:dataset_id', fs).strip
       return nil if ds_id == '-'
+
       @dataset_id = ds_id.to_i
     end
 
     def check_dataset_id!(ds, fs)
-
-
-
-
+      return unless @dataset_id && @dataset_id != ds.id
+
+      warn "Dataset '#{fs}' is used to backup remote dataset with id '#{@dataset_id}', not '#{ds.id}'"
+      exit(false)
     end
 
     def written_dataset_id?
@@ -444,13 +441,13 @@ END
 
       pids << Process.fork do
         r.close
-
+        $stdout.reopen(w)
         block.call
       end
 
       pids << Process.fork do
         w.close
-
+        $stdin.reopen(r)
         Process.exec(cmd2)
       end
 
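The two reopen calls above are the core of run_piped: the block's standard output is fed into a second command over a pipe. A standalone sketch of the same pattern, with illustrative names (the `cat` command stands in for cmd2, e.g. zfs recv); the real run_piped also has to report success, since the callers above chain it with `|| exit_msg('Receive failed')`:

r, w = IO.pipe

pids = []
pids << Process.fork do
  r.close
  $stdout.reopen(w)      # everything the block prints goes into the pipe
  puts 'streamed data'   # stands in for block.call
end

pids << Process.fork do
  w.close
  $stdin.reopen(r)       # the second command reads the stream on its stdin
  Process.exec('cat')    # stands in for cmd2
end

r.close
w.close
pids.each { |pid| Process.wait(pid) }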
@@ -500,11 +497,12 @@ END
       ds_map = {}
 
       @api.dataset.index(user: user.id).each do |ds|
-        if vps = vps_map[ds.id]
+        if (vps = vps_map[ds.id])
           puts "(#{i}) VPS ##{vps.id}"
 
         else
           next if vps_only
+
           puts "(#{i}) Dataset #{ds.name}"
         end
 
@@ -513,10 +511,10 @@ END
       end
 
       loop do
-
-
+        $stdout.write('Pick a dataset to backup: ')
+        $stdout.flush
 
-        i =
+        i = $stdin.readline.strip.to_i
         next if i <= 0 || ds_map[i].nil?
 
         return ds_map[i]
@@ -524,12 +522,12 @@ END
     end
 
     def snapshot_tmp_file(s, from_s = nil)
-      if from_s
-
+      base = if from_s
+               ".snapshot_#{from_s.id}-#{s.id}.inc.dat.gz"
 
-
-
-
+             else
+               ".snapshot_#{s.id}.dat.gz"
+             end
 
       ["#{base}.part", base]
     end
data/lib/vpsadmin/cli/commands/base_download.rb
CHANGED
@@ -7,15 +7,15 @@ module VpsAdmin::CLI::Commands
     end
 
     protected
+
     def find_or_create_dl(opts, do_create = true)
       @api.snapshot_download.index(snapshot: opts[:snapshot]).each do |r|
-
-
-
-        end
-
-        return [r, false]
+        next unless opts[:from_snapshot] == (r.from_snapshot && r.from_snapshot_id)
+        if r.format != opts[:format].to_s
+          raise "SnapshotDownload id=#{r.id} is in unusable format '#{r.format}' (needs '#{opts[:format]}')"
         end
+
+        return [r, false]
       end
 
       if do_create
|