confctl 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.editorconfig +11 -0
- data/.gitignore +8 -0
- data/.overcommit.yml +6 -0
- data/.rubocop.yml +67 -0
- data/.rubocop_todo.yml +5 -0
- data/.ruby-version +1 -0
- data/CHANGELOG.md +2 -0
- data/Gemfile +2 -0
- data/LICENSE.txt +674 -0
- data/README.md +522 -0
- data/Rakefile +40 -0
- data/bin/confctl +4 -0
- data/confctl.gemspec +33 -0
- data/example/.gitignore +2 -0
- data/example/README.md +38 -0
- data/example/cluster/cluster.nix +7 -0
- data/example/cluster/module-list.nix +3 -0
- data/example/cluster/nixos-machine/config.nix +15 -0
- data/example/cluster/nixos-machine/hardware.nix +4 -0
- data/example/cluster/nixos-machine/module.nix +8 -0
- data/example/cluster/vpsadminos-container/config.nix +22 -0
- data/example/cluster/vpsadminos-container/module.nix +8 -0
- data/example/cluster/vpsadminos-machine/config.nix +22 -0
- data/example/cluster/vpsadminos-machine/hardware.nix +4 -0
- data/example/cluster/vpsadminos-machine/module.nix +8 -0
- data/example/cluster/vpsfreecz-vps/config.nix +25 -0
- data/example/cluster/vpsfreecz-vps/module.nix +8 -0
- data/example/configs/confctl.nix +10 -0
- data/example/configs/swpins.nix +28 -0
- data/example/data/default.nix +5 -0
- data/example/data/ssh-keys.nix +7 -0
- data/example/environments/base.nix +13 -0
- data/example/modules/module-list.nix +13 -0
- data/example/shell.nix +11 -0
- data/example/swpins/channels/nixos-unstable.json +35 -0
- data/example/swpins/channels/vpsadminos-staging.json +35 -0
- data/lib/confctl/cli/app.rb +551 -0
- data/lib/confctl/cli/attr_filters.rb +51 -0
- data/lib/confctl/cli/cluster.rb +1248 -0
- data/lib/confctl/cli/command.rb +206 -0
- data/lib/confctl/cli/configuration.rb +296 -0
- data/lib/confctl/cli/gen_data.rb +97 -0
- data/lib/confctl/cli/generation.rb +335 -0
- data/lib/confctl/cli/log_view.rb +267 -0
- data/lib/confctl/cli/output_formatter.rb +288 -0
- data/lib/confctl/cli/swpins/base.rb +40 -0
- data/lib/confctl/cli/swpins/channel.rb +73 -0
- data/lib/confctl/cli/swpins/cluster.rb +80 -0
- data/lib/confctl/cli/swpins/core.rb +86 -0
- data/lib/confctl/cli/swpins/utils.rb +55 -0
- data/lib/confctl/cli/swpins.rb +5 -0
- data/lib/confctl/cli/tag_filters.rb +30 -0
- data/lib/confctl/cli.rb +5 -0
- data/lib/confctl/conf_cache.rb +105 -0
- data/lib/confctl/conf_dir.rb +88 -0
- data/lib/confctl/erb_template.rb +37 -0
- data/lib/confctl/exceptions.rb +3 -0
- data/lib/confctl/gcroot.rb +30 -0
- data/lib/confctl/generation/build.rb +145 -0
- data/lib/confctl/generation/build_list.rb +106 -0
- data/lib/confctl/generation/host.rb +35 -0
- data/lib/confctl/generation/host_list.rb +81 -0
- data/lib/confctl/generation/unified.rb +117 -0
- data/lib/confctl/generation/unified_list.rb +63 -0
- data/lib/confctl/git_repo_mirror.rb +79 -0
- data/lib/confctl/health_checks/base.rb +66 -0
- data/lib/confctl/health_checks/run_command.rb +179 -0
- data/lib/confctl/health_checks/systemd/properties.rb +84 -0
- data/lib/confctl/health_checks/systemd/property_check.rb +31 -0
- data/lib/confctl/health_checks/systemd/property_list.rb +20 -0
- data/lib/confctl/health_checks.rb +5 -0
- data/lib/confctl/hook.rb +35 -0
- data/lib/confctl/line_buffer.rb +53 -0
- data/lib/confctl/logger.rb +151 -0
- data/lib/confctl/machine.rb +107 -0
- data/lib/confctl/machine_control.rb +172 -0
- data/lib/confctl/machine_list.rb +108 -0
- data/lib/confctl/machine_status.rb +135 -0
- data/lib/confctl/module_options.rb +95 -0
- data/lib/confctl/nix.rb +382 -0
- data/lib/confctl/nix_build.rb +108 -0
- data/lib/confctl/nix_collect_garbage.rb +64 -0
- data/lib/confctl/nix_copy.rb +49 -0
- data/lib/confctl/nix_format.rb +124 -0
- data/lib/confctl/nix_literal_expression.rb +15 -0
- data/lib/confctl/parallel_executor.rb +43 -0
- data/lib/confctl/pattern.rb +9 -0
- data/lib/confctl/settings.rb +50 -0
- data/lib/confctl/std_line_buffer.rb +40 -0
- data/lib/confctl/swpins/change_set.rb +151 -0
- data/lib/confctl/swpins/channel.rb +62 -0
- data/lib/confctl/swpins/channel_list.rb +47 -0
- data/lib/confctl/swpins/cluster_name.rb +94 -0
- data/lib/confctl/swpins/cluster_name_list.rb +15 -0
- data/lib/confctl/swpins/core.rb +137 -0
- data/lib/confctl/swpins/deployed_info.rb +23 -0
- data/lib/confctl/swpins/spec.rb +20 -0
- data/lib/confctl/swpins/specs/base.rb +184 -0
- data/lib/confctl/swpins/specs/directory.rb +51 -0
- data/lib/confctl/swpins/specs/git.rb +135 -0
- data/lib/confctl/swpins/specs/git_rev.rb +24 -0
- data/lib/confctl/swpins.rb +17 -0
- data/lib/confctl/system_command.rb +10 -0
- data/lib/confctl/user_script.rb +13 -0
- data/lib/confctl/user_scripts.rb +41 -0
- data/lib/confctl/utils/file.rb +21 -0
- data/lib/confctl/version.rb +3 -0
- data/lib/confctl.rb +43 -0
- data/man/man8/confctl-options.nix.8 +1334 -0
- data/man/man8/confctl-options.nix.8.md +1340 -0
- data/man/man8/confctl.8 +660 -0
- data/man/man8/confctl.8.md +654 -0
- data/nix/evaluator.nix +160 -0
- data/nix/lib/default.nix +83 -0
- data/nix/lib/machine/default.nix +74 -0
- data/nix/lib/machine/info.nix +5 -0
- data/nix/lib/swpins/eval.nix +71 -0
- data/nix/lib/swpins/options.nix +94 -0
- data/nix/machines.nix +31 -0
- data/nix/modules/cluster/default.nix +459 -0
- data/nix/modules/confctl/cli.nix +21 -0
- data/nix/modules/confctl/generations.nix +84 -0
- data/nix/modules/confctl/nix.nix +28 -0
- data/nix/modules/confctl/swpins.nix +55 -0
- data/nix/modules/module-list.nix +19 -0
- data/shell.nix +42 -0
- data/template/confctl-options.nix/main.erb +45 -0
- data/template/confctl-options.nix/options.erb +15 -0
- metadata +353 -0
|
@@ -0,0 +1,1248 @@
|
|
|
1
|
+
require_relative 'command'
|
|
2
|
+
require_relative '../hook'
|
|
3
|
+
require 'json'
|
|
4
|
+
require 'rainbow'
|
|
5
|
+
require 'tty-pager'
|
|
6
|
+
require 'tty-progressbar'
|
|
7
|
+
require 'tty-spinner'
|
|
8
|
+
|
|
9
|
+
module ConfCtl::Cli
|
|
10
|
+
class Cluster < Command
|
|
11
|
+
ConfCtl::Hook.register :cluster_deploy
|
|
12
|
+
|
|
13
|
+
def list
  # Without --list: print the selected machines. With --list: print the names
  # of all cluster.<name>.* module options instead.
  unless opts[:list]
    list_machines(select_machines_with_managed(args[0]))
    return
  end

  option_prefix = 'cluster.<name>.'
  puts 'name'

  ConfCtl::Nix.new.module_options.each do |option|
    name = option['name']
    puts name[option_prefix.length..] if name.start_with?(option_prefix)
  end
end
|
|
31
|
+
|
|
32
|
+
def build
  # Build system configurations for every selected managed machine.
  selected = select_machines(args[0]).managed
  raise 'No machines to build' if selected.empty?

  ask_confirmation! do
    puts 'The following machines will be built:'
    list_machines(selected)
  end

  do_build(selected)
end
|
|
44
|
+
|
|
45
|
+
# Deploy configurations to the selected machines.
#
# args[0] optionally selects machines, args[1] is the switch action
# (boot/switch/test/dry-activate; defaults to switch). With --generation an
# existing build generation is deployed, otherwise a fresh build is made.
def deploy
  machines = select_machines(args[0]).managed
  action = args[1] || 'switch'

  raise GLI::BadCommandLine, "invalid action '#{action}'" unless %w[boot switch test dry-activate].include?(action)

  if opts[:reboot]
    raise GLI::BadCommandLine, '--reboot can be used only with switch-action boot' if action != 'boot'

    parse_wait_online
  end

  raise 'No machines to deploy' if machines.empty?

  ask_confirmation! do
    puts 'The following machines will be deployed:'
    list_machines(machines)
    puts
    puts "Generation: #{opts[:generation] || 'new build'}"
    puts "Target action: #{action}#{opts[:reboot] ? ' + reboot' : ''}"
  end

  # FIX: the hook previously received `action: opts[:action]`, but the switch
  # action is taken from args[1] (local `action`), not from an option —
  # opts[:action] was always nil. Pass the computed action instead.
  ConfCtl::Hook.call(:cluster_deploy, kwargs: {
    machines:,
    generation: opts[:generation],
    action:,
    opts:
  })

  host_generations =
    if opts[:generation]
      find_generations(machines, opts[:generation])
    else
      do_build(machines)
    end

  nix = ConfCtl::Nix.new(show_trace: opts['show-trace'])

  if opts['one-by-one']
    deploy_one_by_one(machines, host_generations, nix, action)
  else
    deploy_in_bulk(machines, host_generations, nix, action)
  end
end
|
|
89
|
+
|
|
90
|
+
def health_check
  # Run all configured health checks on the selected managed machines and
  # raise when any of them fails.
  machines = select_machines(args[0]).managed.select do |_host, machine|
    machine.health_checks.any?
  end

  raise 'No machines to check or no health checks configured' if machines.empty?

  checks = machines.health_checks

  ask_confirmation! do
    puts 'Health checks will be run on the following machines:'

    list_machines(machines, prepend_cols: %w[checks])
    puts
    puts "#{checks.length} checks in total"
    puts
  end

  failed = run_health_checks(machines, checks)
  raise 'Health checks failed' if failed.any?
end
|
|
111
|
+
|
|
112
|
+
# Compare the runtime state of the selected machines against either a named
# build generation, a fresh build, or configured swpins only
# (--generation none), and render the comparison as a table.
def status
  machines = select_machines(args[0]).managed
  raise 'No machines to check' if machines.empty?

  ask_confirmation! do
    if opts[:generation]
      puts 'The following machines will be checked:'
    else
      puts 'The following machines will be built and then checked:'
    end

    list_machines(machines)
    puts
    puts "Generation: #{opts[:generation] || 'new build'}"
  end

  statuses = machines.transform_values do |machine|
    ConfCtl::MachineStatus.new(machine)
  end

  # Evaluate toplevels
  if opts[:generation] == 'none'
    # Only swpins are compared, no system generation is looked up
    host_generations = nil
  elsif opts[:generation]
    host_generations = find_generations(machines, opts[:generation])

    # Ignore statuses when no generation was found
    statuses.delete_if do |host, _st|
      !host_generations.has_key?(host)
    end
  else
    host_generations = do_build(machines)
    puts
  end

  # Assign configured toplevel and swpins
  if host_generations
    host_generations.each do |host, gen|
      statuses[host].target_toplevel = gen.toplevel
      statuses[host].target_swpin_specs = gen.swpin_specs
    end
  else
    # We're not comparing a system generation, only configured swpins
    ConfCtl::Swpins::ClusterNameList.new(machines:).each do |cn|
      cn.parse

      statuses[cn.name].target_swpin_specs = cn.specs
    end
  end

  # Check runtime status of all machines concurrently
  tw = ConfCtl::ParallelExecutor.new(machines.length)

  statuses.each_value do |st|
    tw.add do
      # Skip querying toplevels when only swpins are compared
      st.query(toplevel: opts[:generation] != 'none')
    end
  end

  tw.run

  # Collect all swpins (union across machines, insertion-ordered)
  swpins = []

  statuses.each_value do |st|
    st.target_swpin_specs.each_key do |name|
      swpins << name unless swpins.include?(name)
    end

    st.evaluate
  end

  # Render results
  cols = %w[host online uptime status generations] + swpins
  rows = []

  statuses.each do |host, st|
    build_generations = ConfCtl::Generation::BuildList.new(host)

    row = {
      'host' => host,
      # false/nil leaves the cell empty; green 'yes' when reachable
      'online' => st.online? && Rainbow('yes').green,
      'uptime' => st.uptime && format_duration(st.uptime),
      'status' => st.status ? Rainbow('ok').green : Rainbow('outdated').red,
      # "<local build generations>:<generations present on the machine>"
      'generations' => "#{build_generations.count}:#{st.generations && st.generations.count}"
    }

    swpins.each do |name|
      swpin_state = st.swpins_state[name]

      row[name] =
        if swpin_state
          Rainbow(swpin_state.current_version).color(
            swpin_state.uptodate? ? :green : :red
          )
        end
    end

    rows << row
  end

  OutputFormatter.print(rows, cols, layout: :columns, color: use_color?)
end
|
|
215
|
+
|
|
216
|
+
def changelog
  # Print changelogs between deployed and configured swpin versions.
  compare_swpins do |io, _host, status, sw_name, spec|
    begin
      text = spec.string_changelog_info(
        opts[:downgrade] ? :downgrade : :upgrade,
        status.swpins_info[sw_name],
        color: use_color?,
        verbose: opts[:verbose],
        patch: opts[:patch]
      )
    rescue ConfCtl::Error => e
      io.puts e.message
    else
      io.puts(text || 'no changes')
    end
  end
end
|
|
231
|
+
|
|
232
|
+
def diff
  # Print diffs between deployed and configured swpin versions.
  compare_swpins do |io, _host, status, sw_name, spec|
    begin
      text = spec.string_diff_info(
        opts[:downgrade] ? :downgrade : :upgrade,
        status.swpins_info[sw_name],
        color: use_color?
      )
    rescue ConfCtl::Error => e
      io.puts e.message
    else
      io.puts(text || 'no changes')
    end
  end
end
|
|
245
|
+
|
|
246
|
+
# Try to open an SSH connection to every selected machine and report the
# results.
def test_connection
  machines = select_machines_with_managed(args[0])
  raise 'No machines to test' if machines.empty?

  ask_confirmation! do
    puts 'Test SSH connection to the following machines:'
    list_machines(machines)
  end

  succeeded = []
  failed = []

  machines.each do |host, machine|
    mc = ConfCtl::MachineControl.new(machine)

    begin
      mc.test_connection
      succeeded << host
    rescue TTY::Command::ExitError => e
      puts "Unable to connect to #{host}: #{e.message}"
      puts
      failed << host
    end
  end

  puts
  puts "Result: #{succeeded.length} successful, #{failed.length} failed"

  # FIX: previously the "Failed machines:" section was printed even when
  # every connection succeeded; only print it when there are failures.
  return if failed.empty?

  puts
  puts 'Failed machines:'
  failed.each { |host| puts " #{host}" }
end
|
|
277
|
+
|
|
278
|
+
def ssh
  # With a command: run it on all selected machines over SSH. Without one:
  # open an interactive shell, which requires exactly one selected machine.
  machines = select_machines_with_managed(args[0])
  raise 'No machines to ssh to' if machines.empty?

  if opts['input-string'] && opts['input-file']
    raise GLI::BadCommandLine, 'use one of --input-string or --input-file'
  end

  unless args.length == 1
    run_ssh_command(machines, args[1..])
    return
  end

  raise GLI::BadCommandLine, 'missing command' unless machines.length == 1

  run_ssh_interactive(machines)
end
|
|
296
|
+
|
|
297
|
+
def cssh
  # Open ClusterSSH to all selected machines, using cssh provided through a
  # temporary nix shell.
  machines = select_machines_with_managed(args[0])
  raise 'No machines to open cssh to' if machines.empty?

  ask_confirmation! do
    puts 'Open cssh to the following machines:'
    list_machines(machines)
  end

  cssh_cmd = ['cssh', '-l', 'root'] + machines.each_value.map(&:target_host)

  ConfCtl::Nix.new.run_command_in_shell(
    packages: ['perlPackages.AppClusterSSH'],
    command: cssh_cmd.join(' ')
  )
end
|
|
322
|
+
|
|
323
|
+
protected
|
|
324
|
+
|
|
325
|
+
attr_reader :wait_online
|
|
326
|
+
|
|
327
|
+
# Deploy in phases across all machines: copy closures (one by one in
# interactive mode, concurrently otherwise), then activate, then optionally
# reboot and run health checks. A host skipped in an earlier phase is skipped
# in every later phase as well.
def deploy_in_bulk(machines, host_generations, nix, action)
  skipped_copy = []
  skipped_activation = []

  if opts[:interactive]
    host_generations.each do |host, gen|
      if copy_to_host(nix, host, machines[host], gen.toplevel) == :skip
        puts Rainbow("Skipping #{host}").yellow
        skipped_copy << host
      end
    end
  else
    concurrent_copy(machines, host_generations, nix)
  end

  return if opts['copy-only']

  # Activation phase
  host_generations.each do |host, gen|
    if skipped_copy.include?(host)
      puts Rainbow("Copy to #{host} was skipped, skipping activation as well").yellow
      skipped_activation << host
      next
    end

    if deploy_to_host(nix, host, machines[host], gen.toplevel, action) == :skip
      puts Rainbow("Skipping #{host}").yellow
      skipped_activation << host
      next
    end

    puts if opts[:interactive]
  end

  # Optional reboot phase
  if opts[:reboot]
    host_generations.each_key do |host|
      if skipped_activation.include?(host)
        puts Rainbow("Activation on #{host} was skipped, skipping reboot as well").yellow
        next
      end

      if reboot_host(host, machines[host]) == :skip
        puts "Skipping #{host}"
        next
      end

      puts if opts[:interactive]
    end
  end

  # Health-check phase, only when the action makes the new system live
  return unless opts['health-checks'] && need_health_checks?(action)

  check_machines = machines.select do |host, _machine|
    if skipped_activation.include?(host)
      puts Rainbow("Activation on #{host} was skipped, skipping health checks as well").yellow
      false
    else
      true
    end
  end

  run_health_check_loop(check_machines)

  puts if opts[:interactive]
end
|
|
391
|
+
|
|
392
|
+
# Deploy machines sequentially: each host is fully processed (copy, activate,
# optional reboot and health checks) before moving to the next host.
def deploy_one_by_one(machines, host_generations, nix, action)
  host_generations.each do |host, gen|
    machine = machines[host]

    if copy_to_host(nix, host, machine, gen.toplevel) == :skip
      puts Rainbow("Skipping #{host}").yellow
      next
    end

    next if opts['copy-only']

    if deploy_to_host(nix, host, machine, gen.toplevel, action) == :skip
      puts Rainbow("Skipping #{host}").yellow
      next
    end

    if opts[:reboot] && reboot_host(host, machine) == :skip
      puts Rainbow("Skipping #{host}").yellow
      next
    end

    if opts['health-checks'] && need_health_checks?(action)
      # Wrap the single machine in a list, as the loop works on MachineLists
      run_health_check_loop(ConfCtl::MachineList.from_machine(machine))
    end

    puts if opts[:interactive]
  end
end
|
|
420
|
+
|
|
421
|
+
# Copy the built system closure to a machine, showing copy progress in a
# live log view.
#
# Returns :skip when the user declines in interactive mode, true otherwise;
# raises when the copy itself fails.
def copy_to_host(nix, host, machine, toplevel)
  puts Rainbow("Copying configuration to #{host} (#{machine.target_host})").yellow

  return :skip if opts[:interactive] && !ask_confirmation(always: true)

  LogView.open(
    header: "#{Rainbow('Copying to').bright} #{host}\n",
    title: Rainbow('Live view').bright
  ) do |lw|
    pb = TTY::ProgressBar.new(
      'Copying [:bar] :current/:total (:percent)',
      width: 80
    )

    ret = nix.copy(machine, toplevel) do |i, n, path|
      lw << "[#{i}/#{n}] #{path}"

      lw.sync_console do
        # The total number of store paths is only known once copying starts
        pb.update(total: n) if pb.total != n
        pb.advance
      end
    end

    raise "Error while copying system to #{host}" unless ret
  end

  true
end
|
|
449
|
+
|
|
450
|
+
# Copy system closures to all machines concurrently, bounded by
# --max-concurrent-copy, with a multi progress bar (one bar per host).
# Raises when the copy to any host fails.
def concurrent_copy(machines, host_generations, nix)
  LogView.open(
    header: "#{Rainbow("Copying to #{host_generations.length} machines").bright}\n",
    title: Rainbow('Live view').bright
  ) do |lw|
    multibar = TTY::ProgressBar::Multi.new(
      'Copying [:bar] :current/:total (:percent)',
      width: 80
    )
    executor = ConfCtl::ParallelExecutor.new(opts['max-concurrent-copy'])

    host_generations.each do |host, gen|
      pb = multibar.register(
        "#{host} [:bar] :current/:total (:percent)"
      )

      executor.add do
        ret = nix.copy(machines[host], gen.toplevel) do |i, n, path|
          lw << "#{host}> [#{i}/#{n}] #{path}"

          lw.sync_console do
            # Per-host totals become known only as nix reports them; refresh
            # the aggregate top bar too, resuming it if it already finished
            if pb.total != n
              pb.update(total: n)
              multibar.top_bar.resume if multibar.top_bar.done?
              multibar.top_bar.update(total: multibar.total)
            end

            pb.advance
          end
        end

        if !ret
          lw.sync_console do
            pb.format = "#{host}: error occurred"
            pb.advance
          end
        elsif pb.total.nil?
          # The progress block never ran, i.e. nothing needed copying
          lw.sync_console do
            pb.format = "#{host}: nothing to do"
            pb.advance
          end
        end

        # Worker return value: nil on success, the host name on failure
        ret ? nil : host
      end
    end

    retvals = executor.run
    failed = retvals.compact

    raise "Copy failed to: #{failed.join(', ')}" if failed.any?
  end
end
|
|
503
|
+
|
|
504
|
+
# Activate a previously copied system closure on a machine.
#
# With --dry-activate-first, a dry activation is attempted before the real
# one. For boot/switch actions, the system profile is updated as well so the
# new system is used after a reboot. Returns :skip when the user declines in
# interactive mode; raises on activation/profile errors.
def deploy_to_host(nix, host, machine, toplevel, action)
  LogView.open_with_logger(
    header: "#{Rainbow('Deploying to').bright} #{Rainbow(host).yellow}\n",
    title: Rainbow('Live view').bright,
    size: :auto,
    reserved_lines: 10
  ) do |lw|
    if opts['dry-activate-first']
      lw.sync_console do
        puts Rainbow(
          "Trying to activate configuration on #{host} " \
          "(#{machine.target_host})"
        ).yellow
      end

      raise "Error while activating configuration on #{host}" unless nix.activate(machine, toplevel, 'dry-activate')
    end

    lw.sync_console do
      puts Rainbow(
        "Activating configuration on #{host} (#{machine.target_host}): " \
        "#{action}"
      ).yellow
    end

    # The confirmation comes after the dry activation, so its output can be
    # reviewed before committing to the real activation
    return :skip if opts[:interactive] && !ask_confirmation(always: true)

    raise "Error while activating configuration on #{host}" unless nix.activate(machine, toplevel, action)

    if %w[boot switch].include?(action) && !nix.set_profile(machine, toplevel)
      raise "Error while setting profile on #{host}"
    end
  end
end
|
|
538
|
+
|
|
539
|
+
# Reboot a machine and, unless --wait-online is nowait, wait with a spinner
# until it is reachable again (or the timeout expires).
#
# Localhost is never rebooted. Returns :skip when the reboot is skipped.
def reboot_host(host, machine)
  if machine.localhost?
    puts Rainbow("Skipping reboot of #{host} as it is localhost").yellow
    return :skip
  end

  puts Rainbow("Rebooting #{host} (#{machine.target_host})").yellow

  return :skip if opts[:interactive] && !ask_confirmation(always: true)

  m = ConfCtl::MachineControl.new(machine)

  if wait_online == :nowait
    m.reboot
  else
    since = Time.now
    spinner = nil

    secs = m.reboot_and_wait(
      # :wait means wait indefinitely; any other value is a timeout in seconds
      timeout: wait_online == :wait ? nil : wait_online
    ) do |state, timeleft|
      if state == :reboot
        # Called once when the reboot is issued: start the spinner
        spinner = TTY::Spinner.new(
          ":spinner Waiting for #{host} (:seconds s)",
          format: :classic
        )
        spinner.auto_spin
      elsif state == :is_up
        spinner.success('up')
        next
      end

      if wait_online == :wait
        # No timeout: show elapsed seconds
        spinner.update(seconds: (Time.now - since).round)
      else
        # With timeout: show remaining seconds
        spinner.update(seconds: timeleft.round)
      end
    end

    puts Rainbow("#{host} (#{machine.target_host}) is online (took #{secs.round(1)}s to reboot)").yellow
  end
end
|
|
581
|
+
|
|
582
|
+
# Run health checks repeatedly until they pass or the user/options decide
# otherwise.
#
# In interactive mode, failures let the user continue, retry all checks,
# retry only the failed ones, or abort. Non-interactively, failures raise
# unless --keep-going is set.
def run_health_check_loop(machines)
  all_checks = machines.health_checks
  return if all_checks.empty?

  run_checks = all_checks

  if opts[:interactive]
    if machines.length > 1
      puts Rainbow("Running #{run_checks.length} health checks on #{machines.length} machines").yellow
    else
      puts Rainbow("Running #{run_checks.length} health checks on #{machines.first}").yellow
    end

    return unless ask_confirmation(always: true)
  end

  loop do
    failed = run_health_checks(machines, run_checks)
    return if failed.empty?

    if opts[:interactive]
      puts 'Health checks have failed'

      answer = ask_action(
        options: {
          'c' => 'Continue',
          'r' => 'Retry all',
          'f' => 'Retry failed',
          'a' => 'Abort'
        },
        default: 'a'
      )

      case answer
      when 'c'
        return
      when 'r'
        # Retry everything from scratch
        run_checks = all_checks
        next
      when 'f'
        # Retry only the checks that just failed
        run_checks = failed
        next
      when 'a'
        raise 'Aborting'
      end

    elsif opts['keep-going']
      puts 'Health checks have failed, going on'
      return

    else
      raise 'Health checks have failed'
    end
  end
end
|
|
637
|
+
|
|
638
|
+
# Run the given health checks (defaults to all checks of the given machines)
# in parallel with a live log view and progress bar, then print a per-machine
# failure summary.
#
# @return [Array<HealthChecks::Base>] failed checks
def run_health_checks(machines, run_checks = nil)
  run_checks ||= machines.health_checks

  tw = ConfCtl::ParallelExecutor.new(opts['max-jobs'] || 5)

  header =
    if machines.length > 1
      Rainbow("Running health checks on #{machines.length} machines").bright
    else
      Rainbow('Running health checks on ').bright + Rainbow(machines.first.to_s).yellow
    end

  header << "\n" << Rainbow('Full log: ').bright << ConfCtl::Logger.relative_path << "\n"

  LogView.open(
    header:,
    title: Rainbow('Live view').bright,
    size: :auto,
    reserved_lines: 10
  ) do |lw|
    pb = TTY::ProgressBar.new(
      'Checks [:bar] :current/:total (:percent)',
      width: 80,
      total: run_checks.length
    )

    run_checks.each_with_index do |check, i|
      tw.add do
        prefix = "[#{i + 1}/#{run_checks.length}] #{check.machine}> "
        lw << "#{prefix}#{check.description}"

        # The block is invoked on every failed attempt; keep the log file
        # around whenever anything went wrong
        check.run do |attempt, _errors|
          lw << "#{prefix}failed ##{attempt}: #{check.message}"
          ConfCtl::Logger.keep
        end

        lw << if check.successful?
                "#{prefix}succeeded"
              else
                "#{prefix}error: #{check.message}"
              end

        lw.sync_console do
          pb.advance
        end
      end
    end

    tw.run
    lw.flush
  end

  # Partition checks into successful and failed
  successful = []
  failed = []

  run_checks.each do |check|
    if check.successful?
      successful << check
    else
      failed << check
    end
  end

  puts "#{successful.length} checks passed, #{failed.length} failed"
  puts

  # Summarize failures grouped by machine
  failed_by_machine = {}

  failed.each do |check|
    failed_by_machine[check.machine] ||= []
    failed_by_machine[check.machine] << check
  end

  failed_by_machine.each do |machine, checks|
    puts "#{machine}: #{checks.length} failures"

    checks.each do |check|
      puts " #{check.message}"
    end

    puts
  end

  failed
end
|
|
724
|
+
|
|
725
|
+
# Health checks are only meaningful when the deployed system actually
# becomes the running one: on switch/test, or on boot followed by a reboot.
def need_health_checks?(action)
  case action
  when 'switch', 'test'
    true
  when 'boot'
    opts[:reboot]
  else
    false
  end
end
|
|
728
|
+
|
|
729
|
+
# Open an interactive shell on exactly one machine.
def run_ssh_interactive(machines)
  raise ArgumentError if machines.length != 1

  ask_confirmation! do
    puts 'Open interactive shell on the following machine:'
    list_machines(machines)
  end

  ConfCtl::MachineControl.new(machines.first).interactive_shell
end
|
|
741
|
+
|
|
742
|
+
# Confirm and dispatch an SSH command, in parallel or sequentially.
def run_ssh_command(machines, cmd)
  ask_confirmation! do
    puts 'Run command over SSH on the following machines:'
    list_machines(machines)
    puts
    puts "Command: #{cmd.map(&:inspect).join(' ')}"
  end

  unless opts[:parallel]
    run_ssh_command_one_by_one(machines, cmd)
    return
  end

  run_ssh_command_in_parallel(machines, cmd)
end
|
|
756
|
+
|
|
757
|
+
# Execute cmd on each machine sequentially. Without --aggregate, output (or
# the error message) is printed per host as it happens; with --aggregate,
# results and errors are collected and identical outcomes grouped at the end.
def run_ssh_command_one_by_one(machines, cmd)
  aggregate = opts[:aggregate]
  collected = {}

  machines.each do |host, machine|
    control = ConfCtl::MachineControl.new(machine)
    puts "#{host}:" unless aggregate

    begin
      outcome = run_ssh_command_on_machine(control, cmd)

      if aggregate
        collected[host] = outcome
      else
        puts outcome.out
      end
    rescue TTY::Command::ExitError => e
      if aggregate
        collected[host] = e
      else
        puts e.message
      end
    end

    puts unless aggregate
  end

  process_aggregated_results(collected) if aggregate
end
|
|
789
|
+
|
|
790
|
+
# Execute cmd on all machines concurrently with a progress bar, then print
# results — grouped when --aggregate is set, per host otherwise.
def run_ssh_command_in_parallel(machines, cmd)
  aggregate = opts[:aggregate]
  # NOTE(review): this hash is written from multiple executor threads without
  # a mutex; safe in practice under MRI's GVL, but worth confirming for other
  # Ruby implementations.
  results = {}
  tw = ConfCtl::ParallelExecutor.new(machines.length)

  LogView.open_with_logger(
    header: Rainbow('Executing').bright + " #{cmd.join(' ')}\n",
    title: Rainbow('Live view').bright,
    size: :auto,
    reserved_lines: 10
  ) do |lw|
    pb = TTY::ProgressBar.new(
      'Command [:bar] :current/:total (:percent)',
      width: 80,
      total: machines.length
    )

    machines.each do |host, machine|
      tw.add do
        mc = ConfCtl::MachineControl.new(machine)

        begin
          result = run_ssh_command_on_machine(mc, cmd)
          results[host] = result
        rescue TTY::Command::ExitError => e
          # Failed hosts store the exception itself
          results[host] = e
        end

        lw.sync_console { pb.advance }
      end
    end

    tw.run
    lw.flush
  end

  if aggregate
    process_aggregated_results(results)
    return
  end

  results.each do |host, result|
    puts "#{host}:"
    # NOTE(review): for failed hosts, result is a TTY::Command::ExitError —
    # verify it responds to #out, otherwise this raises NoMethodError.
    puts result.out
    puts
  end
end
|
|
837
|
+
|
|
838
|
+
# Run `cmd` on one machine via its MachineControl, merging stderr into
# stdout.
#
# stdin is fed from `--input-string` or `--input-file` when given
# (`--input-string` wins if both are set).
#
# @param mc [ConfCtl::MachineControl]
# @param cmd [Array<String>]
# @return [TTY::Command::Result]
# @raise [TTY::Command::ExitError] when the remote command fails
def run_ssh_command_on_machine(mc, cmd)
  cmd_opts = { err: :out }

  if opts['input-string']
    cmd_opts[:input] = opts['input-string']
  elsif opts['input-file']
    cmd_opts[:in] = opts['input-file']
  end

  mc.execute(*cmd, **cmd_opts)
end
|
|
849
|
+
|
|
850
|
+
# Group per-host command results by (exit status, output) and print one
# summary section per distinct result, listing the hosts that share it.
#
# @param results [Hash<String, #exit_status & #out>] host => result
def process_aggregated_results(results)
  groups = {}

  results.each do |host, result|
    # Hosts are considered equivalent when both the exit status and the
    # captured output match exactly.
    key = [result.exit_status, result.out]
    groups[key] ||= []
    groups[key] << host
  end

  groups.each do |key, hosts|
    exit_status, out = key
    puts "#{hosts.sort.join(', ')}:"
    puts "Exit status: #{exit_status}"
    puts out
    puts
  end
end
|
|
867
|
+
|
|
868
|
+
# Resolve `generation_name` to a build generation for every machine.
#
# @param machines [Enumerable] collection whose keys are host names
# @param generation_name [String] generation name, or 'current' for the
#   currently selected generation
# @return [Hash<String, Generation>] host => generation, hosts where the
#   generation was not found are omitted (after user confirmation)
# @raise [RuntimeError] when the generation exists on no host at all
def find_generations(machines, generation_name)
  host_generations = {}
  missing_hosts = []

  machines.each_key do |host|
    list = ConfCtl::Generation::BuildList.new(host)

    gen =
      if generation_name == 'current'
        list.current
      else
        list[generation_name]
      end

    if gen
      host_generations[host] = gen
    else
      missing_hosts << host
    end
  end

  raise 'No generation found' if host_generations.empty?

  # Some hosts have the generation, some don't: let the user decide
  # whether to continue with the partial set.
  if missing_hosts.any?
    ask_confirmation! do
      puts "Generation '#{generation_name}' was not found on the following hosts:"
      missing_hosts.each { |host| puts "  #{host}" }
      puts
      puts 'These hosts will be ignored.'
    end
  end

  host_generations
end
|
|
902
|
+
|
|
903
|
+
# Build system toplevels for all `machines`.
#
# Workflow: auto-update swpins, verify swpin validity, evaluate swpin
# paths per host, partition hosts into groups sharing identical swpin
# sets, then build each group via {#do_build_group}.
#
# @param machines [Enumerable<(String, Object)>] host name => machine pairs
# @return [Hash<String, Generation>] host => built generation
# @raise [RuntimeError] when some swpins are invalid and need updating
def do_build(machines)
  nix = ConfCtl::Nix.new(
    show_trace: opts['show-trace'],
    max_jobs: opts['max-jobs']
  )
  hosts_swpin_paths = {}

  autoupdate_swpins(machines)
  host_swpin_specs = check_swpins(machines)

  raise 'one or more swpins need to be updated' unless host_swpin_specs

  machines.each do |host, d|
    puts Rainbow("Evaluating swpins for #{host}...").bright
    hosts_swpin_paths[host] = nix.eval_host_swpins(host).update(d.nix_paths)
  end

  # Hosts with identical swpin paths are built together in one group.
  grps = swpin_build_groups(hosts_swpin_paths)
  puts
  puts "Machines will be built in #{grps.length} groups"
  puts
  host_generations = {}
  time = Time.now

  puts "#{Rainbow('Build log:').yellow} #{Rainbow(ConfCtl::Logger.path).cyan}"
  puts

  grps.each_with_index do |grp, i|
    hosts, swpin_paths = grp

    built_generations = do_build_group(
      i,
      grps.length,
      hosts,
      swpin_paths,
      host_swpin_specs,
      nix,
      time
    )

    host_generations.update(built_generations)
  end

  # Invert host => generation into generation name => hosts for the
  # final summary.
  generation_hosts = {}

  host_generations.each do |host, gen|
    generation_hosts[gen.name] ||= []
    generation_hosts[gen.name] << host
  end

  puts
  puts Rainbow('Built generations:').bright
  generation_hosts.each do |gen, hosts|
    puts Rainbow(gen).cyan
    hosts.each { |host| puts "  #{host}" }
  end

  host_generations
end
|
|
962
|
+
|
|
963
|
+
# Build one group of hosts that share the same swpin set, with a live
# log view and build/fetch progress bars.
#
# @param group_index [Integer] zero-based index of this group
# @param group_count [Integer] total number of groups
# @param hosts [Array<String>]
# @param swpin_paths [Hash] swpin name => store path
# @param host_swpin_specs [Hash]
# @param nix [ConfCtl::Nix]
# @param time [Time] timestamp shared by all groups of this build
# @return [Hash<String, Generation>] host => built generation
def do_build_group(group_index, group_count, hosts, swpin_paths, host_swpin_specs, nix, time)
  puts Rainbow('Building machines').bright
  hosts.each { |h| puts "  #{h}" }
  puts 'with swpins'
  swpin_paths.each { |k, v| puts "  #{k}=#{v}" }

  header = '' \
    << Rainbow('Command:').bright \
    << " #{format_command(10)}" \
    << "\n" \
    << Rainbow('Build group:').bright \
    << " #{group_index + 1}/#{group_count} (#{hosts.length} machines)" \
    << "\n" \
    << Rainbow('Full log: ').bright \
    << " #{ConfCtl::Logger.relative_path}" \
    << "\n\n"

  LogView.open_with_logger(
    header:,
    title: Rainbow('Live view').bright,
    size: :auto,
    reserved_lines: 10
  ) do |lw|
    multibar = TTY::ProgressBar::Multi.new(
      'nix-build [:bar] :current/:total (:percent)',
      width: 80
    )

    build_pb = multibar.register(
      'Building [:bar] :current/:total (:percent)'
    )

    fetch_pb = multibar.register(
      'Fetching [:bar] :current/:total (:percent)'
    )

    built_generations = nix.build_toplevels(
      hosts:,
      swpin_paths:,
      time:,
      host_swpin_specs:
    ) do |type, _progress, total, _path|
      # Totals are only known once Nix reports them; set each bar's
      # total exactly once, then advance on every callback.
      if type == :build
        lw.sync_console do
          build_pb.update(total:) if total > 0 && build_pb.total.nil?
          build_pb.advance
        end
      elsif type == :fetch
        lw.sync_console do
          fetch_pb.update(total:) if total > 0 && fetch_pb.total.nil?
          fetch_pb.advance
        end
      end

      # Once both child bars know their totals, size the top-level bar.
      if build_pb.total && fetch_pb.total && multibar.top_bar.total.nil?
        lw.sync_console do
          multibar.top_bar.update(total: multibar.total)
        end
      end
    end

    built_generations
  end
end
|
|
1027
|
+
|
|
1028
|
+
# Run auto-updates for all swpins relevant to `machines`: channel specs
# first, then core specs, then per-cluster-name specs. Saves whatever
# changed and refreshes the channel list if anything was updated.
#
# @param machines [Enumerable] machines whose swpins should be considered
def autoupdate_swpins(machines)
  puts Rainbow('Running swpins auto updates...').bright
  channels_update = []
  any_updated = false

  core = ConfCtl::Swpins::Core.get

  # Collect the union of channels used by the core config...
  core.channels.each do |c|
    channels_update << c unless channels_update.include?(c)
  end

  cluster_names = ConfCtl::Swpins::ClusterNameList.new(machines:)

  # ...and by every selected cluster name, without duplicates.
  cluster_names.each do |cn|
    cn.parse

    cn.channels.each do |c|
      channels_update << c unless channels_update.include?(c)
    end
  end

  channels_update.each do |c|
    updated = false

    c.specs.each do |name, s|
      next unless s.auto_update?

      puts "  updating #{c.name}.#{name}"
      s.prefetch_update
      updated = true
    end

    if updated
      c.save
      any_updated = true
    end
  end

  core_updated = false

  # Core specs that come from a channel were already handled above.
  core.specs.each do |name, s|
    next unless !s.from_channel? && s.auto_update?

    puts "  updating #{core.name}.#{name}"
    s.prefetch_update
    core_updated = true
  end

  if core_updated
    core.save
    core.pre_evaluate
  end

  cluster_names.each do |cn|
    updated = false

    cn.specs.each do |name, s|
      next unless !s.from_channel? && s.auto_update?

      puts "  updating #{cn.name}.#{name}"
      s.prefetch_update
      updated = true
    end

    if updated
      cn.save
      any_updated = true
    end
  end

  return unless any_updated || core_updated

  ConfCtl::Swpins::ChannelList.refresh
end
|
|
1102
|
+
|
|
1103
|
+
# Check validity of core swpins and of swpins of every cluster name
# selected by `machines`, printing per-spec status as it goes.
#
# @param machines [Enumerable]
# @return [Hash<String, Hash>, false] cluster name => specs when all
#   swpins are valid, false when at least one needs an update
def check_swpins(machines)
  ret = {}
  valid = true

  puts Rainbow('Checking core swpins...').bright

  ConfCtl::Swpins::Core.get.specs.each do |name, s|
    puts "  #{name} ... " +
         (s.valid? ? Rainbow('ok').green : Rainbow('needs update').cyan)
    valid = false unless s.valid?
  end

  ConfCtl::Swpins::ClusterNameList.new(machines:).each do |cn|
    cn.parse

    puts Rainbow("Checking swpins for #{cn.name}...").bright

    cn.specs.each do |name, s|
      puts "  #{name} ... " +
           (s.valid? ? Rainbow('ok').green : Rainbow('needs update').cyan)
      valid = false unless s.valid?
    end

    ret[cn.name] = cn.specs
  end

  valid ? ret : false
end
|
|
1131
|
+
|
|
1132
|
+
# Partition hosts into build groups by identical swpin sets.
#
# Hosts whose swpin hashes compare equal are built together. Groups are
# ordered by first appearance of each distinct swpin set, hosts within a
# group keep their original order.
#
# Uses a single Enumerable#group_by pass instead of the previous
# O(hosts * distinct-sets) nested scan.
#
# @param hosts_swpins [Hash<String, Hash>] host => swpin paths
# @return [Array<(Array<String>, Hash)>] [hosts, swpins] pairs
def swpin_build_groups(hosts_swpins)
  hosts_swpins
    .group_by { |_host, swpins| swpins }
    .map { |swpins, pairs| [pairs.map(&:first), swpins] }
end
|
|
1148
|
+
|
|
1149
|
+
# Compare deployed swpins on selected machines against target specs,
# yielding each comparable (host, swpin) pair to the caller's block.
#
# Targets come either from a named generation (`--generation`) or from
# the current cluster configuration. Output is paged.
#
# @yield [io, host, status, name, spec] for each swpin in a known state
def compare_swpins
  machines = select_machines(args[0]).managed

  ask_confirmation! do
    puts 'Compare swpins on the following machines:'
    list_machines(machines)
    puts
    puts "Generation: #{opts[:generation] || 'current configuration'}"
  end

  statuses = machines.transform_values do |machine|
    ConfCtl::MachineStatus.new(machine)
  end

  if opts[:generation]
    host_generations = find_generations(machines, opts[:generation])

    host_generations.each do |host, gen|
      statuses[host].target_swpin_specs = gen.swpin_specs
    end

    # Ignore statuses when no generation was found
    statuses.delete_if do |host, _st|
      !host_generations.has_key?(host)
    end
  else
    ConfCtl::Swpins::ClusterNameList.new(machines:).each do |cn|
      cn.parse

      statuses[cn.name].target_swpin_specs = cn.specs
    end
  end

  TTY::Pager.page(enabled: use_pager?) do |io|
    statuses.each do |host, st|
      st.query(toplevel: false, generations: false)
      st.evaluate

      unless st.online?
        io.puts "#{host} is offline"
        next
      end

      st.target_swpin_specs.each do |name, spec|
        # Optional second CLI argument filters swpins by pattern.
        next if args[1] && !ConfCtl::Pattern.match?(args[1], name)

        if st.swpins_info[name]
          io.puts "#{host} @ #{name}:"

          yield(io, host, st, name, spec)
        else
          io.puts "#{host} @ #{name} in unknown state"
        end

        io.puts ''
      end
    end
  end
end
|
|
1208
|
+
|
|
1209
|
+
# Parse the `--wait-online` option into @wait_online: :wait, :nowait,
# or an Integer timeout in seconds.
#
# @raise [GLI::BadCommandLine] on any other value
def parse_wait_online
  @wait_online =
    case opts['wait-online']
    when 'wait'
      :wait
    when 'nowait'
      :nowait
    when /^\d+$/
      opts['wait-online'].to_i
    else
      raise GLI::BadCommandLine, 'invalid value of --wait-online'
    end
end
|
|
1222
|
+
|
|
1223
|
+
# Format the currently running CLI invocation (program name + ARGV) for
# display, truncated with '...' to fit the terminal width.
#
# @param reserved_cols [Integer] columns to leave free on the line
# @return [String]
def format_command(reserved_cols = 0)
  cmd = "#{$0.split('/').last} #{ARGV.join(' ')}"
  # NOTE(review): IO.console can be nil when not attached to a terminal;
  # callers appear to use this only in interactive contexts — confirm.
  _, cols = IO.console.winsize
  max_length = cols - reserved_cols

  if cmd.length > max_length
    # Reserve 3 columns for the ellipsis.
    "#{cmd[0..(max_length - 4)]}..."
  else
    cmd
  end
end
|
|
1234
|
+
|
|
1235
|
+
# Format a duration in seconds as a human-readable string using the
# largest fitting unit, e.g. 90 => "1.5m", 3600 => "1.0h".
#
# Fixed: the comparison was `interval > n`, so exact unit multiples fell
# through to a smaller unit (3600 => "60.0m") and `format_duration(1)`
# raised ArgumentError. `>=` selects the intended unit.
#
# @param interval [Numeric] duration in seconds
# @return [String]
# @raise [ArgumentError] when interval is below one second
def format_duration(interval)
  {
    'd' => 24 * 60 * 60,
    'h' => 60 * 60,
    'm' => 60,
    's' => 1
  }.each do |unit, n|
    return "#{(interval / n.to_f).round(1)}#{unit}" if interval >= n
  end

  raise ArgumentError, "invalid time duration '#{interval}'"
end
|
|
1247
|
+
end
|
|
1248
|
+
end
|