cloud-mu 3.1.4 → 3.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ansible/roles/mu-windows/README.md +33 -0
- data/ansible/roles/mu-windows/defaults/main.yml +2 -0
- data/ansible/roles/mu-windows/handlers/main.yml +2 -0
- data/ansible/roles/mu-windows/meta/main.yml +53 -0
- data/ansible/roles/mu-windows/tasks/main.yml +20 -0
- data/ansible/roles/mu-windows/tests/inventory +2 -0
- data/ansible/roles/mu-windows/tests/test.yml +5 -0
- data/ansible/roles/mu-windows/vars/main.yml +2 -0
- data/cloud-mu.gemspec +4 -2
- data/cookbooks/mu-tools/recipes/selinux.rb +2 -1
- data/cookbooks/mu-tools/recipes/windows-client.rb +140 -144
- data/cookbooks/mu-tools/resources/windows_users.rb +44 -43
- data/extras/image-generators/AWS/win2k12.yaml +16 -13
- data/extras/image-generators/AWS/win2k16.yaml +16 -13
- data/extras/image-generators/AWS/win2k19.yaml +19 -0
- data/modules/mu.rb +72 -9
- data/modules/mu/adoption.rb +14 -2
- data/modules/mu/cloud.rb +111 -10
- data/modules/mu/clouds/aws.rb +23 -7
- data/modules/mu/clouds/aws/container_cluster.rb +640 -692
- data/modules/mu/clouds/aws/dnszone.rb +49 -45
- data/modules/mu/clouds/aws/firewall_rule.rb +177 -214
- data/modules/mu/clouds/aws/role.rb +17 -8
- data/modules/mu/clouds/aws/search_domain.rb +1 -1
- data/modules/mu/clouds/aws/server.rb +734 -1027
- data/modules/mu/clouds/aws/userdata/windows.erb +2 -1
- data/modules/mu/clouds/aws/vpc.rb +297 -786
- data/modules/mu/clouds/aws/vpc_subnet.rb +286 -0
- data/modules/mu/clouds/google/bucket.rb +1 -1
- data/modules/mu/clouds/google/container_cluster.rb +21 -17
- data/modules/mu/clouds/google/function.rb +8 -2
- data/modules/mu/clouds/google/server.rb +102 -32
- data/modules/mu/clouds/google/vpc.rb +1 -1
- data/modules/mu/config.rb +12 -1
- data/modules/mu/config/server.yml +1 -0
- data/modules/mu/defaults/AWS.yaml +51 -28
- data/modules/mu/groomers/ansible.rb +54 -17
- data/modules/mu/groomers/chef.rb +13 -7
- data/modules/mu/master/ssl.rb +0 -1
- data/modules/mu/mommacat.rb +8 -0
- data/modules/tests/ecs.yaml +23 -0
- data/modules/tests/includes-and-params.yaml +2 -1
- data/modules/tests/server-with-scrub-muisms.yaml +1 -0
- data/modules/tests/win2k12.yaml +25 -0
- data/modules/tests/win2k16.yaml +25 -0
- data/modules/tests/win2k19.yaml +25 -0
- data/requirements.txt +1 -0
- metadata +50 -4
- data/extras/image-generators/AWS/windows.yaml +0 -18
- data/modules/tests/needwork/win2k12.yaml +0 -13
data/cookbooks/mu-tools/resources/windows_users.rb
CHANGED
@@ -195,48 +195,49 @@ action :config do
  else
    # We want to run ec2config as admin user so Windows userdata executes as admin, however the local admin account doesn't have Logon As a Service right. Domain privileges are set separately

-    cookbook_file "c:\\Windows\\SysWOW64\\ntrights.exe" do
-      source "ntrights"
-    end
-    [new_resource.ssh_user, new_resource.ec2config_user].each { |usr|
-      pass = if usr == new_resource.ec2config_user
-        new_resource.ec2config_password
-      elsif usr == new_resource.ssh_user
-        new_resource.ssh_password
-      end
-
-      user usr do
-        password pass
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # cookbook_file "c:\\Windows\\SysWOW64\\ntrights.exe" do
+    #   source "ntrights"
+    # end
+    # [new_resource.ssh_user, new_resource.ec2config_user].each { |usr|
+    #   pass = if usr == new_resource.ec2config_user
+    #     new_resource.ec2config_password
+    #   elsif usr == new_resource.ssh_user
+    #     new_resource.ssh_password
+    #   end
+    #
+    #   user usr do
+    #     password pass
+    #     action :modify
+    #   end
+    #
+    #   group "Administrators" do
+    #     action :modify
+    #     members usr
+    #     append true
+    #   end
+    #
+    #   %w{SeDenyRemoteInteractiveLogonRight SeDenyInteractiveLogonRight SeServiceLogonRight}.each { |privilege|
+    #     batch "Grant local user #{usr} logon as service right" do
+    #       code "C:\\Windows\\SysWOW64\\ntrights +r #{privilege} -u #{usr}"
+    #     end
+    #   }
+    #
+    #   # XXX user resource seems not to really be setting password, or is setting # in such a way that the user is being required to change it. Workaround.
+    #   powershell_script "Adjust local account params for #{usr}" do
+    #     code <<-EOH
+    #       (([adsi]('WinNT://./#{usr}, user')).psbase.invoke('SetPassword', '#{pass}'))
+    #     EOH
+    #   end
+    #
+    #   if usr == new_resource.ssh_user
+    #
+    #     %w{SeCreateTokenPrivilege SeTcbPrivilege SeAssignPrimaryTokenPrivilege}.each { |privilege|
+    #       batch "Grant local user #{usr} logon as service right" do
+    #         code "C:\\Windows\\SysWOW64\\ntrights +r #{privilege} -u #{usr}"
+    #       end
+    #     }
+    #
+    #   end
+    # }
  end
end
data/extras/image-generators/AWS/win2k12.yaml
CHANGED
@@ -1,16 +1,19 @@
 ---
 appname: mu
+vpcs:
+- name: windowsbuild
 servers:
-
-
-
-
-
-
-
-
-
-
-
-
-
+- name: win2k12
+  platform: win2k12
+  vpc:
+    name: windowsbuild
+  size: m4.large
+  scrub_groomer: true
+  groomer: Ansible
+  run_list:
+  - mu-windows
+  create_image:
+    image_then_destroy: true
+    public: true
+    copy_to_regions:
+    - "#ALL"
data/extras/image-generators/AWS/win2k16.yaml
CHANGED
@@ -1,16 +1,19 @@
 ---
 appname: mu
+vpcs:
+- name: windowsbuild
 servers:
-
-
-
-
-
-
-
-
-
-
-
-
-
+- name: win2k16
+  platform: win2k16
+  vpc:
+    name: windowsbuild
+  size: m4.large
+  scrub_groomer: true
+  groomer: Ansible
+  run_list:
+  - mu-windows
+  create_image:
+    image_then_destroy: true
+    public: true
+    copy_to_regions:
+    - "#ALL"
data/extras/image-generators/AWS/win2k19.yaml
ADDED
@@ -0,0 +1,19 @@
+---
+appname: mu
+vpcs:
+- name: windowsbuild
+servers:
+- name: win2k19
+  platform: windows
+  vpc:
+    name: windowsbuild
+  size: m4.large
+  scrub_groomer: true
+  groomer: Ansible
+  run_list:
+  - mu-windows
+  create_image:
+    image_then_destroy: true
+    public: true
+    copy_to_regions:
+    - "#ALL"
data/modules/mu.rb
CHANGED
@@ -263,6 +263,7 @@ module MU
      end while newguy.nil?

      @@mu_global_thread_semaphore.synchronize {
+       MU.dupGlobals(Thread.current.object_id, target_thread: newguy)
        @@mu_global_threads << newguy
      }

@@ -295,6 +296,68 @@ module MU
    end
  end

+  # Boilerplate retry block executor, for making cloud API calls which might
+  # fail transiently.
+  #
+  # @param catchme [Array<Exception>]: Exception classes which should be caught and retried
+  # @param wait [Integer]: Number of seconds to wait between retries
+  # @param max [Integer]: Maximum number of retries; if less than 1, will retry indefinitely
+  # @param ignoreme [Array<Exception>]: Exception classes which can be silently treated as success. This will override any +loop_if+ block and return automatically (after invoking +always+, if the latter was specified).
+  # @param on_retry [Proc]: Optional block of code to invoke during retries
+  # @param always [Proc]: Optional block of code to invoke before returning or failing, a bit like +ensure+
+  # @param loop_if [Proc]: Optional block of code to invoke which will cause our block to be rerun until true
+  # @param loop_msg [String]: Message to display every third attempt
+  def self.retrier(catchme = nil, wait: 30, max: 0, ignoreme: [], on_retry: nil, always: nil, loop_if: nil, loop_msg: nil)
+
+    loop_if ||= Proc.new { false }
+
+    retries = 0
+    begin
+      retries += 1
+      loglevel = ((retries % 3) == 0) ? MU::NOTICE : MU::DEBUG
+      log_attempts = retries.to_s
+      log_attempts += (max > 0 ? "/"+max.to_s : "")
+      yield(retries, wait) if block_given?
+      if loop_if.call
+        MU.log loop_msg, loglevel, details: log_attempts if loop_msg
+        sleep wait
+      end
+    rescue StandardError => e
+      if catchme and catchme.include?(e.class)
+        if max > 0 and retries >= max
+          always.call if always and always.is_a?(Proc)
+          if ignoreme.include?(e.class)
+            return
+          else
+            raise e
+          end
+        end
+
+        if on_retry and on_retry.is_a?(Proc)
+          on_retry.call(e)
+        end
+
+        if retries == max-1
+          MU.log e.message, MU::WARN, details: caller
+          sleep wait # wait extra on the final attempt
+        else
+          MU.log e.message, loglevel, details: log_attempts
+        end
+
+        sleep wait
+        retry
+      elsif ignoreme and ignoreme.include?(e.class)
+        always.call if always and always.is_a?(Proc)
+        return
+      else
+        always.call if always and always.is_a?(Proc)
+        raise e
+      end
+    end while loop_if.call and (max < 1 or retries < max)
+
+    always.call if always and always.is_a?(Proc)
+  end
+
  if !ENV.has_key?("MU_LIBDIR") and ENV.has_key?("MU_INSTALLDIR")
    ENV['MU_LIBDIR'] = ENV['MU_INSTALLDIR']+"/lib"
  else
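For reference, a minimal usage sketch of the new MU.retrier helper added above; the exception classes and the fetch_remote_state helper are illustrative assumptions, not code from this release:

  # Hypothetical sketch: retry a flaky call up to 5 times, 10 seconds apart,
  # treating a missing file as silent success. fetch_remote_state is an assumed helper.
  MU.retrier([Timeout::Error, IOError], wait: 10, max: 5, ignoreme: [Errno::ENOENT]) { |attempt, _wait|
    MU.log "Attempt #{attempt} at fetching remote state", MU::DEBUG
    fetch_remote_state
  }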
@@ -394,20 +457,20 @@ module MU
  @@global_var_semaphore = Mutex.new

  # Set one of our global per-thread variables.
- def self.setVar(name, value)
+ def self.setVar(name, value, target_thread: Thread.current)
    @@global_var_semaphore.synchronize {
-     @@globals[Thread.current.object_id] ||= Hash.new
-     @@globals[Thread.current.object_id][name] ||= Hash.new
-     @@globals[Thread.current.object_id][name] = value
+     @@globals[target_thread.object_id] ||= Hash.new
+     @@globals[target_thread.object_id][name] ||= Hash.new
+     @@globals[target_thread.object_id][name] = value
    }
  end

  # Copy the set of global variables in use by another thread, typically our
  # parent thread.
- def self.dupGlobals(parent_thread_id)
+ def self.dupGlobals(parent_thread_id, target_thread: Thread.current)
    @@globals[parent_thread_id] ||= {}
    @@globals[parent_thread_id].each_pair { |name, value|
-     setVar(name, value)
+     setVar(name, value, target_thread: target_thread)
    }
  end

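A brief sketch of how the new target_thread keyword might be used when seeding a freshly spawned worker thread; the worker body and the curRegion variable below are illustrative assumptions:

  # Hypothetical sketch: copy this thread's globals into a new worker thread
  parent_id = Thread.current.object_id
  worker = Thread.new { sleep }                               # placeholder worker body
  MU.dupGlobals(parent_id, target_thread: worker)             # seed the worker with our globals
  MU.setVar("curRegion", "us-east-1", target_thread: worker)  # override a single global for the worker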
@@ -1015,15 +1078,15 @@ module MU

  # Generate a random password which will satisfy the complexity requirements of stock Amazon Windows AMIs.
  # return [String]: A password string.
- def self.generateWindowsPassword(safe_pattern: '~!@#%^&*_-+=`|(){}[]:;<>,.?', retries:
+ def self.generateWindowsPassword(safe_pattern: '~!@#%^&*_-+=`|(){}[]:;<>,.?', retries: 50)
    # We have dopey complexity requirements, be stringent here.
    # I'll be nice and not condense this into one elegant-but-unreadable regular expression
    attempts = 0
    safe_metachars = Regexp.escape(safe_pattern)
    begin
      if attempts > retries
-       MU.log "Failed to generate an adequate Windows password after #{attempts}", MU::ERR
-       raise MuError, "Failed to generate an adequate Windows password after #{attempts}"
+       MU.log "Failed to generate an adequate Windows password after #{attempts} attempts", MU::ERR
+       raise MuError, "Failed to generate an adequate Windows password after #{attempts} attempts"
      end
      winpass = Password.random(14..16)
      attempts += 1
data/modules/mu/adoption.rb
CHANGED
@@ -227,6 +227,16 @@ module MU

      Thread.abort_on_exception = true
      resources.values.each { |obj_thr|
+       obj_desc = nil
+       begin
+         obj_desc = obj_thr.cloud_desc
+       rescue StandardError
+       ensure
+         if !obj_desc
+           MU.log cloud+" "+type.to_s+" "+obj_thr.cloud_id+" did not return a cloud descriptor, skipping", MU::WARN
+           next
+         end
+       end
        threads << Thread.new(obj_thr) { |obj|

          kitten_cfg = obj.toKitten(rootparent: @default_parent, billing: @billing, habitats: @habitats)
@@ -463,7 +473,9 @@ module MU

    def resolveReferences(cfg, deploy, parent)
      if cfg.is_a?(MU::Config::Ref)
+       cfg.kitten(deploy) || cfg.kitten
        hashcfg = cfg.to_h
+
        if cfg.kitten(deploy)
          littermate = deploy.findLitterMate(type: cfg.type, name: cfg.name, cloud_id: cfg.id, habitat: cfg.habitat)

@@ -486,14 +498,14 @@ module MU
            hashcfg.delete("name") if cfg.id and !cfg.deploy_id
          end
        end
-     elsif hashcfg["id"]
+     elsif hashcfg["id"] and !hashcfg["name"]
        hashcfg.delete("deploy_id")
-       hashcfg.delete("name")
      else
        pp parent.cloud_desc
        raise Incomplete, "Failed to resolve reference on behalf of #{parent}"
      end
      hashcfg.delete("deploy_id") if hashcfg['deploy_id'] == deploy.deploy_id
+
      if parent and parent.config
        cred_cfg = MU::Cloud.const_get(parent.cloud).credConfig(parent.credentials)

data/modules/mu/cloud.rb
CHANGED
@@ -1344,8 +1344,10 @@ module MU
    # which can refer to external resources (@vpc, @loadbalancers,
    # @add_firewall_rules)
    def dependencies(use_cache: false, debug: false)
-     @dependencies
-     @loadbalancers
+     @dependencies ||= {}
+     @loadbalancers ||= []
+     @firewall_rules ||= []
+
      if @config.nil?
        return [@dependencies, @vpc, @loadbalancers]
      end
@@ -1560,9 +1562,101 @@ puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}"
        }
      end

+     # Munge in external resources referenced by the existing_deploys
+     # keyword
+     if @config["existing_deploys"] && !@config["existing_deploys"].empty?
+       @config["existing_deploys"].each { |ext_deploy|
+         if ext_deploy["cloud_id"]
+           found = MU::MommaCat.findStray(
+             @config['cloud'],
+             ext_deploy["cloud_type"],
+             cloud_id: ext_deploy["cloud_id"],
+             region: @config['region'],
+             dummy_ok: false
+           ).first
+
+           MU.log "Couldn't find existing resource #{ext_deploy["cloud_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil?
+           @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: found.mu_name, triggering_node: @mu_name)
+         elsif ext_deploy["mu_name"] && ext_deploy["deploy_id"]
+           MU.log "#{ext_deploy["mu_name"]} / #{ext_deploy["deploy_id"]}"
+           found = MU::MommaCat.findStray(
+             @config['cloud'],
+             ext_deploy["cloud_type"],
+             deploy_id: ext_deploy["deploy_id"],
+             mu_name: ext_deploy["mu_name"],
+             region: @config['region'],
+             dummy_ok: false
+           ).first
+
+           MU.log "Couldn't find existing resource #{ext_deploy["mu_name"]}/#{ext_deploy["deploy_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil?
+           @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: ext_deploy["mu_name"], triggering_node: @mu_name)
+         else
+           MU.log "Trying to find existing deploy, but either the cloud_id is not valid or no mu_name and deploy_id where provided", MU::ERR
+         end
+       }
+     end
+
+     if @config['dns_records'] && !@config['dns_records'].empty?
+       @config['dns_records'].each { |dnsrec|
+         if dnsrec.has_key?("name")
+           if dnsrec['name'].start_with?(@deploy.deploy_id.downcase) && !dnsrec['name'].start_with?(@mu_name.downcase)
+             MU.log "DNS records for #{@mu_name} seem to be wrong, deleting from current config", MU::WARN, details: dnsrec
+             dnsrec.delete('name')
+             dnsrec.delete('target')
+           end
+         end
+       }
+     end
+
      return [@dependencies, @vpc, @loadbalancers]
    end

+   # Using the automatically-defined +@vpc+ from {dependencies} in
+   # conjunction with our config, return our configured subnets.
+   # @return [Array<MU::Cloud::VPC::Subnet>]
+   def mySubnets
+     dependencies
+     if !@vpc or !@config["vpc"]
+       return nil
+     end
+
+     if @config["vpc"]["subnet_id"] or @config["vpc"]["subnet_name"]
+       @config["vpc"]["subnets"] ||= []
+       subnet_block = {}
+       subnet_block["subnet_id"] = @config["vpc"]["subnet_id"] if @config["vpc"]["subnet_id"]
+       subnet_block["subnet_name"] = @config["vpc"]["subnet_name"] if @config["vpc"]["subnet_name"]
+       @config["vpc"]["subnets"] << subnet_block
+       @config["vpc"]["subnets"].uniq!
+     end
+
+     if (!@config["vpc"]["subnets"] or @config["vpc"]["subnets"].empty?) and
+        !@config["vpc"]["subnet_id"]
+       return @vpc.subnets
+     end
+
+     subnets = []
+     @config["vpc"]["subnets"].each { |subnet|
+       subnet_obj = @vpc.getSubnet(cloud_id: subnet["subnet_id"].to_s, name: subnet["subnet_name"].to_s)
+       raise MuError, "Couldn't find a live subnet for #{self.to_s} matching #{subnet} in #{@vpc.to_s} (#{@vpc.subnets.map { |s| s.name }.join(",")})" if subnet_obj.nil?
+       subnets << subnet_obj
+     }
+
+     subnets
+   end
+
+   # @return [Array<MU::Cloud::FirewallRule>]
+   def myFirewallRules
+     dependencies
+
+     rules = []
+     if @dependencies.has_key?("firewall_rule")
+       rules = @dependencies['firewall_rule'].values
+     end
+     # XXX what other ways are these specified?
+
+     rules
+   end
+
    # Defaults any resources that don't declare their release-readiness to
    # ALPHA. That'll learn 'em.
    def self.quality
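To show how the new mySubnets and myFirewallRules accessors might be consumed, here is a minimal sketch of a resource implementation's create method; the method body and the cloud_id calls are assumptions for illustration, not code from this release:

  # Hypothetical sketch inside a MU::Cloud resource implementation
  def create
    dependencies                                          # resolves @vpc, @dependencies, etc.
    subnet_ids = (mySubnets || []).map { |s| s.cloud_id } # subnets selected by our vpc block
    sg_ids = myFirewallRules.map { |fw| fw.cloud_id }     # firewall_rule dependencies
    MU.log "Would deploy into #{subnet_ids.size} subnets with #{sg_ids.size} security groups", MU::NOTICE
  end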
@@ -1668,13 +1762,13 @@ puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}"
    def handleWindowsFail(e, retries, rebootable_fails, max_retries: 30, reboot_on_problems: false, retry_interval: 45)
      msg = "WinRM connection to https://"+@mu_name+":5986/wsman: #{e.message}, waiting #{retry_interval}s (attempt #{retries}/#{max_retries})"
      if e.class.name == "WinRM::WinRMAuthorizationError" or e.message.match(/execution expired/) and reboot_on_problems
-       if rebootable_fails > 0 and (rebootable_fails %
+       if rebootable_fails > 0 and (rebootable_fails % 7) == 0
          MU.log "#{@mu_name} still misbehaving, forcing Stop and Start from API", MU::WARN
          reboot(true) # vicious API stop/start
          sleep retry_interval*3
          rebootable_fails = 0
        else
-         if rebootable_fails ==
+         if rebootable_fails == 5
            MU.log "#{@mu_name} misbehaving, attempting to reboot from API", MU::WARN
            reboot # graceful API restart
            sleep retry_interval*2
@@ -1900,7 +1994,7 @@ puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}"
    # @param timeout [Integer]:
    # @param winrm_retries [Integer]:
    # @param reboot_on_problems [Boolean]:
-   def getWinRMSession(max_retries = 40, retry_interval = 60, timeout: 30, winrm_retries:
+   def getWinRMSession(max_retries = 40, retry_interval = 60, timeout: 30, winrm_retries: 2, reboot_on_problems: false)
      _nat_ssh_key, _nat_ssh_user, _nat_ssh_host, canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig
      @mu_name ||= @config['mu_name']

@@ -1924,7 +2018,8 @@ puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}"
      retries = 0
      rebootable_fails = 0
      begin
-
+       loglevel = retries > 4 ? MU::NOTICE : MU::DEBUG
+       MU.log "Calling WinRM on #{@mu_name}", loglevel, details: opts
        opts = {
          endpoint: 'https://'+@mu_name+':5986/wsman',
          retry_limit: winrm_retries,
@@ -1932,10 +2027,16 @@ puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}"
          ca_trust_path: "#{MU.mySSLDir}/Mu_CA.pem",
          transport: :ssl,
          operation_timeout: timeout,
-         client_cert: "#{MU.mySSLDir}/#{@mu_name}-winrm.crt",
-         client_key: "#{MU.mySSLDir}/#{@mu_name}-winrm.key"
        }
+       if retries % 2 == 0
+         opts[:user] = @config['windows_admin_username']
+         opts[:password] = getWindowsAdminPassword
+       else
+         opts[:client_cert] = "#{MU.mySSLDir}/#{@mu_name}-winrm.crt"
+         opts[:client_key] = "#{MU.mySSLDir}/#{@mu_name}-winrm.key"
+       end
        conn = WinRM::Connection.new(opts)
+       conn.logger.level = :debug if retries > 2
        MU.log "WinRM connection to #{@mu_name} created", MU::DEBUG, details: conn
        shell = conn.shell(:powershell)
        shell.run('ipconfig') # verify that we can do something
@@ -2069,10 +2170,10 @@ puts "CHOOSING #{@vpc.to_s} 'cause it has #{@config['vpc']['subnet_name']}"
        if params and params[:region]
          in_msg += " "+params[:region]
        end
-       if params and params[:flags] and params[:flags]["project"]
+       if params and params[:flags] and params[:flags]["project"] and !params[:flags]["project"].empty?
          in_msg += " project "+params[:flags]["project"]
        end
-       MU.log "Skipping #{shortname} cleanup method in #{in_msg} due to
+       MU.log "Skipping #{shortname} cleanup method in #{in_msg} due to #{e.class.name}: #{e.message}", MU::WARN, details: e.backtrace
        ok = false
      end
    }