cloud-mu 3.1.3 → 3.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Dockerfile +10 -2
- data/bin/mu-adopt +5 -1
- data/bin/mu-load-config.rb +2 -3
- data/bin/mu-run-tests +112 -27
- data/cloud-mu.gemspec +20 -20
- data/cookbooks/mu-tools/libraries/helper.rb +2 -1
- data/cookbooks/mu-tools/libraries/monkey.rb +35 -0
- data/cookbooks/mu-tools/recipes/google_api.rb +2 -2
- data/cookbooks/mu-tools/resources/disk.rb +1 -1
- data/extras/image-generators/Google/centos6.yaml +1 -0
- data/extras/image-generators/Google/centos7.yaml +1 -1
- data/modules/mommacat.ru +5 -15
- data/modules/mu.rb +10 -14
- data/modules/mu/adoption.rb +20 -14
- data/modules/mu/cleanup.rb +13 -9
- data/modules/mu/cloud.rb +26 -26
- data/modules/mu/clouds/aws.rb +100 -59
- data/modules/mu/clouds/aws/alarm.rb +4 -2
- data/modules/mu/clouds/aws/bucket.rb +25 -21
- data/modules/mu/clouds/aws/cache_cluster.rb +25 -23
- data/modules/mu/clouds/aws/collection.rb +21 -20
- data/modules/mu/clouds/aws/container_cluster.rb +47 -26
- data/modules/mu/clouds/aws/database.rb +57 -68
- data/modules/mu/clouds/aws/dnszone.rb +14 -14
- data/modules/mu/clouds/aws/endpoint.rb +20 -16
- data/modules/mu/clouds/aws/firewall_rule.rb +19 -16
- data/modules/mu/clouds/aws/folder.rb +7 -7
- data/modules/mu/clouds/aws/function.rb +15 -12
- data/modules/mu/clouds/aws/group.rb +14 -10
- data/modules/mu/clouds/aws/habitat.rb +16 -13
- data/modules/mu/clouds/aws/loadbalancer.rb +16 -15
- data/modules/mu/clouds/aws/log.rb +13 -10
- data/modules/mu/clouds/aws/msg_queue.rb +15 -8
- data/modules/mu/clouds/aws/nosqldb.rb +18 -11
- data/modules/mu/clouds/aws/notifier.rb +11 -6
- data/modules/mu/clouds/aws/role.rb +87 -70
- data/modules/mu/clouds/aws/search_domain.rb +30 -19
- data/modules/mu/clouds/aws/server.rb +102 -72
- data/modules/mu/clouds/aws/server_pool.rb +47 -28
- data/modules/mu/clouds/aws/storage_pool.rb +5 -6
- data/modules/mu/clouds/aws/user.rb +13 -10
- data/modules/mu/clouds/aws/vpc.rb +135 -121
- data/modules/mu/clouds/azure.rb +16 -9
- data/modules/mu/clouds/azure/container_cluster.rb +2 -3
- data/modules/mu/clouds/azure/firewall_rule.rb +10 -10
- data/modules/mu/clouds/azure/habitat.rb +8 -6
- data/modules/mu/clouds/azure/loadbalancer.rb +5 -5
- data/modules/mu/clouds/azure/role.rb +8 -10
- data/modules/mu/clouds/azure/server.rb +65 -25
- data/modules/mu/clouds/azure/user.rb +5 -7
- data/modules/mu/clouds/azure/vpc.rb +12 -15
- data/modules/mu/clouds/cloudformation.rb +8 -7
- data/modules/mu/clouds/cloudformation/vpc.rb +2 -4
- data/modules/mu/clouds/google.rb +39 -24
- data/modules/mu/clouds/google/bucket.rb +9 -11
- data/modules/mu/clouds/google/container_cluster.rb +27 -42
- data/modules/mu/clouds/google/database.rb +6 -9
- data/modules/mu/clouds/google/firewall_rule.rb +11 -10
- data/modules/mu/clouds/google/folder.rb +16 -9
- data/modules/mu/clouds/google/function.rb +127 -161
- data/modules/mu/clouds/google/group.rb +21 -18
- data/modules/mu/clouds/google/habitat.rb +18 -15
- data/modules/mu/clouds/google/loadbalancer.rb +14 -16
- data/modules/mu/clouds/google/role.rb +48 -31
- data/modules/mu/clouds/google/server.rb +105 -105
- data/modules/mu/clouds/google/server_pool.rb +12 -31
- data/modules/mu/clouds/google/user.rb +67 -13
- data/modules/mu/clouds/google/vpc.rb +58 -65
- data/modules/mu/config.rb +89 -1738
- data/modules/mu/config/bucket.rb +3 -3
- data/modules/mu/config/collection.rb +3 -3
- data/modules/mu/config/container_cluster.rb +2 -2
- data/modules/mu/config/dnszone.rb +5 -5
- data/modules/mu/config/doc_helpers.rb +517 -0
- data/modules/mu/config/endpoint.rb +3 -3
- data/modules/mu/config/firewall_rule.rb +118 -3
- data/modules/mu/config/folder.rb +3 -3
- data/modules/mu/config/function.rb +2 -2
- data/modules/mu/config/group.rb +3 -3
- data/modules/mu/config/habitat.rb +3 -3
- data/modules/mu/config/loadbalancer.rb +3 -3
- data/modules/mu/config/log.rb +3 -3
- data/modules/mu/config/msg_queue.rb +3 -3
- data/modules/mu/config/nosqldb.rb +3 -3
- data/modules/mu/config/notifier.rb +2 -2
- data/modules/mu/config/ref.rb +333 -0
- data/modules/mu/config/role.rb +3 -3
- data/modules/mu/config/schema_helpers.rb +508 -0
- data/modules/mu/config/search_domain.rb +3 -3
- data/modules/mu/config/server.rb +86 -58
- data/modules/mu/config/server_pool.rb +2 -2
- data/modules/mu/config/tail.rb +189 -0
- data/modules/mu/config/user.rb +3 -3
- data/modules/mu/config/vpc.rb +44 -4
- data/modules/mu/defaults/Google.yaml +2 -2
- data/modules/mu/deploy.rb +13 -10
- data/modules/mu/groomer.rb +1 -1
- data/modules/mu/groomers/ansible.rb +69 -24
- data/modules/mu/groomers/chef.rb +52 -44
- data/modules/mu/logger.rb +17 -14
- data/modules/mu/master.rb +317 -2
- data/modules/mu/master/chef.rb +3 -4
- data/modules/mu/master/ldap.rb +3 -3
- data/modules/mu/master/ssl.rb +12 -2
- data/modules/mu/mommacat.rb +85 -1766
- data/modules/mu/mommacat/daemon.rb +394 -0
- data/modules/mu/mommacat/naming.rb +366 -0
- data/modules/mu/mommacat/storage.rb +689 -0
- data/modules/tests/bucket.yml +4 -0
- data/modules/tests/{win2k12.yaml → needwork/win2k12.yaml} +0 -0
- data/modules/tests/regrooms/aws-iam.yaml +201 -0
- data/modules/tests/regrooms/bucket.yml +19 -0
- metadata +112 -102
|
@@ -0,0 +1,689 @@
|
|
|
1
|
+
# Copyright:: Copyright (c) 2020 eGlobalTech, Inc., all rights reserved
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the BSD-3 license (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License in the root of the project or at
|
|
6
|
+
#
|
|
7
|
+
# http://egt-labs.com/mu/LICENSE.html
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
module MU

  # MommaCat is in charge of managing metadata about resources we've created,
  # as well as orchestrating amongst them and bootstrapping nodes outside of
  # the normal synchronous deploy sequence invoked by *mu-deploy*.
  class MommaCat
    # Home directory of the user running this process.
    # NOTE(review): assumes a local passwd entry exists for the current uid — confirm for containerized runs.
    @myhome = Etc.getpwuid(Process.uid).dir
    # Filesystem home used by the Nagios monitoring user on the Mu Master
    @nagios_home = "/opt/mu/var/nagios_user_home"
    # Per-thread bookkeeping of held flock() file handles, keyed by Thread#object_id
    @locks = Hash.new
    # In-memory cache of parsed deployment.json blobs, keyed by deploy_id
    @deploy_cache = Hash.new
|
|
25
|
+
|
|
26
|
+
# Return a {MU::MommaCat} instance for an existing deploy. Use this instead
# of using #initialize directly to avoid loading deploys multiple times or
# stepping on the global context for the deployment you're really working
# on..
# @param deploy_id [String]: The deploy ID of the deploy to load.
# @param set_context_to_me [Boolean]: Whether new MommaCat objects should overwrite any existing per-thread global deploy variables.
# @param use_cache [Boolean]: If we have an existing object for this deploy, use that
# @return [MU::MommaCat]
# @raise [MuError] if deploy_id is nil or empty
def self.getLitter(deploy_id, set_context_to_me: false, use_cache: true)
  if deploy_id.nil? or deploy_id.empty?
    raise MuError, "Cannot fetch a deployment without a deploy_id"
  end

  # XXX this caching may be harmful, causing stale resource objects to stick
  # around. Have we fixed this? Sort of. Bad entries seem to have no kittens,
  # so force a reload if we see that. That's probably not the root problem.
  littercache = nil
  begin
    # Take a snapshot of the cache under the semaphore so we can inspect it
    # without holding the lock.
    @@litter_semaphore.synchronize {
      littercache = @@litters.dup
    }
    # Invalidate the cached object if deployment.json changed on disk since
    # we loaded it.
    if littercache[deploy_id] and @@litters_loadtime[deploy_id]
      deploy_root = File.expand_path(MU.dataDir+"/deployments")
      this_deploy_dir = deploy_root+"/"+deploy_id
      if File.exist?("#{this_deploy_dir}/deployment.json")
        lastmod = File.mtime("#{this_deploy_dir}/deployment.json")
        if lastmod > @@litters_loadtime[deploy_id]
          MU.log "Deployment metadata for #{deploy_id} was modified on disk, reload", MU::NOTICE
          use_cache = false
        end
      end
    end
  rescue ThreadError => e
    # already locked by a parent caller and this is a read op, so this is ok
    raise e if !e.message.match(/recursive locking/)
    littercache = @@litters.dup
  end

  if !use_cache or littercache[deploy_id].nil?
    # Replacing an existing cached object, so nudge GC afterward to reclaim it
    need_gc = !littercache[deploy_id].nil?
    newlitter = MU::MommaCat.new(deploy_id, set_context_to_me: set_context_to_me)
    # This, we have to synchronize, as it's a write
    @@litter_semaphore.synchronize {
      @@litters[deploy_id] = newlitter
      @@litters_loadtime[deploy_id] = Time.now
    }
    GC.start if need_gc
  elsif set_context_to_me
    # Cache hit: just point the per-thread globals at the existing object
    MU::MommaCat.setThreadContext(@@litters[deploy_id])
  end
  return @@litters[deploy_id]
#      MU::MommaCat.new(deploy_id, set_context_to_me: set_context_to_me)
end
|
|
79
|
+
|
|
80
|
+
# List the currently held flock() locks.
# Unlike {locks}, this does NOT take @lock_semaphore — presumably so it is
# safe to call from inside a signal trap handler, where acquiring a Mutex
# raises ThreadError (NOTE(review): confirm that's the intent of the name).
def self.trapSafeLocks;
  @locks
end
|
|
84
|
+
# List the currently held flock() locks, reading the bookkeeping hash under
# the class-level semaphore so a concurrent lock()/unlock() can't race us.
def self.locks
  @lock_semaphore.synchronize do
    @locks
  end
end
|
|
90
|
+
|
|
91
|
+
# Overwrite this deployment's configuration with a new version. Save the
# previous version as well.
# @param new_conf [Hash]: A new configuration, fully resolved by {MU::Config}
# @return [void]
def updateBasketofKittens(new_conf)
  loadDeploy
  if new_conf == @original_config
    # Explain why we're bailing instead of logging a bare deploy id.
    MU.log "New configuration for #{@deploy_id} is identical to the existing one, not updating", MU::WARN
    return
  end

  # Timestamped backup of the outgoing config
  backup = "#{deploy_dir}/basket_of_kittens.json.#{Time.now.to_i.to_s}"
  MU.log "Saving previous config of #{@deploy_id} to #{backup}"
  # Block form guarantees the handle is closed even if serialization raises
  File.open(backup, File::CREAT|File::TRUNC|File::RDWR, 0600) { |config|
    config.flock(File::LOCK_EX)
    config.puts JSON.pretty_generate(@original_config)
    config.flock(File::LOCK_UN)
  }

  @original_config = new_conf
#      save! # XXX this will happen later, more sensibly
  MU.log "New config saved to #{deploy_dir}/basket_of_kittens.json"
end
|
|
113
|
+
|
|
114
|
+
# Guards all reads/writes of the @locks bookkeeping hash
@lock_semaphore = Mutex.new

# Release all flock() locks held by the current thread.
def self.unlockAll
  if !@locks.nil? and !@locks[Thread.current.object_id].nil?
    # Work from a copy so we can iterate without worrying about contention
    # in lock() or unlock(). We can't just wrap our iterator block in a
    # semaphore here, because we're calling another method that uses the
    # same semaphore.
    # NOTE(review): the comment above looks stale — the block below DOES
    # synchronize on @lock_semaphore and releases locks directly rather than
    # calling unlock(); confirm which is intended.
    @lock_semaphore.synchronize {
      delete_list = []
      @locks[Thread.current.object_id].keys.each { |id|
        MU.log "Releasing lock on #{deploy_dir(MU.deploy_id)}/locks/#{id}.lock (thread #{Thread.current.object_id})", MU::DEBUG
        begin
          @locks[Thread.current.object_id][id].flock(File::LOCK_UN)
          @locks[Thread.current.object_id][id].close
        rescue IOError => e
          # Handle may already be closed; log and keep releasing the rest
          MU.log "Got #{e.inspect} unlocking #{id} on #{Thread.current.object_id}", MU::WARN
        end
        delete_list << id
      }
      # We do this here because we can't mangle a Hash while we're iterating
      # over it.
      delete_list.each { |id|
        @locks[Thread.current.object_id].delete(id)
      }
      # Drop the per-thread entry entirely once it's empty
      if @locks[Thread.current.object_id].size == 0
        @locks.delete(Thread.current.object_id)
      end
    }
  end
end
|
|
145
|
+
|
|
146
|
+
# Create/hold a flock() lock.
# @param id [String]: The lock identifier to acquire.
# @param nonblock [Boolean]: Whether to block while waiting for the lock. In non-blocking mode, we simply return false if the lock is not available.
# @param global [Boolean]: Use the installation-wide lock directory instead of the current deploy's.
# @return [Boolean]: true once the lock is held; false only in non-blocking mode when the lock was unavailable.
# @raise [MU::BootstrapTempFail] if interrupted while waiting for the lock
def self.lock(id, nonblock = false, global = false)
  raise MuError, "Can't pass a nil id to MU::MommaCat.lock" if id.nil?

  # Per-deploy locks live under the deploy directory; global ones under dataDir
  if !global
    lockdir = "#{deploy_dir(MU.deploy_id)}/locks"
  else
    lockdir = File.expand_path(MU.dataDir+"/locks")
  end

  if !Dir.exist?(lockdir)
    MU.log "Creating #{lockdir}", MU::DEBUG
    Dir.mkdir(lockdir, 0700)
  end

  # Register the open handle in per-thread bookkeeping before flocking it,
  # so unlockAll can always find it.
  @lock_semaphore.synchronize {
    if @locks[Thread.current.object_id].nil?
      @locks[Thread.current.object_id] = Hash.new
    end

    @locks[Thread.current.object_id][id] = File.open("#{lockdir}/#{id}.lock", File::CREAT|File::RDWR, 0600)
  }
  MU.log "Getting a lock on #{lockdir}/#{id}.lock (thread #{Thread.current.object_id})...", MU::DEBUG
  begin
    if nonblock
      # LOCK_NB makes flock return false instead of blocking
      if !@locks[Thread.current.object_id][id].flock(File::LOCK_EX|File::LOCK_NB)
        return false
      end
    else
      @locks[Thread.current.object_id][id].flock(File::LOCK_EX)
    end
  rescue IOError
    raise MU::BootstrapTempFail, "Interrupted waiting for lock on thread #{Thread.current.object_id}, probably just a node rebooting as part of a synchronous install"
  end
  MU.log "Lock on #{lockdir}/#{id}.lock on thread #{Thread.current.object_id} acquired", MU::DEBUG
  return true
end
|
|
186
|
+
|
|
187
|
+
# Release a flock() lock.
# @param id [String]: The lock identifier to release.
# @param global [Boolean]: Look in the installation-wide lock directory instead of the current deploy's.
# @return [void] (silently a no-op if this thread doesn't hold the lock)
def self.unlock(id, global = false)
  raise MuError, "Can't pass a nil id to MU::MommaCat.unlock" if id.nil?
  lockdir = nil
  if !global
    lockdir = "#{deploy_dir(MU.deploy_id)}/locks"
  else
    lockdir = File.expand_path(MU.dataDir+"/locks")
  end
  # Bail out early (returning from the method) if this thread holds no such
  # lock; the check itself is done under the semaphore.
  @lock_semaphore.synchronize {
    return if @locks.nil? or @locks[Thread.current.object_id].nil? or @locks[Thread.current.object_id][id].nil?
  }
  MU.log "Releasing lock on #{lockdir}/#{id}.lock (thread #{Thread.current.object_id})", MU::DEBUG
  begin
    @locks[Thread.current.object_id][id].flock(File::LOCK_UN)
    @locks[Thread.current.object_id][id].close
    if !@locks[Thread.current.object_id].nil?
      @locks[Thread.current.object_id].delete(id)
    end
    # Drop the per-thread entry entirely once it's empty
    if @locks[Thread.current.object_id].size == 0
      @locks.delete(Thread.current.object_id)
    end
  rescue IOError => e
    MU.log "Got #{e.inspect} unlocking #{id} on #{Thread.current.object_id}", MU::WARN
  end
end
|
|
214
|
+
|
|
215
|
+
# Remove a deployment's metadata: delete its directory tree and scrub its
# entry from the shared unique_ids ledger.
# @param deploy_id [String]: The deployment identifier to remove.
# @raise [MuError] if deploy_id is nil or empty
def self.purge(deploy_id)
  if deploy_id.nil? or deploy_id.empty?
    raise MuError, "Got nil deploy_id in MU::MommaCat.purge"
  end
  # XXX archiving is better than annihilating
  path = File.expand_path(MU.dataDir+"/deployments")
  if Dir.exist?(path+"/"+deploy_id)
    # Release any flock() handles we hold before deleting the files under them
    unlockAll
    MU.log "Purging #{path}/#{deploy_id}" if File.exist?(path+"/"+deploy_id+"/deployment.json")

    FileUtils.rm_rf(path+"/"+deploy_id, :secure => true)
  end
  # Rewrite unique_ids in place, dropping lines that reference this deploy
  if File.exist?(path+"/unique_ids")
    File.open(path+"/unique_ids", File::CREAT|File::RDWR, 0600) { |f|
      newlines = []
      f.flock(File::LOCK_EX)
      f.readlines.each { |line|
        # Regexp.quote so metacharacters in a deploy_id can't corrupt the match
        newlines << line if !line.match(/:#{Regexp.quote(deploy_id)}$/)
      }
      f.rewind
      f.truncate(0)
      f.puts(newlines)
      f.flush
      f.flock(File::LOCK_UN)
    }
  end
end
|
|
244
|
+
|
|
245
|
+
# Remove the metadata of the currently loaded deployment.
# Convenience wrapper delegating to {MU::MommaCat.purge} with the deploy id
# from the per-thread global context.
def purge!
  MU::MommaCat.purge(MU.deploy_id)
end
|
|
249
|
+
|
|
250
|
+
# Return a list of all currently active deploy identifiers.
# A "deploy" is any subdirectory of the deployments data directory; entries
# come back in reverse directory-listing order.
# @return [Array<String>]
def self.listDeploys
  base = "#{MU.dataDir}/deployments"
  return [] unless Dir.exist?(base)
  Dir.entries(base).reverse_each.each_with_object([]) do |muid, found|
    next if muid == "." or muid == ".."
    found << muid if Dir.exist?("#{base}/#{muid}")
  end
end
|
|
261
|
+
|
|
262
|
+
# Return a list of all nodes in all deployments. Does so without loading
# deployments fully.
# @return [Hash]: node mu_name => its metadata blob, merged across deploys
def self.listAllNodes
  nodes = Hash.new
  MU::MommaCat.deploy_struct_semaphore.synchronize {
    MU::MommaCat.listDeploys.each { |deploy|
      if !Dir.exist?(MU::MommaCat.deploy_dir(deploy)) or
          !File.size?("#{MU::MommaCat.deploy_dir(deploy)}/deployment.json")
        MU.log "Didn't see deployment metadata for '#{deploy}'", MU::WARN
        next
      end
      # Hold an exclusive flock while reading so we don't race a writer
      # mid-save. NOTE(review): LOCK_SH would suffice for a read — confirm
      # LOCK_EX is intentional.
      data = File.open("#{MU::MommaCat.deploy_dir(deploy)}/deployment.json", File::RDONLY)
      MU.log "Getting lock to read #{MU::MommaCat.deploy_dir(deploy)}/deployment.json", MU::DEBUG
      data.flock(File::LOCK_EX)
      begin
        deployment = JSON.parse(File.read("#{MU::MommaCat.deploy_dir(deploy)}/deployment.json"))
        deployment["deploy_id"] = deploy
        if deployment.has_key?("servers")
          deployment["servers"].each_key { |nodeclass|
            deployment["servers"][nodeclass].each_pair { |mu_name, metadata|
              nodes[mu_name] = metadata
            }
          }
        end
      rescue JSON::ParserError => e
        # A corrupt deploy shouldn't abort the whole listing; log and move on
        MU.log "JSON parse failed on #{MU::MommaCat.deploy_dir(deploy)}/deployment.json", MU::ERR, details: e.message
      end
      data.flock(File::LOCK_UN)
      data.close
    }
  }
  return nodes
end
|
|
296
|
+
|
|
297
|
+
# @return [String]: The Mu Master filesystem directory holding metadata for the current deployment
# Instance-level convenience wrapper around the class method of the same name.
def deploy_dir
  MU::MommaCat.deploy_dir(@deploy_id)
end
|
|
301
|
+
|
|
302
|
+
# Locate and return the deploy, if any, which matches the provided origin
# description
# @param origin [Hash]
# @return [MU::MommaCat, nil]: the first matching deploy, or nil if none match
def self.findMatchingDeploy(origin)
  listDeploys.each do |candidate|
    descriptor = deploy_dir(candidate)+"/origin.json"
    next unless File.exist?(descriptor)
    stored_origin = JSON.parse(File.read(descriptor))
    next unless stored_origin == origin
    MU.log "Deploy #{candidate} matches origin hash, loading", details: origin
    return MU::MommaCat.new(candidate)
  end
  nil
end
|
|
317
|
+
|
|
318
|
+
# Synchronize all in-memory information related to this to deployment to
# disk.
# @param triggering_node [MU::Cloud::Server]: If we're being triggered by the addition/removal/update of a node, this allows us to notify any sibling or dependent nodes of changes
# @param force [Boolean]: Save even if +no_artifacts+ is set
# @param origin [Hash]: Optional blob of data indicating how this deploy was created
def save!(triggering_node = nil, force: false, origin: nil)

  # Honor no-artifacts mode unless the caller insists
  return if @no_artifacts and !force

  MU::MommaCat.deploy_struct_semaphore.synchronize {
    MU.log "Saving deployment #{MU.deploy_id}", MU::DEBUG

    if !Dir.exist?(deploy_dir)
      MU.log "Creating #{deploy_dir}", MU::DEBUG
      Dir.mkdir(deploy_dir, 0700)
    end

    # Each artifact below is written 0600 — these files contain secrets
    if !origin.nil?
      o_file = File.new("#{deploy_dir}/origin.json", File::CREAT|File::TRUNC|File::RDWR, 0600)
      o_file.puts JSON.pretty_generate(origin)
      o_file.close
    end

    if !@private_key.nil?
      privkey = File.new("#{deploy_dir}/private_key", File::CREAT|File::TRUNC|File::RDWR, 0600)
      privkey.puts @private_key
      privkey.close
    end

    if !@public_key.nil?
      pubkey = File.new("#{deploy_dir}/public_key", File::CREAT|File::TRUNC|File::RDWR, 0600)
      pubkey.puts @public_key
      pubkey.close
    end

    if !@deployment.nil? and @deployment.size > 0
      # Backfill generable fields before serializing
      @deployment['handle'] = MU.handle if @deployment['handle'].nil? and !MU.handle.nil?
      @deployment['public_key'] = @public_key
      @deployment['timestamp'] ||= @timestamp
      @deployment['seed'] ||= @seed
      @deployment['appname'] ||= @appname
      @deployment['handle'] ||= @handle
      @deployment['ssh_public_key'] ||= @ssh_public_key if @ssh_public_key
      begin
        # XXX doing this to trigger JSON errors before stomping the stored
        # file...
        JSON.pretty_generate(@deployment, max_nesting: false)
        deploy = File.new("#{deploy_dir}/deployment.json", File::CREAT|File::TRUNC|File::RDWR, 0600)
        MU.log "Getting lock to write #{deploy_dir}/deployment.json", MU::DEBUG
        deploy.flock(File::LOCK_EX)
        deploy.puts JSON.pretty_generate(@deployment, max_nesting: false)
      rescue JSON::NestingError => e
        MU.log e.inspect, MU::ERR, details: @deployment
        raise MuError, "Got #{e.message} trying to save deployment"
      rescue Encoding::UndefinedConversionError => e
        MU.log e.inspect, MU::ERR, details: @deployment
        raise MuError, "Got #{e.message} at #{e.error_char.dump} (#{e.source_encoding_name} => #{e.destination_encoding_name}) trying to save deployment"
      end
      deploy.flock(File::LOCK_UN)
      deploy.close
      @need_deploy_flush = false
      # Refresh the shared in-memory litter cache with this saved state
      MU::MommaCat.updateLitter(@deploy_id, self)
    end

    if !@original_config.nil? and @original_config.is_a?(Hash)
      config = File.new("#{deploy_dir}/basket_of_kittens.json", File::CREAT|File::TRUNC|File::RDWR, 0600)
      config.puts JSON.pretty_generate(MU::Config.manxify(@original_config))
      config.close
    end

    if !@ssh_private_key.nil?
      key = File.new("#{deploy_dir}/node_ssh.key", File::CREAT|File::TRUNC|File::RDWR, 0600)
      key.puts @ssh_private_key
      key.close
    end
    if !@ssh_public_key.nil?
      key = File.new("#{deploy_dir}/node_ssh.pub", File::CREAT|File::TRUNC|File::RDWR, 0600)
      key.puts @ssh_public_key
      key.close
    end
    if !@ssh_key_name.nil?
      key = File.new("#{deploy_dir}/ssh_key_name", File::CREAT|File::TRUNC|File::RDWR, 0600)
      key.puts @ssh_key_name
      key.close
    end
    if !@environment.nil?
      env = File.new("#{deploy_dir}/environment_name", File::CREAT|File::TRUNC|File::RDWR, 0600)
      env.puts @environment
      env.close
    end
    if !@deploy_secret.nil?
      # print, not puts: the secret must be written byte-exact, no newline
      secret = File.new("#{deploy_dir}/deploy_secret", File::CREAT|File::TRUNC|File::RDWR, 0600)
      secret.print @deploy_secret
      secret.close
    end
    if !@secrets.nil?
      secretdir = "#{deploy_dir}/secrets"
      if !Dir.exist?(secretdir)
        MU.log "Creating #{secretdir}", MU::DEBUG
        Dir.mkdir(secretdir, 0700)
      end
      # One file per (type, server) pair
      @secrets.each_pair { |type, servers|
        servers.each_pair { |server, svr_secret|
          key = File.new("#{secretdir}/#{type}.#{server}", File::CREAT|File::TRUNC|File::RDWR, 0600)
          key.puts svr_secret
          key.close
        }
      }
    end
  }

  # Update groomer copies of this metadata
  syncLitter(@deployment['servers'].keys, triggering_node: triggering_node, save_only: true) if @deployment.has_key?("servers")
end
|
|
432
|
+
|
|
433
|
+
# Find one or more resources by their Mu resource name, and return
# MommaCat objects for their containing deploys, their BoK config data,
# and their deployment data.
#
# @param type [String]: The type of resource, e.g. "vpc" or "server."
# @param name [String]: The Mu resource class, typically the name field of a Basket of Kittens resource declaration.
# @param mu_name [String]: The fully-expanded Mu resource name, e.g. MGMT-PROD-2015040115-FR-ADMGMT2
# @param deploy_id [String]: The deployment to search. Will search all deployments if not specified.
# @param use_cache [Boolean]: Whether to trust the in-memory @deploy_cache for deploys whose on-disk mtime matches.
# @return [Hash,Array<Hash>]: deploy_id => [metadata blobs]
def self.getResourceMetadata(type, name: nil, deploy_id: nil, use_cache: true, mu_name: nil)
  if type.nil?
    raise MuError, "Can't call getResourceMetadata without a type argument"
  end
  # Normalize the caller's type string to the canonical plural config name
  _shortclass, _cfg_name, type, _classname = MU::Cloud.getResourceNames(type)

  # first, check our in-memory deploys, which may or may not have been
  # written to disk yet.
  littercache = nil
  begin
    @@litter_semaphore.synchronize {
      littercache = @@litters.dup
    }
  rescue ThreadError => e
    # already locked by a parent caller and this is a read op, so this is ok
    raise e if !e.message.match(/recursive locking/)
    littercache = @@litters.dup
  end
  # Seed the deploy cache from live in-memory objects
  littercache.each_pair { |deploy, momma|
    @@deploy_struct_semaphore.synchronize {
      @deploy_cache[deploy] = {
        "mtime" => Time.now,
        "data" => momma.deployment
      }
    }
  }

  # Second pass: walk on-disk deployments and (re)load any whose cache entry
  # is missing or stale by mtime.
  deploy_root = File.expand_path(MU.dataDir+"/deployments")
  MU::MommaCat.deploy_struct_semaphore.synchronize {
    if Dir.exist?(deploy_root)
      Dir.entries(deploy_root).each { |deploy|
        this_deploy_dir = deploy_root+"/"+deploy
        next if deploy == "." or deploy == ".." or !Dir.exist?(this_deploy_dir)
        next if deploy_id and deploy_id != deploy

        if !File.size?(this_deploy_dir+"/deployment.json")
          MU.log "#{this_deploy_dir}/deployment.json doesn't exist, skipping when loading cache", MU::DEBUG
          next
        end
        if @deploy_cache[deploy].nil? or !use_cache
          @deploy_cache[deploy] = Hash.new
        elsif @deploy_cache[deploy]['mtime'] == File.mtime("#{this_deploy_dir}/deployment.json")
          MU.log "Using cached copy of deploy #{deploy} from #{@deploy_cache[deploy]['mtime']}", MU::DEBUG

          next
        end

        @deploy_cache[deploy] = Hash.new if !@deploy_cache.has_key?(deploy)
        MU.log "Caching deploy #{deploy}", MU::DEBUG
        # Record mtime BEFORE parsing so a concurrent writer invalidates us
        lock = File.open("#{this_deploy_dir}/deployment.json", File::RDONLY)
        lock.flock(File::LOCK_EX)
        @deploy_cache[deploy]['mtime'] = File.mtime("#{this_deploy_dir}/deployment.json")

        begin
          @deploy_cache[deploy]['data'] = JSON.parse(File.read("#{this_deploy_dir}/deployment.json"))
          lock.flock(File::LOCK_UN)

          next if @deploy_cache[deploy].nil? or @deploy_cache[deploy]['data'].nil?
          # Populate some generable entries that should be in the deploy
          # data. Also, bounce out if we realize we've found exactly what
          # we needed already.
          MU::Cloud.resource_types.values.each { |attrs|

            next if @deploy_cache[deploy]['data'][attrs[:cfg_plural]].nil?
            if !attrs[:has_multiples]
              @deploy_cache[deploy]['data'][attrs[:cfg_plural]].each_pair { |nodename, data|
                # XXX we don't actually store node names for some resources, need to farm them
                # and fix metadata
                # if !mu_name.nil? and nodename == mu_name
                #   return { deploy => [data] }
                # end
              }
            else
              @deploy_cache[deploy]['data'][attrs[:cfg_plural]].each_pair { |node_class, nodes|
                next if nodes.nil? or !nodes.is_a?(Hash)
                nodes.each_pair { |nodename, data|
                  next if !data.is_a?(Hash)
                  # Mutates cached blobs in place with synthesized fields
                  data['#MU_NODE_CLASS'] = node_class
                  if !data.has_key?("cloud") # XXX kludge until old metadata gets fixed
                    data["cloud"] = MU::Config.defaultCloud
                  end
                  data['#MU_NAME'] = nodename
                  if !mu_name.nil? and nodename == mu_name
                    # Early exit: exact mu_name hit in the requested deploy.
                    # NOTE(review): returning from here skips the LOCK_UN/close
                    # below; the fd is released at GC/exit — confirm acceptable.
                    return {deploy => [data]} if deploy_id && deploy == deploy_id
                  end
                }
              }
            end
          }
        rescue JSON::ParserError => e
          raise MuError, "JSON parse failed on #{this_deploy_dir}/deployment.json\n\n"+File.read("#{this_deploy_dir}/deployment.json")
        end
        lock.flock(File::LOCK_UN)
        lock.close
      }
    end
  }

  matches = {}

  # Finally, answer the query out of the (now fresh) cache
  if deploy_id.nil?
    @deploy_cache.each_key { |deploy|
      next if !@deploy_cache[deploy].has_key?('data')
      next if !@deploy_cache[deploy]['data'].has_key?(type)
      if !name.nil?
        next if @deploy_cache[deploy]['data'][type][name].nil?
        matches[deploy] ||= []
        matches[deploy] << @deploy_cache[deploy]['data'][type][name].dup
      else
        matches[deploy] ||= []
        matches[deploy].concat(@deploy_cache[deploy]['data'][type].values)
      end
    }
    return matches
  elsif !@deploy_cache[deploy_id].nil?
    if !@deploy_cache[deploy_id]['data'].nil? and
       !@deploy_cache[deploy_id]['data'][type].nil?
      if !name.nil?
        if !@deploy_cache[deploy_id]['data'][type][name].nil?
          matches[deploy_id] ||= []
          matches[deploy_id] << @deploy_cache[deploy_id]['data'][type][name].dup
        else
          return matches # nothing, actually
        end
      else
        matches[deploy_id] = @deploy_cache[deploy_id]['data'][type].values
      end
    end
  end

  return matches
end
|
|
574
|
+
|
|
575
|
+
# Get the deploy directory
# @param deploy_id [String]
# @return [String]: path to the deploy's metadata directory (created lazily at the top level)
def self.deploy_dir(deploy_id)
  raise MuError, "deploy_dir must get a deploy_id if called as class method (from #{caller[0]}; #{caller[1]})" if deploy_id.nil?
  # XXX this will blow up if someone sticks MU in /
  base = File.expand_path(MU.dataDir+"/deployments")
  unless Dir.exist?(base)
    MU.log "Creating #{base}", MU::DEBUG
    Dir.mkdir(base, 0700)
  end
  base+"/"+deploy_id
end
|
|
589
|
+
|
|
590
|
+
# Does the deploy with the given id exist?
|
|
591
|
+
# @param deploy_id [String]
|
|
592
|
+
# @return [String]
|
|
593
|
+
def self.deploy_exists?(deploy_id)
|
|
594
|
+
if deploy_id.nil? or deploy_id.empty?
|
|
595
|
+
MU.log "Got nil deploy_id in MU::MommaCat.deploy_exists?", MU::WARN
|
|
596
|
+
return
|
|
597
|
+
end
|
|
598
|
+
path = File.expand_path(MU.dataDir+"/deployments")
|
|
599
|
+
if !Dir.exist?(path)
|
|
600
|
+
Dir.mkdir(path, 0700)
|
|
601
|
+
end
|
|
602
|
+
deploy_path = File.expand_path(path+"/"+deploy_id)
|
|
603
|
+
return Dir.exist?(deploy_path)
|
|
604
|
+
end
|
|
605
|
+
|
|
606
|
+
private
|
|
607
|
+
|
|
608
|
+
###########################################################################
|
|
609
|
+
###########################################################################
|
|
610
|
+
def loadDeployFromCache(set_context_to_me = true)
|
|
611
|
+
return false if !File.size?(deploy_dir+"/deployment.json")
|
|
612
|
+
|
|
613
|
+
deploy = File.open("#{deploy_dir}/deployment.json", File::RDONLY)
|
|
614
|
+
MU.log "Getting lock to read #{deploy_dir}/deployment.json", MU::DEBUG
|
|
615
|
+
# deploy.flock(File::LOCK_EX)
|
|
616
|
+
begin
|
|
617
|
+
Timeout::timeout(90) {deploy.flock(File::LOCK_EX)}
|
|
618
|
+
rescue Timeout::Error
|
|
619
|
+
raise MuError, "Timed out trying to get an exclusive lock on #{deploy_dir}/deployment.json"
|
|
620
|
+
end
|
|
621
|
+
|
|
622
|
+
begin
|
|
623
|
+
@deployment = JSON.parse(File.read("#{deploy_dir}/deployment.json"))
|
|
624
|
+
rescue JSON::ParserError => e
|
|
625
|
+
MU.log "JSON parse failed on #{deploy_dir}/deployment.json", MU::ERR, details: e.message
|
|
626
|
+
end
|
|
627
|
+
|
|
628
|
+
deploy.flock(File::LOCK_UN)
|
|
629
|
+
deploy.close
|
|
630
|
+
|
|
631
|
+
setThreadContextToMe if set_context_to_me
|
|
632
|
+
|
|
633
|
+
@timestamp = @deployment['timestamp']
|
|
634
|
+
@seed = @deployment['seed']
|
|
635
|
+
@appname = @deployment['appname']
|
|
636
|
+
@handle = @deployment['handle']
|
|
637
|
+
|
|
638
|
+
true
|
|
639
|
+
end
|
|
640
|
+
|
|
641
|
+
###########################################################################
|
|
642
|
+
###########################################################################
|
|
643
|
+
def loadDeploy(deployment_json_only = false, set_context_to_me: true)
|
|
644
|
+
MU::MommaCat.deploy_struct_semaphore.synchronize {
|
|
645
|
+
success = loadDeployFromCache(set_context_to_me)
|
|
646
|
+
|
|
647
|
+
return if deployment_json_only and success
|
|
648
|
+
|
|
649
|
+
if File.exist?(deploy_dir+"/private_key")
|
|
650
|
+
@private_key = File.read("#{deploy_dir}/private_key")
|
|
651
|
+
@public_key = File.read("#{deploy_dir}/public_key")
|
|
652
|
+
end
|
|
653
|
+
|
|
654
|
+
if File.exist?(deploy_dir+"/basket_of_kittens.json")
|
|
655
|
+
begin
|
|
656
|
+
@original_config = JSON.parse(File.read("#{deploy_dir}/basket_of_kittens.json"))
|
|
657
|
+
rescue JSON::ParserError => e
|
|
658
|
+
MU.log "JSON parse failed on #{deploy_dir}/basket_of_kittens.json", MU::ERR, details: e.message
|
|
659
|
+
end
|
|
660
|
+
end
|
|
661
|
+
if File.exist?(deploy_dir+"/ssh_key_name")
|
|
662
|
+
@ssh_key_name = File.read("#{deploy_dir}/ssh_key_name").chomp!
|
|
663
|
+
end
|
|
664
|
+
if File.exist?(deploy_dir+"/node_ssh.key")
|
|
665
|
+
@ssh_private_key = File.read("#{deploy_dir}/node_ssh.key")
|
|
666
|
+
end
|
|
667
|
+
if File.exist?(deploy_dir+"/node_ssh.pub")
|
|
668
|
+
@ssh_public_key = File.read("#{deploy_dir}/node_ssh.pub")
|
|
669
|
+
end
|
|
670
|
+
if File.exist?(deploy_dir+"/environment_name")
|
|
671
|
+
@environment = File.read("#{deploy_dir}/environment_name").chomp!
|
|
672
|
+
end
|
|
673
|
+
if File.exist?(deploy_dir+"/deploy_secret")
|
|
674
|
+
@deploy_secret = File.read("#{deploy_dir}/deploy_secret")
|
|
675
|
+
end
|
|
676
|
+
if Dir.exist?("#{deploy_dir}/secrets")
|
|
677
|
+
@secrets.each_key { |type|
|
|
678
|
+
Dir.glob("#{deploy_dir}/secrets/#{type}.*") { |filename|
|
|
679
|
+
server = File.basename(filename).split(/\./)[1]
|
|
680
|
+
|
|
681
|
+
@secrets[type][server] = File.read(filename).chomp!
|
|
682
|
+
}
|
|
683
|
+
}
|
|
684
|
+
end
|
|
685
|
+
}
|
|
686
|
+
end
|
|
687
|
+
|
|
688
|
+
end #class
|
|
689
|
+
end #module
|