opensecret 0.0.2 → 0.0.4

Files changed (44)
  1. checksums.yaml +4 -4
  2. data/Gemfile +4 -0
  3. data/README.md +2 -2
  4. data/bin/opensecret +3 -6
  5. data/lib/opensecret-domain.ini +23 -0
  6. data/lib/opensecret.rb +30 -2
  7. data/lib/opensecret/additions/array.rb +117 -0
  8. data/lib/opensecret/additions/dir.rb +35 -0
  9. data/lib/opensecret/additions/string.rb +312 -0
  10. data/lib/opensecret/commons/eco.cmdline.rb +446 -0
  11. data/lib/opensecret/commons/eco.faculty.rb +364 -0
  12. data/lib/opensecret/commons/eco.system.rb +437 -0
  13. data/lib/opensecret/commons/eco.systems.rb +98 -0
  14. data/lib/opensecret/{safe.rb → delegate.rb} +4 -2
  15. data/lib/opensecret/eco.do.rb +46 -0
  16. data/lib/opensecret/executors/crypt.keys/crypt.keys.ini +79 -0
  17. data/lib/opensecret/executors/crypt.keys/crypt.keys.rb +68 -0
  18. data/lib/opensecret/executors/decrypt/decrypt.ini +64 -0
  19. data/lib/opensecret/executors/decrypt/decrypt.rb +49 -0
  20. data/lib/opensecret/executors/encrypt/encrypt.ini +55 -0
  21. data/lib/opensecret/executors/encrypt/encrypt.rb +82 -0
  22. data/lib/opensecret/factbase/hub-runtime.ini +123 -0
  23. data/lib/opensecret/factbase/known-hosts.ini +75 -0
  24. data/lib/opensecret/factbase/published.facts/blobbolicious-facts.ini +553 -0
  25. data/lib/opensecret/factbase/published.facts/credential-facts.ini +40 -0
  26. data/lib/opensecret/factbase/published.facts/infrastructure-facts.ini +63 -0
  27. data/lib/opensecret/factbase/readme.md +24 -0
  28. data/lib/opensecret/factbase/retired.facts/maven.database.ide.facts.ini +127 -0
  29. data/lib/opensecret/factbase/retired.facts/s3-upload-block-facts.ini +17 -0
  30. data/lib/opensecret/plugins.io/cipher/crypto.rb +174 -0
  31. data/lib/opensecret/plugins.io/error/eco.exceptions.rb +24 -0
  32. data/lib/opensecret/plugins.io/facts/fact.chars.rb +66 -0
  33. data/lib/opensecret/plugins.io/facts/fact.factor.rb +156 -0
  34. data/lib/opensecret/plugins.io/facts/fact.locator.rb +105 -0
  35. data/lib/opensecret/plugins.io/facts/fact.reader.rb +137 -0
  36. data/lib/opensecret/plugins.io/facts/fact.tree.rb +661 -0
  37. data/lib/opensecret/plugins.io/file/file.rb +483 -0
  38. data/lib/opensecret/plugins.io/git/git.flow.rb +388 -0
  39. data/lib/opensecret/plugins.io/logs/log.object.rb +89 -0
  40. data/lib/opensecret/plugins.io/logs/logging.rb +203 -0
  41. data/lib/opensecret/plugins.io/time/time.stamp.rb +425 -0
  42. data/lib/opensecret/version.rb +2 -2
  43. data/opensecret.gemspec +8 -13
  44. metadata +68 -18
@@ -0,0 +1,437 @@
+ #!/usr/bin/ruby
+
+ # --- --------------------------------------------------------------------------------------- -- #
+ # --- Build the [services eco-system]. The app sits at the centre of the services eco-system. -- #
+ # --- Everything that is done -> is done for (because of, to, in spite of) the [application]. -- #
+ # --- --------------------------------------------------------------------------------------- -- #
+ # --- --------------------------------------------------------------------------- --- #
+ # --- The [eco service folder] contains the templates, scripts and configuration. --- #
+ # --- By convention the folder name (off prj root) matches the name of the class. --- #
+ # --- --------------------------------------------------------------------------- --- #
+ # ---   Example => ProvisionMongoDb assets are in provision.mongo.db              --- #
+ # --- --------------------------------------------------------------------------- --- #
+ # ---   By Convention                                                             --- #
+ # ---     Ruby Class        => EcoAppServer                                       --- #
+ # ---     is Found in File  => eco.system.plugins/eco.app.server.rb               --- #
+ # ---     Has Assets in     => provision.app.server/                              --- #
+ # ---     and Inherits from => ProvisionEcoService                                --- #
+ # ---     Found in File     => provision.services/provision.eco.service.rb        --- #
+ # --- --------------------------------------------------------------------------- --- #
+ class EcoSystem < EcoFaculty
+
+   # -- -------------------------------------------------------------- -- #
+   # -- eco-system [provisioning] begins in earnest here. By making    -- #
+   # -- a [super] call (at the beginning, middle or end) - eco-systems -- #
+   # -- can extend the functionality provided here.                    -- #
+   # -- -------------------------------------------------------------- -- #
+   # -- To prevent this code running, child classes must provide their -- #
+   # -- own provision method as an alternative implementation.         -- #
+   # -- -------------------------------------------------------------- -- #
+   def provision
+
+     super
+
+     pre_provisioning     # --> Do work to gather key provisioning facts
+     replace_placeholders # --> Replace key facts in files within the eco folder
+
+     core_provisioning    # --> Do the heavy lifting for provisioning the eco service
+
+     overwrite_lines      # --> Replace pinpointed lines that include a string set.
+     replace_placeholders # --> Replace extra key facts to prep for post provisioning.
+     post_provisioning    # --> Notifying service dependents is usually done here.
+
+   end
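Since provision is a template method, a child eco-system can override it, call super at any point, and bolt on its own steps. A minimal sketch, assuming a hypothetical child class (the class and method names below are illustrative, not part of this diff):

    class HypotheticalMongoDb < EcoSystem
      def provision
        super                 # run the standard pre/core/post pipeline first
        seed_reference_data   # hypothetical child-specific step
      end
    end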
+
+   ## --- ----------------------------------------------------------------------------- --- #
+   ## --- Provision the services eco-system (universe) with the app as the focal point. --- #
+   ## --- ----------------------------------------------------------------------------- --- #
+   def pre_provisioning
+
+     read_properties
+     inject_reusables
+
+   end
+
+
+   # --
+   # -- Implements service discovery for the provisioned eco-system services.
+   # --
+   def post_provisioning
+
+     execute_scripts
+     s3_upload
+     s3_synchronize
+     write_properties
+     sync_2s3_bucket
+
+   end
+
+
+   #--
+   #-- Get eco-system reusable directory filepaths within
+   #-- an array.
+   #--
+   #-- The two known directories are
+   #--
+   #--   [1] - reusable.scripts
+   #--   [2] - reusable.templates
+   #--
+   def self.reusable_buckets
+
+     project_basedir  = File.dirname( File.dirname( __FILE__ ) )
+     reusable_buckets = Array.new
+
+     reusable_buckets.push( File.join(project_basedir, "reusable.scripts") )
+     reusable_buckets.push( File.join(project_basedir, "reusable.templates") )
+
+     return reusable_buckets
+
+   end
+
+
+   # --
+   # -- Gather the reusable [file] resources from the directory bucket
+   # -- arrays that are declared to hold these assets.
+   # --
+   # -- The reusables are gathered only if the plugin declares facts
+   # -- called [:inventory] and/or [:runnables] - each a map keyed by
+   # -- simple filenames.
+   # --
+   # -- This method does a recursive search to find and then copy over
+   # -- these reusable files into the runtime directory.
+   # --
+   # -- ------------------------------
+   # --   Constraint - Duplicate Names
+   # -- ------------------------------
+   # --
+   # -- Duplicate asset filenames introduce ambiguity as far as
+   # -- reusable assets are concerned. Therefore an error will be
+   # -- raised if this situation arises.
+   # --
+   def inject_reusables
+
+     return unless eco_fact_exists?(:inventory) || eco_fact_exists?(:runnables)
+     files_map = Files.in_folders EcoSystem.reusable_buckets
+     reusables = Hash.new
+     reusables.merge!( e_fact(:inventory) ) if eco_fact_exists?(:inventory)
+     reusables.merge!( e_fact(:runnables) ) if eco_fact_exists?(:runnables)
+     reusables.each do |source_name, target_name|
+
+       error_1 = "Cannot find reusable [#{source_name}].\n\n#{files_map.inspect}"
+       raise ArgumentError.new error_1 unless files_map.has_key? source_name
+       log.info(ere) {"Copying reusable #{source_name} => to => #{target_name}"}
+       source_file = File.join files_map[source_name], source_name
+       target_file = File.join @c[:runtime][:dir], target_name
+       log.info(ere) {"Source DevOps Asset => #{nickname source_file}"}
+       log.info(ere) {"Target DevOps Asset => #{nickname target_file}"}
+
+       FileUtils.cp source_file, target_file
+
+     end
+
+   end
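Grounded in the loop above, each entry in the reusables map pairs a source filename (looked up across the reusable buckets) with a target filename in the runtime directory. Hypothetical fact values might look like this:

    # illustrative only - real values come from the plugin's fact files
    :inventory  =>  { "mongod.conf.template" => "mongod.conf" }
    :runnables  =>  { "provision.mongo.sh"   => "provision.mongo.sh" }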
+
+
+   # --
+   # -- Use the remote host instantiated for the eco plugin.
+   # -- Upload the plugin folder and run the reusables.
+   # --
+   def execute_scripts
+
+     return unless eco_fact_exists? :runnables
+
+     log.info(ere) { "[collate] ---------------------------------------- --- #" }
+     log.info(ere) { "[collate] collate will upload and execute scripts. --- #" }
+     log.info(ere) { "[collate] ---------------------------------------- --- #" }
+     log.info(ere) { "#{pp e_fact(:runnables).values}" }
+     log.info(ere) { "[collate] ---------------------------------------- --- #" }
+
+     install_dos2unix = "sudo apt-get install -y dos2unix"
+     plugin_host = @c[:machine][:host_class]
+     plugin_host.runtime_dir = @c[:runtime][:dir]
+
+     plugin_host.execute_cmd install_dos2unix
+     plugin_host.upload_folder @c[:runtime][:dstname], @c[:runtime][:dir]
+
+     e_fact(:runnables).each_value do | script_name |
+
+       script_path = @c[:runtime][:dstname] + "/" + @c[:runtime][:dirname] + "/" + script_name
+
+       cmd1 = "chmod u+x " + script_path
+       cmd2 = "dos2unix " + script_path
+       cmd3 = script_path
+
+       #### plugin_host.execute_ansible_cmd @c[:runtime][:dir]
+       #### exit
+
+       plugin_host.execute_cmd cmd1
+       plugin_host.execute_cmd cmd2
+       plugin_host.execute_cmd cmd3
+
+     end
+
+     plugin_host.log_remote_host
+
+   end
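For a hypothetical runnable named setup.sh, with illustrative [:runtime] dstname and dirname values of /home/ubuntu and eco.runtime, the remote host receives this command sequence in order:

    sudo apt-get install -y dos2unix
    chmod u+x /home/ubuntu/eco.runtime/setup.sh    # make the script executable
    dos2unix /home/ubuntu/eco.runtime/setup.sh     # strip any Windows line endings
    /home/ubuntu/eco.runtime/setup.sh              # then run it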
+
+
+   # -- ----------------------------------------------------- -- #
+   # -- sync folder with s3 bucket under certain conditions.  -- #
+   # -- ----------------------------------------------------- -- #
+   # --   Sync Conditions                                     -- #
+   # -- ----------------------------------------------------- -- #
+   # --   [1] - running in a unix environment                 -- #
+   # --   [2] - key [s3sync.bucket.name] exists               -- #
+   # --   [3] - key [s3sync.path.offset] exists               -- #
+   # --   [4] - s3 bucket exists and is writeable             -- #
+   # --   [5] - local dir exists and is readable              -- #
+   # --                                                       -- #
+   # -- ----------------------------------------------------- -- #
+   # --   Dependencies and Assumptions                        -- #
+   # -- ----------------------------------------------------- -- #
+   # --   the aws iam environment variables are set           -- #
+   # --   the s3 bucket specified exists and is writable      -- #
+   # --   the s3 bucket contents are deletable                -- #
+   # --   local path offset off [plugin folder] exists        -- #
+   # --   the [awscli] apt-get package is installed           -- #
+   # -- ----------------------------------------------------- -- #
+   def s3_synchronize
+
+     return if Gem.win_platform?
+     return unless eco_fact_exists? :s3sync_bucket
+     return unless eco_fact_exists? :s3sync_folder
+
+     log.info(ere) { "[s3 sync] -------------------------------------------- --- #" }
+     log.info(ere) { "[s3 sync] eco plugin running on a non-windows platform --- #" }
+     log.info(ere) { "[s3 sync] with s3 sync parameters available.           --- #" }
+     log.info(ere) { "[s3 sync] -------------------------------------------- --- #" }
+
+     AwsS3.instance.log_bucket_summary
+     AwsS3.instance.sync_with_s3 e_fact(:s3sync_bucket), e_fact(:s3sync_folder)
+     AwsS3.instance.log_bucket_summary
+
+   end
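Given the [awscli] dependency listed above, the sync performed by sync_with_s3 is presumably equivalent in spirit to the standard CLI invocation below (bucket and folder names illustrative):

    aws s3 sync /var/opt/eco.folder s3://example-s3sync-bucket/path/offset --delete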
+
+
+   # --
+   # -- [SYNC] a local folder with a given S3 bucket at a particular
+   # -- folder offset location, with a specific set of sync options.
+   # --
+   # -- This behaviour is driven by a (plugin.id).s3.sync.spec.json
+   # -- specification file that states
+   # --
+   # --   [1] - the source folder whose contents will be sync'd up
+   # --   [2] - the S3 bucket name into which to sync the contents
+   # --   [3] - the S3 folder path offset (within the S3 bucket)
+   # --   [4] - sync options like delete, size-only, acl and more
+   # --
+   def sync_2s3_bucket
+
+     return unless @c.has_key?(:s3_sync) && File.exists?(@c[:s3_sync][:spec_filepath])
+
+     AwsS3.instance.log_bucket_summary
+
+     sync_directives = JSON.parse(
+       File.read(@c[:s3_sync][:spec_filepath]),
+       object_class: OpenStruct
+     )
+
+     sync_directives.each do | sync_directive |
+
+       log.info(ere) { "[sync] ############################################################### ### #" }
+       log.info(ere) { "[sync] --------------------------------------------------------------- --- #" }
+       log.info(ere) { "[sync] sync-ing local folder to s3 bucket [#{sync_directive.s3_bucket_name}]" }
+       log.info(ere) { "[sync] --------------------------------------------------------------- --- #" }
+       log.info(ere) { "[sync] sync source folder => #{sync_directive.local_folder}" }
+       log.info(ere) { "[sync] source bucket name => #{sync_directive.s3_bucket_name}" }
+       log.info(ere) { "[sync] mirror bucket name => #{sync_directive.bucket_b4_name}" }
+       log.info(ere) { "[sync] bucket offset path => #{sync_directive.offset_path}" }
+       log.info(ere) { "[sync] sync options array => #{sync_directive.sync_options}" }
+       log.info(ere) { "[sync] --------------------------------------------------------------- --- #" }
+
+       # --
+       # -- Is it worthwhile to copy between S3 buckets first
+       # -- before sync-ing up the local folder?
+       # --
+       # -- We deem it yes if (and only if)
+       # --
+       # --   a) the to-sync folder is over [10MB]
+       # --   b) a bucket_b4_name has been specified
+       # --   c) the folder to sync does [NOT] exist.
+       # --   d) the b4 folder [DOES] exist.
+       # --
+       # -- If so an S3 [bucket] to [bucket] mirror/copy may
+       # -- dramatically reduce sync time.
+       # --
+       AwsS3.instance.copy_folder_between_buckets(
+         sync_directive.bucket_b4_name,
+         sync_directive.s3_bucket_name,
+         sync_directive.offset_path
+       ) if copy_b4_sync_worthwhile?( sync_directive )
+
+       AwsS3.instance.sync_local_to_s3(
+         sync_directive.local_folder,
+         sync_directive.s3_bucket_name,
+         sync_directive.offset_path,
+         sync_directive.sync_options
+       )
+
+     end
+
+     AwsS3.instance.log_bucket_summary
+
+   end
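A hypothetical (plugin.id).s3.sync.spec.json, shaped to match the fields the loop above reads (all values illustrative):

    [
      {
        "local_folder"   : "/var/opt/backups",
        "s3_bucket_name" : "backup.2019.05",
        "bucket_b4_name" : "backup.2019.04",
        "offset_path"    : "database/dumps",
        "sync_options"   : [ "--delete", "--size-only" ]
      }
    ]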
+
+
+   # --
+   # -- [COPY] from another s3 bucket [B4 SYNC] if [WORTHWHILE]
+   # --
+   # -- Once a month (or week) performance may be gained by copying
+   # -- from the previous s3 bucket before sync-ing the local folder.
+   # --
+   # -- The first [backup] of the new month/week/day is a full backup
+   # -- of the local folder to up-sync. This can take a lot of time for,
+   # -- say, a [7Gig] folder holding many little files.
+   # --
+   # -- -------------------
+   # --   S3 to S3 Mirror
+   # -- -------------------
+   # -- If we copy (mirror) the previous S3 bucket folder before the
+   # -- sync we gain much in performance because S3 to S3 copying is
+   # -- super fast - then just the delta is sync'd up.
+   # --
+   # -- -------------------------------
+   # --   Pre-Conditions - Copy B4 Sync
+   # -- -------------------------------
+   # --
+   # -- The copy/mirror before sync will occur when the
+   # --
+   # --   1 - [sync_options.copy_b4_sync_if] flag is [true]
+   # --   2 - to sync S3 folder (not bucket) does NOT exist
+   # --   3 - previous periods (month/week..) folder exists
+   # --
+   # -- -------------
+   # --   Assumptions
+   # -- -------------
+   # -- Currently assumes the period is ALWAYS [monthly].
+   # -- Change this to cater for
+   # --   [ hourly, daily, weekly, monthly, quarterly, yearly ]
+   # --
+   def copy_b4_sync_worthwhile? sync_attr
+
+     return false if sync_attr.bucket_b4_name.nil?
+
+     sync_folder_exists =
+       AwsS3.instance.bucket_folder_contains_something?(
+         sync_attr.s3_bucket_name,
+         sync_attr.offset_path
+       )
+
+     return false if sync_folder_exists
+
+     b4_folder_exists =
+       AwsS3.instance.bucket_folder_contains_something?(
+         sync_attr.bucket_b4_name,
+         sync_attr.offset_path
+       )
+
+     return b4_folder_exists
+
+   end
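A quick worked example of the three guards above, using hypothetical monthly backup buckets (in practice the directive arrives from sync_2s3_bucket's JSON parse):

    require "ostruct"

    directive = OpenStruct.new(
      s3_bucket_name: "backup.2019.05",   # this month's bucket (first sync, still empty)
      bucket_b4_name: "backup.2019.04",   # last month's bucket (fully populated)
      offset_path:    "database/dumps"
    )
    copy_b4_sync_worthwhile?( directive )
    # => true only when 2019.05/database/dumps contains nothing
    #    and 2019.04/database/dumps contains something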
+
+
+   # --- ---------------------------------------------------------------------------- --- #
+   # --- Any file in the eco folder whose name starts with [:s3][:upload_prefix] gets  --- #
+   # --- uploaded to the S3 provisioning folder (in the monthly bucket). Then the url  --- #
+   # --- is written into the app properties database with a key that is the remaining  --- #
+   # --- filename after the preceding s3 prefix is removed, subsequently appended      --- #
+   # --- with the string ".url"                                                        --- #
+   # --- ---------------------------------------------------------------------------- --- #
+   def s3_upload
+
+     log.info(ere) { "[s3 upload] examining files in #{@c[:runtime][:dir]}" }
+
+     # -- ------------------------------------------------------------------ -- #
+     # -- Scan folder for files whose names begin with the s3 upload prefix. -- #
+     # -- ------------------------------------------------------------------ -- #
+     Dir.foreach( @c[:runtime][:dir] ) do | file_name |
+
+       file_path = File.join @c[:runtime][:dir], file_name
+       next if File.directory? file_path
+       next unless file_name.start_with? @c[:s3][:upload_prefix]
+
+       read_block_facts __FILE__, __method__, :upload, :src_file_name, file_name
+       Dir.mkdir @c[:s3][:uploads_dir] unless File.exists? @c[:s3][:uploads_dir]
+       next if File.exists? @c[:upload][:dst_file_path]
+
+       FileUtils.cp @c[:upload][:src_file_path], @c[:upload][:dst_file_path]
+
+       AwsS3.instance.log_bucket_summary
+
+       log.warn(ere) { "Warning - Not uploading to S3 bucket = File ==| #{@c[:upload][:dst_file_path]}" }
+       log.warn(ere) { "Warning - Not adding S3 resource URL fact to app_properties fact group." }
+       ##### === =============================================================================================
+       ##### === Commenting this out prevents uploading any file with the s3put tag.
+       ##### === =============================================================================================
+       ##### === s3_url = AwsS3.instance.upload_to_s3 @c[:s3][:bucket_name], @c[:upload][:dst_file_path]
+       ##### === @c.add_fact :app_properties, @c[:upload][:app_props_key], s3_url
+       ##### === =============================================================================================
+
+     end
+
+   end
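Assuming the upload prefix is the s3put tag mentioned in the commented-out block (an assumption - the real value lives in [:s3][:upload_prefix]), the naming convention described above would play out like this:

    # illustrative only - prefix assumed to be "s3put."
    #   eco folder file          =>  s3put.app.properties
    #   app_properties fact key  =>  "app.properties.url"
    #   fact value               =>  the S3 url returned by upload_to_s3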
+
+
+   # --
+   # -- [FIND] lines that include a set of configured strings and
+   # -- [REPLACE] them with the configured alternative.
+   # --
+   # -- This behaviour is driven by a (plugin.id).line.replace.json
+   # -- configuration file that states
+   # --
+   # --   [1] - the target file to change
+   # --   [2] - the array of words to match each line against
+   # --   [3] - new line replacing old if all the words match
+   # --
+   # -- -----------------------------------
+   # --   [Pre-Conditions] => Only act when
+   # -- -----------------------------------
+   # --
+   # --   1. plugin dir has a json [targeting] configuration file
+   # --
+   # -- ---------------------------------
+   # --   [Dependencies and Assumptions]
+   # -- ---------------------------------
+   # --
+   # --   1. json file is formatted with the below keys (and value types)
+   # --
+   # --      - replace_file_path   : value type => String
+   # --      - line_search_strings : value type => Array of Strings
+   # --      - replace_with_string : value type => String
+   # --
+   # --   2. every file specified exists and is readable + writeable
+   # --
+   def overwrite_lines
+
+     return unless File.exists? @c[:overwrite][:spec_filepath]
+
+     pointers = JSON.parse(
+       File.read(@c[:overwrite][:spec_filepath]),
+       object_class: OpenStruct
+     )
+
+     pointers.each do | pinpoint |
+
+       Files.find_replace_lines(
+         pinpoint.replace_file_path,
+         pinpoint.line_search_strings,
+         pinpoint.replace_with_string
+       )
+
+     end
+
+   end
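A hypothetical (plugin.id).line.replace.json honouring the three documented keys (file path and strings illustrative):

    [
      {
        "replace_file_path"   : "/etc/mongod.conf",
        "line_search_strings" : [ "bindIp", "127.0.0.1" ],
        "replace_with_string" : "  bindIp: 0.0.0.0"
      }
    ]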
+
+
+ end
@@ -0,0 +1,98 @@
+ #!/usr/bin/ruby
+
+ # ---
+ # --- Build the complete services eco-system directed by the parameter service descriptors.
+ # --- Everything that is done -> is done for (because of, to, in spite of) the application.
+ # ---
+ class EcoSystems
+
+   # -
+   # -- Create one or more eco-systems if that is
+   # -- the wish of the commander (interpreter).
+   # -
+   def self.create
+
+     return unless CmdLine.instance.create || CmdLine.instance.task
+     eco_descriptors = CmdLine.instance.key_values[:service_descriptors]
+     EcoSystems.new.provision_services eco_descriptors
+
+   end
+
+   # --- -------------------------------------------------------------------------- --- #
+   # --- The input array of [service descriptors] is mapped to the below classes.   --- #
+   # --- -------------------------------------------------------------------------- --- #
+   # ---   the service descriptors => ["mongo.db", "properties", "rest.docs"]       --- #
+   # --- -------------------------------------------------------------------------- --- #
+   # ---   [eco boot] service => [  mongo.db] implemented by => [MongoDb]           --- #
+   # ---   [eco boot] service => [properties] implemented by => [Properties]        --- #
+   # ---   [eco boot] service => [ rest.docs] implemented by => [RestDocs]          --- #
+   # --- -------------------------------------------------------------------------- --- #
+   def provision_services service_descriptors
+
+     log.info(ere) { "------------------------------------------------------------------- --- #" }
+     log.info(ere) { "[service descriptor to class conversion loop]                        --- #" }
+     log.info(ere) { "------------------------------------------------------------------- --- #" }
+
+     eco_services = eco_system_classes service_descriptors
+     eco_services.each do |eco_service|
+
+       top_and_tail "Provision the => [#{eco_service.class.name}] eco-system."
+       eco_service.provision
+       top_and_tail "The [#{eco_service.eco_id_str}] => eco system is now [ready]."
+
+     end
+
+   end
+
+
+   # --- -------------------------------------------------------------------- --- #
+   # --- Create the provisioner classes from the parameter descriptors array. --- #
+   # --- -------------------------------------------------------------------- --- #
+   def eco_system_classes service_descriptors
+
+     log.info(ere) { "[eco boot] service descriptors => #{service_descriptors.to_s}" }
+     eco_services = Array.new
+
+     service_descriptors.each do |class_desc|
+
+       class_name = SnapFlat.undo class_desc
+       log.info(ere) { "[eco boot] service => [#{class_desc}] implemented by => [#{class_name}]" }
+
+       begin
+
+         class_object = Object.const_get( class_name ).new
+
+       rescue Exception => e
+
+         log.fatal(ere) { "Exception instantiating eco-system [#{class_name}]." }
+         log.fatal(ere) { "Exception Message   => #{e.message}" }
+         log.fatal(ere) { "Exception Backtrace => #{e.backtrace.inspect}" }
+         exit
+
+       end
+
+       eco_services.push class_object
+
+     end
+
+     return eco_services
+
+   end
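The comment block above maps descriptors like mongo.db to class names like MongoDb, which suggests SnapFlat.undo reverses a dot-flattened class name. A minimal sketch of that conversion - not necessarily this gem's exact implementation:

    "mongo.db".split( "." ).map( &:capitalize ).join   # => "MongoDb"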
+
+
+   # --
+   # -- Write the header string as the meat in a highlighted
+   # -- lines sandwich.
+   # --
+   def top_and_tail header_str
+
+     log.info(ere) { "# ### ############################################################## ### #" }
+     log.info(ere) { "# ~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~ #" }
+     log.info(ere) { "#{header_str}" }
+     log.info(ere) { "# ~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~ #" }
+     log.info(ere) { "# ### ############################################################## ### #" }
+
+   end
+
+ end