continuent-tools-core 0.9.0 → 0.10.6

@@ -0,0 +1,1654 @@
1
+ #!/usr/bin/env ruby
2
+ # Copyright (C) 2014 Continuent, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
5
+ # not use this file except in compliance with the License. You may obtain
6
+ # a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13
+ # License for the specific language governing permissions and limitations
14
+ # under the License.
15
+ #
16
+ # Initial developer(s): Jeff Mace
17
+ # Contributor(s):
18
+
19
+ begin
20
+ require 'rubygems'
21
+ gem 'continuent-tools-core'
22
+ rescue LoadError
23
+ end
24
+
25
+ require 'continuent-tools-core'
26
+ require 'digest/md5'
27
+
28
+ class TungstenManageConfiguration
29
+ include TungstenScript
30
+ private
31
+
32
+ SERVER_TYPE = "tungsten-ServerType"
33
+ CLUSTER_NAME = "tungsten-ClusterName"
34
+ COMPOSITE_CLUSTER_NAME = "tungsten-CompositeClusterName"
35
+ COMPOSITE_CLUSTER_MASTER = "tungsten-CompositeClusterMaster"
36
+ ARCHIVE_CLUSTERS = "tungsten-ArchiveClusters"
37
+ CONNECTOR_CLUSTERS = "tungsten-ConnectorClusters"
38
+ CLUSTER_PASSIVE_WITNESS = "tungsten-PassiveWitness"
39
+
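+ # Illustrative only, not part of the original script: based on the keys and
+ # tags referenced below, a tungsten_directory entry is expected to look
+ # roughly like the following JSON (hostnames and addresses are hypothetical).
+ #
+ #   "db1": {
+ #     "hostname": "db1",
+ #     "location": "east",
+ #     "public-address": "203.0.113.10",
+ #     "private-address": "10.0.0.10",
+ #     "tags": {
+ #       "tungsten-ServerType": ["datasource"],
+ #       "tungsten-ClusterName": "east",
+ #       "tungsten-CompositeClusterName": "global",
+ #       "tungsten-CompositeClusterMaster": "east"
+ #     }
+ #   }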
40
+ def main
41
+ directory_entries = load_directory_entries()
42
+ entry = directory_entries[opt(:hostname)]
43
+ if entry == nil
44
+ raise MessageError.new("Unable to find a directory entry for '#{opt(:hostname)}'")
45
+ end
46
+
47
+ validate_entry(entry)
48
+ unless TU.is_valid?()
49
+ return
50
+ end
51
+
52
+ services = generate_services_map(directory_entries)
53
+ unless TU.is_valid?()
54
+ return
55
+ end
56
+
57
+ case command()
58
+ when "install"
59
+ install_entry(services, entry, directory_entries)
60
+ when "uninstall"
61
+ uninstall_entry(entry)
62
+ when "repair"
63
+ repair_entry(entry)
64
+ when "lastrun"
65
+ if File.exists?(opt(:lastrun))
66
+ File.open(opt(:lastrun), "r") {
67
+ |f|
68
+ TU.output(f.read().chomp())
69
+ }
70
+ else
71
+ TU.output("-1")
72
+ end
73
+ when "ini"
74
+ entry_services = filter_services_map(services, entry)
75
+ pending_ini = write_ini_configuration(entry_services, entry)
76
+ pending_ini.rewind()
77
+ TU.output(pending_ini.read())
78
+ when "hosts"
79
+ entry_services = filter_services_map(services, entry)
80
+ hostsmap = generate_hosts_map(entry_services, directory_entries, entry["location"])
81
+ hostsmap.keys().sort().each{
82
+ |h|
83
+ TU.output("#{hostsmap[h]}\t#{h}")
84
+ }
85
+ when "hosts_puppet_manifest"
86
+ entry_services = filter_services_map(services, entry)
87
+ hostsmap = generate_hosts_map(entry_services, directory_entries, entry["location"])
88
+ TU.output(generate_hosts_puppet_manifest(hostsmap))
89
+ end
90
+ end
91
+
92
+ # Write the tungsten.ini configuration and install/update all needed
93
+ # software packages. Automatically detect the correct master and provision
94
+ # the server if it is a slave.
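+ # Illustrative invocation, not taken from the original source; the package
+ # and file paths are hypothetical:
+ #
+ #   tungsten_manage_configuration install \
+ #     --continuent-tungsten-package=/opt/continuent/software/continuent-tungsten-2.0.2 \
+ #     --lastrun=/opt/continuent/share/manage_configuration.lastrun \
+ #     --log=/opt/continuent/service_logs/manage_configuration.log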
95
+ def install_entry(services, entry, directory_entries)
96
+ call_hook(:hook_before_install)
97
+
98
+ # Eliminate services not related to the current entry and write to INI
99
+ entry_services = filter_services_map(services, entry)
100
+ pending_ini = write_ini_configuration(entry_services, entry)
101
+
102
+ # Parse and validate the INI contents as a hash instead of raw content
103
+ pending_contents = TU.parse_ini_file(pending_ini.path())
104
+ validate_pending_ini_contents(entry, pending_contents)
105
+ unless TU.is_valid?()
106
+ return
107
+ end
108
+
109
+ # Parse the original contents before the file is replaced
110
+ if File.exists?(opt(:outputfile))
111
+ initial_contents = TU.parse_ini_file(opt(:outputfile))
112
+ else
113
+ initial_contents = {}
114
+ end
115
+
116
+ # Check the status of the last time the script was run
117
+ if opt(:lastrun) != nil && File.exist?(opt(:lastrun))
118
+ lastrun = File.open(opt(:lastrun), "r").read().chomp()
119
+ else
120
+ lastrun = nil
121
+ end
122
+
123
+ # returns true if opt(:outputfile) was actually modified
124
+ file_replaced = replace_managed_file(opt(:outputfile), pending_ini)
125
+
126
+ # We only need to update the software configuration if
127
+ # - there were configuration changes
128
+ # - if the user requested it with --replace-release
129
+ # - if the last run was not successful
130
+ apply_changes = false
131
+ if file_replaced == true
132
+ apply_changes = true
133
+ elsif opt(:replace_release) == true
134
+ apply_changes = true
135
+ elsif lastrun != nil && lastrun != "0"
136
+ apply_changes = true
137
+ end
138
+
139
+ if apply_changes == true
140
+ enable_script_log()
141
+
142
+ # If enabled, update /etc/hosts with any available directory entries
143
+ case opt(:manage_etc_hosts)
144
+ when "puppet"
145
+ hostsmap = generate_hosts_map(entry_services, directory_entries, entry["location"])
146
+ manifest = Tempfile.new('tmcmanifest')
147
+ manifest.puts(generate_hosts_puppet_manifest(hostsmap))
148
+ manifest.flush()
149
+ TU.cmd_result("cat #{manifest.path} | sudo puppet apply")
150
+ end
151
+
152
+ # Install or Update Continuent Tungsten from the configured path
153
+ if entry_is_type?(entry, ["datasource", "connector"])
154
+ update_continuent_tungsten(entry, initial_contents, pending_contents)
155
+ end
156
+
157
+ # Install or Update Tungsten Replicator from the configured path
158
+ if entry_is_type?(entry, "archive")
159
+ update_tungsten_replicator(entry, initial_contents, pending_contents)
160
+ end
161
+ end
162
+
163
+ # This section is not linked to changes in the INI file because it
164
+ # is used to reload the users.map file; changes to the two files are
165
+ # independent of each other.
166
+ if entry_is_type?(entry, "connector")
167
+ update_usersmap(entry, entry_services.keys())
168
+ end
169
+
170
+ call_hook(:hook_after_install)
171
+ end
172
+
173
+ def update_continuent_tungsten(entry, initial_contents, new_contents)
174
+ home = opt(:continuent_tungsten_home)
175
+ TU.mkdir_if_absent("#{home}/service_logs")
176
+ tpmlog = "#{home}/service_logs/tungsten-configure.log"
177
+ hook_arguments = []
178
+ hook_arguments << "--continuent-tungsten-home=#{home}"
179
+
180
+ if File.exists?("#{home}/tungsten")
181
+ install = TungstenInstall.new("#{home}/tungsten")
182
+ migrate_schema = false
183
+ current_schema = nil
184
+ target_schema = nil
185
+
186
+ new_clusters = nil
187
+ new_members = nil
188
+ removed_clusters = nil
189
+ removed_members = nil
190
+
191
+ if entry_is_type?(entry, "datasource")
192
+ # Calculate new or removed members and clusters so we can take
193
+ # the appropriate action
194
+ new_clusters, new_members = find_cluster_differences(new_contents, initial_contents)
195
+ removed_clusters, removed_members = find_cluster_differences(initial_contents, new_contents)
196
+
197
+ if new_clusters != nil
198
+ TU.debug("New clusters: #{new_clusters.join(',')}")
199
+ new_clusters.each{
200
+ |cluster|
201
+ hook_arguments << "--add-cluster=#{cluster}"
202
+ }
203
+ end
204
+ if removed_clusters != nil
205
+ TU.debug("Removed clusters: #{removed_clusters.join(',')}")
206
+ removed_clusters.each{
207
+ |cluster|
208
+ hook_arguments << "--remove-cluster=#{cluster}"
209
+ }
210
+ end
211
+ if new_members != nil
212
+ new_members.each{
213
+ |svc, members|
214
+ if members.size() == 0
215
+ next
216
+ end
217
+ TU.debug("New #{svc} members: #{members.join(',')}")
218
+ members.each{
219
+ |member|
220
+ hook_arguments << "--add-member=#{svc}.#{member}"
221
+ }
222
+ }
223
+ end
224
+ if removed_members != nil
225
+ removed_members.each{
226
+ |svc, members|
227
+ if members.size() == 0
228
+ next
229
+ end
230
+ TU.debug("Removed #{svc} members: #{members.join(',')}")
231
+ members.each{
232
+ |member|
233
+ hook_arguments << "--remove-member=#{svc}.#{member}"
234
+ }
235
+ }
236
+ end
237
+ end
238
+
239
+ begin
240
+ if entry["tags"][COMPOSITE_CLUSTER_NAME].to_s() != ""
241
+ # Check if the service is currently writing to the composite service schema
242
+ current_schema = install.setting(install.setting_key(REPL_SERVICES, entry["tags"][CLUSTER_NAME], "repl_svc_schema"))
243
+ target_schema = "tungsten_#{TU.to_identifier(entry["tags"][COMPOSITE_CLUSTER_NAME])}"
244
+
245
+ if current_schema != target_schema
246
+ TU.notice("Migrate the #{current_schema} schema to #{target_schema}")
247
+ migrate_schema = true
248
+
249
+ # Shutdown the replicator while we migrate the schema and then
250
+ # upgrade Continuent Tungsten. It needs to be stopped instead of
251
+ # put OFFLINE so the manager does not put it ONLINE.
252
+ TU.cmd_result("#{home}/tungsten/tungsten-replicator/bin/replicator stop")
253
+ TU.tungsten_cmd_result("tungsten_migrate_schema --from-schema=#{current_schema} --to-schema=#{target_schema} --drop-target-schema=true")
254
+ end
255
+ end
256
+ rescue CommandError => ce
257
+ TU.debug(ce)
258
+
259
+ # Make sure the replicator is restarted if we are cancelling the script
260
+ TU.cmd_result("#{home}/tungsten/tungsten-replicator/bin/replicator start")
261
+
262
+ raise "Unable to update Continuent Tungsten to a composite cluster because of issues migrating the tracking schema."
263
+ end
264
+
265
+ begin
266
+ replace_release = false
267
+ if migrate_schema == true
268
+ TU.debug("Force `tpm update --replace-release` due to a new composite cluster")
269
+ replace_release = true
270
+ end
271
+
272
+ call_hook(:hook_before_ct_update, hook_arguments)
273
+ if replace_release == true
274
+ TU.notice("Update #{home}/tungsten and replace release directory")
275
+ TU.cmd_result("#{opt(:continuent_tungsten_package)}/tools/tpm update --replace-release --tty --log=#{tpmlog}")
276
+ else
277
+ TU.notice("Update #{home}/tungsten")
278
+ TU.cmd_result("#{opt(:continuent_tungsten_home)}/tungsten/tools/tpm update --tty --log=#{tpmlog}")
279
+ end
280
+ call_hook(:hook_after_ct_update, hook_arguments)
281
+ rescue CommandError => ce
282
+ TU.debug(ce)
283
+ raise "Unable to update Continuent Tungsten check #{tpmlog} for more information"
284
+ ensure
285
+ if migrate_schema == true
286
+ # Restart the replicator after shutting it down for the schema migration
287
+ TU.cmd_result("#{home}/tungsten/tungsten-replicator/bin/replicator start")
288
+ end
289
+ end
290
+ else
291
+ begin
292
+ call_hook(:hook_before_ct_install, hook_arguments)
293
+ TU.notice("Install #{opt(:continuent_tungsten_package)}")
294
+ TU.cmd_result("#{opt(:continuent_tungsten_package)}/tools/tpm install --tty --log=#{tpmlog}")
295
+ call_hook(:hook_after_ct_install, hook_arguments)
296
+ rescue CommandError => ce
297
+ TU.debug(ce)
298
+ raise "Unable to install Continuent Tungsten check #{tpmlog} for more information"
299
+ end
300
+
301
+ if entry_is_type?(entry, "datasource")
302
+ provision_cluster_replicator(home, entry)
303
+ end
304
+
305
+ TU.notice("Start Continuent Tungsten")
306
+ begin
307
+ cmd = "#{home}/tungsten/cluster-home/bin/startall"
308
+ TU.cmd_result(cmd)
309
+ rescue CommandError => ce
310
+ raise "There was an error running `#{cmd}`."
311
+ end
312
+ end
313
+ end
314
+
315
+ def update_tungsten_replicator(entry, initial_contents, new_contents)
316
+ home = opt(:tungsten_replicator_home)
317
+ TU.mkdir_if_absent("#{home}/service_logs")
318
+ tpmlog = "#{home}/service_logs/tungsten-configure.log"
319
+ hook_arguments = []
320
+ hook_arguments << "--tungsten-replicator-home=#{home}"
321
+
322
+ if File.exists?("#{home}/tungsten")
323
+ begin
324
+ call_hook(:hook_before_tr_update, hook_arguments)
325
+ TU.notice("Update #{home}/tungsten")
326
+ TU.cmd_result("#{opt(:tungsten_replicator_home)}/tungsten/tools/tpm update --tty --log=#{tpmlog}")
327
+ call_hook(:hook_after_tr_update, hook_arguments)
328
+ rescue CommandError => ce
329
+ TU.debug(ce)
330
+ raise "Unable to update Tungsten Replicator check #{tpmlog} for more information"
331
+ end
332
+ else
333
+ begin
334
+ call_hook(:hook_before_tr_install, hook_arguments)
335
+ TU.notice("Install #{opt(:tungsten_replicator_package)}")
336
+ TU.cmd_result("#{opt(:tungsten_replicator_package)}/tools/tpm install --tty --log=#{tpmlog}")
337
+ call_hook(:hook_after_tr_install, hook_arguments)
338
+ rescue CommandError => ce
339
+ TU.debug(ce)
340
+ raise "Unable to install Tungsten Replicator check #{tpmlog} for more information"
341
+ end
342
+
343
+ unless entry_is_type?(entry, "datasource")
344
+ entry["tags"][ARCHIVE_CLUSTERS].each{
345
+ |svc|
346
+ # TODO : Provision logically for each remote service
347
+ # provision_archive_replicator(home, entry, svc)
348
+ }
349
+ end
350
+
351
+ TU.notice("Start Tungsten Replicator")
352
+ begin
353
+ cmd = "#{home}/tungsten/cluster-home/bin/startall"
354
+ TU.cmd_result(cmd)
355
+ rescue CommandError => ce
356
+ raise "There was an error running `#{cmd}`."
357
+ end
358
+ end
359
+ end
360
+
361
+ # Identify if this server should be started as a slave to an existing
362
+ # master or relay server. If a valid master or relay can't be found,
363
+ # the replicator will be started with its default role.
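+ # For example (illustrative, hypothetical host and port), a server that is
+ # assigned the slave role ends up with a dynamic properties file containing:
+ #   replicator.master.connect.uri=thl://db1:2112/
+ #   replicator.role=slave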
364
+ def provision_cluster_replicator(home, entry)
365
+ install = TungstenInstall.new("#{home}/tungsten")
366
+
367
+ # What is the role we will assign to this replicator
368
+ role = nil
369
+ # What is the default role of this replicator
370
+ default_role = install.setting(install.setting_key(REPL_SERVICES, install.default_dataservice(), "repl_role"))
371
+
372
+ # The THL URI to use when the role will be changed to slave
373
+ master_listen_uri = nil
374
+ # Command used to identify the master listen uri on a remote replicator
375
+ masterListenUriCommand = "#{install.base_path()}/tungsten-replicator/bin/trepctl properties -filter replicator.master.listen.uri -values"
376
+
377
+ # A valid host that can be used in the tungsten_provision_slave command
378
+ provision_source = nil
379
+
380
+ # Collect the current state from all other members and identify if there
381
+ # is already a valid master or relay server in the cluster.
382
+ master = nil
383
+ relay = nil
384
+
385
+ service_members = install.setting(install.setting_key(DATASERVICES, install.default_dataservice(), "dataservice_hosts")).split(",")
386
+ service_members.each{
387
+ |member|
388
+ if member == opt(:hostname)
389
+ next
390
+ end
391
+
392
+ begin
393
+ # Get the cctrl contents in a JSON object
394
+ contents = nil
395
+ Timeout.timeout(15) {
396
+ contents = TU.ssh_result("if [ -f #{install.base_path()}/tools/tpm ]; then #{install.base_path()}/tools/tpm cctrl; else echo \"\"; fi", member, install.user())
397
+ }
398
+
399
+ result = JSON.parse(contents)
400
+ if result.instance_of?(Hash)
401
+ values = result["#{TU.to_identifier(member)}:#{install.root()}"]
402
+
403
+ if values == nil
404
+ TU.debug("Unable to find cctrl results for #{TU.to_identifier(member)}:#{install.root()}")
405
+ next
406
+ end
407
+ if values["manager_is_running"] != "true"
408
+ TU.debug("Skipping this member because the manager is not running")
409
+ next
410
+ end
411
+
412
+ # Only one member of the cluster should be a coordinator so we
413
+ # use that to make sure we are looking at the correct output.
414
+ # In some corner cases, there may be multiple coordinators. In the
415
+ # event that this happens and each gives a different state, the
416
+ # script will throw an error.
417
+ if values["cctrl"]["coordinator"]["host"] != member
418
+ TU.debug("Skipping this member because it is not the coordinator")
419
+ next
420
+ end
421
+ if values["cctrl"]["coordinator"]["state"] != "ONLINE"
422
+ TU.debug("Skipping this member because the coordinator is not ONLINE")
423
+ next
424
+ end
425
+
426
+ # Go through each datasource to find the ONLINE master or relay
427
+ values["cctrl"]["datasources"].each{
428
+ |ds, ds_values|
429
+ if ds == opt(:hostname)
430
+ TU.debug("Skip the #{ds} datasource because it is the current host")
431
+ next
432
+ end
433
+
434
+ if ds_values["role"] != ds_values["replicator"]["role"]
435
+ TU.debug("Skip the #{ds} datasource because the replicator and datasource roles do not match")
436
+ next
437
+ end
438
+
439
+ if ds_values["status"] != "ONLINE"
440
+ TU.debug("Skip the #{ds} datasource because it is not ONLINE")
441
+ next
442
+ end
443
+
444
+ if ds_values["replicator"]["status"] != "ONLINE"
445
+ TU.debug("Skip the #{ds} datasource because the replicator is not ONLINE")
446
+ next
447
+ end
448
+
449
+ if ds_values["role"] == "master"
450
+ if master == nil
451
+ master = ds
452
+ elsif master != ds
453
+ # There are multiple coordinators and they don't agree on the
454
+ # current master.
455
+ throw "Unable to provision because both #{ds} and #{master} are listed as ONLINE master datasources"
456
+ end
457
+ elsif ds_values["role"] == "relay"
458
+ if relay == nil
459
+ relay = ds
460
+ elsif relay != ds
461
+ # There are multiple coordinators and they don't agree on the
462
+ # current relay.
463
+ throw "Unable to provision because both #{ds} and #{relay} are listed as ONLINE relay datasources"
464
+ end
465
+ else
466
+ provision_source = ds
467
+ end
468
+ }
469
+
470
+ end
471
+ rescue JSON::ParserError
472
+ rescue CommandError
473
+ rescue RemoteCommandError
474
+ rescue MessageError => me
475
+ rescue Timeout::Error
476
+ end
477
+ }
478
+
479
+ # Collect the THL URI from the server identified as a valid master or
480
+ # relay.
481
+ if master != nil
482
+ role = "slave"
483
+ master_listen_uri = TU.ssh_result(masterListenUriCommand, master, install.user())
484
+
485
+ if provision_source == nil
486
+ provision_source = master
487
+ end
488
+ elsif relay != nil
489
+ role = "slave"
490
+ master_listen_uri = TU.ssh_result(masterListenUriCommand, relay, install.user())
491
+
492
+ if provision_source == nil
493
+ provision_source = relay
494
+ end
495
+ end
496
+
497
+ if role == "slave"
498
+ # Write the slave connection information to the dynamic properties file
499
+ dynamic_path = install.setting(install.setting_key(REPL_SERVICES, install.default_dataservice(), 'repl_svc_dynamic_config'))
500
+ if dynamic_path.to_s() == ""
501
+ raise "Unable to set the replication URI because the dynamic properties file could not be found."
502
+ end
503
+ File.open(dynamic_path, "w") {
504
+ |f|
505
+ f.puts("replicator.master.connect.uri=#{master_listen_uri}")
506
+ f.puts("replicator.role=slave")
507
+ }
508
+ elsif default_role == "master"
509
+ # Write the dynamic properties file so that any future runs
510
+ # of this script will not cause this replicator to change role.
511
+ dynamic_path = install.setting(install.setting_key(REPL_SERVICES, install.default_dataservice(), 'repl_svc_dynamic_config'))
512
+ if dynamic_path.to_s() == ""
513
+ raise "Unable to set the default role because the dynamic properties file could not be found."
514
+ end
515
+ File.open(dynamic_path, "w") {
516
+ |f|
517
+ f.puts("replicator.role=master")
518
+ }
519
+ elsif default_role == "relay"
520
+ master = nil
521
+
522
+ composite_datasources = install.setting([DATASERVICES, entry["tags"][COMPOSITE_CLUSTER_NAME], "dataservice_composite_datasources"].join('.'))
523
+ composite_datasources.split(",").each{
524
+ |composite_ds|
525
+
526
+ # We don't need to continue after finding a master
527
+ if master_listen_uri != nil
528
+ next
529
+ end
530
+
531
+ # Don't look in the local cluster because we've already checked for
532
+ # valid datasources there
533
+ if composite_ds == entry["tags"][CLUSTER_NAME]
534
+ next
535
+ end
536
+
537
+ service_members = install.setting([DATASERVICES, composite_ds, "dataservice_hosts"].join('.')).split(",")
538
+ service_members.each{
539
+ |member|
540
+ if member == opt(:hostname)
541
+ next
542
+ end
543
+
544
+ begin
545
+ # Get the cctrl contents in a JSON object
546
+ contents = nil
547
+ Timeout.timeout(15) {
548
+ contents = TU.ssh_result("if [ -f #{install.base_path()}/tools/tpm ]; then #{install.base_path()}/tools/tpm cctrl; else echo \"\"; fi", member, install.user())
549
+ }
550
+
551
+ result = JSON.parse(contents)
552
+ if result.instance_of?(Hash)
553
+ values = result["#{TU.to_identifier(member)}:#{install.root()}"]
554
+
555
+ if values == nil
556
+ TU.debug("Unable to find cctrl results for #{TU.to_identifier(member)}:#{install.root()}")
557
+ next
558
+ end
559
+ if values["manager_is_running"] != "true"
560
+ TU.debug("Skipping this member because the manager is not running")
561
+ next
562
+ end
563
+
564
+ # Only one member of the cluster should be a coordinator so we
565
+ # use that to make sure we are looking at the correct output.
566
+ # In some corner cases, there may be multiple coordinators. In the
567
+ # event that this happens and each gives a different state, the
568
+ # script will throw an error.
569
+ if values["cctrl"]["coordinator"]["host"] != member
570
+ TU.debug("Skipping this member because it is not the coordinator")
571
+ next
572
+ end
573
+ if values["cctrl"]["coordinator"]["state"] != "ONLINE"
574
+ TU.debug("Skipping this member because the coordinator is not ONLINE")
575
+ next
576
+ end
577
+
578
+ # Go through each datasource to find the ONLINE master or relay
579
+ values["cctrl"]["datasources"].each{
580
+ |ds, ds_values|
581
+ if ds == opt(:hostname)
582
+ TU.debug("Skip the #{ds} datasource because it is the current host")
583
+ next
584
+ end
585
+
586
+ if ds_values["role"] != ds_values["replicator"]["role"]
587
+ TU.debug("Skip the #{ds} datasource because the replicator and datasource roles do not match")
588
+ next
589
+ end
590
+
591
+ if ds_values["status"] != "ONLINE"
592
+ TU.debug("Skip the #{ds} datasource because it is not ONLINE")
593
+ next
594
+ end
595
+
596
+ if ds_values["replicator"]["status"] != "ONLINE"
597
+ TU.debug("Skip the #{ds} datasource because the replicator is not ONLINE")
598
+ next
599
+ end
600
+
601
+ if ds_values["role"] == "master"
602
+ if master == nil
603
+ master = ds
604
+ elsif master != ds
605
+ # There are multiple coordinators and they don't agree on the
606
+ # current master.
607
+ throw "Unable to provision because both #{ds} and #{master} are listed as ONLINE master datasources"
608
+ end
609
+ else
610
+ provision_source = ds
611
+ end
612
+ }
613
+
614
+ end
615
+ rescue JSON::ParserError
616
+ rescue CommandError
617
+ rescue RemoteCommandError
618
+ rescue MessageError => me
619
+ rescue Timeout::Error
620
+ end
621
+ }
622
+ }
623
+
624
+ if master == nil
625
+ # There is no master so there is no clear decision that we can make
626
+ return
627
+ end
628
+
629
+ master_listen_uri = TU.ssh_result(masterListenUriCommand, master, install.user())
630
+
631
+ if provision_source == nil
632
+ provision_source = master
633
+ end
634
+
635
+ # Write the slave connection information to the dynamic properties file
636
+ dynamic_path = install.setting(install.setting_key(REPL_SERVICES, install.default_dataservice(), 'repl_svc_dynamic_config'))
637
+ if dynamic_path.to_s() == ""
638
+ raise "Unable to set the replication URI because the dynamic properties file could not be found."
639
+ end
640
+ File.open(dynamic_path, "w") {
641
+ |f|
642
+ f.puts("replicator.master.connect.uri=#{master_listen_uri}")
643
+ f.puts("replicator.role=relay")
644
+ }
645
+ end
646
+
647
+ if provision_source != nil
648
+ TU.notice("Provisioning the server. This may take some time.")
649
+ begin
650
+ TU.tungsten_cmd_result("#{home}/tungsten/tungsten-replicator/scripts/tungsten_provision_slave --source=#{provision_source} #{opt(:provision_slave_arguments)}")
651
+ rescue CommandError => ce
652
+ TU.debug(ce)
653
+ raise "Unable to provision #{entry["hostname"]}. Review the log for more information."
654
+ end
655
+ else
656
+ TU.debug("Skipping the provision step because there is no clear provision source")
657
+ end
658
+ end
659
+
660
+ def update_usersmap(entry, service_list)
661
+ home = opt(:continuent_tungsten_home)
662
+ if File.exists?("#{home}/tungsten")
663
+ begin
664
+ usersmap = "#{home}/tungsten/tungsten-connector/conf/user.map"
665
+ if File.exists?(usersmap)
666
+ # Ensure that the users.map file has not been modified manually
667
+ validate_managed_file(usersmap, true)
668
+ end
669
+
670
+ # Generate a new users.map file and replace the old one if different
671
+ pending_usersmap = write_usersmap(service_list, entry)
672
+ if pending_usersmap != false
673
+ file_replaced = replace_managed_file(usersmap, pending_usersmap)
674
+ else
675
+ TU.debug("Skipping user.map management because no users templates were found")
676
+ end
677
+ rescue MessageError => me
678
+ TU.error(me.message)
679
+ end
680
+ end
681
+ end
682
+
683
+ def uninstall_entry(entry)
684
+ enable_script_log()
685
+ call_hook(:hook_before_uninstall)
686
+
687
+ if entry_is_type?(entry, ["datasource", "connector"])
688
+ begin
689
+ TU.notice("Uninstall #{opt(:continuent_tungsten_package)}")
690
+ TU.cmd_result("#{opt(:continuent_tungsten_package)}/tools/tpm uninstall --i-am-sure")
691
+ rescue CommandError => ce
692
+ TU.debug(ce)
693
+ TU.warning "Unable to uninstall Continuent Tungsten. Proceeding with uninstall of other components."
694
+ end
695
+ end
696
+
697
+ if entry_is_type?(entry, "archive")
698
+ begin
699
+ TU.notice("Uninstall #{opt(:tungsten_replicator_package)}")
700
+ TU.cmd_result("#{opt(:tungsten_replicator_package)}/tools/tpm uninstall --i-am-sure")
701
+ rescue CommandError => ce
702
+ TU.debug(ce)
703
+ TU.warning "Unable to uninstall Tungsten Replicator. Proceeding with uninstall of other components."
704
+ end
705
+ end
706
+
707
+ TU.cmd_result("rm -f #{opt(:outputfile)}", true)
708
+ TU.cmd_result("rm -f #{get_original_file(opt(:outputfile))}", true)
709
+ call_hook(:hook_after_uninstall)
710
+ end
711
+
712
+ # Calculate the hostnames needed to run the listed services
713
+ # For each hostname include the private address if the host location
714
+ # matches the given location. Use the public address if there is no private
715
+ # address or the locations do not match.
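+ # Illustrative result for hypothetical addresses: { "db1" => "10.0.0.10",
+ # "db2" => "10.0.0.11" }, which the `hosts` command prints as tab-separated
+ # "address<TAB>hostname" lines.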
716
+ def generate_hosts_map(services, directory_entries, location)
717
+ hosts_map = {}
718
+
719
+ services.each{
720
+ |key,service|
721
+ if service.has_key?("members") && service["members"].is_a?(Array)
722
+ service["members"].each{
723
+ |hostname|
724
+ entry = directory_entries[hostname]
725
+ if entry == nil
726
+ TU.error("Unable to find a directory entry for #{hostname}")
727
+ next
728
+ end
729
+
730
+ if entry["location"] == location
731
+ if entry.has_key?("private-address")
732
+ hosts_map[hostname] = entry["private-address"]
733
+ elsif entry.has_key?("public-address")
734
+ hosts_map[hostname] = entry["public-address"]
735
+ else
736
+ TU.error("Unable to find a private or public address for #{hostname}")
737
+ end
738
+ else
739
+ if entry.has_key?("public-address")
740
+ hosts_map[hostname] = entry["public-address"]
741
+ else
742
+ TU.error("Unable to find a public address for #{hostname}")
743
+ end
744
+ end
745
+ }
746
+ end
747
+ }
748
+
749
+ return hosts_map
750
+ end
751
+
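+ # Render the hosts map as Puppet host resources. Illustrative output for a
+ # hypothetical entry { "db1" => "10.0.0.10" }:
+ #   host { 'db1' : ip => '10.0.0.10', comment => 'Created by tungsten_manage_configuration'}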
752
+ def generate_hosts_puppet_manifest(hostsmap)
753
+ manifest = []
754
+ hostsmap.keys().sort().each{
755
+ |h|
756
+ manifest << "host { '#{h}' : ip => '#{hostsmap[h]}', comment => 'Created by #{script_name()}'}"
757
+ }
758
+ manifest.join("\n")
759
+ end
760
+
761
+ # TODO : Add a 'repair' command that will reprovision or change the slave URI
762
+ def repair_entry(entry)
763
+ raise "The repair command is currently not supported"
764
+ end
765
+
766
+ # Returns a parsed version of the output from tungsten_directory
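+ # The output is expected to be a JSON hash keyed by hostname (see the
+ # illustrative entry near the top of this class).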
767
+ def load_directory_entries
768
+ begin
769
+ json = TU.cmd_result("tungsten_directory")
770
+ rescue CommandError => ce
771
+ raise MessageError.new("Unable to manage configuration because the tungsten_directory command failed")
772
+ end
773
+
774
+ begin
775
+ entries = JSON.parse(json)
776
+ rescue
777
+ entries = nil
778
+ end
779
+
780
+ unless entries.is_a?(Hash)
781
+ raise MessageError.new("Unable to manage configuration because the tungsten_directory command did not return valid JSON")
782
+ end
783
+
784
+ return entries
785
+ end
786
+
787
+ # Return clusters and cluster members that exist in a but not b
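+ # Illustrative example, not from the original source: if `a` defines clusters
+ # east (db1,db2,db3) and west (db4,db5) while `b` only defines east (db1,db2),
+ # the result is ["west"] and {"east" => ["db3"]}.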
788
+ def find_cluster_differences(a, b)
789
+ clusters = []
790
+ members = {}
791
+
792
+ unless a.is_a?(Hash)
793
+ return
794
+ end
795
+
796
+ a.keys().each{
797
+ |svc|
798
+ if svc =~ /^defaults/
799
+ next
800
+ end
801
+
802
+ unless a[svc]["topology"] == "clustered"
803
+ next
804
+ end
805
+
806
+ if b.is_a?(Hash) && b.has_key?(svc)
807
+ if b[svc]["topology"] != "clustered"
808
+ TU.error("Unable to convert non-clustered #{svc} into a cluster")
809
+ else
810
+ members[svc] = a[svc]["members"].split(",") - b[svc]["members"].split(",")
811
+ end
812
+ else
813
+ clusters << svc
814
+ end
815
+ }
816
+
817
+ return clusters,members
818
+ end
819
+
820
+ # Take the information from tungsten_directory and turn it into
821
+ # a set of cluster data that could be written to the INI file
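+ # Illustrative result for hypothetical hosts: three datasource entries tagged
+ # with tungsten-ClusterName=east plus one archive entry tagged with
+ # tungsten-ArchiveClusters=east would yield roughly:
+ #   { "east"       => { "topology" => "clustered", "members" => ["db1", "db2", "db3"], ... },
+ #     "east_slave" => { "topology" => "cluster-slave", "master-dataservice" => "east",
+ #                       "members" => ["archive1"] } }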
822
+ def generate_services_map(entries)
823
+ map = {}
824
+ is_valid = true
825
+
826
+ entries.each{
827
+ |k,entry|
828
+ types = entry["tags"][SERVER_TYPE]
829
+ unless types.is_a?(Array)
830
+ types = [types]
831
+ end
832
+
833
+ types.each{
834
+ |type|
835
+ begin
836
+ case type
837
+ when "datasource"
838
+ generate_datasource_services_map(map, entry)
839
+ when "witness"
840
+ generate_witness_services_map(map, entry)
841
+ when "connector"
842
+ generate_connector_services_map(map, entry)
843
+ when "archive"
844
+ generate_archive_services_map(map, entry)
845
+ else
846
+ raise MessageError.new("Unable to process a #{SERVER_TYPE} of #{type} for #{entry["hostname"]}")
847
+ end
848
+ rescue MessageError => me
849
+ is_valid = false
850
+ TU.error(me.message)
851
+ end
852
+ }
853
+ }
854
+
855
+ unless is_valid == true
856
+ raise MessageError.new("Unable to proceed due to issues with the directory entries")
857
+ end
858
+
859
+ map
860
+ end
861
+
862
+ def generate_datasource_services_map(map, entry)
863
+ svc = entry["tags"][CLUSTER_NAME]
864
+ if svc.to_s() == ""
865
+ raise MessageError.new "Unable to create the cluster on #{entry['hostname']} because it does not have a value for the '#{CLUSTER_NAME}' tag."
866
+ end
867
+
868
+ unless map.has_key?(svc)
869
+ map[svc] = {}
870
+ map[svc]["skip-validation-check"] = ["ManagerWitnessNeededCheck"]
871
+ end
872
+ unless map[svc].has_key?("members")
873
+ map[svc]["members"] = []
874
+ end
875
+
876
+ # Initiate the cluster
877
+ map[svc]["topology"] = "clustered"
878
+
879
+ # Add the host to the cluster
880
+ map[svc]["members"] << entry["hostname"]
881
+ map[svc]["members"].uniq!()
882
+ map[svc]["members"].sort!()
883
+
884
+ composite_svc = entry["tags"][COMPOSITE_CLUSTER_NAME]
885
+ if composite_svc.to_s() != ""
886
+ unless map.has_key?(composite_svc)
887
+ map[composite_svc] = {}
888
+ end
889
+ unless map[composite_svc].has_key?("composite-datasources")
890
+ map[composite_svc]["composite-datasources"] = []
891
+ end
892
+
893
+ # Add this cluster to the composite cluster
894
+ map[composite_svc]["composite-datasources"] << svc
895
+
896
+ # Define the replication relay source for this cluster
897
+ composite_master = entry["tags"][COMPOSITE_CLUSTER_MASTER]
898
+ if composite_master.to_s() == ""
899
+ raise MessageError.new "Unable to create the '#{composite_svc}' composite cluster because #{entry['hostname']} does not define the '#{COMPOSITE_CLUSTER_MASTER}' tag."
900
+ end
901
+
902
+ # Set relay-source if this cluster is not the master
903
+ # Throw an error if another cluster has specified a different
904
+ # composite cluster master
905
+ unless map[svc].has_key?("relay-source")
906
+ unless composite_master == svc
907
+ map[svc]["relay-source"] = composite_master
908
+ end
909
+ else
910
+ if composite_master != map[svc]["relay-source"]
911
+ raise MessageError.new "Unable to create the '#{composite_svc}' composite cluster because #{entry['hostname']} defines a different '#{COMPOSITE_CLUSTER_MASTER}' than other hosts."
912
+ end
913
+ end
914
+ end
915
+
916
+ # Only use the passive witness if it was defined for this host
917
+ # The check makes sure that entry is the directory entry for the current host
918
+ if entry["hostname"] == opt(:hostname)
919
+ if entry["tags"][CLUSTER_PASSIVE_WITNESS].to_s() != ""
920
+ map[svc]["passive-witness"] = entry["tags"][CLUSTER_PASSIVE_WITNESS]
921
+ end
922
+ end
923
+ end
924
+
925
+ def generate_witness_services_map(map, entry)
926
+ svc = entry["tags"][CLUSTER_NAME]
927
+ if svc.to_s() == ""
928
+ raise MessageError.new "Unable to create the cluster on #{entry['hostname']} because it does not have a value for the '#{CLUSTER_NAME}' tag."
929
+ end
930
+
931
+ unless map.has_key?(svc)
932
+ map[svc] = {}
933
+ end
934
+
935
+ # Initiate the cluster
936
+ map[svc]["topology"] = "clustered"
937
+
938
+ map[svc]["active-witness"] = entry["hostname"]
939
+ end
940
+
941
+ def generate_connector_services_map(map, entry)
942
+ services = entry["tags"][CONNECTOR_CLUSTERS]
943
+ if services.to_s() == ""
944
+ services = entry["tags"][CLUSTER_NAME]
945
+ end
946
+ if services.to_s() == ""
947
+ raise MessageError.new "Unable to create the connector on #{entry['hostname']} because it does not have a value for the '#{CONNECTOR_CLUSTERS}' tag."
948
+ end
949
+
950
+ unless services.is_a?(Array)
951
+ services = services.split(",")
952
+ end
953
+ services.each{
954
+ |svc|
955
+ unless map.has_key?(svc)
956
+ map[svc] = {}
957
+ end
958
+ unless map[svc].has_key?("connectors")
959
+ map[svc]["connectors"] = []
960
+ end
961
+
962
+ # Add this host to each cluster
963
+ map[svc]["connectors"] << entry["hostname"]
964
+ }
965
+ end
966
+
967
+ def generate_archive_services_map(map, entry)
968
+ services = entry["tags"][ARCHIVE_CLUSTERS]
969
+ if services.to_s() == ""
970
+ raise MessageError.new "Unable to create the archive replicator on #{entry['hostname']} because it does not have a value for the '#{ARCHIVE_CLUSTERS}' tag."
971
+ end
972
+
973
+ unless services.is_a?(Array)
974
+ services = services.split(",")
975
+ end
976
+ services.each{
977
+ |svc|
978
+ svc_alias = "#{svc}_slave"
979
+ unless map.has_key?(svc_alias)
980
+ map[svc_alias] = {}
981
+ end
982
+ unless map[svc_alias].has_key?("members")
983
+ map[svc_alias]["members"] = []
984
+ end
985
+
986
+ # Initiate the cluster-slave service
987
+ map[svc_alias]["master-dataservice"] = svc
988
+ map[svc_alias]["topology"] = "cluster-slave"
989
+
990
+ # Add this host to the list of servers that is
991
+ # replicating from the cluster
992
+ map[svc_alias]["members"] << entry["hostname"]
993
+ }
994
+ end
995
+
996
+ # Remove any services that aren't required for the given entry
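+ # A service is kept if this host is a member or connector of it, if it is
+ # part of the host's composite cluster, or if the host replicates from it
+ # as an archive.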
997
+ def filter_services_map(map, entry)
998
+ # Filter the services down to the services that are needed to configure
999
+ # this server
1000
+ allowed_services = []
1001
+ # - Services it is a member or connector of
1002
+ unless entry["tags"][CLUSTER_NAME].to_s() == ""
1003
+ allowed_services << entry["tags"][CLUSTER_NAME].to_s()
1004
+ end
1005
+ services = entry["tags"][CONNECTOR_CLUSTERS]
1006
+ unless services.to_s() == ""
1007
+ unless services.is_a?(Array)
1008
+ services = services.split(",")
1009
+ end
1010
+ services.each{
1011
+ |svc|
1012
+ allowed_services << svc
1013
+ }
1014
+ end
1015
+
1016
+ # - Services that are part of its composite cluster
1017
+ unless entry["tags"][COMPOSITE_CLUSTER_NAME].to_s() == ""
1018
+ allowed_services << entry["tags"][COMPOSITE_CLUSTER_NAME].to_s()
1019
+ allowed_services = allowed_services +
1020
+ map[entry["tags"][COMPOSITE_CLUSTER_NAME].to_s()]["composite-datasources"]
1021
+ end
1022
+ # - Services that it replicates from
1023
+ services = entry["tags"][ARCHIVE_CLUSTERS]
1024
+ unless services.to_s() == ""
1025
+ unless services.is_a?(Array)
1026
+ services = services.split(",")
1027
+ end
1028
+ services.each{
1029
+ |svc|
1030
+ allowed_services << svc
1031
+ allowed_services << "#{svc}_slave"
1032
+ }
1033
+ end
1034
+
1035
+ # Remove any services that aren't needed
1036
+ map.delete_if {
1037
+ |k,v|
1038
+ (allowed_services.include?(k) == false)
1039
+ }
1040
+
1041
+ map.keys().each{
1042
+ |svc|
1043
+ # The cluster-slave topology just needs to know about itself
1044
+ # Including other entries may cause extra replicator restarts
1045
+ if map[svc]["topology"] == "cluster-slave"
1046
+ map[svc]["members"].delete_if{|v| v != entry["hostname"]}
1047
+ elsif map[svc]["topology"] == "clustered"
1048
+ # Pick a default master for this dataservice using the first member
1049
+ # when sorted alphabetically. The role may be changed when Continuent
1050
+ # is started the first time if another member is already running as
1051
+ # the master.
1052
+ map[svc]["master"] = map[svc]["members"].sort()[0]
1053
+
1054
+ if map[svc].has_key?("active-witness")
1055
+ map[svc]["witnesses"] = map[svc]["active-witness"]
1056
+ map[svc]["enable-active-witnesses"] = "true"
1057
+
1058
+ map[svc]["members"] = map[svc]["members"] << map[svc]["active-witness"]
1059
+ map[svc]["members"] = map[svc]["members"].uniq().sort()
1060
+ else
1061
+ members_count = map[svc]["members"].size()
1062
+ even_members = (members_count % 2 == 0)
1063
+ if even_members == true || members_count == 1
1064
+ if map[svc].has_key?("passive-witness")
1065
+ map[svc]["witnesses"] = map[svc]["passive-witness"]
1066
+ end
1067
+ end
1068
+ end
1069
+
1070
+ map[svc].delete("passive-witness")
1071
+ map[svc].delete("active-witness")
1072
+ end
1073
+ }
1074
+
1075
+ if map.keys().size() == 0
1076
+ raise MessageError.new("Unable to manage configuration because there are no services defined for #{entry['hostname']}")
1077
+ end
1078
+
1079
+ return map
1080
+ end
1081
+
1082
+ # Create an INI file that is fully sorted and optimized for the host
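+ # Illustrative output (abridged) for a hypothetical three-node cluster "east":
+ #
+ #   # DO NOT MODIFY BY HAND
+ #   # This file is managed by tungsten_manage_configuration for db1
+ #   # Any manual changes to this file will disable script execution
+ #   [defaults]
+ #   ...values copied from /etc/tungsten/defaults.tungsten.ini...
+ #
+ #   [east]
+ #   master=db1
+ #   members=db1,db2,db3
+ #   skip-validation-check=ManagerWitnessNeededCheck
+ #   topology=clustered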
1083
+ def write_ini_configuration(map, entry)
1084
+ ini = Tempfile.new('tmcini')
1085
+ ini.puts("# DO NOT MODIFY BY HAND")
1086
+ ini.puts("# This file is managed by #{script_name()} for #{entry['hostname']}")
1087
+ ini.puts("# Any manual changes to this file will disable script execution")
1088
+
1089
+ @defaults_files.sort().each{
1090
+ |path|
1091
+ TU.parse_ini_file(path, false).each{
1092
+ |section,values|
1093
+ ini.puts("[#{section}]")
1094
+ filter_defaults_values(values).each{
1095
+ |value|
1096
+ ini.puts(value)
1097
+ }
1098
+ }
1099
+ }
1100
+
1101
+ # Output each of the services with the keys sorted so they
1102
+ # come out in a consistent order
1103
+ map.keys().sort().each{
1104
+ |svc|
1105
+ ini.puts("")
1106
+ ini.puts("[#{svc}]")
1107
+
1108
+ # Print all configuration values in a sorted order
1109
+ map[svc].keys().sort().each{
1110
+ |key|
1111
+ value = map[svc][key]
1112
+ if value.is_a?(Array)
1113
+ value = value.uniq().sort().join(",")
1114
+ end
1115
+
1116
+ ini.puts("#{key}=#{value}")
1117
+ }
1118
+
1119
+ # Include additional service settings
1120
+ path = "/etc/tungsten/service.#{svc}.tungsten.ini"
1121
+ if File.exists?(path)
1122
+ TU.parse_ini_file(path, false).each{
1123
+ |section,values|
1124
+ unless section == svc || section == "__anonymous__"
1125
+ next
1126
+ end
1127
+
1128
+ filter_defaults_values(values).each{
1129
+ |value|
1130
+ ini.puts(value)
1131
+ }
1132
+ }
1133
+ end
1134
+ }
1135
+
1136
+ ini.flush()
1137
+ return ini
1138
+ end
1139
+
1140
+ def filter_defaults_values(values)
1141
+ values.delete_if{
1142
+ |v|
1143
+ if v =~ /^start=/
1144
+ true
1145
+ elsif v =~ /^start-and-report=/
1146
+ true
1147
+ else
1148
+ false
1149
+ end
1150
+ }
1151
+ end
1152
+
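+ # Build a users.map file by concatenating the "users.defaults" and
+ # "users.<service>" templates found in --usersmap-templates-directory.
+ # Returns false when no templates exist for the given services.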
1153
+ def write_usersmap(service_list, entry)
1154
+ usersmap = Tempfile.new('tmcusersmap')
1155
+ usersmap.puts("# DO NOT MODIFY BY HAND")
1156
+ usersmap.puts("# This file is managed by #{script_name()} for #{entry['hostname']}")
1157
+ usersmap.puts("# Any manual changes to this file will disable script execution")
1158
+
1159
+ users_found = false
1160
+ search = ["defaults"] + service_list.sort()
1161
+ search.each{
1162
+ |svc|
1163
+ path = "#{opt(:usersmap_templates_directory)}/users.#{svc}"
1164
+ if File.exists?(path)
1165
+ File.open(path, "r").each{
1166
+ |line|
1167
+ usersmap.puts(line.chomp())
1168
+ users_found = true
1169
+ }
1170
+ end
1171
+ }
1172
+
1173
+ if users_found == false
1174
+ return false
1175
+ end
1176
+
1177
+ usersmap.flush()
1178
+ usersmap
1179
+ end
1180
+
1181
+ def configure
1182
+ super()
1183
+
1184
+ require_installed_directory?(false)
1185
+
1186
+ add_option(:defaults, {
1187
+ :on => "--defaults String",
1188
+ :help => "Path to file containing the default sections of the tungsten.ini file",
1189
+ :default => "/etc/tungsten/defaults.tungsten.ini"
1190
+ })
1191
+
1192
+ add_option(:outputfile, {
1193
+ :on => "--output-file String",
1194
+ :help => "Path to file containing the default sections of the tungsten.ini file",
1195
+ :default => "/etc/tungsten/tungsten.ini"
1196
+ })
1197
+
1198
+ add_option(:hostname, {
1199
+ :on => "--hostname String",
1200
+ :help => "Write the INI file for this hostname",
1201
+ :default => TU.hostname()
1202
+ })
1203
+
1204
+ add_option(:continuent_tungsten_package, {
1205
+ :on => "--continuent-tungsten-package String",
1206
+ :help => "Path to the Continuent Tungsten package to be installed"
1207
+ })
1208
+
1209
+ add_option(:tungsten_replicator_package, {
1210
+ :on => "--tungsten-replicator-package String",
1211
+ :help => "Path to the Tungsten Replicator package to be installed"
1212
+ })
1213
+
1214
+ add_option(:replace_release, {
1215
+ :on => "--replace-release String",
1216
+ :parse => method(:parse_boolean_option),
1217
+ :help => "Force the script to run tpm update or install even if there are no changes to the INI file",
1218
+ })
1219
+
1220
+ add_option(:usersmap_templates_directory, {
1221
+ :on => "--usersmap-templates-directory String",
1222
+ :help => "Location to find templates for the users.map file",
1223
+ :default => "/etc/tungsten/users"
1224
+ })
1225
+
1226
+ add_option(:log, {
1227
+ :on => "--log String",
1228
+ :help => "Log debug output to this file for every run that modifies system configuration",
1229
+ })
1230
+
1231
+ add_option(:lastrun, {
1232
+ :on => "--lastrun String",
1233
+ :help => "A file to store the exit code for the last run of this script",
1234
+ })
1235
+
1236
+ add_option(:manage_etc_hosts, {
1237
+ :on => "--manage-etc-hosts String",
1238
+ :help => "Update the hosts configuration during each run of #{script_name()}. Valid values: puppet."
1239
+ })
1240
+
1241
+ add_option(:provision_slave_arguments, {
1242
+ :on => "--provision-slave-arguments String",
1243
+ :help => "Additional arguments to pass along to the tungsten_provision_slave command"
1244
+ })
1245
+
1246
+ add_option(:hook_error, {
1247
+ :on => "--hook-error String",
1248
+ :help => "Call this script if the command does not finish successfully"
1249
+ })
1250
+
1251
+ add_option(:hook_before_install, {
1252
+ :on => "--hook-before-install String",
1253
+ :help => "Call this script before starting the install command"
1254
+ })
1255
+
1256
+ add_option(:hook_after_install, {
1257
+ :on => "--hook-after-install String",
1258
+ :help => "Call this script after starting the install command"
1259
+ })
1260
+
1261
+ add_option(:hook_before_uninstall, {
1262
+ :on => "--hook-before-uninstall String",
1263
+ :help => "Call this script before finishing the uninstall command"
1264
+ })
1265
+
1266
+ add_option(:hook_after_uninstall, {
1267
+ :on => "--hook-after-uninstall String",
1268
+ :help => "Call this script after finishing the uninstall command"
1269
+ })
1270
+
1271
+ add_option(:hook_before_ct_install, {
1272
+ :on => "--hook-before-ct-install String",
1273
+ :help => "Call this script before installing Continuent Tungsten"
1274
+ })
1275
+
1276
+ add_option(:hook_after_ct_install, {
1277
+ :on => "--hook-after-ct-install String",
1278
+ :help => "Call this script after installing Continuent Tungsten"
1279
+ })
1280
+
1281
+ add_option(:hook_before_ct_update, {
1282
+ :on => "--hook-before-ct-update String",
1283
+ :help => "Call this script before updating Continuent Tungsten"
1284
+ })
1285
+
1286
+ add_option(:hook_after_ct_update, {
1287
+ :on => "--hook-after-ct-update String",
1288
+ :help => "Call this script after updating Continuent Tungsten"
1289
+ })
1290
+
1291
+ add_option(:hook_before_tr_install, {
1292
+ :on => "--hook-before-tr-install String",
1293
+ :help => "Call this script before installing Tungsten Replicator"
1294
+ })
1295
+
1296
+ add_option(:hook_after_tr_install, {
1297
+ :on => "--hook-after-tr-install String",
1298
+ :help => "Call this script after installing Tungsten Replicator"
1299
+ })
1300
+
1301
+ add_option(:hook_before_tr_update, {
1302
+ :on => "--hook-before-tr-update String",
1303
+ :help => "Call this script before updating Tungsten Replicator"
1304
+ })
1305
+
1306
+ add_option(:hook_after_tr_update, {
1307
+ :on => "--hook-after-tr-update String",
1308
+ :help => "Call this script after updating Tungsten Replicator"
1309
+ })
1310
+
1311
+ add_command(:install, {
1312
+ :help => "Write the INI configuration file and install/update software",
1313
+ :default => true
1314
+ })
1315
+
1316
+ add_command(:uninstall, {
1317
+ :help => "Remove managed software from this system"
1318
+ })
1319
+
1320
+ add_command(:repair, {
1321
+ :help => "Look for issues with the current configuration and attempt to fix them"
1322
+ })
1323
+
1324
+ add_command(:lastrun, {
1325
+ :help => "Output the exit code for the last run of #{script_name()}"
1326
+ })
1327
+
1328
+ add_command(:ini, {
1329
+ :help => "Output the INI contents that would be written"
1330
+ })
1331
+
1332
+ add_command(:hosts, {
1333
+ :help => "Output /etc/hosts entries for the directory hosts"
1334
+ })
1335
+
1336
+ add_command(:hosts_puppet_manifest, {
1337
+ :help => "Output a Puppet manifest for the directory hosts"
1338
+ })
1339
+ end
1340
+
1341
+ def validate
1342
+ super()
1343
+
1344
+ unless TU.is_valid?()
1345
+ return TU.is_valid?()
1346
+ end
1347
+
1348
+ unless File.exists?("/etc/tungsten") && File.writable?("/etc/tungsten")
1349
+ TU.error("The /etc/tungsten directory either does not exist or is not writeable")
1350
+ end
1351
+
1352
+ # Make sure that the tungsten.ini file is managed if it exists
1353
+ if File.exists?(opt(:outputfile))
1354
+ matched_lines = TU.cmd_result("grep #{script_name()} #{opt(:outputfile)} | wc -l", true)
1355
+ if matched_lines.to_s() != "1"
1356
+ TU.error("Unable to manage #{opt(:outputfile)} because it already exists and was not created by #{script_name()}")
1357
+ else
1358
+ begin
1359
+ validate_managed_file(opt(:outputfile))
1360
+ rescue MessageError => me
1361
+ TU.error(me.message)
1362
+ end
1363
+ end
1364
+ elsif require_directory_configuration?()
1365
+ TU.error("Unable to run '#{command()}' because #{opt(:outputfile)} isn't managed by #{script_name()}")
1366
+ end
1367
+
1368
+ if require_valid_defaults_files?()
1369
+ validate_defaults_files()
1370
+ end
1371
+
1372
+ case command()
1373
+ when "install"
1374
+ if opt(:continuent_tungsten_package) != nil
1375
+ unless File.exists?(opt(:continuent_tungsten_package))
1376
+ TU.error("Unable to find the Continuent Tungsten package at #{opt(:continuent_tungsten_package)}")
1377
+ end
1378
+ end
1379
+
1380
+ if opt(:tungsten_replicator_package) != nil
1381
+ unless File.exists?(opt(:tungsten_replicator_package))
1382
+ TU.error("Unable to find the Tungsten Replicator package at #{opt(:tungsten_replicator_package)}")
1383
+ end
1384
+ end
1385
+ when "lastrun"
1386
+ if opt(:lastrun) == nil
1387
+ TU.error("The 'lastrun' command is not supported because there is no configuration value for '--lastrun'.")
1388
+ end
1389
+ end
1390
+ end
1391
+
1392
+ def require_valid_defaults_files?()
1393
+ case command()
1394
+ when "install"
1395
+ true
1396
+ when "ini"
1397
+ true
1398
+ else
1399
+ false
1400
+ end
1401
+ end
1402
+
1403
+ def require_directory_configuration?()
1404
+ case command()
1405
+ when "uninstall"
1406
+ true
1407
+ when "repair"
1408
+ true
1409
+ else
1410
+ false
1411
+ end
1412
+ end
1413
+
1414
+ def manage_lastrun_file?()
1415
+ case command()
1416
+ when "install"
1417
+ true
1418
+ when "uninstall"
1419
+ true
1420
+ else
1421
+ false
1422
+ end
1423
+ end
1424
+
1425
+ def validate_defaults_files
1426
+ @defaults_files = []
1427
+ Dir.glob(opt(:defaults)).each{
1428
+ |f|
1429
+ @defaults_files << f
1430
+ }
1431
+
1432
+ if @defaults_files.size() == 0
1433
+ TU.error("Unable to find any defaults files at #{opt(:defaults)}")
1434
+ end
1435
+ end
1436
+
1437
+ # Check the server types for this host and validate we have enough information
1438
+ def validate_entry(entry)
1439
+ if entry_is_type?(entry, ["datasource", "connector"])
1440
+ if opt(:continuent_tungsten_package) == nil
1441
+ TU.error("Unable to manage #{opt(:hostname)} because it includes the 'datasource' or 'connector' #{SERVER_TYPE} tag and no argument was given for --continuent-tungsten-package")
1442
+ end
1443
+ end
1444
+
1445
+ if entry_is_type?(entry, "archive")
1446
+ unless opt(:tungsten_replicator_package)
1447
+ TU.error("Unable to manage #{opt(:hostname)} because it includes the 'archive' #{SERVER_TYPE} tag and no argument was given for --tungsten-replicator-package")
1448
+ end
1449
+ end
1450
+ end
1451
+
1452
+ def validate_pending_ini_contents(entry, pending_contents)
1453
+ ini = Properties.new()
1454
+ ini.props = pending_contents
1455
+
1456
+ if entry_is_type?(entry, ["datasource", "connector"])
1457
+ continuent_tungsten_home = ini.getProperty(["defaults", "home-directory"])
1458
+ if continuent_tungsten_home == nil
1459
+ continuent_tungsten_home = ini.getProperty(["defaults", "install-directory"])
1460
+ end
1461
+ if continuent_tungsten_home == nil
1462
+ TU.error("Unable to manage #{opt(:hostname)} because it includes the 'datasource' or 'connector' #{SERVER_TYPE} tag but the INI defaults do not include a value for 'home-directory' under '[defaults]'.")
1463
+ else
1464
+ opt(:continuent_tungsten_home, continuent_tungsten_home)
1465
+ end
1466
+ end
1467
+
1468
+ if entry_is_type?(entry, "datasource")
1469
+ if File.exists?("#{opt(:continuent_tungsten_home)}/tungsten")
1470
+ install = TungstenInstall.new("#{opt(:continuent_tungsten_home)}/tungsten")
1471
+
1472
+ svc = install.setting("deployment_dataservice")
1473
+ composite_svc = install.setting(install.setting_key("dataservices", svc, "dataservice_parent_dataservice"))
1474
+
1475
+ pending_svc = entry["tags"][CLUSTER_NAME]
1476
+ pending_composite_svc = entry["tags"][COMPOSITE_CLUSTER_NAME]
1477
+
1478
+ if svc != pending_svc
1479
+ TU.error("Unable to migrate the cluster name from #{svc} to #{pending_svc}")
1480
+ end
1481
+
1482
+ if composite_svc.to_s() != "" and composite_svc != pending_composite_svc
1483
+ TU.error("Unable to migrate the composite cluster name from #{composite_svc} to #{pending_composite_svc}")
1484
+ end
1485
+ end
1486
+ end
1487
+
1488
+ if entry_is_type?(entry, "archive")
1489
+ tungsten_replicator_home = ini.getProperty(["defaults.replicator", "home-directory"])
1490
+ if tungsten_replicator_home == nil
1491
+ tungsten_replicator_home = ini.getProperty(["defaults.replicator", "install-directory"])
1492
+ end
1493
+ if tungsten_replicator_home == nil
1494
+ TU.error("Unable to manage #{opt(:hostname)} because it includes the 'archive' #{SERVER_TYPE} tag but the INI defaults do not include a value for 'home-directory' under '[defaults.replicator]'.")
1495
+ else
1496
+ opt(:tungsten_replicator_home, tungsten_replicator_home)
1497
+ end
1498
+
1499
+ if ini.getProperty(["defaults.replicator", "rmi-port"]) == nil
1500
+ TU.error("Unable to manage #{opt(:hostname)} because it includes the 'archive' #{SERVER_TYPE} tag but the INI defaults do not include a value for 'rmi-port' under '[defaults.replicator]'.")
1501
+ end
1502
+ end
1503
+ end
1504
+
1505
+ def cleanup(code = 0)
1506
+ if opt(:lastrun) != nil && manage_lastrun_file?()
1507
+ begin
1508
+ File.open(opt(:lastrun), "w") {
1509
+ |f|
1510
+ f.puts(code)
1511
+ }
1512
+ rescue => e
1513
+ TU.exception(e)
1514
+ end
1515
+ end
1516
+
1517
+ if code != 0
1518
+ call_hook(:hook_error, ["--rc=#{code}"])
1519
+ end
1520
+
1521
+ super(code)
1522
+ end
1523
+
1524
+ def get_original_file(file)
1525
+ File.dirname(file) + "/." + File.basename(file) + ".orig"
1526
+ end
1527
+
1528
+ # Validate that a managed file still matches the original version of it.
1529
+ # The lack of an original file may be ignored if you want to take over
1530
+ # management of this file.
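+ # The original copy is stored alongside the managed file as
+ # ".<filename>.orig" (see get_original_file above).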
1531
+ def validate_managed_file(file, ignore_missing_original = false)
1532
+ # If the file doesn't match its stored .orig copy then changes have been made
1533
+ original_file = get_original_file(file)
1534
+
1535
+ if File.exists?(original_file)
1536
+ begin
1537
+ file_differences = TU.cmd_result("diff -u #{original_file} #{file}")
1538
+ # No differences
1539
+ rescue CommandError => ce
1540
+ if ce.rc == 1
1541
+ raise MessageError.new("Unable to manage #{file} because changes to it have been made manually")
1542
+ else
1543
+ raise ce
1544
+ end
1545
+ end
1546
+ elsif ignore_missing_original == false
1547
+ raise MessageError.new("Unable to manage #{file} because the tracking version #{original_file} is no longer available")
1548
+ end
1549
+ end
1550
+
1551
+ # Take the new file for a given path and update the target file only
1552
+ # if there is a change to it. Return true if the file was replaced, and
1553
+ # false if no change was made.
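+ # For example, replace_managed_file(opt(:outputfile), pending_ini) copies the
+ # pending Tempfile over the target and refreshes the ".orig" tracking copy
+ # only when the MD5 checksums differ.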
1554
+ def replace_managed_file(path, pending_file)
1555
+ # Rewind and calculate the pending md5sum
1556
+ pending_file.rewind()
1557
+ pending_md5sum = Digest::MD5.hexdigest(pending_file.read())
1558
+
1559
+ # Calculate the starting signature and contents of the INI file
1560
+ initial_md5sum = nil
1561
+ if File.exists?(path)
1562
+ File.open(path, "r") {
1563
+ |f|
1564
+ initial_md5sum = Digest::MD5.hexdigest(f.read())
1565
+ }
1566
+ end
1567
+
1568
+ if initial_md5sum != pending_md5sum
1569
+ enable_script_log()
1570
+
1571
+ # Add a diff to the debug log for review purposes
1572
+ TU.cmd_result("diff -u #{path} #{pending_file.path()}", true)
1573
+
1574
+ TU.debug("Update the contents of #{path}")
1575
+ FileUtils.cp(pending_file.path(), path)
1576
+ FileUtils.cp(pending_file.path(), get_original_file(path))
1577
+
1578
+ return true
1579
+ else
1580
+ TU.debug("There are no changes to #{path}")
1581
+
1582
+ # Make sure the original file exists since a `tpm update` may not
1583
+ # bring it across when upgrading versions.
1584
+ unless File.exists?(get_original_file(path))
1585
+ FileUtils.cp(path, get_original_file(path))
1586
+ end
1587
+
1588
+ return false
1589
+ end
1590
+ end
1591
+
1592
+ # Does the directory entry contain a matching ServerType
1593
+ def entry_is_type?(entry, type)
1594
+ unless entry["tags"].has_key?(SERVER_TYPE)
1595
+ return false
1596
+ else
1597
+ unless type.is_a?(Array)
1598
+ type = [type]
1599
+ end
1600
+ type.each{
1601
+ |t|
1602
+ if entry["tags"][SERVER_TYPE].include?(t)
1603
+ return true
1604
+ end
1605
+ }
1606
+
1607
+ return false
1608
+ end
1609
+ end
1610
+
1611
+ # Call the prescribed hook as a script
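+ # Every hook receives --tungsten-script-name, --tungsten-script-command and
+ # --tungsten-script-hook in addition to any hook-specific arguments (for
+ # example --add-cluster=east during a cluster update; the cluster name here
+ # is illustrative).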
1612
+ def call_hook(hookname, arguments = [])
1613
+ if opt(hookname).to_s() == ""
1614
+ return
1615
+ end
1616
+
1617
+ if script_name() != ""
1618
+ arguments << "--tungsten-script-name=#{script_name()}"
1619
+ end
1620
+ if command() != ""
1621
+ arguments << "--tungsten-script-command=#{command()}"
1622
+ end
1623
+ arguments << "--tungsten-script-hook=#{hookname.to_s()}"
1624
+
1625
+ begin
1626
+ TU.cmd_result("#{opt(hookname)} #{arguments.join(" ")}")
1627
+ rescue CommandError => ce
1628
+ raise "There were errors while executing #{opt(hookname)}"
1629
+ end
1630
+ end
1631
+
1632
+ def script_name
1633
+ "tungsten_manage_configuration"
1634
+ end
1635
+
1636
+ def enable_script_log
1637
+ if opt(:log) != nil
1638
+ if @script_log_enabled != true
1639
+ TU.set_log_path(opt(:log))
1640
+ end
1641
+ @script_log_enabled = true
1642
+ end
1643
+ end
1644
+
1645
+ def script_log_path
1646
+ if @script_log_enabled == true
1647
+ opt(:log)
1648
+ else
1649
+ super()
1650
+ end
1651
+ end
1652
+
1653
+ self.new().run()
1654
+ end