manageiq-appliance_console 1.0.0

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Files changed (50)
  1. checksums.yaml +7 -0
  2. data/.codeclimate.yml +47 -0
  3. data/.gitignore +12 -0
  4. data/.rspec +4 -0
  5. data/.rspec_ci +4 -0
  6. data/.rubocop.yml +4 -0
  7. data/.rubocop_cc.yml +5 -0
  8. data/.rubocop_local.yml +2 -0
  9. data/.travis.yml +19 -0
  10. data/Gemfile +6 -0
  11. data/LICENSE.txt +202 -0
  12. data/README.md +45 -0
  13. data/Rakefile +6 -0
  14. data/bin/appliance_console +661 -0
  15. data/bin/appliance_console_cli +7 -0
  16. data/lib/manageiq-appliance_console.rb +51 -0
  17. data/lib/manageiq/appliance_console/certificate.rb +146 -0
  18. data/lib/manageiq/appliance_console/certificate_authority.rb +140 -0
  19. data/lib/manageiq/appliance_console/cli.rb +363 -0
  20. data/lib/manageiq/appliance_console/database_configuration.rb +286 -0
  21. data/lib/manageiq/appliance_console/database_maintenance.rb +35 -0
  22. data/lib/manageiq/appliance_console/database_maintenance_hourly.rb +58 -0
  23. data/lib/manageiq/appliance_console/database_maintenance_periodic.rb +84 -0
  24. data/lib/manageiq/appliance_console/database_replication.rb +146 -0
  25. data/lib/manageiq/appliance_console/database_replication_primary.rb +59 -0
  26. data/lib/manageiq/appliance_console/database_replication_standby.rb +166 -0
  27. data/lib/manageiq/appliance_console/date_time_configuration.rb +117 -0
  28. data/lib/manageiq/appliance_console/errors.rb +5 -0
  29. data/lib/manageiq/appliance_console/external_auth_options.rb +153 -0
  30. data/lib/manageiq/appliance_console/external_database_configuration.rb +34 -0
  31. data/lib/manageiq/appliance_console/external_httpd_authentication.rb +157 -0
  32. data/lib/manageiq/appliance_console/external_httpd_authentication/external_httpd_configuration.rb +249 -0
  33. data/lib/manageiq/appliance_console/internal_database_configuration.rb +187 -0
  34. data/lib/manageiq/appliance_console/key_configuration.rb +118 -0
  35. data/lib/manageiq/appliance_console/logfile_configuration.rb +117 -0
  36. data/lib/manageiq/appliance_console/logger.rb +23 -0
  37. data/lib/manageiq/appliance_console/logging.rb +102 -0
  38. data/lib/manageiq/appliance_console/logical_volume_management.rb +94 -0
  39. data/lib/manageiq/appliance_console/principal.rb +46 -0
  40. data/lib/manageiq/appliance_console/prompts.rb +211 -0
  41. data/lib/manageiq/appliance_console/scap.rb +53 -0
  42. data/lib/manageiq/appliance_console/temp_storage_configuration.rb +79 -0
  43. data/lib/manageiq/appliance_console/timezone_configuration.rb +58 -0
  44. data/lib/manageiq/appliance_console/utilities.rb +67 -0
  45. data/lib/manageiq/appliance_console/version.rb +5 -0
  46. data/locales/appliance/en.yml +42 -0
  47. data/locales/container/en.yml +30 -0
  48. data/manageiq-appliance_console.gemspec +40 -0
  49. data/zanata.xml +7 -0
  50. metadata +317 -0
data/lib/manageiq/appliance_console/database_replication.rb
@@ -0,0 +1,146 @@
+ require 'pg'
+ require 'English'
+
+ module ManageIQ
+   module ApplianceConsole
+     class DatabaseReplication
+       include ManageIQ::ApplianceConsole::Logging
+
+       REPMGR_CONFIG = '/etc/repmgr.conf'.freeze
+       REPMGR_LOG = '/var/log/repmgr/repmgrd.log'.freeze
+       PGPASS_FILE = '/var/lib/pgsql/.pgpass'.freeze
+
+       attr_accessor :cluster_name, :node_number, :database_name, :database_user,
+                     :database_password, :primary_host
+
+       def ask_for_unique_cluster_node_number
+         self.node_number = ask_for_integer("number uniquely identifying this node in the replication cluster")
+       end
+
+       def ask_for_database_credentials
+         ask_for_cluster_database_credentials
+         self.primary_host = ask_for_ip_or_hostname("primary database hostname or IP address", primary_host)
+       end
+
+       def confirm
+         clear_screen
+         say(<<-EOL)
+           Replication Server Configuration
+
+           Cluster Node Number: #{node_number}
+           Cluster Database Name: #{database_name}
+           Cluster Database User: #{database_user}
+           Cluster Database Password: "********"
+           Cluster Primary Host: #{primary_host}
+         EOL
+       end
+
+       def repmgr_configured?
+         File.exist?(REPMGR_CONFIG)
+       end
+
+       def confirm_reconfiguration
+         say("Warning: File #{REPMGR_CONFIG} exists. Replication is already configured")
+         logger.warn("Warning: File #{REPMGR_CONFIG} exists. Replication is already configured")
+         agree("Continue with configuration? (Y/N): ")
+       end
+
+       def create_config_file(host)
+         File.write(REPMGR_CONFIG, config_file_contents(host))
+         true
+       end
+
+       def config_file_contents(host)
+         <<-EOS.strip_heredoc
+           cluster=#{cluster_name}
+           node=#{node_number}
+           node_name=#{host}
+           conninfo='host=#{host} user=#{database_user} dbname=#{database_name}'
+           use_replication_slots=1
+           pg_basebackup_options='--xlog-method=stream'
+           failover=automatic
+           promote_command='repmgr standby promote'
+           follow_command='repmgr standby follow'
+           logfile=#{REPMGR_LOG}
+         EOS
+       end
+
+       def generate_cluster_name
+         begin
+           pg_conn = PG::Connection.new(primary_connection_hash)
+           primary_region_number =
+             pg_conn.exec("SELECT last_value FROM miq_databases_id_seq").first["last_value"].to_i / 1_000_000_000_000
+           self.cluster_name = "miq_region_#{primary_region_number}_cluster"
+         rescue PG::ConnectionBad => e
+           say("Failed to get primary region number #{e.message}")
+           logger.error("Failed to get primary region number #{e.message}")
+           return false
+         end
+         true
+       end
+
+       def write_pgpass_file
+         File.open(PGPASS_FILE, "w") do |f|
+           f.write("*:*:#{database_name}:#{database_user}:#{database_password}\n")
+           f.write("*:*:replication:#{database_user}:#{database_password}\n")
+         end
+
+         FileUtils.chmod(0600, PGPASS_FILE)
+         FileUtils.chown("postgres", "postgres", PGPASS_FILE)
+         true
+       end
+
+       private
+
+       def ask_for_cluster_database_credentials
+         self.database_name = just_ask("cluster database name", database_name)
+         self.database_user = just_ask("cluster database username", database_user)
+
+         count = 0
+         loop do
+           count += 1
+           password1 = ask_for_password("cluster database password", database_password)
+           # if they took the default, just bail
+           break if password1 == database_password
+           password2 = ask_for_password("cluster database password")
+           if password1 == password2
+             self.database_password = password1
+             break
+           elsif count > 1 # only reprompt password once
+             raise RuntimeError, "passwords did not match"
+           else
+             say("\nThe passwords did not match, please try again")
+           end
+         end
+       end
+
+       def run_repmgr_command(cmd, params = {})
+         pid = fork do
+           Process::UID.change_privilege(Process::UID.from_name("postgres"))
+           begin
+             res = AwesomeSpawn.run!(cmd, :params => params, :env => {"PGPASSWORD" => database_password})
+             say(res.output)
+           rescue AwesomeSpawn::CommandResultError => e
+             say(e.result.output)
+             say(e.result.error)
+             say("")
+             say("Failed to configure replication server")
+             raise
+           end
+         end
+
+         Process.wait(pid)
+         $CHILD_STATUS.success?
+       end
+
+       def primary_connection_hash
+         {
+           :dbname   => database_name,
+           :host     => primary_host,
+           :user     => database_user,
+           :password => database_password
+         }
+       end
+     end # class DatabaseReplication
+   end # module ApplianceConsole
+ end
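For orientation, a minimal usage sketch of the helpers above (not part of the package; the values and the driving code are hypothetical, and `strip_heredoc` assumes ActiveSupport is loaded, as it is elsewhere in this gem):

    # Hypothetical values, for illustration only.
    repl = ManageIQ::ApplianceConsole::DatabaseReplication.new
    repl.cluster_name  = "miq_region_1_cluster"
    repl.node_number   = 1
    repl.database_name = "vmdb_production"
    repl.database_user = "root"

    # Renders the repmgr config that create_config_file writes to /etc/repmgr.conf:
    puts repl.config_file_contents("10.0.0.1")
    #   cluster=miq_region_1_cluster
    #   node=1
    #   node_name=10.0.0.1
    #   conninfo='host=10.0.0.1 user=root dbname=vmdb_production'
    #   ...

Note that generate_cluster_name derives the region number from the primary's miq_databases_id_seq: for example, a last_value of 1_000_000_000_123 gives 1_000_000_000_123 / 1_000_000_000_000 = region 1, hence "miq_region_1_cluster".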
data/lib/manageiq/appliance_console/database_replication_primary.rb
@@ -0,0 +1,59 @@
+ module ManageIQ
+   module ApplianceConsole
+     class DatabaseReplicationPrimary < DatabaseReplication
+       include ManageIQ::ApplianceConsole::Logging
+
+       REGISTER_CMD = 'repmgr master register'.freeze
+
+       def initialize
+         self.cluster_name = nil
+         self.node_number = nil
+         self.database_name = "vmdb_production"
+         self.database_user = "root"
+         self.database_password = nil
+         self.primary_host = LinuxAdmin::NetworkInterface.new(NETWORK_INTERFACE).address
+       end
+
+       def ask_questions
+         clear_screen
+         say("Establish Primary Replication Server\n")
+         ask_for_unique_cluster_node_number
+         ask_for_database_credentials
+         return false if repmgr_configured? && !confirm_reconfiguration
+         confirm
+       end
+
+       def confirm
+         super
+         agree("Apply this Replication Server Configuration? (Y/N): ")
+       end
+
+       def activate
+         say("Configuring Primary Replication Server...")
+         generate_cluster_name &&
+           create_config_file(primary_host) &&
+           initialize_primary_server &&
+           write_pgpass_file
+       end
+
+       def initialize_primary_server
+         run_repmgr_command(REGISTER_CMD) &&
+           add_repmgr_schema_to_search_path
+       end
+
+       def add_repmgr_schema_to_search_path
+         schema_name = "repmgr_#{cluster_name}"
+         begin
+           pg_conn = PG::Connection.new(primary_connection_hash)
+           new_path = pg_conn.exec("SHOW search_path").first["search_path"].split(",") << schema_name
+           pg_conn.exec("ALTER ROLE #{database_user} SET search_path = #{new_path.join(",")}")
+         rescue PG::ConnectionBad => e
+           say("Failed to add #{schema_name} to search path for #{database_user} #{e.message}")
+           logger.error("Failed to add #{schema_name} to search path for #{database_user} #{e.message}")
+           return false
+         end
+         true
+       end
+     end # class DatabaseReplicationPrimary < DatabaseReplication
+   end # module ApplianceConsole
+ end
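A hedged sketch of how this class is driven; the actual wiring lives in bin/appliance_console and is not shown in this hunk:

    primary = ManageIQ::ApplianceConsole::DatabaseReplicationPrimary.new
    # ask_questions returns false when the user declines reconfiguration,
    # so activation is gated on confirmation.
    primary.activate if primary.ask_questions
    # activate chains generate_cluster_name && create_config_file &&
    # initialize_primary_server && write_pgpass_file; the first failing
    # step short-circuits the rest and activate returns false.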
data/lib/manageiq/appliance_console/database_replication_standby.rb
@@ -0,0 +1,166 @@
+ require 'util/postgres_admin'
+ require 'fileutils'
+ require 'linux_admin'
+
+ module ManageIQ
+   module ApplianceConsole
+     class DatabaseReplicationStandby < DatabaseReplication
+       include ManageIQ::ApplianceConsole::Logging
+
+       REGISTER_CMD = 'repmgr standby register'.freeze
+       REPMGRD_SERVICE = 'rh-postgresql95-repmgr'.freeze
+
+       attr_accessor :disk, :standby_host, :run_repmgrd_configuration, :resync_data, :force_register
+
+       def initialize
+         self.cluster_name = nil
+         self.node_number = nil
+         self.database_name = "vmdb_production"
+         self.database_user = "root"
+         self.database_password = nil
+         self.primary_host = nil
+         self.standby_host = LinuxAdmin::NetworkInterface.new(NETWORK_INTERFACE).address
+         self.resync_data = false
+       end
+
+       def ask_questions
+         clear_screen
+         say("Establish Replication Standby Server\n")
+         return false if !data_dir_empty? && !confirm_data_resync
+         self.disk = ask_for_disk("Standby database disk")
+         ask_for_unique_cluster_node_number
+         ask_for_database_credentials
+         ask_for_standby_host
+         ask_for_repmgrd_configuration
+         return false unless node_number_valid?
+         return false if repmgr_configured? && !confirm_reconfiguration
+         confirm
+       end
+
+       def confirm
+         super
+         say(<<-EOS) if disk
+           Database Disk: #{disk.path}
+         EOS
+         say(<<-EOS)
+           Standby Host: #{standby_host}
+           Automatic Failover: #{run_repmgrd_configuration ? "enabled" : "disabled"}
+         EOS
+         agree("Apply this Replication Server Configuration? (Y/N): ")
+       end
+
+       def ask_for_standby_host
+         self.standby_host = ask_for_ip_or_hostname("Standby Server hostname or IP address", standby_host)
+       end
+
+       def ask_for_repmgrd_configuration
+         self.run_repmgrd_configuration = ask_yn?("Configure Replication Manager (repmgrd) for automatic failover")
+       end
+
+       def activate
+         say("Configuring Replication Standby Server...")
+         stop_postgres
+         stop_repmgrd
+         initialize_postgresql_disk if disk
+         PostgresAdmin.prep_data_directory if disk || resync_data
+         generate_cluster_name &&
+           create_config_file(standby_host) &&
+           clone_standby_server &&
+           start_postgres &&
+           register_standby_server &&
+           write_pgpass_file &&
+           (run_repmgrd_configuration ? start_repmgrd : true)
+       end
+
+       def data_dir_empty?
+         Dir[PostgresAdmin.data_directory.join("*")].empty?
+       end
+
+       def confirm_data_resync
+         logger.info("Appliance database found under: #{PostgresAdmin.data_directory}")
+         say("")
+         say("Appliance database found under: #{PostgresAdmin.data_directory}")
+         say("Replication standby server can not be configured if the database already exists")
+         say("Would you like to remove the existing database before configuring as a standby server?")
+         say(" WARNING: This is destructive. This will remove all previous data from this server")
+         self.resync_data = ask_yn?("Continue")
+       end
+
+       def clone_standby_server
+         params = { :h => primary_host,
+                    :U => database_user,
+                    :d => database_name,
+                    :D => PostgresAdmin.data_directory,
+                    nil => %w(standby clone)
+                  }
+         run_repmgr_command("repmgr", params)
+       end
+
+       def start_postgres
+         LinuxAdmin::Service.new(PostgresAdmin.service_name).enable.start
+         true
+       end
+
+       def stop_postgres
+         LinuxAdmin::Service.new(PostgresAdmin.service_name).stop
+         true
+       end
+
+       def register_standby_server
+         run_repmgr_command(REGISTER_CMD, :force => nil)
+       end
+
+       def start_repmgrd
+         LinuxAdmin::Service.new(REPMGRD_SERVICE).enable.start
+         true
+       rescue AwesomeSpawn::CommandResultError => e
+         message = "Failed to start repmgrd: #{e.message}"
+         logger.error(message)
+         say(message)
+         false
+       end
+
+       def stop_repmgrd
+         LinuxAdmin::Service.new(REPMGRD_SERVICE).stop
+         true
+       end
+
+       def node_number_valid?
+         rec = record_for_node_number
+
+         return true if rec.nil?
+         node_state = rec["active"] ? "active" : "inactive"
+
+         say("An #{node_state} #{rec["type"]} node (#{rec["name"]}) with the node number #{node_number} already exists")
+         ask_yn?("Would you like to continue configuration by overwriting the existing node", "N")
+
+       rescue PG::Error => e
+         error_msg = "Failed to validate node number #{node_number}. #{e.message}"
+         say(error_msg)
+         logger.error(error_msg)
+         return false
+       end
+
+       private
+
+       def record_for_node_number
+         c = PG::Connection.new(primary_connection_hash)
+         c.exec_params(<<-SQL, [node_number]).map_types!(PG::BasicTypeMapForResults.new(c)).first
+           SELECT type, name, active
+           FROM repl_nodes where id = $1
+         SQL
+       end
+
+       def initialize_postgresql_disk
+         log_and_feedback(__method__) do
+           LogicalVolumeManagement.new(:disk => disk,
+                                       :mount_point => PostgresAdmin.mount_point,
+                                       :name => "pg",
+                                       :volume_group_name => PostgresAdmin.volume_group_name,
+                                       :filesystem_type => PostgresAdmin.database_disk_filesystem,
+                                       :logical_volume_path => PostgresAdmin.logical_volume_path).setup
+         end
+       end
+     end # class DatabaseReplicationStandby < DatabaseReplication
+   end # module ApplianceConsole
+ end
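The standby side follows the same pattern (again a sketch, with the invocation details assumed):

    standby = ManageIQ::ApplianceConsole::DatabaseReplicationStandby.new
    standby.activate if standby.ask_questions
    # Ordering in activate matters: PostgreSQL and repmgrd are stopped first,
    # the data directory is prepared when a disk was chosen or a resync was
    # confirmed, then clone_standby_server && start_postgres &&
    # register_standby_server run, and repmgrd is started only when
    # automatic failover was requested.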
data/lib/manageiq/appliance_console/date_time_configuration.rb
@@ -0,0 +1,117 @@
+ module ManageIQ
+   module ApplianceConsole
+     class DateTimeConfiguration
+       DATE_REGEXP = /^(2[0-9]{3})-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01])/
+       DATE_PROMPT = "current date (YYYY-MM-DD)".freeze
+       TIME_REGEXP = /^(0?[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])/
+       TIME_PROMPT = "current time in 24 hour format (HH:MM:SS)".freeze
+
+       attr_accessor :new_date, :new_time, :manual_time_sync
+
+       include ManageIQ::ApplianceConsole::Logging
+
+       def initialize
+         @new_date = nil
+         @new_time = nil
+         @manual_time_sync = false
+       end
+
+       def activate
+         say("Applying time configuration...")
+         establish_auto_sync &&
+           configure_date_time
+       end
+
+       def ask_questions
+         ask_establish_auto_sync
+         ask_for_date &&
+           ask_for_time &&
+           confirm
+       end
+
+       def ask_establish_auto_sync
+         say("Automatic time synchronization must be disabled to manually set date or time\n\n")
+
+         @manual_time_sync = agree(<<-EOL)
+           Yes to disable Automatic time synchronization and prompt for date and time.
+           No to enable Automatic time synchronization. (Y/N):
+
+         EOL
+       end
+
+       def ask_for_date
+         return true unless manual_time_sync
+         @new_date = just_ask(DATE_PROMPT, nil, DATE_REGEXP)
+         true
+       rescue
+         false
+       end
+
+       def ask_for_time
+         return true unless manual_time_sync
+         @new_time = just_ask(TIME_PROMPT, nil, TIME_REGEXP)
+         true
+       rescue
+         false
+       end
+
+       def confirm
+         manual_time_sync ? confirm_manual : confirm_auto
+       end
+
+       def confirm_auto
+         clear_screen
+         say("Date and Time Configuration will be automatic")
+
+         agree("Apply automatic time configuration? (Y/N): ")
+       end
+
+       def confirm_manual
+         clear_screen
+         say(<<-EOL)
+           Date and Time Configuration
+
+           Date: #{new_date}
+           Time: #{new_time}
+
+         EOL
+
+         agree("Apply manual time configuration? (Y/N): ")
+       end
+
+       def establish_auto_sync
+         manual_time_sync ? disable_auto_sync : enable_auto_sync
+       end
+
+       def enable_auto_sync
+         LinuxAdmin::Service.new("chronyd").enable.start
+         LinuxAdmin::Service.new("systemd-timedated").restart
+         true
+       rescue => e
+         say("Failed to enable time synchronization")
+         logger.error("Failed to enable time synchronization: #{e.message}")
+         false
+       end
+
+       def disable_auto_sync
+         LinuxAdmin::Service.new("chronyd").stop.disable
+         LinuxAdmin::Service.new("systemd-timedated").restart
+         true
+       rescue => e
+         say("Failed to disable time synchronization")
+         logger.error("Failed to disable time synchronization: #{e.message}")
+         false
+       end
+
+       def configure_date_time
+         return true unless manual_time_sync
+         LinuxAdmin::TimeDate.system_time = Time.parse("#{new_date} #{new_time}").getlocal
+         true
+       rescue => e
+         say("Failed to apply time configuration")
+         logger.error("Failed to apply time configuration: #{e.message}")
+         false
+       end
+     end # class DateTimeConfiguration
+   end # module ApplianceConsole
+ end
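And a hedged sketch for the date/time flow above (the invocation is assumed, not shown in this diff):

    dt = ManageIQ::ApplianceConsole::DateTimeConfiguration.new
    # Manual path: chronyd is stopped and disabled, then the clock is set
    # via LinuxAdmin::TimeDate.system_time; the automatic path re-enables
    # chronyd instead and leaves the clock alone.
    dt.activate if dt.ask_questions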