manageiq-appliance_console 5.5.0 → 7.0.1
This diff compares the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- checksums.yaml +4 -4
- data/.codeclimate.yml +24 -25
- data/.rspec_ci +2 -0
- data/.rubocop.yml +3 -3
- data/.rubocop_cc.yml +3 -4
- data/.rubocop_local.yml +1 -1
- data/.travis.yml +4 -3
- data/Gemfile +1 -3
- data/README.md +1 -2
- data/Rakefile +20 -1
- data/bin/appliance_console +30 -6
- data/lib/manageiq/appliance_console/certificate_authority.rb +1 -1
- data/lib/manageiq/appliance_console/cli.rb +166 -70
- data/lib/manageiq/appliance_console/database_admin.rb +35 -206
- data/lib/manageiq/appliance_console/database_configuration.rb +10 -2
- data/lib/manageiq/appliance_console/database_replication.rb +1 -1
- data/lib/manageiq/appliance_console/database_replication_standby.rb +1 -1
- data/lib/manageiq/appliance_console/external_auth_options.rb +3 -13
- data/lib/manageiq/appliance_console/internal_database_configuration.rb +4 -12
- data/lib/manageiq/appliance_console/key_configuration.rb +8 -1
- data/lib/manageiq/appliance_console/logfile_configuration.rb +2 -2
- data/lib/manageiq/appliance_console/manageiq_user_mixin.rb +15 -0
- data/lib/manageiq/appliance_console/message_configuration.rb +205 -0
- data/lib/manageiq/appliance_console/message_configuration_client.rb +98 -0
- data/lib/manageiq/appliance_console/message_configuration_server.rb +321 -0
- data/lib/manageiq/appliance_console/oidc_authentication.rb +27 -1
- data/lib/manageiq/appliance_console/postgres_admin.rb +412 -0
- data/lib/manageiq/appliance_console/utilities.rb +61 -2
- data/lib/manageiq/appliance_console/version.rb +1 -1
- data/lib/manageiq-appliance_console.rb +2 -6
- data/locales/appliance/en.yml +0 -16
- data/manageiq-appliance_console.gemspec +4 -3
- metadata +54 -24
- data/lib/manageiq/appliance_console/messaging_configuration.rb +0 -92
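The file-level summary above can be reproduced locally by fetching and unpacking both released gems. The Ruby sketch below is not part of the package; it assumes network access to rubygems.org, and it prints paths without the data/ prefix that the listing above uses for packaged files.

# Hypothetical helper (not from the gem): list added/removed/changed files
# between the two released versions.
require "open-uri"
require "rubygems/package"
require "digest"
require "tmpdir"

NAME     = "manageiq-appliance_console"
VERSIONS = %w[5.5.0 7.0.1] # the two releases being compared

def unpack(version, workdir)
  gem_path = File.join(workdir, "#{NAME}-#{version}.gem")
  File.binwrite(gem_path, URI.open("https://rubygems.org/downloads/#{NAME}-#{version}.gem").read)
  dest = File.join(workdir, version)
  Gem::Package.new(gem_path).extract_files(dest) # unpack the gem's packaged files
  dest
end

Dir.mktmpdir do |workdir|
  old_dir, new_dir = VERSIONS.map { |v| unpack(v, workdir) }
  old_files = Dir.chdir(old_dir) { Dir.glob("**/*").select { |f| File.file?(f) } }
  new_files = Dir.chdir(new_dir) { Dir.glob("**/*").select { |f| File.file?(f) } }

  (old_files | new_files).sort.each do |rel|
    old_path = File.join(old_dir, rel)
    new_path = File.join(new_dir, rel)
    if !File.exist?(old_path)
      puts "added:   #{rel}"
    elsif !File.exist?(new_path)
      puts "removed: #{rel}"
    elsif Digest::SHA256.file(old_path) != Digest::SHA256.file(new_path)
      puts "changed: #{rel}"
    end
  end
end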
data/lib/manageiq/appliance_console/oidc_authentication.rb

@@ -1,3 +1,6 @@
+require "net/http"
+require "uri"
+
 module ManageIQ
   module ApplianceConsole
     class OIDCAuthentication
@@ -62,6 +65,13 @@ module ManageIQ
         :oidc_client_id              => options[:oidc_client_id],
         :oidc_client_secret          => options[:oidc_client_secret],
         :oidc_introspection_endpoint => options[:oidc_introspection_endpoint])
+
+        if options[:oidc_insecure]
+          File.open("#{HTTPD_CONFIG_DIRECTORY}/manageiq-external-auth-openidc.conf", "a") do |f|
+            f.write("\nOIDCSSLValidateServer Off\n")
+            f.write("OIDCOAuthSSLValidateServer Off\n")
+          end
+        end
       end

       def remove_apache_oidc_configfiles
@@ -85,10 +95,26 @@ module ManageIQ
       def derive_introspection_endpoint
         return if options[:oidc_introspection_endpoint].present?

-        options[:oidc_introspection_endpoint] =
+        options[:oidc_introspection_endpoint] = fetch_introspection_endpoint
         raise INTROSPECT_ENDPOINT_ERROR if options[:oidc_introspection_endpoint].blank?
       end

+      def fetch_introspection_endpoint
+        uri  = URI.parse(options[:oidc_url])
+        http = Net::HTTP.new(uri.host, uri.port)
+        http.use_ssl = (uri.scheme == "https")
+        http.verify_mode = OpenSSL::SSL::VERIFY_NONE if options[:oidc_insecure]
+
+        request = Net::HTTP::Get.new(uri.request_uri)
+        request.basic_auth(options[:oidc_client_id], options[:oidc_client_secret])
+        response = http.request(request)
+
+        JSON.parse(response.body)["introspection_endpoint"]
+      rescue => err
+        say("Failed to fetch introspection endpoint - #{err}")
+        nil
+      end
+
       # Appliance Settings

       def configure_auth_settings_oidc
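The new fetch_introspection_endpoint shown above queries the OIDC provider URL with the client credentials and reads the introspection_endpoint field from the JSON response. The standalone sketch below mirrors that pattern outside the appliance console; the helper name, URL, and credentials are illustrative placeholders, not part of the gem.

# Standalone sketch of the same discovery pattern (not the gem's code).
require "net/http"
require "uri"
require "json"
require "openssl"

def introspection_endpoint_for(metadata_url, client_id, client_secret, insecure: false)
  uri  = URI.parse(metadata_url)
  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl     = (uri.scheme == "https")
  http.verify_mode = OpenSSL::SSL::VERIFY_NONE if insecure # what the oidc_insecure option toggles

  request = Net::HTTP::Get.new(uri.request_uri)
  request.basic_auth(client_id, client_secret)
  response = http.request(request)

  JSON.parse(response.body)["introspection_endpoint"]
end

# Example with placeholder values:
# introspection_endpoint_for("https://idp.example.com/realms/miq/.well-known/openid-configuration",
#                            "client-id", "client-secret")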
data/lib/manageiq/appliance_console/postgres_admin.rb (new file)

@@ -0,0 +1,412 @@
+require 'awesome_spawn'
+require 'pathname'
+require 'linux_admin'
+require 'pg'
+
+module ManageIQ
+  module ApplianceConsole
+    class PostgresAdmin
+      def self.data_directory
+        Pathname.new(ENV.fetch("APPLIANCE_PG_DATA"))
+      end
+
+      def self.mount_point
+        Pathname.new(ENV.fetch("APPLIANCE_PG_MOUNT_POINT"))
+      end
+
+      def self.template_directory
+        Pathname.new(ENV.fetch("APPLIANCE_TEMPLATE_DIRECTORY"))
+      end
+
+      def self.service_name
+        ENV.fetch("APPLIANCE_PG_SERVICE")
+      end
+
+      def self.package_name
+        ENV.fetch('APPLIANCE_PG_PACKAGE_NAME')
+      end
+
+      # Unprivileged user to run postgresql
+      def self.user
+        "postgres".freeze
+      end
+
+      def self.group
+        user
+      end
+
+      def self.logical_volume_name
+        "lv_pg".freeze
+      end
+
+      def self.volume_group_name
+        "vg_data".freeze
+      end
+
+      def self.database_disk_filesystem
+        "xfs".freeze
+      end
+
+      def self.with_pg_connection(db_opts = {:user => user, :dbname => user})
+        conn = PG.connect(db_opts)
+        yield conn
+      ensure
+        conn.close if conn
+      end
+
+      def self.initialized?
+        !Dir[data_directory.join("*")].empty?
+      end
+
+      def self.service_running?
+        LinuxAdmin::Service.new(service_name).running?
+      end
+
+      def self.local_server_in_recovery?
+        data_directory.join("recovery.conf").exist?
+      end
+
+      def self.local_server_status
+        if service_running?
+          "running (#{local_server_in_recovery? ? "standby" : "primary"})"
+        elsif initialized?
+          "initialized and stopped"
+        else
+          "not initialized"
+        end
+      end
+
+      def self.logical_volume_path
+        Pathname.new("/dev").join(volume_group_name, logical_volume_name)
+      end
+
+      def self.database_size(opts)
+        result = run_command("psql", opts, :command => "SELECT pg_database_size('#{opts[:dbname]}');")
+        result.match(/^\s+([0-9]+)\n/)[1].to_i
+      end
+
+      def self.prep_data_directory
+        # initdb will fail if the database directory is not empty or not owned by the PostgresAdmin.user
+        FileUtils.mkdir(PostgresAdmin.data_directory) unless Dir.exist?(PostgresAdmin.data_directory)
+        FileUtils.chown_R(PostgresAdmin.user, PostgresAdmin.group, PostgresAdmin.data_directory)
+        FileUtils.rm_rf(PostgresAdmin.data_directory.children.map(&:to_s))
+      end
+
+      PG_DUMP_MAGIC = "PGDMP".force_encoding(Encoding::BINARY).freeze
+      def self.pg_dump_file?(file)
+        File.open(file, "rb") { |f| f.readpartial(5) } == PG_DUMP_MAGIC
+      end
+
+      BASE_BACKUP_MAGIC = "\037\213".force_encoding(Encoding::BINARY).freeze # just the first 2 bits of gzip magic
+      def self.base_backup_file?(file)
+        File.open(file, "rb") { |f| f.readpartial(2) } == BASE_BACKUP_MAGIC
+      end
+
+      def self.backup(opts)
+        backup_pg_compress(opts)
+      end
+
+      def self.restore(opts)
+        file        = opts[:local_file]
+        backup_type = opts.delete(:backup_type) || validate_backup_file_type(file)
+
+        prepare_restore(backup_type, opts[:dbname])
+
+        case backup_type
+        when :pgdump     then restore_pg_dump(opts)
+        when :basebackup then restore_pg_basebackup(file)
+        else
+          raise "#{file} is not a database backup"
+        end
+      end
+
+      def self.restore_pg_basebackup(file)
+        pg_service = LinuxAdmin::Service.new(service_name)
+
+        pg_service.stop
+        prep_data_directory
+
+        require 'rubygems/package'
+
+        # Using a Gem::Package instance for the #extract_tar_gz method, so we don't
+        # have to re-write all of that logic. Mostly making use of
+        # `Gem::Package::TarReader` + `Zlib::GzipReader` that is already part of
+        # rubygems/stdlib and integrated there.
+        unpacker = Gem::Package.new("obviously_not_a_gem")
+        File.open(file, IO::RDONLY | IO::NONBLOCK) do |backup_file|
+          unpacker.extract_tar_gz(backup_file, data_directory.to_s)
+        end
+
+        FileUtils.chown_R(PostgresAdmin.user, PostgresAdmin.group, PostgresAdmin.data_directory)
+
+        pg_service.start
+        file
+      end
+
+      def self.backup_pg_dump(opts)
+        opts   = opts.dup
+        dbname = opts.delete(:dbname)
+
+        args = combine_command_args(opts, :format => "c", :file => opts[:local_file], nil => dbname)
+        args = handle_multi_value_pg_dump_args!(opts, args)
+
+        FileUtils.mkdir_p(File.dirname(opts.fetch(:local_file, "")))
+        run_command_with_logging("pg_dump", opts, args)
+        opts[:local_file]
+      end
+
+      def self.backup_pg_compress(opts)
+        opts = opts.dup
+
+        # discard dbname as pg_basebackup does not connect to a specific database
+        opts.delete(:dbname)
+
+        path = Pathname.new(opts.delete(:local_file))
+        FileUtils.mkdir_p(path.dirname)
+
+        # Build commandline from AwesomeSpawn
+        args = {:z => nil, :format => "t", :wal_method => "fetch", :pgdata => "-"}
+        cmd  = AwesomeSpawn.build_command_line("pg_basebackup", combine_command_args(opts, args))
+        logger.info("MIQ(#{name}.#{__method__}) Running command... #{cmd}")
+
+        # Run command in a separate thread
+        read, write    = IO.pipe
+        error_path     = Dir::Tmpname.create("") { |tmpname| tmpname }
+        process_thread = Process.detach(Kernel.spawn(pg_env(opts), cmd, :out => write, :err => error_path))
+        stream_reader  = Thread.new { IO.copy_stream(read, path) } # Copy output to path
+        write.close
+
+        # Wait for them to finish
+        process_status = process_thread.value
+        stream_reader.join
+        read.close
+
+        handle_error(cmd, process_status.exitstatus, error_path)
+        path.to_s
+      end
+
+      def self.recreate_db(opts)
+        dbname = opts[:dbname]
+        opts = opts.merge(:dbname => 'postgres')
+        run_command("psql", opts, :command => "DROP DATABASE IF EXISTS #{dbname}")
+        run_command("psql", opts, :command => "CREATE DATABASE #{dbname} WITH OWNER = #{opts[:username] || 'root'} ENCODING = 'UTF8'")
+      end
+
+      def self.restore_pg_dump(opts)
+        recreate_db(opts)
+        args = { :verbose => nil, :exit_on_error => nil }
+
+        if File.pipe?(opts[:local_file])
+          cmd_args   = combine_command_args(opts, args)
+          cmd        = AwesomeSpawn.build_command_line("pg_restore", cmd_args)
+          error_path = Dir::Tmpname.create("") { |tmpname| tmpname }
+          spawn_args = { :err => error_path, :in => [opts[:local_file].to_s, "rb"] }
+
+          logger.info("MIQ(#{name}.#{__method__}) Running command... #{cmd}")
+          process_thread = Process.detach(Kernel.spawn(pg_env(opts), cmd, spawn_args))
+          process_status = process_thread.value
+
+          handle_error(cmd, process_status.exitstatus, error_path)
+        else
+          args[nil] = opts[:local_file]
+          run_command("pg_restore", opts, args)
+        end
+        opts[:local_file]
+      end
+
+      GC_DEFAULTS = {
+        :analyze  => false,
+        :full     => false,
+        :verbose  => false,
+        :table    => nil,
+        :dbname   => nil,
+        :username => nil,
+        :reindex  => false
+      }
+
+      GC_AGGRESSIVE_DEFAULTS = {
+        :analyze  => true,
+        :full     => true,
+        :verbose  => false,
+        :table    => nil,
+        :dbname   => nil,
+        :username => nil,
+        :reindex  => true
+      }
+
+      def self.gc(options = {})
+        options = (options[:aggressive] ? GC_AGGRESSIVE_DEFAULTS : GC_DEFAULTS).merge(options)
+
+        result = vacuum(options)
+        logger.info("MIQ(#{name}.#{__method__}) Output... #{result}") if result.to_s.length > 0
+
+        if options[:reindex]
+          result = reindex(options)
+          logger.info("MIQ(#{name}.#{__method__}) Output... #{result}") if result.to_s.length > 0
+        end
+      end
+
+      def self.vacuum(opts)
+        # TODO: Add a real exception here
+        raise "Vacuum requires database" unless opts[:dbname]
+
+        args = {}
+        args[:analyze] = nil if opts[:analyze]
+        args[:full]    = nil if opts[:full]
+        args[:verbose] = nil if opts[:verbose]
+        args[:table]   = opts[:table] if opts[:table]
+        run_command("vacuumdb", opts, args)
+      end
+
+      def self.reindex(opts)
+        args = {}
+        args[:table] = opts[:table] if opts[:table]
+        run_command("reindexdb", opts, args)
+      end
+
+      def self.run_command(cmd_str, opts, args)
+        run_command_with_logging(cmd_str, opts, combine_command_args(opts, args))
+      end
+
+      def self.run_command_with_logging(cmd_str, opts, params = {})
+        logger.info("MIQ(#{name}.#{__method__}) Running command... #{AwesomeSpawn.build_command_line(cmd_str, params)}")
+        AwesomeSpawn.run!(cmd_str, :params => params, :env => pg_env(opts)).output
+      end
+
+      class << self
+        # Temporary alias due to manageiq core stubbing this method
+        alias runcmd_with_logging run_command_with_logging
+      end
+
+      private_class_method def self.combine_command_args(opts, args)
+        default_args            = {:no_password => nil}
+        default_args[:dbname]   = opts[:dbname]   if opts[:dbname]
+        default_args[:username] = opts[:username] if opts[:username]
+        default_args[:host]     = opts[:hostname] if opts[:hostname]
+        default_args[:port]     = opts[:port]     if opts[:port]
+        default_args.merge(args)
+      end
+
+      private_class_method def self.logger
+        ManageIQ::ApplianceConsole.logger
+      end
+
+      private_class_method def self.pg_env(opts)
+        {
+          "PGUSER"     => opts[:username],
+          "PGPASSWORD" => opts[:password]
+        }.delete_blanks
+      end
+      # rubocop:disable Style/SymbolArray
+      PG_DUMP_MULTI_VALUE_ARGS = [
+        :t, :table, :T, :exclude_table, :"exclude-table", :exclude_table_data, :"exclude-table-data",
+        :n, :schema, :N, :exclude_schema, :"exclude-schema"
+      ].freeze
+      # rubocop:enable Style/SymbolArray
+      #
+      # NOTE: Potentially mutates opts hash (args becomes new array and not
+      # mutated by this method)
+      private_class_method def self.handle_multi_value_pg_dump_args!(opts, args)
+        if opts.keys.any? { |key| PG_DUMP_MULTI_VALUE_ARGS.include?(key) }
+          args = args.to_a
+          PG_DUMP_MULTI_VALUE_ARGS.each do |table_key|
+            next unless opts.key?(table_key)
+            table_val = opts.delete(table_key)
+            args += Array.wrap(table_val).map! { |v| [table_key, v] }
+          end
+        end
+        args
+      end
+
+      private_class_method def self.handle_error(cmd, exit_status, error_path)
+        if exit_status != 0
+          result  = AwesomeSpawn::CommandResult.new(cmd, "", File.read(error_path), exit_status)
+          message = AwesomeSpawn::CommandResultError.default_message(cmd, exit_status)
+          logger.error("AwesomeSpawn: #{message}")
+          logger.error("AwesomeSpawn: #{result.error}")
+          raise AwesomeSpawn::CommandResultError.new(message, result)
+        end
+      ensure
+        File.delete(error_path) if File.exist?(error_path)
+      end
+
+      private_class_method def self.prepare_restore(backup_type, dbname)
+        if application_connections?
+          message = "Database restore failed. Shut down all evmserverd processes before attempting a database restore"
+          ManageIQ::ApplianceConsole.logger.error(message)
+          raise message
+        end
+
+        disable_replication(dbname)
+
+        conn_count = connection_count(backup_type, dbname)
+        if conn_count > 1
+          message = "Database restore failed. #{conn_count - 1} connections remain to the database."
+          ManageIQ::ApplianceConsole.logger.error(message)
+          raise message
+        end
+      end
+
+      private_class_method def self.application_connections?
+        result = [{"count" => 0}]
+
+        with_pg_connection do |conn|
+          result = conn.exec("SELECT COUNT(pid) FROM pg_stat_activity WHERE application_name LIKE '%MIQ%'")
+        end
+
+        result[0]["count"].to_i > 0
+      end
+
+      private_class_method def self.disable_replication(dbname)
+        require 'pg/logical_replication'
+
+        with_pg_connection do |conn|
+          pglogical = PG::LogicalReplication::Client.new(conn)
+
+          if pglogical.subscriber?
+            pglogical.subcriptions(dbname).each do |subscriber|
+              sub_id = subscriber["subscription_name"]
+              begin
+                pglogical.drop_subscription(sub_id, true)
+              rescue PG::InternalError => e
+                raise unless e.message.include?("could not connect to publisher")
+                raise unless e.message.match?(/replication slot .* does not exist/)
+
+                pglogical.disable_subscription(sub_id).check
+                pglogical.alter_subscription_options(sub_id, "slot_name" => "NONE")
+                pglogical.drop_subscription(sub_id, true)
+              end
+            end
+          elsif pglogical.publishes?('miq')
+            pglogical.drop_publication('miq')
+          end
+        end
+      end
+
+      private_class_method def self.connection_count(backup_type, dbname)
+        result = nil
+
+        with_pg_connection do |conn|
+          query = "SELECT COUNT(pid) FROM pg_stat_activity"
+          query << " WHERE backend_type = 'client backend'" if backup_type == :basebackup
+          query << " WHERE datname = '#{dbname}'" if backup_type == :pgdump
+          result = conn.exec(query)
+        end
+
+        result[0]["count"].to_i
+      end
+
+      private_class_method def self.validate_backup_file_type(file)
+        if base_backup_file?(file)
+          :basebackup
+        elsif pg_dump_file?(file)
+          :pgdump
+        else
+          message = "#{filename} is not in a recognized database backup format"
+          ManageIQ::ApplianceConsole.error(message)
+          raise message
+        end
+      end
+    end
+  end
+end
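A rough usage sketch of the PostgresAdmin class added above. The class and method names come from the diff; the database name, credentials, and file paths are placeholders, and the APPLIANCE_PG_* / APPLIANCE_TEMPLATE_DIRECTORY environment variables must already be set (as on a configured appliance) for these calls to work.

# Hypothetical usage sketch (placeholder values, not from the gem's docs).
require "manageiq-appliance_console"

pg_admin = ManageIQ::ApplianceConsole::PostgresAdmin

# Locations and the service name come from APPLIANCE_PG_* environment variables.
puts pg_admin.local_server_status # e.g. "running (primary)"

# Full-cluster backup: gzipped pg_basebackup tar written to :local_file.
pg_admin.backup(:local_file => "/tmp/db_backup.tar.gz",
                :username   => "root",
                :password   => "password")

# Restore detects the backup type (basebackup vs. pg_dump) from the file's
# magic bytes unless :backup_type is passed, and refuses to run while
# application connections are still open.
pg_admin.restore(:local_file => "/tmp/db_backup.tar.gz",
                 :dbname     => "vmdb_production",
                 :username   => "root",
                 :password   => "password")

# Routine maintenance: vacuumdb, plus reindexdb when :aggressive is set.
pg_admin.gc(:dbname => "vmdb_production", :aggressive => true)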