foreman_maintain 1.12.3 → 1.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. checksums.yaml +4 -4
  2. data/definitions/checks/candlepin/db_index.rb +25 -0
  3. data/definitions/checks/foreman/db_index.rb +25 -0
  4. data/definitions/checks/pulpcore/db_index.rb +25 -0
  5. data/definitions/checks/restore/validate_backup.rb +5 -10
  6. data/definitions/checks/system_registration.rb +5 -3
  7. data/definitions/features/iop.rb +44 -0
  8. data/definitions/features/service.rb +4 -58
  9. data/definitions/features/timer.rb +96 -0
  10. data/definitions/procedures/iop/image_prune.rb +19 -0
  11. data/definitions/procedures/iop/update.rb +25 -0
  12. data/definitions/procedures/knowledge_base_article.rb +1 -0
  13. data/definitions/procedures/restore/extract_files.rb +0 -18
  14. data/definitions/procedures/timer/start.rb +20 -0
  15. data/definitions/procedures/timer/stop.rb +20 -0
  16. data/definitions/reports/bookmarks.rb +51 -0
  17. data/definitions/reports/disconnected_environment.rb +23 -0
  18. data/definitions/reports/grouping.rb +58 -0
  19. data/definitions/reports/lab_features.rb +19 -0
  20. data/definitions/reports/personal_access_token.rb +26 -0
  21. data/definitions/reports/selinux.rb +27 -0
  22. data/definitions/reports/webhooks.rb +37 -0
  23. data/definitions/scenarios/backup.rb +3 -3
  24. data/definitions/scenarios/foreman_upgrade.rb +2 -0
  25. data/definitions/scenarios/maintenance_mode.rb +2 -0
  26. data/definitions/scenarios/restore.rb +4 -6
  27. data/definitions/scenarios/satellite_upgrade.rb +4 -0
  28. data/definitions/scenarios/update.rb +5 -1
  29. data/lib/foreman_maintain/cli/base.rb +3 -3
  30. data/lib/foreman_maintain/concerns/base_database.rb +34 -0
  31. data/lib/foreman_maintain/concerns/systemd.rb +50 -0
  32. data/lib/foreman_maintain/feature.rb +9 -0
  33. data/lib/foreman_maintain/utils/backup.rb +10 -39
  34. data/lib/foreman_maintain/version.rb +1 -1
  35. metadata +16 -3
  36. data/definitions/checks/backup/incremental_parent_type.rb +0 -33
  37. data/definitions/procedures/restore/reindex_databases.rb +0 -28
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: b5e95ad904a84a122e5c9d82120299674f55998997acab8c8d29af3848a78d42
4
- data.tar.gz: 66e3dbc6a9f9862a23d8c9ec0dd82f5fd3e3b856fbf5d3844eadbf448b762284
3
+ metadata.gz: e99fd5f893ec24f16eaea4c20b8260cd2878bf63149ddf830c29e15f0324cda0
4
+ data.tar.gz: 5f1e7e90783d76b45231d393e204f2588d5b4daf46723792186aaa14d9c525d9
5
5
  SHA512:
6
- metadata.gz: ab607c39722607f8e80a56ffe3406d34bed7c72335bcba543b5fe76c339bc5e12216a588d5dacbc665f943f79b07cd00c293496425c925932d2a6dd9130efc91
7
- data.tar.gz: '0290ebaa45d7c32847a1d0f92b23dd12f98e0b951c486c10f2cc043d8f53c488085a3f1095e585b70e2db60b9dbb12ad5ee60bcd9eec1741cb269e5ce2b08830'
6
+ metadata.gz: d5c950a2a4b110d1425f071a5532010f4b704267e3bde1d082ea1bed209ccf6f82bf5d2682ebbe444679edf85eb03e4b216ed7ba298dd3a6fd73904390ddce77
7
+ data.tar.gz: cccf743480372ef3b39e54ab995a9be077006dfe991a799021378371b3f58db1a04cac263f70e21bcd75b87e30892d55d390af17bb59cf0c57aee63030ae0543
@@ -0,0 +1,25 @@
1
+ module Checks
2
+ module Candlepin
3
+ class DBIndex < ForemanMaintain::Check
4
+ metadata do
5
+ description 'Make sure Candlepin DB indexes are OK'
6
+ label :candlepin_db_index
7
+ tags :db_index
8
+ for_feature :candlepin_database
9
+ confine do
10
+ feature(:candlepin_database)&.local?
11
+ end
12
+ end
13
+
14
+ def run
15
+ status, output = feature(:candlepin_database).amcheck
16
+
17
+ if !status.nil?
18
+ assert(status == 0, "Candlepin DB indexes have issues:\n#{output}")
19
+ else
20
+ skip 'amcheck is not available in this setup'
21
+ end
22
+ end
23
+ end
24
+ end
25
+ end
@@ -0,0 +1,25 @@
1
+ module Checks
2
+ module Foreman
3
+ class DBIndex < ForemanMaintain::Check
4
+ metadata do
5
+ description 'Make sure Foreman DB indexes are OK'
6
+ label :foreman_db_index
7
+ tags :db_index
8
+ for_feature :foreman_database
9
+ confine do
10
+ feature(:foreman_database)&.local?
11
+ end
12
+ end
13
+
14
+ def run
15
+ status, output = feature(:foreman_database).amcheck
16
+
17
+ if !status.nil?
18
+ assert(status == 0, "Foreman DB indexes have issues:\n#{output}")
19
+ else
20
+ skip 'amcheck is not available in this setup'
21
+ end
22
+ end
23
+ end
24
+ end
25
+ end
@@ -0,0 +1,25 @@
1
+ module Checks
2
+ module Pulpcore
3
+ class DBIndex < ForemanMaintain::Check
4
+ metadata do
5
+ description 'Make sure Pulpcore DB indexes are OK'
6
+ label :pulpcore_db_index
7
+ tags :db_index
8
+ for_feature :pulpcore_database
9
+ confine do
10
+ feature(:pulpcore_database)&.local?
11
+ end
12
+ end
13
+
14
+ def run
15
+ status, output = feature(:pulpcore_database).amcheck
16
+
17
+ if !status.nil?
18
+ assert(status == 0, "Pulpcore DB indexes have issues:\n#{output}")
19
+ else
20
+ skip 'amcheck is not available in this setup'
21
+ end
22
+ end
23
+ end
24
+ end
25
+ end
@@ -41,31 +41,26 @@ module Checks::Restore
41
41
 
42
42
  def required_katello_files(backup)
43
43
  backup_files_message(
44
- backup.katello_online_files.join(', '),
45
- backup.katello_offline_files.join(', ')
44
+ backup.katello_online_files.join(', ')
46
45
  )
47
46
  end
48
47
 
49
48
  def required_fpc_files(backup)
50
49
  backup_files_message(
51
- backup.fpc_online_files.join(', '),
52
- backup.fpc_offline_files.join(', ')
50
+ backup.fpc_online_files.join(', ')
53
51
  )
54
52
  end
55
53
 
56
54
  def required_foreman_files(backup)
57
55
  backup_files_message(
58
- backup.foreman_online_files.join(', '),
59
- backup.foreman_offline_files.join(', ')
56
+ backup.foreman_online_files.join(', ')
60
57
  )
61
58
  end
62
59
 
63
- def backup_files_message(online_files, offline_files)
60
+ def backup_files_message(online_files)
64
61
  message = ''
65
- message += 'An online or remote database backup directory contains: '
62
+ message += 'A backup directory contains: '
66
63
  message += "#{online_files}\n"
67
- message += 'An offline backup directory contains: '
68
- message += "#{offline_files}\n"
69
64
  message
70
65
  end
71
66
  end
@@ -10,9 +10,11 @@ class Checks::SystemRegistration < ForemanMaintain::Check
10
10
  end
11
11
 
12
12
  def run
13
- if rhsm_hostname_eql_hostname?
14
- warn! 'System is self registered'
15
- end
13
+ assert(!rhsm_hostname_eql_hostname?, 'System is self registered',
14
+ {
15
+ :warn => true,
16
+ :next_steps => [Procedures::KnowledgeBaseArticle.new(:doc => 'self_registered')],
17
+ })
16
18
  end
17
19
 
18
20
  def rhsm_hostname
@@ -8,6 +8,22 @@ class Features::Iop < ForemanMaintain::Feature
8
8
  end
9
9
  end
10
10
 
11
+ CONTAINER_NAMES =
12
+ [
13
+ 'insights-engine',
14
+ 'gateway',
15
+ 'host-inventory',
16
+ 'ingress',
17
+ 'puptoo',
18
+ 'yuptoo',
19
+ 'advisor-backend',
20
+ 'advisor-frontend',
21
+ 'remediations',
22
+ 'vmaas',
23
+ 'vulnerability-engine',
24
+ 'vulnerability-frontend',
25
+ ].freeze
26
+
11
27
  def config_files
12
28
  [
13
29
  '/var/lib/containers/storage/volumes/iop-core-kafka-data',
@@ -42,4 +58,32 @@ class Features::Iop < ForemanMaintain::Feature
42
58
  ]
43
59
  end
44
60
  # rubocop:enable Metrics/MethodLength
61
+
62
+ def timers
63
+ [
64
+ system_service('iop-service-vuln-vmaas-sync.timer', 20),
65
+ ]
66
+ end
67
+
68
+ def container_base
69
+ if feature(:instance).downstream
70
+ 'registry.redhat.io/satellite'
71
+ else
72
+ 'quay.io/iop'
73
+ end
74
+ end
75
+
76
+ def container_names
77
+ if feature(:instance).downstream
78
+ CONTAINER_NAMES.map { |container_name| "#{container_name}-rhel9" }
79
+ else
80
+ CONTAINER_NAMES
81
+ end
82
+ end
83
+
84
+ def container_images(container_version)
85
+ container_names.map do |container_name|
86
+ "#{container_base}/#{container_name}:#{container_version}"
87
+ end
88
+ end
45
89
  end
@@ -1,4 +1,7 @@
1
+ require 'foreman_maintain/concerns/systemd'
2
+
1
3
  class Features::Service < ForemanMaintain::Feature
4
+ include ForemanMaintain::Concerns::Systemd
2
5
  metadata do
3
6
  label :service
4
7
  end
@@ -13,10 +16,7 @@ class Features::Service < ForemanMaintain::Feature
13
16
 
14
17
  def existing_services
15
18
  ForemanMaintain.available_features.flat_map(&:services).
16
- sort.
17
- inject([]) do |pool, service| # uniq(&:to_s) for ruby 1.8.7
18
- (pool.last.nil? || !pool.last.matches?(service)) ? pool << service : pool
19
- end.
19
+ sort.uniq(&:to_s).
20
20
  select(&:exist?)
21
21
  end
22
22
 
@@ -30,14 +30,6 @@ class Features::Service < ForemanMaintain::Feature
30
30
  Hash[services.sort_by { |k, _| k.to_i }.reverse]
31
31
  end
32
32
 
33
- def action_noun(action)
34
- action_word_modified(action) + 'ing'
35
- end
36
-
37
- def action_past_tense(action)
38
- action_word_modified(action) + 'ed'
39
- end
40
-
41
33
  def filter_disabled_services!(action, service_list)
42
34
  if %w[start stop restart status].include?(action)
43
35
  service_list.select! { |service| !service.respond_to?(:enabled?) || service.enabled? }
@@ -45,13 +37,6 @@ class Features::Service < ForemanMaintain::Feature
45
37
  service_list
46
38
  end
47
39
 
48
- def unit_file_available?(name)
49
- cmd = "systemctl --no-legend --no-pager list-unit-files --type=service #{name} |\
50
- grep --word-regexp --quiet #{name}"
51
- exit_status, = execute_with_status(cmd)
52
- exit_status == 0
53
- end
54
-
55
40
  private
56
41
 
57
42
  def use_system_service(action, options, spinner)
@@ -96,28 +81,6 @@ class Features::Service < ForemanMaintain::Feature
96
81
  services_and_statuses.map! { |service, status| [service, status.value] }
97
82
  end
98
83
 
99
- def format_status(output, exit_code, options)
100
- status = ''
101
- if !options[:failing] || exit_code > 0
102
- if options[:brief]
103
- status += format_brief_status(exit_code)
104
- elsif !(output.nil? || output.empty?)
105
- status += "\n" + output
106
- end
107
- end
108
- status
109
- end
110
-
111
- def format_brief_status(exit_code)
112
- result = (exit_code == 0) ? reporter.status_label(:success) : reporter.status_label(:fail)
113
- padding = reporter.max_length - reporter.last_line.to_s.length - 30
114
- "#{' ' * padding} #{result}"
115
- end
116
-
117
- def allowed_action?(action)
118
- %w[start stop restart status enable disable].include?(action)
119
- end
120
-
121
84
  def extend_service_list_with_sockets(service_list, options)
122
85
  return service_list unless options[:include_sockets]
123
86
 
@@ -162,21 +125,4 @@ class Features::Service < ForemanMaintain::Feature
162
125
  service_list.concat(unregistered_service_list)
163
126
  service_list
164
127
  end
165
-
166
- def action_word_modified(action)
167
- case action
168
- when 'status'
169
- 'display'
170
- when 'enable', 'disable'
171
- action.chomp('e')
172
- when 'stop'
173
- action + 'p'
174
- else
175
- action
176
- end
177
- end
178
-
179
- def exclude_services_only(options)
180
- existing_services - filtered_services(options)
181
- end
182
128
  end
@@ -0,0 +1,96 @@
1
+ require 'foreman_maintain/concerns/systemd'
2
+
3
+ class Features::Timer < ForemanMaintain::Feature
4
+ include ForemanMaintain::Concerns::Systemd
5
+ metadata do
6
+ label :timer
7
+ end
8
+
9
+ def handle_timers(spinner, action, options = {})
10
+ # options is used to handle "exclude" and "only" i.e.
11
+ # { :only => ["httpd"] }
12
+ # { :exclude => ["pulp-workers", "tomcat"] }
13
+ use_system_timer(action, options, spinner)
14
+ end
15
+
16
+ def existing_timers
17
+ ForemanMaintain.available_features.flat_map(&:timers).
18
+ sort.uniq(&:to_s).
19
+ select(&:exist?)
20
+ end
21
+
22
+ def filtered_timers(options, action = '')
23
+ timers = filter_timers(existing_timers, options, action)
24
+
25
+ raise 'No timers found matching your parameters' unless timers.any?
26
+ return timers unless options[:reverse]
27
+
28
+ Hash[timers.sort_by { |k, _| k.to_i }.reverse]
29
+ end
30
+
31
+ def filter_disabled_timers!(action, timer_list)
32
+ if %w[start stop restart status].include?(action)
33
+ timer_list.select! { |timer| !timer.respond_to?(:enabled?) || timer.enabled? }
34
+ end
35
+ timer_list
36
+ end
37
+
38
+ private
39
+
40
+ def use_system_timer(action, options, spinner)
41
+ options[:reverse] = action == 'stop'
42
+ raise 'Unsupported action detected' unless allowed_action?(action)
43
+
44
+ status, failed_timers = run_action_on_timers(action, options, spinner)
45
+
46
+ spinner.update("All timers #{action_past_tense(action)}")
47
+ if action == 'status'
48
+ raise "Some timers are not running (#{failed_timers.join(', ')})" if status > 0
49
+
50
+ spinner.update('All timers are running')
51
+ end
52
+ end
53
+
54
+ def run_action_on_timers(action, options, spinner)
55
+ status = 0
56
+ failed_timers = []
57
+ filtered_timers(options, action).each_value do |group|
58
+ fork_threads_for_timers(action, group, spinner).each do |timer, status_and_output|
59
+ spinner.update("#{action_noun(action)} #{timer}") if action == 'status'
60
+ item_status, output = status_and_output
61
+ formatted = format_status(output, item_status, options)
62
+ puts formatted unless formatted.empty?
63
+
64
+ if item_status > 0
65
+ status = item_status
66
+ failed_timers << timer
67
+ end
68
+ end
69
+ end
70
+ [status, failed_timers]
71
+ end
72
+
73
+ def fork_threads_for_timers(action, timers, spinner)
74
+ timers_and_statuses = []
75
+ timers.each do |timer|
76
+ spinner.update("#{action_noun(action)} #{timer}") if action != 'status'
77
+ timers_and_statuses << [timer, Thread.new { timer.send(action.to_sym) }]
78
+ end
79
+ timers_and_statuses.map! { |timer, status| [timer, status.value] }
80
+ end
81
+
82
+ def filter_timers(timer_list, options, action)
83
+ if options[:only]&.any?
84
+ timer_list = timer_list.select do |timer|
85
+ options[:only].any? { |opt| timer.matches?(opt) }
86
+ end
87
+ end
88
+
89
+ if options[:exclude]&.any?
90
+ timer_list = timer_list.reject { |timer| options[:exclude].include?(timer.name) }
91
+ end
92
+
93
+ timer_list = filter_disabled_timers!(action, timer_list)
94
+ timer_list.group_by(&:priority).to_h
95
+ end
96
+ end
@@ -0,0 +1,19 @@
1
+ module Procedures::Iop
2
+ class ImagePrune < ForemanMaintain::Procedure
3
+ metadata do
4
+ description 'Prune unused IoP container images'
5
+
6
+ confine do
7
+ feature(:iop)
8
+ end
9
+ end
10
+
11
+ def run
12
+ prune_images
13
+ end
14
+
15
+ def prune_images
16
+ execute!("podman image prune --force")
17
+ end
18
+ end
19
+ end
@@ -0,0 +1,25 @@
1
+ module Procedures::Iop
2
+ class Update < ForemanMaintain::Procedure
3
+ metadata do
4
+ description 'Update IoP containers'
5
+
6
+ confine do
7
+ feature(:iop) && (feature(:satellite)&.connected? || !feature(:satellite))
8
+ end
9
+
10
+ param :version,
11
+ 'Version of the containers to pull',
12
+ :required => true
13
+ end
14
+
15
+ def run
16
+ pull_images
17
+ end
18
+
19
+ def pull_images
20
+ feature(:iop).container_images(@version).each do |container_image|
21
+ execute!("podman pull #{container_image}")
22
+ end
23
+ end
24
+ end
25
+ end
@@ -26,6 +26,7 @@ class Procedures::KnowledgeBaseArticle < ForemanMaintain::Procedure
26
26
  'fix_cpdb_validate_failure' => 'https://access.redhat.com/solutions/3362821',
27
27
  'fix_db_migrate_failure_on_duplicate_roles' => 'https://access.redhat.com/solutions/3998941',
28
28
  'many_fact_values' => 'https://access.redhat.com/solutions/4163891',
29
+ 'self_registered' => 'https://access.redhat.com/solutions/3225941',
29
30
  }
30
31
  end
31
32
  end
@@ -15,10 +15,6 @@ module Procedures::Restore
15
15
  spinner.update('Extracting pulp data')
16
16
  extract_pulp_data(backup)
17
17
  end
18
- if backup.file_map[:pgsql_data][:present]
19
- spinner.update('Extracting pgsql data')
20
- extract_pgsql_data(backup)
21
- end
22
18
  end
23
19
  end
24
20
 
@@ -51,19 +47,5 @@ module Procedures::Restore
51
47
  def any_database
52
48
  feature(:foreman_database) || feature(:candlepin_database) || feature(:pulpcore_database)
53
49
  end
54
-
55
- def extract_pgsql_data(backup)
56
- pgsql_data_tar = base_tar.merge(
57
- :archive => backup.file_map[:pgsql_data][:path],
58
- :gzip => true
59
- )
60
- feature(:tar).run(pgsql_data_tar)
61
- del_data_dir_param if el?
62
- end
63
-
64
- def del_data_dir_param
65
- # workaround for https://tickets.puppetlabs.com/browse/MODULES-11160
66
- execute("sed -i '/data_directory/d' #{any_database.postgresql_conf}")
67
- end
68
50
  end
69
51
  end
@@ -0,0 +1,20 @@
1
+ module Procedures::Timer
2
+ class Start < ForemanMaintain::Procedure
3
+ metadata do
4
+ description 'Start systemd timers'
5
+
6
+ for_feature :timer
7
+ confine do
8
+ feature(:timer)&.existing_timers&.any?
9
+ end
10
+
11
+ tags :post_migrations
12
+ end
13
+
14
+ def run
15
+ with_spinner('Starting systemd timers') do |spinner|
16
+ feature(:timer).handle_timers(spinner, 'start')
17
+ end
18
+ end
19
+ end
20
+ end
@@ -0,0 +1,20 @@
1
+ module Procedures::Timer
2
+ class Stop < ForemanMaintain::Procedure
3
+ metadata do
4
+ description 'Stop systemd timers'
5
+
6
+ for_feature :timer
7
+ confine do
8
+ feature(:timer)&.existing_timers&.any?
9
+ end
10
+
11
+ tags :pre_migrations
12
+ end
13
+
14
+ def run
15
+ with_spinner('Stopping systemd timers') do |spinner|
16
+ feature(:timer).handle_timers(spinner, 'stop')
17
+ end
18
+ end
19
+ end
20
+ end
@@ -0,0 +1,51 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Reports
4
+ class Bookmarks < ForemanMaintain::Report
5
+ metadata do
6
+ description 'Report about bookmark usage'
7
+ end
8
+
9
+ def run
10
+ public_count = bookmarks_custom_public_count
11
+ private_count = bookmarks_custom_private_count
12
+
13
+ data_field('bookmarks_custom_public_count') { public_count }
14
+ data_field('bookmarks_custom_private_count') { private_count }
15
+ data_field('bookmarks_custom_count') { public_count + private_count }
16
+ end
17
+
18
+ private
19
+
20
+ def bookmarks_custom_public_count
21
+ # Count public bookmarks that are NOT owned by internal users
22
+ bookmarks_not_owned_by_internal_users(public: true)
23
+ end
24
+
25
+ def bookmarks_custom_private_count
26
+ # Count private bookmarks that are NOT owned by internal users
27
+ bookmarks_not_owned_by_internal_users(public: false)
28
+ end
29
+
30
+ def bookmarks_not_owned_by_internal_users(public:)
31
+ # Helper method to count bookmarks not owned by internal users with optional public filter
32
+ public_condition = public ? 'AND b.public = true' : 'AND b.public = false'
33
+
34
+ sql_count(
35
+ <<~SQL
36
+ bookmarks b
37
+ WHERE (
38
+ b.owner_type = 'User'
39
+ #{public_condition}
40
+ AND b.owner_id NOT IN (
41
+ SELECT u.id
42
+ FROM users u
43
+ INNER JOIN auth_sources a ON u.auth_source_id = a.id
44
+ WHERE a.type = 'AuthSourceHidden'
45
+ )
46
+ )
47
+ SQL
48
+ )
49
+ end
50
+ end
51
+ end
@@ -0,0 +1,23 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Reports
4
+ class DisconnectedEnvironment < ForemanMaintain::Report
5
+ metadata do
6
+ description 'Checks if the instance is in a disconnected environment'
7
+ end
8
+
9
+ def run
10
+ data_field('disconnected_environment') do
11
+ subscription_connection_setting = sql_setting('subscription_connection_enabled')
12
+
13
+ # If setting doesn't exist, assume connected (not disconnected)
14
+ if subscription_connection_setting.nil?
15
+ false
16
+ else
17
+ # disconnected when subscription_connection_enabled is false
18
+ YAML.safe_load(subscription_connection_setting) == false
19
+ end
20
+ end
21
+ end
22
+ end
23
+ end
@@ -4,6 +4,7 @@ module Reports
4
4
  description 'Check how resources are grouped'
5
5
  end
6
6
 
7
+ # rubocop:disable Metrics/AbcSize, Metrics/MethodLength
7
8
  def run
8
9
  self.data = {}
9
10
  data_field('host_collections_count') { sql_count('katello_host_collections') }
@@ -26,6 +27,63 @@ module Reports
26
27
  if table_exists('config_groups')
27
28
  data_field('config_group_count') { sql_count('config_groups') }
28
29
  end
30
+
31
+ data_field('usergroup_max_nesting_level') { usergroup_max_nesting_level }
32
+
33
+ usergroup_roles_stats = usergroup_roles_statistics
34
+ data['user_group_roles_max_count'] = usergroup_roles_stats[:max_count]
35
+ data['user_group_roles_min_count'] = usergroup_roles_stats[:min_count]
36
+ end
37
+ # rubocop:enable Metrics/AbcSize, Metrics/MethodLength
38
+
39
+ private
40
+
41
+ def usergroup_max_nesting_level
42
+ # Use recursive CTE to find maximum nesting level of usergroups
43
+ cte_sql = <<~SQL
44
+ WITH RECURSIVE usergroup_hierarchy AS (
45
+ -- Base case: root usergroups (not members of any other usergroup)
46
+ SELECT id, 1 as level
47
+ FROM usergroups
48
+ WHERE id NOT IN (
49
+ SELECT member_id
50
+ FROM usergroup_members
51
+ WHERE member_type = 'Usergroup'
52
+ )
53
+ UNION ALL
54
+ -- Recursive case: usergroups that are members of other usergroups
55
+ SELECT ug.id, uh.level + 1
56
+ FROM usergroups ug
57
+ INNER JOIN usergroup_members ugm ON ug.id = ugm.member_id
58
+ INNER JOIN usergroup_hierarchy uh ON ugm.usergroup_id = uh.id
59
+ WHERE ugm.member_type = 'Usergroup'
60
+ )
61
+ SQL
62
+
63
+ sql_as_count('COALESCE(MAX(level) - 1, 0)', 'usergroup_hierarchy', cte: cte_sql)
64
+ end
65
+
66
+ def usergroup_roles_statistics
67
+ # Query to get role counts per usergroup, including usergroups with 0 roles
68
+ roles_per_usergroup = query(
69
+ <<~SQL
70
+ SELECT ug.id, COALESCE(ur.role_count, 0) as role_count
71
+ FROM usergroups ug
72
+ LEFT JOIN (
73
+ SELECT owner_id, COUNT(*) as role_count
74
+ FROM user_roles
75
+ WHERE owner_type = 'Usergroup'
76
+ GROUP BY owner_id
77
+ ) ur ON ug.id = ur.owner_id
78
+ SQL
79
+ )
80
+
81
+ if roles_per_usergroup.empty?
82
+ { max_count: 0, min_count: 0 }
83
+ else
84
+ role_counts = roles_per_usergroup.map { |row| row['role_count'].to_i }
85
+ { max_count: role_counts.max, min_count: role_counts.min }
86
+ end
29
87
  end
30
88
  end
31
89
  end