hybrid_platforms_conductor 32.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/bin/check-node +24 -0
- data/bin/deploy +12 -0
- data/bin/dump_nodes_json +12 -0
- data/bin/free_ips +23 -0
- data/bin/free_veids +17 -0
- data/bin/get_impacted_nodes +43 -0
- data/bin/last_deploys +56 -0
- data/bin/nodes_to_deploy +104 -0
- data/bin/report +10 -0
- data/bin/run +39 -0
- data/bin/setup +11 -0
- data/bin/ssh_config +14 -0
- data/bin/test +13 -0
- data/bin/topograph +54 -0
- data/lib/hybrid_platforms_conductor/action.rb +82 -0
- data/lib/hybrid_platforms_conductor/actions_executor.rb +307 -0
- data/lib/hybrid_platforms_conductor/bitbucket.rb +123 -0
- data/lib/hybrid_platforms_conductor/cmd_runner.rb +188 -0
- data/lib/hybrid_platforms_conductor/cmdb.rb +34 -0
- data/lib/hybrid_platforms_conductor/common_config_dsl/bitbucket.rb +78 -0
- data/lib/hybrid_platforms_conductor/common_config_dsl/confluence.rb +43 -0
- data/lib/hybrid_platforms_conductor/common_config_dsl/file_system_tests.rb +110 -0
- data/lib/hybrid_platforms_conductor/common_config_dsl/idempotence_tests.rb +38 -0
- data/lib/hybrid_platforms_conductor/config.rb +263 -0
- data/lib/hybrid_platforms_conductor/confluence.rb +119 -0
- data/lib/hybrid_platforms_conductor/connector.rb +84 -0
- data/lib/hybrid_platforms_conductor/credentials.rb +127 -0
- data/lib/hybrid_platforms_conductor/current_dir_monitor.rb +42 -0
- data/lib/hybrid_platforms_conductor/deployer.rb +598 -0
- data/lib/hybrid_platforms_conductor/executable.rb +145 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/bash.rb +44 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/interactive.rb +44 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/my_action.rb.sample +79 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/remote_bash.rb +63 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/ruby.rb +69 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/scp.rb +61 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/config.rb +78 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/host_ip.rb +104 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/host_keys.rb +114 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/my_cmdb.rb.sample +129 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/platform_handlers.rb +66 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/connector/my_connector.rb.sample +156 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/connector/ssh.rb +702 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/platform_handler/platform_handler_plugin.rb.sample +292 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/docker.rb +148 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/my_provisioner.rb.sample +103 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/podman.rb +125 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/proxmox.rb +522 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/proxmox/proxmox_waiter.rb +707 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/proxmox/reserve_proxmox_container +122 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/confluence.rb +69 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/mediawiki.rb +164 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/my_report_plugin.rb.sample +88 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/stdout.rb +61 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/templates/confluence_inventory.html.erb +33 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/bitbucket_conf.rb +137 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/can_be_checked.rb +21 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/check_deploy_and_idempotence.rb +112 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/check_from_scratch.rb +35 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/connection.rb +28 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/deploy_freshness.rb +44 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/deploy_from_scratch.rb +36 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/deploy_removes_root_access.rb +49 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/divergence.rb +25 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/executables.rb +46 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/file_system.rb +45 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/file_system_hdfs.rb +45 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/hostname.rb +25 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/idempotence.rb +77 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/ip.rb +38 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/jenkins_ci_conf.rb +56 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/jenkins_ci_masters_ok.rb +54 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/linear_strategy.rb +47 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/local_users.rb +82 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/mounts.rb +120 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/my_test_plugin.rb.sample +143 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/orphan_files.rb +74 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/ports.rb +85 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/private_ips.rb +38 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/public_ips.rb +38 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/spectre-meltdown-checker.sh +1930 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/spectre.rb +56 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/veids.rb +31 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/vulnerabilities.rb +159 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/confluence.rb +122 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/my_test_report.rb.sample +48 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/stdout.rb +120 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/templates/_confluence_errors_status.html.erb +46 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/templates/_confluence_gauge.html.erb +49 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/templates/confluence.html.erb +242 -0
- data/lib/hybrid_platforms_conductor/io_router.rb +70 -0
- data/lib/hybrid_platforms_conductor/json_dumper.rb +88 -0
- data/lib/hybrid_platforms_conductor/logger_helpers.rb +319 -0
- data/lib/hybrid_platforms_conductor/mutex_dir +76 -0
- data/lib/hybrid_platforms_conductor/nodes_handler.rb +597 -0
- data/lib/hybrid_platforms_conductor/parallel_threads.rb +97 -0
- data/lib/hybrid_platforms_conductor/platform_handler.rb +188 -0
- data/lib/hybrid_platforms_conductor/platforms_handler.rb +118 -0
- data/lib/hybrid_platforms_conductor/plugin.rb +53 -0
- data/lib/hybrid_platforms_conductor/plugins.rb +101 -0
- data/lib/hybrid_platforms_conductor/provisioner.rb +181 -0
- data/lib/hybrid_platforms_conductor/report.rb +31 -0
- data/lib/hybrid_platforms_conductor/reports_handler.rb +84 -0
- data/lib/hybrid_platforms_conductor/services_handler.rb +274 -0
- data/lib/hybrid_platforms_conductor/test.rb +141 -0
- data/lib/hybrid_platforms_conductor/test_by_service.rb +22 -0
- data/lib/hybrid_platforms_conductor/test_report.rb +282 -0
- data/lib/hybrid_platforms_conductor/tests_runner.rb +590 -0
- data/lib/hybrid_platforms_conductor/thycotic.rb +92 -0
- data/lib/hybrid_platforms_conductor/topographer.rb +859 -0
- data/lib/hybrid_platforms_conductor/topographer/plugin.rb +20 -0
- data/lib/hybrid_platforms_conductor/topographer/plugins/graphviz.rb +127 -0
- data/lib/hybrid_platforms_conductor/topographer/plugins/json.rb +72 -0
- data/lib/hybrid_platforms_conductor/topographer/plugins/my_topographer_output_plugin.rb.sample +37 -0
- data/lib/hybrid_platforms_conductor/topographer/plugins/svg.rb +30 -0
- data/lib/hybrid_platforms_conductor/version.rb +5 -0
- data/spec/hybrid_platforms_conductor_test.rb +159 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/bash_spec.rb +43 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/interactive_spec.rb +18 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/remote_bash_spec.rb +102 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/ruby_spec.rb +108 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/scp_spec.rb +79 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions_spec.rb +199 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connection_spec.rb +212 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/cli_options_spec.rb +125 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/config_dsl_spec.rb +50 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/connectable_nodes_spec.rb +28 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/connections_spec.rb +448 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/global_helpers_spec.rb +313 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/node_helpers_spec.rb +32 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/remote_actions_spec.rb +134 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/logging_spec.rb +256 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/parallel_spec.rb +338 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/timeout_spec.rb +101 -0
- data/spec/hybrid_platforms_conductor_test/api/cmd_runner_spec.rb +165 -0
- data/spec/hybrid_platforms_conductor_test/api/config_spec.rb +238 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/check_spec.rb +9 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/deploy_spec.rb +243 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/parse_deploy_output_spec.rb +104 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioner_spec.rb +131 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/docker/Dockerfile +10 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/docker_spec.rb +123 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/podman_spec.rb +211 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/config_dsl_spec.rb +126 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/create_spec.rb +290 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/destroy_spec.rb +43 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/ip_spec.rb +60 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/proxmox.json +3 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/destroy_vm_spec.rb +82 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/expired_containers_spec.rb +786 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/ips_assignment_spec.rb +112 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/other_lxc_containers_resources_spec.rb +190 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/pve_node_resources_spec.rb +200 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/retries_spec.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/vm_ids_assignment_spec.rb +67 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/start_spec.rb +79 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/state_spec.rb +28 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/stop_spec.rb +41 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs/config_spec.rb +33 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs/host_ip_spec.rb +64 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs/host_keys_spec.rb +133 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs/platform_handlers_spec.rb +19 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs_plugins_api_spec.rb +446 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/common_spec.rb +127 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/git_diff_impacts_spec.rb +318 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/nodes_selectors_spec.rb +132 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/platform_handlers_plugins_api_spec.rb +60 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/several_platforms_spec.rb +58 -0
- data/spec/hybrid_platforms_conductor_test/api/platform_handler_spec.rb +97 -0
- data/spec/hybrid_platforms_conductor_test/api/platforms_handler_spec.rb +104 -0
- data/spec/hybrid_platforms_conductor_test/api/plugins_spec.rb +243 -0
- data/spec/hybrid_platforms_conductor_test/api/reports_handler_spec.rb +44 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/actions_to_deploy_spec.rb +121 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/deploy_allowed_spec.rb +142 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/log_info_spec.rb +101 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/package_spec.rb +388 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/parse_deploy_output_spec.rb +274 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/prepare_for_deploy_spec.rb +264 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/common_spec.rb +194 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/global_spec.rb +37 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/node_check_spec.rb +194 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/node_spec.rb +137 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/node_ssh_spec.rb +257 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/platform_spec.rb +110 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/reports_spec.rb +367 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/test_plugins/bitbucket_conf_spec.rb +111 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/test_reports_plugins/confluence_spec.rb +29 -0
- data/spec/hybrid_platforms_conductor_test/cmdb_plugins/test_cmdb.rb +166 -0
- data/spec/hybrid_platforms_conductor_test/cmdb_plugins/test_cmdb2.rb +93 -0
- data/spec/hybrid_platforms_conductor_test/cmdb_plugins/test_cmdb_others.rb +60 -0
- data/spec/hybrid_platforms_conductor_test/cmdb_plugins/test_cmdb_others2.rb +58 -0
- data/spec/hybrid_platforms_conductor_test/executables/check-node_spec.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/executables/deploy_spec.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/executables/get_impacted_nodes_spec.rb +158 -0
- data/spec/hybrid_platforms_conductor_test/executables/last_deploys_spec.rb +173 -0
- data/spec/hybrid_platforms_conductor_test/executables/nodes_to_deploy_spec.rb +283 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/actions_executor_spec.rb +28 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/cmd_runner_spec.rb +28 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/common_spec.rb +67 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/deployer_spec.rb +251 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/nodes_handler_spec.rb +111 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/nodes_selectors_spec.rb +71 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/reports_handler_spec.rb +54 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/tests_runner_spec.rb +139 -0
- data/spec/hybrid_platforms_conductor_test/executables/report_spec.rb +60 -0
- data/spec/hybrid_platforms_conductor_test/executables/run_spec.rb +173 -0
- data/spec/hybrid_platforms_conductor_test/executables/ssh_config_spec.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/executables/test_spec.rb +41 -0
- data/spec/hybrid_platforms_conductor_test/helpers/actions_executor_helpers.rb +98 -0
- data/spec/hybrid_platforms_conductor_test/helpers/cmd_runner_helpers.rb +92 -0
- data/spec/hybrid_platforms_conductor_test/helpers/cmdb_helpers.rb +37 -0
- data/spec/hybrid_platforms_conductor_test/helpers/config_helpers.rb +20 -0
- data/spec/hybrid_platforms_conductor_test/helpers/connector_ssh_helpers.rb +130 -0
- data/spec/hybrid_platforms_conductor_test/helpers/deployer_helpers.rb +149 -0
- data/spec/hybrid_platforms_conductor_test/helpers/deployer_test_helpers.rb +812 -0
- data/spec/hybrid_platforms_conductor_test/helpers/executables_helpers.rb +96 -0
- data/spec/hybrid_platforms_conductor_test/helpers/nodes_handler_helpers.rb +20 -0
- data/spec/hybrid_platforms_conductor_test/helpers/platform_handler_helpers.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/helpers/platforms_handler_helpers.rb +127 -0
- data/spec/hybrid_platforms_conductor_test/helpers/plugins_helpers.rb +48 -0
- data/spec/hybrid_platforms_conductor_test/helpers/provisioner_proxmox_helpers.rb +789 -0
- data/spec/hybrid_platforms_conductor_test/helpers/reports_handler_helpers.rb +29 -0
- data/spec/hybrid_platforms_conductor_test/helpers/services_handler_helpers.rb +20 -0
- data/spec/hybrid_platforms_conductor_test/helpers/tests_runner_helpers.rb +38 -0
- data/spec/hybrid_platforms_conductor_test/mocked_lib/my_test_gem/hpc_plugins/test_plugin_type/test_plugin_id1.rb +22 -0
- data/spec/hybrid_platforms_conductor_test/mocked_lib/my_test_gem/hpc_plugins/test_plugin_type/test_plugin_id2.rb +22 -0
- data/spec/hybrid_platforms_conductor_test/mocked_lib/my_test_gem2/sub_dir/hpc_plugins/test_plugin_type/test_plugin_id3.rb +26 -0
- data/spec/hybrid_platforms_conductor_test/mocked_lib/my_test_gem2/sub_dir/hpc_plugins/test_plugin_type2/test_plugin_id4.rb +26 -0
- data/spec/hybrid_platforms_conductor_test/platform_handler_plugins/test.rb +225 -0
- data/spec/hybrid_platforms_conductor_test/platform_handler_plugins/test2.rb +11 -0
- data/spec/hybrid_platforms_conductor_test/report_plugin.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/test_action.rb +66 -0
- data/spec/hybrid_platforms_conductor_test/test_connector.rb +151 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/global.rb +30 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/node.rb +53 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/node_check.rb +47 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/node_ssh.rb +42 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/platform.rb +50 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/several_checks.rb +50 -0
- data/spec/hybrid_platforms_conductor_test/test_provisioner.rb +95 -0
- data/spec/hybrid_platforms_conductor_test/tests_report_plugin.rb +49 -0
- data/spec/spec_helper.rb +111 -0
- metadata +566 -0
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
require 'base64'
require 'logger'
require 'savon'
require 'hybrid_platforms_conductor/credentials'
require 'hybrid_platforms_conductor/logger_helpers'

module HybridPlatformsConductor

  # Gives ways to query the Thycotic SOAP API at a given URL
  class Thycotic

    include LoggerHelpers

    # Provide a Thycotic connector, and make sure the password is being cleaned when exiting.
    #
    # Parameters::
    # * *thycotic_url* (String): The Thycotic URL
    # * *logger* (Logger): Logger to be used
    # * *logger_stderr* (Logger): Logger to be used for stderr
    # * *domain* (String): Domain to use for authentication to Thycotic [default: ENV['hpc_domain_for_thycotic']]
    # * Proc: Code called with the Thycotic instance.
    #   * *thycotic* (Thycotic): The Thycotic instance to use.
    def self.with_thycotic(thycotic_url, logger, logger_stderr, domain: ENV['hpc_domain_for_thycotic'])
      Credentials.with_credentials_for(:thycotic, logger, logger_stderr, url: thycotic_url) do |thycotic_user, thycotic_password|
        # Bug fix: forward the domain keyword to the constructor. It was previously dropped,
        # so a caller-provided domain was silently ignored in favor of the constructor's default.
        yield Thycotic.new(thycotic_url, thycotic_user, thycotic_password, domain: domain, logger: logger, logger_stderr: logger_stderr)
      end
    end

    # Constructor
    #
    # Parameters::
    # * *url* (String): URL of the Thycotic Secret Server
    # * *user* (String): User name to be used to connect to Thycotic
    # * *password* (String): Password to be used to connect to Thycotic
    # * *domain* (String): Domain to use for authentication to Thycotic [default: ENV['hpc_domain_for_thycotic']]
    # * *logger* (Logger): Logger to be used [default: Logger.new(STDOUT)]
    # * *logger_stderr* (Logger): Logger to be used for stderr [default: Logger.new(STDERR)]
    def initialize(
      url,
      user,
      password,
      domain: ENV['hpc_domain_for_thycotic'],
      logger: Logger.new(STDOUT),
      logger_stderr: Logger.new(STDERR)
    )
      init_loggers(logger, logger_stderr)
      # Authenticate against the SOAP API once to get a session token, reused by all subsequent calls.
      @client = Savon.client(
        wsdl: "#{url}/webservices/SSWebservice.asmx?wsdl",
        # NOTE(review): SSL verification is disabled — presumably because the targeted Secret
        # Servers use self-signed certificates. Confirm this is acceptable in your environment.
        ssl_verify_mode: :none,
        logger: @logger,
        log: log_debug?
      )
      @token = @client.call(:authenticate, message: {
        username: user,
        password: password,
        domain: domain
      }).to_hash.dig(:authenticate_response, :authenticate_result, :token)
      raise "Unable to get token from SOAP authentication to #{url}" if @token.nil?
    end

    # Return secret corresponding to a given secret ID
    #
    # Parameters::
    # * *secret_id* (Object): The secret ID
    # Result::
    # * Hash: The corresponding API result
    def get_secret(secret_id)
      @client.call(:get_secret, message: {
        token: @token,
        secretId: secret_id
      }).to_hash.dig(:get_secret_response, :get_secret_result)
    end

    # Get a file attached to a given secret
    #
    # Parameters::
    # * *secret_id* (Object): The secret ID
    # * *secret_item_id* (Object): The secret item id
    # Result::
    # * String or nil: The decoded file content, or nil if none
    def download_file_attachment_by_item_id(secret_id, secret_item_id)
      file_in_base64 = @client.call(:download_file_attachment_by_item_id, message: {
        token: @token,
        secretId: secret_id,
        secretItemId: secret_item_id
      }).to_hash.dig(:download_file_attachment_by_item_id_response, :download_file_attachment_by_item_id_result, :file_attachment)
      # The API returns the attachment Base64-encoded; decode it for the caller.
      file_in_base64.nil? ? nil : Base64.decode64(file_in_base64)
    end

  end

end
|
|
@@ -0,0 +1,859 @@
|
|
|
1
|
+
require 'logger'
|
|
2
|
+
require 'ipaddress'
|
|
3
|
+
require 'hybrid_platforms_conductor/nodes_handler'
|
|
4
|
+
require 'hybrid_platforms_conductor/json_dumper'
|
|
5
|
+
require 'hybrid_platforms_conductor/topographer/plugin'
|
|
6
|
+
require 'hybrid_platforms_conductor/logger_helpers'
|
|
7
|
+
|
|
8
|
+
module HybridPlatformsConductor
|
|
9
|
+
|
|
10
|
+
# Class giving an API to parse the graph of the TI network
|
|
11
|
+
class Topographer
|
|
12
|
+
|
|
13
|
+
include LoggerHelpers
|
|
14
|
+
|
|
15
|
+
# Give a default configuration
|
|
16
|
+
#
|
|
17
|
+
# Result::
|
|
18
|
+
# * Hash<Symbol,Object>: Default configuration
|
|
19
|
+
def self.default_config
|
|
20
|
+
{
|
|
21
|
+
# Directory from which the complete JSON files are to be read
|
|
22
|
+
json_files_dir: 'nodes_json',
|
|
23
|
+
# JSON keys to ignore when reading complete JSON files. Only leafs of this tree structure are ignored.
|
|
24
|
+
ignore_json_keys: {
|
|
25
|
+
# This should only duplicate the real configuration from the recipes, and it adds a lot of IP ranges that can be ignored.
|
|
26
|
+
'network' => nil,
|
|
27
|
+
# Contains simple network definition. Not a connection in itself.
|
|
28
|
+
'policy_xae_outproxy' => { 'local_network' => nil },
|
|
29
|
+
# Contains DNS entries. Not a connection in itself.
|
|
30
|
+
'policy_xae_xx_cdh' => { 'dns' => nil },
|
|
31
|
+
# This contains firewall rules, therefore representing who connects on the host, and not who the host connects to.
|
|
32
|
+
'policy_xae_xx_iptables' => nil,
|
|
33
|
+
# Contains the allowed network range. Not a connection in itself.
|
|
34
|
+
'postfix' => { 'main' => { 'mynetworks' => nil } },
|
|
35
|
+
# This contains sometime IP addresses in the key comments
|
|
36
|
+
'site_directory' => nil,
|
|
37
|
+
# This contains firewall rules, therefore representing who connects on the host, and not who the host connects to.
|
|
38
|
+
'site_iptables' => nil,
|
|
39
|
+
# This contains some user names having IP addresses inside
|
|
40
|
+
'site_xx_roles' => nil,
|
|
41
|
+
# This stores routes for all Proxmox instances.
|
|
42
|
+
'pve' => { 'vlan' => { 'routes' => nil } }
|
|
43
|
+
},
|
|
44
|
+
# JSON keys to ignore when reading complete JSON files, whatever their position
|
|
45
|
+
ignore_any_json_keys: [
|
|
46
|
+
# Those contain cache of MAC addresses to IP addresses
|
|
47
|
+
'arp',
|
|
48
|
+
# Those contain broadcast IP addresses
|
|
49
|
+
'broadcast',
|
|
50
|
+
# Those contain firewall rules, therefore representing who connects on the host, and not who the host connects to.
|
|
51
|
+
'firewall',
|
|
52
|
+
# Those contain version numbers with same format as IP addresses
|
|
53
|
+
'version'
|
|
54
|
+
],
|
|
55
|
+
# IPs to ignore while parsing complete JSON files
|
|
56
|
+
ignore_ips: [
|
|
57
|
+
/^0\./,
|
|
58
|
+
/^127\./,
|
|
59
|
+
/^255\./
|
|
60
|
+
],
|
|
61
|
+
# Maximum level of recursion while building the graph of connected nodes (nil = no limit).
|
|
62
|
+
connections_max_level: nil,
|
|
63
|
+
# Maximum label length for a link
|
|
64
|
+
max_link_label_length: 128
|
|
65
|
+
}
|
|
66
|
+
end
|
|
67
|
+
|
|
68
|
+
    # Some getters that can be useful for clients of the Topographer:
    # * nodes_graph: the connections graph being built (structure documented in #initialize)
    # * config: the effective configuration (defaults merged with user-provided overrides)
    # * node_metadata: per-hostname metadata fetched from the NodesHandler
    attr_reader :nodes_graph, :config, :node_metadata
|
|
70
|
+
|
|
71
|
+
# Constructor
|
|
72
|
+
#
|
|
73
|
+
# Parameters::
|
|
74
|
+
# * *logger* (Logger): Logger to be used [default = Logger.new(STDOUT)]
|
|
75
|
+
# * *logger_stderr* (Logger): Logger to be used for stderr [default = Logger.new(STDERR)]
|
|
76
|
+
# * *nodes_handler* (NodesHandler): The nodes handler to be used [default = NodesHandler.new]
|
|
77
|
+
# * *json_dumper* (JsonDumper): The JSON Dumper to be used [default = JsonDumper.new]
|
|
78
|
+
# * *config* (Hash<Symbol,Object>): Some configuration parameters that can override defaults. [default = {}] Here are the possible keys:
|
|
79
|
+
# * *json_files_dir* (String): Directory from which JSON files are taken. [default = nodes_json]
|
|
80
|
+
# * *connections_max_level* (Integer or nil): Number maximal of recursive passes to get hostname connections (nil means no limit). [default = nil]
|
|
81
|
+
def initialize(logger: Logger.new(STDOUT), logger_stderr: Logger.new(STDERR), nodes_handler: NodesHandler.new, json_dumper: JsonDumper.new, config: {})
|
|
82
|
+
init_loggers(logger, logger_stderr)
|
|
83
|
+
@nodes_handler = nodes_handler
|
|
84
|
+
@json_dumper = json_dumper
|
|
85
|
+
@config = Topographer.default_config.merge(config)
|
|
86
|
+
# Get the metadata of each node, per hostname
|
|
87
|
+
# Hash<String,Hash>
|
|
88
|
+
@node_metadata = {}
|
|
89
|
+
# Know for each IP what is the hostname it belongs to
|
|
90
|
+
# Hash<String,String>
|
|
91
|
+
@ips_to_host = {}
|
|
92
|
+
# Get the connection information per node name. A node reprensents 1 element that can be connected to other elements in the graph.
|
|
93
|
+
# Hash< String, Hash<Symbol,Object> >
|
|
94
|
+
# Here are the possible information keys:
|
|
95
|
+
# * *type* (Symbol): Type of the node. Can be one of: :node, :cluster, :unknown.
|
|
96
|
+
# * *connections* (Hash< String, Array<String> >): List of labels per connected node.
|
|
97
|
+
# * *includes* (Array<String>): List of nodes included in this one.
|
|
98
|
+
# * *includes_proc* (Proc): Proc called to know if a node belongs to this cluster [only if type == :cluster]:
|
|
99
|
+
# * Parameters::
|
|
100
|
+
# * *node_name* (String): Name of the node for the inclusion test
|
|
101
|
+
# * Result::
|
|
102
|
+
# * Boolean: Does the node belongs to this cluster?
|
|
103
|
+
# * *ipv4* (IPAddress::IPv4): Corresponding IPv4 object [only if type == :node and a private IP exists, or type == :unknown, or type == :cluster and the cluster name is an IP range]
|
|
104
|
+
@nodes_graph = {}
|
|
105
|
+
|
|
106
|
+
# Default values
|
|
107
|
+
@from_hosts = []
|
|
108
|
+
@to_hosts = []
|
|
109
|
+
@outputs = []
|
|
110
|
+
@skip_run = false
|
|
111
|
+
|
|
112
|
+
# Parse plugins
|
|
113
|
+
root_path = File.expand_path("#{File.dirname(__FILE__)}/..")
|
|
114
|
+
@plugins = Hash[Dir.
|
|
115
|
+
glob("#{File.dirname(__FILE__)}/topographer/plugins/*.rb").
|
|
116
|
+
map do |file_name|
|
|
117
|
+
plugin_name = File.basename(file_name)[0..-4].to_sym
|
|
118
|
+
require file_name
|
|
119
|
+
[
|
|
120
|
+
plugin_name,
|
|
121
|
+
Topographer::Plugins.const_get(plugin_name.to_s.split('_').collect(&:capitalize).join.to_sym)
|
|
122
|
+
]
|
|
123
|
+
end]
|
|
124
|
+
|
|
125
|
+
@ips_to_host = known_ips.clone
|
|
126
|
+
|
|
127
|
+
# Fill info from the metadata
|
|
128
|
+
metadata_properties = %i[
|
|
129
|
+
description
|
|
130
|
+
physical_node
|
|
131
|
+
private_ips
|
|
132
|
+
]
|
|
133
|
+
@nodes_handler.prefetch_metadata_of @nodes_handler.known_nodes, metadata_properties
|
|
134
|
+
@nodes_handler.known_nodes.each do |hostname|
|
|
135
|
+
@node_metadata[hostname] = Hash[metadata_properties.map { |property| [property, @nodes_handler.metadata_of(hostname, property)] }]
|
|
136
|
+
end
|
|
137
|
+
|
|
138
|
+
# Small cache of hostnames used a lot to parse JSON
|
|
139
|
+
@known_nodes = Hash[@nodes_handler.known_nodes.map { |hostname| [hostname, nil] }]
|
|
140
|
+
# Cache of objects being used a lot in parsing for performance
|
|
141
|
+
@non_word_regexp = /\W+/
|
|
142
|
+
@ip_regexp = /(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(\/(\d{1,2})|[^\d\/]|$)/
|
|
143
|
+
# Cache of ignored IPs
|
|
144
|
+
@ips_ignored = {}
|
|
145
|
+
end
|
|
146
|
+
|
|
147
|
+
# Complete an option parser with ways to tune the topographer
#
# Parameters::
# * *options_parser* (OptionParser): The option parser to complete
def options_parse(options_parser)
  # Both --from and --to delegate their HOSTS_OPTIONS parsing to dedicated
  # sub-parsers filled in by the nodes handler.
  new_selector_parser = proc do |selected_hosts|
    OptionParser.new do |opts|
      @nodes_handler.options_parse_nodes_selectors(opts, selected_hosts)
    end
  end
  from_hosts_opts_parser = new_selector_parser.call(@from_hosts)
  to_hosts_opts_parser = new_selector_parser.call(@to_hosts)
  options_parser.separator ''
  options_parser.separator 'Topographer options:'
  # The extra splatted lines append the sub-parser's own help (minus its banner) to the option description.
  options_parser.on('-F', '--from HOSTS_OPTIONS', 'Specify options for the set of nodes to start from (enclose them with ""). Default: all nodes. HOSTS_OPTIONS follows the following:', *from_hosts_opts_parser.to_s.split("\n")[3..-1]) do |hosts_options|
    remaining_args = hosts_options.split(' ')
    from_hosts_opts_parser.parse!(remaining_args)
    raise "Unknown --from options: #{remaining_args.join(' ')}" unless remaining_args.empty?
  end
  options_parser.on('-k', '--skip-run', "Skip the actual gathering of JSON node files. If set, the current files in #{@config[:json_files_dir]} will be used.") do
    @skip_run = true
  end
  options_parser.on('-p', '--output FORMAT:FILE_NAME', "Specify a format and file name. Can be used several times. FORMAT can be one of #{available_plugins.sort.join(', ')}. Ex.: graphviz:graph.gv") do |output|
    format_str, file_name = output.split(':')
    format = format_str.to_sym
    raise "Unknown format: #{format}." unless available_plugins.include?(format)

    @outputs << [format, file_name]
  end
  options_parser.on('-T', '--to HOSTS_OPTIONS', 'Specify options for the set of nodes to get to (enclose them with ""). Default: all nodes. HOSTS_OPTIONS follows the following:', *to_hosts_opts_parser.to_s.split("\n")[3..-1]) do |hosts_options|
    remaining_args = hosts_options.split(' ')
    to_hosts_opts_parser.parse!(remaining_args)
    raise "Unknown --to options: #{remaining_args.join(' ')}" unless remaining_args.empty?
  end
end
|
|
180
|
+
|
|
181
|
+
# Validate that parsed parameters are valid
#
# Raises a RuntimeError if no output has been requested with --output.
def validate_params
  return unless @outputs.empty?

  raise 'No output defined. Please use --output option.'
end
|
|
185
|
+
|
|
186
|
+
# Resolve the from and to hosts descriptions.
# When a selector list is empty, default it (in place) to all nodes.
#
# Result::
# * Array<String>: The from hostnames
# * Array<String>: The to hostnames
def resolve_from_to
  [@from_hosts, @to_hosts].map do |hosts_selectors|
    hosts_selectors << { all: true } if hosts_selectors.empty?
    @nodes_handler.select_nodes(hosts_selectors)
  end
end
|
|
199
|
+
|
|
200
|
+
# Generate the JSON files to be used.
# Does nothing when --skip-run was given (existing files are reused).
def get_json_files
  return if @skip_run

  @json_dumper.dump_dir = @config[:json_files_dir]
  # Generate all the jsons, even if 1 hostname is given, as it might be useful for the rest of the graph.
  @json_dumper.dump_json_for(@nodes_handler.known_nodes)
end
|
|
208
|
+
|
|
209
|
+
# Dump the graph in the desired outputs.
# Each requested (format, file name) pair is written inside its own logged section.
def dump_outputs
  @outputs.each do |format, file_name|
    section "Write #{format} file #{file_name}" do
      write_graph(file_name, format)
    end
  end
end
|
|
217
|
+
|
|
218
|
+
# Get the list of available output plugins.
#
# Result::
# * Array<Symbol>: List of plugins
def available_plugins
  @plugins.each_key.to_a
end
|
|
225
|
+
|
|
226
|
+
# Add to the graph a given set of hostnames and their connected nodes.
#
# Parameters::
# * *hostnames* (Array<String>): List of hostnames
def graph_for(hostnames)
  # Parse connections from JSON files, bounded by the configured recursion level
  max_level = @config[:connections_max_level]
  hostnames.each { |hostname| parse_connections_for(hostname, max_level) }
end
|
|
236
|
+
|
|
237
|
+
# Add to the graph a given set of nodes lists and their connected nodes.
# Each nodes list becomes a :cluster node whose :includes references its hosts.
#
# Parameters::
# * *nodes_lists* (Array<String>): List of nodes lists
# * *only_add_cluster* (Boolean): If true, then don't add missing nodes from this graph to the graph [default = false]
def graph_for_nodes_lists(nodes_lists, only_add_cluster: false)
  nodes_lists.each do |nodes_list|
    hosts_list = @nodes_handler.select_nodes(@nodes_handler.nodes_from_list(nodes_list))
    if only_add_cluster
      # Select only the hosts list we know about
      hosts_list.select! { |hostname| @nodes_graph.key?(hostname) }
    else
      # Parse JSON for all the hosts of this cluster
      hosts_list.each do |hostname|
        parse_connections_for(hostname, @config[:connections_max_level])
      end
    end
    # Create the cluster node lazily; includes_proc captures hosts_list so
    # membership tests keep working even after :includes is rewritten later
    # (e.g. by force_cluster_strict_hierarchy).
    @nodes_graph[nodes_list] = {
      type: :cluster,
      connections: {},
      includes: [],
      includes_proc: proc { |node_name| hosts_list.include?(node_name) }
    } unless @nodes_graph.key?(nodes_list)
    # Merge the hosts into the (possibly pre-existing) cluster, without duplicates
    @nodes_graph[nodes_list][:includes].concat(hosts_list)
    @nodes_graph[nodes_list][:includes].uniq!
  end
end
|
|
264
|
+
|
|
265
|
+
# Collapse a given list of nodes: each node's connections become the merged
# connections of its included nodes, its :includes is emptied, and the included
# nodes are replaced by it everywhere in the graph.
#
# NOTE(review): the recursion collapses included nodes FIRST, and collapsing a
# node with an empty :includes resets its :connections to {} — so leaf nodes
# lose their connections before the parent merges them. Confirm this is the
# intended semantics before reusing this method outside its current callers.
#
# Parameters::
# * *nodes_list* (Array<String>): List of nodes to collapse
def collapse_nodes(nodes_list)
  nodes_list.each do |node_name_to_collapse|
    included_nodes = @nodes_graph[node_name_to_collapse][:includes]
    # First collapse its included nodes if any
    collapse_nodes(included_nodes)
    # Then collapse this one: union of the included nodes' connections
    collapsed_connections = {}
    included_nodes.each do |included_node_name|
      # On conflicting connection targets, keep the union of labels
      collapsed_connections.merge!(@nodes_graph[included_node_name][:connections]) { |_connected_node, labels1, labels2| (labels1 + labels2).uniq }
    end
    @nodes_graph[node_name_to_collapse][:connections] = collapsed_connections
    @nodes_graph[node_name_to_collapse][:includes] = []
    # Any reference to an included node now points to the collapsed node
    replace_nodes(included_nodes, node_name_to_collapse)
  end
end
|
|
284
|
+
|
|
285
|
+
# Remove self connections (nodes connected to themselves).
def remove_self_connections
  @nodes_graph.each do |node_name, node_info|
    node_info[:connections].reject! { |connected_node_name, _labels| connected_node_name == node_name }
  end
end
|
|
291
|
+
|
|
292
|
+
# Remove empty clusters.
# Loops because removing a cluster can empty its parent cluster in turn.
def remove_empty_clusters
  loop do
    empty_clusters = @nodes_graph.select { |_node_name, node_info| node_info[:type] == :cluster && node_info[:includes].empty? }.keys
    break if empty_clusters.empty?

    filter_out_nodes(empty_clusters)
  end
end
|
|
300
|
+
|
|
301
|
+
# Define clusters of IPs with 24 bits ranges: every node having a private IP
# is grouped into the x.y.z.0/24 cluster of its first private IP.
def define_clusters_ip_24
  # Snapshot the keys: new /24 cluster nodes added below must not be iterated
  @nodes_graph.keys.each do |node_name|
    next unless @nodes_graph[node_name][:type] == :node

    private_ips = @node_metadata[node_name][:private_ips]
    next if private_ips.nil? || private_ips.empty?

    ip_24 = "#{private_ips.first.split('.')[0..2].join('.')}.0/24"
    @nodes_graph[ip_24] = ip_range_graph_info(ip_24) unless @nodes_graph.key?(ip_24)
    cluster_includes = @nodes_graph[ip_24][:includes]
    cluster_includes << node_name unless cluster_includes.include?(node_name)
  end
end
|
|
311
|
+
|
|
312
|
+
# Return the list of nodes and ancestors of a given list of nodes, recursively.
# An ancestor of a node can be:
# * Another node connected to it.
# * Another node including it.
#
# Parameters::
# * *nodes_list* (Array<String>): List of nodes for which we look for ancestors.
# Result::
# * Array<String>: List of ancestor nodes.
def ancestor_nodes(nodes_list)
  new_ancestors = @nodes_graph.map do |node_name, node_info|
    next if nodes_list.include?(node_name)

    connects_to_list = !(node_info[:connections].keys & nodes_list).empty?
    includes_from_list = !(node_info[:includes] & nodes_list).empty?
    node_name if connects_to_list || includes_from_list
  end.compact
  # Fixpoint reached when no new ancestor is found
  new_ancestors.empty? ? nodes_list : ancestor_nodes(nodes_list + new_ancestors)
end
|
|
333
|
+
|
|
334
|
+
# Return the list of nodes and children of a given list of nodes, recursively.
# A child of a node can be:
# * Another node that it connects to.
# * Another node that it includes.
#
# The accumulator keeps the already-visited nodes (mirroring ancestor_nodes),
# which also guarantees termination on connection cycles — the previous
# implementation recursed on children only, looping forever on a <-> b cycles
# and dropping the input nodes from the result, contrary to this contract.
#
# Parameters::
# * *nodes_list* (Array<String>): List of nodes for which we look for children.
# Result::
# * Array<String>: List of children nodes.
def children_nodes(nodes_list)
  direct_children = nodes_list.flat_map do |node_name|
    @nodes_graph[node_name][:connections].keys + @nodes_graph[node_name][:includes]
  end.uniq
  new_children_nodes = direct_children - nodes_list
  # Fixpoint reached when no new child is found
  if new_children_nodes.empty?
    nodes_list
  else
    children_nodes((nodes_list + new_children_nodes).uniq)
  end
end
|
|
357
|
+
|
|
358
|
+
# Return the list of nodes that are clusters
#
# Result::
# * Array<String>: List of cluster nodes
def cluster_nodes
  @nodes_graph.select { |_node_name, node_info| node_info[:type] == :cluster }.keys
end
|
|
369
|
+
|
|
370
|
+
# Remove from the graph any node that is not part of a given list.
# Connections and inclusions pointing outside the list are dropped too.
#
# Parameters::
# * *nodes_list* (Array<String>): List of nodes to keep
def filter_in_nodes(nodes_list)
  @nodes_graph = @nodes_graph.each_with_object({}) do |(node_name, node_info), filtered_graph|
    next unless nodes_list.include?(node_name)

    filtered_graph[node_name] = node_info.merge(
      connections: node_info[:connections].select { |connected_hostname, _labels| nodes_list.include?(connected_hostname) },
      includes: node_info[:includes] & nodes_list
    )
  end
end
|
|
384
|
+
|
|
385
|
+
# Remove from the graph any node that is part of a given list.
# Connections and inclusions pointing to removed nodes are dropped too.
#
# Parameters::
# * *nodes_list* (Array<String>): List of nodes to remove
def filter_out_nodes(nodes_list)
  @nodes_graph = @nodes_graph.each_with_object({}) do |(node_name, node_info), filtered_graph|
    next if nodes_list.include?(node_name)

    filtered_graph[node_name] = node_info.merge(
      connections: node_info[:connections].reject { |connected_hostname, _labels| nodes_list.include?(connected_hostname) },
      includes: node_info[:includes] - nodes_list
    )
  end
end
|
|
399
|
+
|
|
400
|
+
# Replace a list of nodes by a given node: remove the replaced nodes from the
# graph and rewire every inclusion/connection that referenced them.
#
# Parameters::
# * *nodes_to_be_replaced* (Array<String>): Nodes to be replaced
# * *replacement_node* (String): Node that is used for replacement
def replace_nodes(nodes_to_be_replaced, replacement_node)
  # Drop the replaced nodes themselves
  @nodes_graph.reject! { |node_name, _node_info| nodes_to_be_replaced.include?(node_name) }
  # Rewire any inclusion or connection that referenced a replaced node
  @nodes_graph.each_value do |node_info|
    node_info[:includes] = node_info[:includes].map { |included_node_name| nodes_to_be_replaced.include?(included_node_name) ? replacement_node : included_node_name }.uniq
    rewired_connections = {}
    node_info[:connections].each do |connected_node_name, labels|
      if nodes_to_be_replaced.include?(connected_node_name)
        # Several replaced connections fold into the replacement node; merge labels
        merged_labels = (rewired_connections[replacement_node] ||= [])
        merged_labels.concat(labels)
        merged_labels.uniq!
      else
        rewired_connections[connected_node_name] = labels
      end
    end
    node_info[:connections] = rewired_connections
  end
end
|
|
424
|
+
|
|
425
|
+
# Make sure clusters follow a strict hierarchy and that 1 node belongs to at most 1 cluster.
# Repeatedly detects a pair of clusters directly including the same node, then
# resolves the conflict by nesting one cluster into the other (when one's
# members are a subset of the other's, judged via :includes_proc) or by merging
# both into a new "<c1>_&_<c2>" cluster. Loops until no conflict remains.
def force_cluster_strict_hierarchy
  # Find the nodes belonging to several clusters.
  loop do
    # First cluster found for each node name
    # Hash<String, String >
    cluster_per_node = {}
    conflicting_clusters = nil
    @nodes_graph.each do |node_name, node_info|
      node_info[:includes].each do |included_node_name|
        if cluster_per_node.key?(included_node_name)
          # Found a conflict between 2 clusters: stop scanning and resolve it below
          conflicting_clusters = [node_name, cluster_per_node[included_node_name]]
          log_error "Node #{included_node_name} found in both clusters #{node_name} and #{cluster_per_node[included_node_name]}"
          break
        else
          cluster_per_node[included_node_name] = node_name
        end
      end
      break unless conflicting_clusters.nil?
    end
    if conflicting_clusters.nil?
      break
    else
      # We have conflicting clusters to resolve
      cluster_1, cluster_2 = conflicting_clusters
      # Subset tests use includes_proc (the cluster's original membership predicate)
      c1_belongs_to_c2 = @nodes_graph[cluster_1][:includes].all? { |cluster_1_node_name| @nodes_graph[cluster_2][:includes_proc].call(cluster_1_node_name) }
      c2_belongs_to_c1 = @nodes_graph[cluster_2][:includes].all? { |cluster_2_node_name| @nodes_graph[cluster_1][:includes_proc].call(cluster_2_node_name) }
      if c1_belongs_to_c2
        if c2_belongs_to_c1
          # Both clusters have the same nodes: nest one inside the other,
          # preferring the nesting direction allowed by includes_proc
          if @nodes_graph[cluster_1][:includes_proc].call(cluster_2)
            @nodes_graph[cluster_2][:includes] = (@nodes_graph[cluster_1][:includes] + @nodes_graph[cluster_2][:includes]).uniq
            @nodes_graph[cluster_1][:includes] = [cluster_2]
          else
            @nodes_graph[cluster_1][:includes] = (@nodes_graph[cluster_1][:includes] + @nodes_graph[cluster_2][:includes]).uniq
            @nodes_graph[cluster_2][:includes] = [cluster_1]
          end
        else
          # All nodes of cluster_1 belong to cluster_2, but some nodes of cluster_2 don't belong to cluster_1
          @nodes_graph[cluster_2][:includes] = @nodes_graph[cluster_2][:includes] - @nodes_graph[cluster_1][:includes] + [cluster_1]
        end
      elsif c2_belongs_to_c1
        # All nodes of cluster_2 belong to cluster_1, but some nodes of cluster_1 don't belong to cluster_2
        @nodes_graph[cluster_1][:includes] = @nodes_graph[cluster_1][:includes] - @nodes_graph[cluster_2][:includes] + [cluster_2]
      else
        # cluster_1 and cluster_2 have to be merged
        new_cluster_name = "#{cluster_1}_&_#{cluster_2}"
        # Store those procs in variables as the cluster_1 and cluster_2 references are going to be removed
        includes_proc_1 = @nodes_graph[cluster_1][:includes_proc]
        includes_proc_2 = @nodes_graph[cluster_2][:includes_proc]
        @nodes_graph[new_cluster_name] = {
          type: :cluster,
          includes: (@nodes_graph[cluster_1][:includes] + @nodes_graph[cluster_2][:includes]).uniq,
          connections: @nodes_graph[cluster_1][:connections].merge!(@nodes_graph[cluster_2][:connections]) { |_connected_node, labels1, labels2| (labels1 + labels2).uniq },
          includes_proc: proc do |hostname|
            includes_proc_1.call(hostname) || includes_proc_2.call(hostname)
          end
        }
        # Rewire all references to the 2 old clusters towards the merged one
        replace_nodes([cluster_1, cluster_2], new_cluster_name)
      end
    end
  end
end
|
|
489
|
+
|
|
490
|
+
# Is the node represented as a cluster?
#
# Parameters::
# * *node_name* (String): Node name
# Result::
# * Boolean: Is the node represented as a cluster?
def is_node_cluster?(node_name)
  node_info = @nodes_graph[node_name]
  node_info[:type] == :cluster || !node_info[:includes].empty?
end
|
|
499
|
+
|
|
500
|
+
# Is the node a physical node?
#
# Parameters::
# * *node_name* (String): Node name
# Result::
# * Boolean: Is the node a physical node?
def is_node_physical?(node_name)
  return false unless @nodes_graph[node_name][:type] == :node

  @node_metadata[node_name][:physical_node]
end
|
|
509
|
+
|
|
510
|
+
# Output the graph to a given file at a given format.
#
# Parameters::
# * *file_name* (String): File name to output to.
# * *output_format* (Symbol): Output format to use (should be part of the plugins).
def write_graph(file_name, output_format)
  plugin_class = @plugins[output_format]
  raise "Unknown topographer plugin #{output_format}" if plugin_class.nil?

  plugin_class.new(self).write_graph(file_name)
end
|
|
522
|
+
|
|
523
|
+
# Get the title of a given node
#
# Parameters::
# * *node_name* (String): Node name
# Result::
# * String: Node title
def title_for(node_name)
  case @nodes_graph[node_name][:type]
  when :node
    private_ips = @node_metadata[node_name][:private_ips]
    "#{node_name} - #{private_ips.nil? || private_ips.empty? ? 'No IP' : private_ips.first}"
  when :cluster
    "#{node_name} (#{@nodes_graph[node_name][:includes].size} nodes)"
  when :unknown
    "#{node_name} - Unknown node"
  end
end
|
|
539
|
+
|
|
540
|
+
# Get the description of a given node
#
# Removed leftover debug code: `require 'byebug'` plus a breakpoint hardcoded
# on hostname 'xaesbghad51' — byebug is a development-only dependency and this
# would crash (LoadError) in any environment where it is not bundled.
#
# Parameters::
# * *node_name* (String): Node name
# Result::
# * String: Node description, or nil if none
def description_for(node_name)
  case @nodes_graph[node_name][:type]
  when :node
    @node_metadata[node_name][:description]
  when :cluster, :unknown
    # Clusters and unknown IPs have no description
    nil
  end
end
|
|
558
|
+
|
|
559
|
+
private
|
|
560
|
+
|
|
561
|
+
# Get the list of known IPs (private and public), and return each associated node.
# Computed once from the nodes' metadata, then memoized in @known_ips.
#
# Result::
# * Hash<String,String>: List of nodes per IP address
def known_ips
  return @known_ips if defined?(@known_ips)

  @known_ips = {}
  # Fill info from the metadata
  @nodes_handler.prefetch_metadata_of @nodes_handler.known_nodes, %i[private_ips public_ips]
  @nodes_handler.known_nodes.each do |node|
    %i[private_ips public_ips].each do |ip_type|
      # Metadata can be nil when the node has no IPs of this type
      (@nodes_handler.metadata_of(node, ip_type) || []).each do |ip|
        raise "Conflict: #{ip} is already associated to #{@known_ips[ip]}. Cannot associate it to #{node}." if @known_ips.key?(ip)

        @known_ips[ip] = node
      end
    end
  end
  @known_ips
end
|
|
585
|
+
|
|
586
|
+
# Get the list of known IP addresses matching a given IP mask.
# Results are memoized per (ip_def, ip_mask) pair.
#
# Parameters::
# * *ip_def* (String): The ip definition (without mask).
# * *ip_mask* (Integer): The IP mask in bits.
# Result::
# * Array<String>: The list of IP addresses matching this mask
def ips_matching_mask(ip_def, ip_mask)
  # Cache structure: Hash< ip_def, Hash< ip_mask, Array<ip> > >
  @ips_mask = {} unless defined?(@ips_mask)
  @ips_mask[ip_def] ||= {}
  @ips_mask[ip_def][ip_mask] ||= begin
    # For performance, keep a cache of all the IPAddress::IPv4 objects
    @ip_v4_cache = Hash[known_ips.keys.map { |ip| [ip, IPAddress::IPv4.new(ip)] }] unless defined?(@ip_v4_cache)
    ip_range = IPAddress::IPv4.new("#{ip_def}/#{ip_mask}")
    @ip_v4_cache.select { |_ip, ip_v4| ip_range.include?(ip_v4) }.keys
  end
end
|
|
607
|
+
|
|
608
|
+
# Get the list of 24 bits IP addresses matching a given IP mask.
# Results are memoized per (ip_def, ip_mask) pair.
#
# NOTE(review): only 172.16.x.0/24 candidate ranges are enumerated here —
# presumably the platform's private addressing plan; confirm before reusing
# with other address spaces.
#
# Parameters::
# * *ip_def* (String): The ip definition (without mask).
# * *ip_mask* (Integer): The IP mask in bits.
# Result::
# * Array<String>: The list of 24 bits IP addresses matching this mask
def ips_24_matching_mask(ip_def, ip_mask)
  # Cache structure: Hash< ip_def, Hash< ip_mask, Array<ip_24> > >
  @ips_24_mask = {} unless defined?(@ips_24_mask)
  @ips_24_mask[ip_def] ||= {}
  @ips_24_mask[ip_def][ip_mask] ||= begin
    ip_range = IPAddress::IPv4.new("#{ip_def}/#{ip_mask}")
    (0..255).each_with_object([]) do |ip_third, matching_ips|
      ip_24 = "172.16.#{ip_third}.0/24"
      matching_ips << ip_24 if ip_range.include?(IPAddress::IPv4.new(ip_24))
    end
  end
end
|
|
631
|
+
|
|
632
|
+
# Create a cluster of type IP range
#
# Parameters::
# * *ip* (String): The IP
# Result::
# * Hash<Symbol,Object>: Corresponding information to be stored in the graph
def ip_range_graph_info(ip)
  ipv4 = IPAddress::IPv4.new(ip)
  # Membership predicate: a node belongs to this range if its IP is inside it.
  # The proc references itself (captured local variable) so that non-IP
  # clusters are tested recursively through all their included nodes.
  includes_proc = proc do |node_name|
    if @nodes_graph[node_name][:ipv4].nil?
      if is_node_cluster?(node_name)
        # Here the node is a cluster that is not an IP range:
        # it belongs to the range only if all its members do.
        @nodes_graph[node_name][:includes].all? { |included_node_name| includes_proc.call(included_node_name) }
      else
        # A node without any known IP never belongs to an IP range
        false
      end
    else
      ipv4.include?(@nodes_graph[node_name][:ipv4])
    end
  end
  {
    type: :cluster,
    connections: {},
    includes: [],
    ipv4: ipv4,
    includes_proc: includes_proc
  }
end
|
|
660
|
+
|
|
661
|
+
# Filter a JSON object.
# Any key from the JSON that is a leaf of the filter structure (mapped to nil)
# will be removed; non-nil filter values recurse into nested hashes.
#
# Parameters::
# * *json* (Object): The JSON object
# * *json_filter* (Object): The JSON filter (or nil if none)
# Result::
# * *Object*: The filtered JSON object
def json_filter_out(json, json_filter)
  # Only hashes can be filtered; anything else passes through untouched
  return json unless json.is_a?(Hash) && !json_filter.nil?

  json.each_with_object({}) do |(key, value), filtered_json|
    # A key mapped to nil in the filter is a leaf to be removed
    next if json_filter.key?(key) && json_filter[key].nil?

    filtered_json[key] = json_filter_out(value, json_filter[key])
  end
end
|
|
683
|
+
|
|
684
|
+
# Get the complete JSON of a node, read from its dumped JSON file and filtered
# through the configured ignore_json_keys.
#
# Parameters::
# * *hostname* (String): Host name to fetch the complete JSON
# Result::
# * Hash: The corresponding JSON info ({} when the dump file is missing)
def node_json_for(hostname)
  json_file_name = "#{@config[:json_files_dir]}/#{hostname}.json"
  unless File.exist?(json_file_name)
    log_warn "Missing JSON file #{json_file_name}"
    return {}
  end

  json_filter_out(JSON.parse(File.read(json_file_name)), @config[:ignore_json_keys])
end
|
|
699
|
+
|
|
700
|
+
# Scrape connections from a JSON object.
# For each node found, return the list of labels.
# Strings are scanned for IP addresses/ranges and known hostnames; arrays and
# hashes are walked recursively, hash keys contributing to the label path.
#
# Parameters::
# * *json* (Object): JSON object
# * *current_ref* (String): The current reference. nil for the root.
# Result::
# * Hash<String,Array<String>>: List of references for each node.
def connections_from_json(json, current_ref = nil)
  nodes = {}
  if json.is_a?(String)
    # Look for any IP
    json.scan(@ip_regexp).each do |(ip_def, _grp_match, ip_mask_str)|
      # A bare IP (no /mask suffix) is treated as a /32
      ip_mask = ip_mask_str.nil? ? 32 : ip_mask_str.to_i
      # Canonical name of the node/group representing this IP or range
      ip_str =
        if ip_mask == 32
          ip_def
        elsif ip_mask <= 24
          # Masks wider than or equal to /24 are normalized on a .0 boundary
          "#{ip_def.split('.')[0..2].join('.')}.0/#{ip_mask}"
        else
          "#{ip_def}/#{ip_mask}"
        end
      # First check that we don't ignore this IP range
      unless @ips_ignored.key?(ip_str)
        connected_node_name =
          if @nodes_graph.key?(ip_str)
            # IP group already exists
            ip_str
          elsif @config[:ignore_ips].any? { |ip_regexp| ip_str =~ ip_regexp }
            # This IP should be ignored
            @ips_ignored[ip_str] = nil
            nil
          else
            # New group to create.
            if ip_mask <= 24
              # This group will include all needed ip_24 IPs.
              # Compute the list of 24 bits IPs that are referenced here.
              ip_24_list =
                if ip_mask == 24
                  [ip_str]
                else
                  ips_24_matching_mask(ip_def, ip_mask).select do |ip|
                    # NOTE(review): this guard tests ip_str (the whole range), not
                    # the per-iteration ip — looks intentional to skip re-checking
                    # once the range is ignored, but confirm it should not be ip.
                    unless @ips_ignored.key?(ip_str)
                      # Check if we should ignore it.
                      @ips_ignored[ip] = nil if @config[:ignore_ips].any? { |ip_regexp| ip =~ ip_regexp }
                    end
                    !@ips_ignored.key?(ip)
                  end
                end
              if ip_24_list.empty?
                # All IPs of the group are to be ignored
                nil
              elsif ip_24_list.size == 1
                # Just create 1 group.
                ip_24 = ip_24_list.first
                @nodes_graph[ip_24] = ip_range_graph_info(ip_24) unless @nodes_graph.key?(ip_24)
                ip_24
              else
                # Create all ip_24 groups.
                ip_24_list.each do |included_ip_24|
                  @nodes_graph[included_ip_24] = ip_range_graph_info(included_ip_24) unless @nodes_graph.key?(included_ip_24)
                end
                # Create a super group of it
                @nodes_graph[ip_str] = ip_range_graph_info(ip_str)
                @nodes_graph[ip_str][:includes] = ip_24_list
                ip_str
              end
            else
              # This group will include all individual IP addresses.
              ips_list =
                if ip_mask == 32
                  [ip_def]
                else
                  ips_matching_mask(ip_def, ip_mask).select do |ip|
                    # NOTE(review): same ip_str-vs-ip guard as above — verify.
                    unless @ips_ignored.key?(ip_str)
                      # Check if we should ignore it.
                      @ips_ignored[ip] = nil if @config[:ignore_ips].any? { |ip_regexp| ip =~ ip_regexp }
                    end
                    !@ips_ignored.key?(ip)
                  end
                end
              if ips_list.empty?
                # All IPs of the group are to be ignored
                nil
              elsif ips_list.size == 1
                # Just create 1 node.
                ip = ips_list.first
                if @ips_to_host.key?(ip)
                  # Known hostname
                  @ips_to_host[ip]
                else
                  # Unknown IP that should be added.
                  @nodes_graph[ip] = {
                    type: :unknown,
                    connections: {},
                    includes: [],
                    ipv4: IPAddress::IPv4.new(ip)
                  }
                  ip
                end
              else
                # Create a super group of it
                @nodes_graph[ip_str] = ip_range_graph_info(ip_str)
                @nodes_graph[ip_str][:includes] = ips_list.map { |included_ip| @ips_to_host[included_ip] }
                ip_str
              end
            end
          end
        # Record the reference unless the whole range ended up ignored
        unless connected_node_name.nil?
          nodes[connected_node_name] = [] unless nodes.key?(connected_node_name)
          nodes[connected_node_name] << current_ref
        end
      end
    end
    # Look for any known hostname
    json.split(@non_word_regexp).each do |hostname|
      if @known_nodes.key?(hostname)
        nodes[hostname] = [] unless nodes.key?(hostname)
        nodes[hostname] << current_ref
      end
    end
  elsif json.is_a?(Array)
    json.each do |sub_json|
      # Merge sub-results, keeping the union of reference labels
      nodes.merge!(connections_from_json(sub_json, current_ref)) { |_node_name, refs1, refs2| (refs1 + refs2).uniq }
    end
  elsif json.is_a?(Hash)
    json.each do |sub_json_1, sub_json_2|
      # Keys are scanned too; string keys extend the reference path for their values
      nodes.merge!(connections_from_json(sub_json_1, current_ref)) { |_node_name, refs1, refs2| (refs1 + refs2).uniq }
      key_is_str = sub_json_1.is_a?(String)
      nodes.merge!(connections_from_json(sub_json_2, key_is_str ? (current_ref.nil? ? sub_json_1 : "#{current_ref}/#{sub_json_1}") : current_ref)) { |_hostname, refs1, refs2| (refs1 + refs2).uniq } if !key_is_str || !@config[:ignore_any_json_keys].include?(sub_json_1)
    end
  end
  nodes
end
|
|
834
|
+
|
|
835
|
+
# Fill all connections of a given hostname, up to a given recursive level.
# Idempotent: a hostname already present in @nodes_graph is not re-parsed,
# which also prevents infinite recursion on connection cycles.
#
# Parameters::
# * *hostname* (String): Hostname to parse for connections.
# * *max_level* (Integer): Maximum level of recursive passes (nil for no limit).
def parse_connections_for(hostname, max_level)
  unless @nodes_graph.key?(hostname)
    # Connections are scraped from the node's dumped JSON file
    @nodes_graph[hostname] = {
      type: :node,
      connections: connections_from_json(node_json_for(hostname)),
      includes: []
    }
    # Attach the node's IPv4 (first private IP) when available, used by IP-range clusters
    @nodes_graph[hostname][:ipv4] = IPAddress::IPv4.new(@node_metadata[hostname][:private_ips].first) if !@node_metadata[hostname][:private_ips].nil? && !@node_metadata[hostname][:private_ips].empty?
    # nil max_level means unlimited depth; otherwise decrement and stop at 0
    sub_max_level = max_level.nil? ? nil : max_level - 1
    if sub_max_level != -1
      @nodes_graph[hostname][:connections].keys.each do |connected_hostname|
        parse_connections_for(connected_hostname, sub_max_level)
      end
    end
  end
end
|
|
856
|
+
|
|
857
|
+
end
|
|
858
|
+
|
|
859
|
+
end
|