hybrid_platforms_conductor 32.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/bin/check-node +24 -0
- data/bin/deploy +12 -0
- data/bin/dump_nodes_json +12 -0
- data/bin/free_ips +23 -0
- data/bin/free_veids +17 -0
- data/bin/get_impacted_nodes +43 -0
- data/bin/last_deploys +56 -0
- data/bin/nodes_to_deploy +104 -0
- data/bin/report +10 -0
- data/bin/run +39 -0
- data/bin/setup +11 -0
- data/bin/ssh_config +14 -0
- data/bin/test +13 -0
- data/bin/topograph +54 -0
- data/lib/hybrid_platforms_conductor/action.rb +82 -0
- data/lib/hybrid_platforms_conductor/actions_executor.rb +307 -0
- data/lib/hybrid_platforms_conductor/bitbucket.rb +123 -0
- data/lib/hybrid_platforms_conductor/cmd_runner.rb +188 -0
- data/lib/hybrid_platforms_conductor/cmdb.rb +34 -0
- data/lib/hybrid_platforms_conductor/common_config_dsl/bitbucket.rb +78 -0
- data/lib/hybrid_platforms_conductor/common_config_dsl/confluence.rb +43 -0
- data/lib/hybrid_platforms_conductor/common_config_dsl/file_system_tests.rb +110 -0
- data/lib/hybrid_platforms_conductor/common_config_dsl/idempotence_tests.rb +38 -0
- data/lib/hybrid_platforms_conductor/config.rb +263 -0
- data/lib/hybrid_platforms_conductor/confluence.rb +119 -0
- data/lib/hybrid_platforms_conductor/connector.rb +84 -0
- data/lib/hybrid_platforms_conductor/credentials.rb +127 -0
- data/lib/hybrid_platforms_conductor/current_dir_monitor.rb +42 -0
- data/lib/hybrid_platforms_conductor/deployer.rb +598 -0
- data/lib/hybrid_platforms_conductor/executable.rb +145 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/bash.rb +44 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/interactive.rb +44 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/my_action.rb.sample +79 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/remote_bash.rb +63 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/ruby.rb +69 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/action/scp.rb +61 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/config.rb +78 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/host_ip.rb +104 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/host_keys.rb +114 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/my_cmdb.rb.sample +129 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/cmdb/platform_handlers.rb +66 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/connector/my_connector.rb.sample +156 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/connector/ssh.rb +702 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/platform_handler/platform_handler_plugin.rb.sample +292 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/docker.rb +148 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/my_provisioner.rb.sample +103 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/podman.rb +125 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/proxmox.rb +522 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/proxmox/proxmox_waiter.rb +707 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/provisioner/proxmox/reserve_proxmox_container +122 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/confluence.rb +69 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/mediawiki.rb +164 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/my_report_plugin.rb.sample +88 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/stdout.rb +61 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/report/templates/confluence_inventory.html.erb +33 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/bitbucket_conf.rb +137 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/can_be_checked.rb +21 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/check_deploy_and_idempotence.rb +112 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/check_from_scratch.rb +35 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/connection.rb +28 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/deploy_freshness.rb +44 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/deploy_from_scratch.rb +36 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/deploy_removes_root_access.rb +49 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/divergence.rb +25 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/executables.rb +46 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/file_system.rb +45 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/file_system_hdfs.rb +45 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/hostname.rb +25 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/idempotence.rb +77 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/ip.rb +38 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/jenkins_ci_conf.rb +56 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/jenkins_ci_masters_ok.rb +54 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/linear_strategy.rb +47 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/local_users.rb +82 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/mounts.rb +120 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/my_test_plugin.rb.sample +143 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/orphan_files.rb +74 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/ports.rb +85 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/private_ips.rb +38 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/public_ips.rb +38 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/spectre-meltdown-checker.sh +1930 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/spectre.rb +56 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/veids.rb +31 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test/vulnerabilities.rb +159 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/confluence.rb +122 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/my_test_report.rb.sample +48 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/stdout.rb +120 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/templates/_confluence_errors_status.html.erb +46 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/templates/_confluence_gauge.html.erb +49 -0
- data/lib/hybrid_platforms_conductor/hpc_plugins/test_report/templates/confluence.html.erb +242 -0
- data/lib/hybrid_platforms_conductor/io_router.rb +70 -0
- data/lib/hybrid_platforms_conductor/json_dumper.rb +88 -0
- data/lib/hybrid_platforms_conductor/logger_helpers.rb +319 -0
- data/lib/hybrid_platforms_conductor/mutex_dir +76 -0
- data/lib/hybrid_platforms_conductor/nodes_handler.rb +597 -0
- data/lib/hybrid_platforms_conductor/parallel_threads.rb +97 -0
- data/lib/hybrid_platforms_conductor/platform_handler.rb +188 -0
- data/lib/hybrid_platforms_conductor/platforms_handler.rb +118 -0
- data/lib/hybrid_platforms_conductor/plugin.rb +53 -0
- data/lib/hybrid_platforms_conductor/plugins.rb +101 -0
- data/lib/hybrid_platforms_conductor/provisioner.rb +181 -0
- data/lib/hybrid_platforms_conductor/report.rb +31 -0
- data/lib/hybrid_platforms_conductor/reports_handler.rb +84 -0
- data/lib/hybrid_platforms_conductor/services_handler.rb +274 -0
- data/lib/hybrid_platforms_conductor/test.rb +141 -0
- data/lib/hybrid_platforms_conductor/test_by_service.rb +22 -0
- data/lib/hybrid_platforms_conductor/test_report.rb +282 -0
- data/lib/hybrid_platforms_conductor/tests_runner.rb +590 -0
- data/lib/hybrid_platforms_conductor/thycotic.rb +92 -0
- data/lib/hybrid_platforms_conductor/topographer.rb +859 -0
- data/lib/hybrid_platforms_conductor/topographer/plugin.rb +20 -0
- data/lib/hybrid_platforms_conductor/topographer/plugins/graphviz.rb +127 -0
- data/lib/hybrid_platforms_conductor/topographer/plugins/json.rb +72 -0
- data/lib/hybrid_platforms_conductor/topographer/plugins/my_topographer_output_plugin.rb.sample +37 -0
- data/lib/hybrid_platforms_conductor/topographer/plugins/svg.rb +30 -0
- data/lib/hybrid_platforms_conductor/version.rb +5 -0
- data/spec/hybrid_platforms_conductor_test.rb +159 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/bash_spec.rb +43 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/interactive_spec.rb +18 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/remote_bash_spec.rb +102 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/ruby_spec.rb +108 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions/scp_spec.rb +79 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/actions_spec.rb +199 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connection_spec.rb +212 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/cli_options_spec.rb +125 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/config_dsl_spec.rb +50 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/connectable_nodes_spec.rb +28 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/connections_spec.rb +448 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/global_helpers_spec.rb +313 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/node_helpers_spec.rb +32 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/connectors/ssh/remote_actions_spec.rb +134 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/logging_spec.rb +256 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/parallel_spec.rb +338 -0
- data/spec/hybrid_platforms_conductor_test/api/actions_executor/timeout_spec.rb +101 -0
- data/spec/hybrid_platforms_conductor_test/api/cmd_runner_spec.rb +165 -0
- data/spec/hybrid_platforms_conductor_test/api/config_spec.rb +238 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/check_spec.rb +9 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/deploy_spec.rb +243 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/parse_deploy_output_spec.rb +104 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioner_spec.rb +131 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/docker/Dockerfile +10 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/docker_spec.rb +123 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/podman_spec.rb +211 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/config_dsl_spec.rb +126 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/create_spec.rb +290 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/destroy_spec.rb +43 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/ip_spec.rb +60 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/proxmox.json +3 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/destroy_vm_spec.rb +82 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/expired_containers_spec.rb +786 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/ips_assignment_spec.rb +112 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/other_lxc_containers_resources_spec.rb +190 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/pve_node_resources_spec.rb +200 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/retries_spec.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/reserve_proxmox_container/vm_ids_assignment_spec.rb +67 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/start_spec.rb +79 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/state_spec.rb +28 -0
- data/spec/hybrid_platforms_conductor_test/api/deployer/provisioners/proxmox/stop_spec.rb +41 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs/config_spec.rb +33 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs/host_ip_spec.rb +64 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs/host_keys_spec.rb +133 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs/platform_handlers_spec.rb +19 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs_plugins_api_spec.rb +446 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/common_spec.rb +127 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/git_diff_impacts_spec.rb +318 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/nodes_selectors_spec.rb +132 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/platform_handlers_plugins_api_spec.rb +60 -0
- data/spec/hybrid_platforms_conductor_test/api/nodes_handler/several_platforms_spec.rb +58 -0
- data/spec/hybrid_platforms_conductor_test/api/platform_handler_spec.rb +97 -0
- data/spec/hybrid_platforms_conductor_test/api/platforms_handler_spec.rb +104 -0
- data/spec/hybrid_platforms_conductor_test/api/plugins_spec.rb +243 -0
- data/spec/hybrid_platforms_conductor_test/api/reports_handler_spec.rb +44 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/actions_to_deploy_spec.rb +121 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/deploy_allowed_spec.rb +142 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/log_info_spec.rb +101 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/package_spec.rb +388 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/parse_deploy_output_spec.rb +274 -0
- data/spec/hybrid_platforms_conductor_test/api/services_handler/prepare_for_deploy_spec.rb +264 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/common_spec.rb +194 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/global_spec.rb +37 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/node_check_spec.rb +194 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/node_spec.rb +137 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/node_ssh_spec.rb +257 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/platform_spec.rb +110 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/reports_spec.rb +367 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/test_plugins/bitbucket_conf_spec.rb +111 -0
- data/spec/hybrid_platforms_conductor_test/api/tests_runner/test_reports_plugins/confluence_spec.rb +29 -0
- data/spec/hybrid_platforms_conductor_test/cmdb_plugins/test_cmdb.rb +166 -0
- data/spec/hybrid_platforms_conductor_test/cmdb_plugins/test_cmdb2.rb +93 -0
- data/spec/hybrid_platforms_conductor_test/cmdb_plugins/test_cmdb_others.rb +60 -0
- data/spec/hybrid_platforms_conductor_test/cmdb_plugins/test_cmdb_others2.rb +58 -0
- data/spec/hybrid_platforms_conductor_test/executables/check-node_spec.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/executables/deploy_spec.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/executables/get_impacted_nodes_spec.rb +158 -0
- data/spec/hybrid_platforms_conductor_test/executables/last_deploys_spec.rb +173 -0
- data/spec/hybrid_platforms_conductor_test/executables/nodes_to_deploy_spec.rb +283 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/actions_executor_spec.rb +28 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/cmd_runner_spec.rb +28 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/common_spec.rb +67 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/deployer_spec.rb +251 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/nodes_handler_spec.rb +111 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/nodes_selectors_spec.rb +71 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/reports_handler_spec.rb +54 -0
- data/spec/hybrid_platforms_conductor_test/executables/options/tests_runner_spec.rb +139 -0
- data/spec/hybrid_platforms_conductor_test/executables/report_spec.rb +60 -0
- data/spec/hybrid_platforms_conductor_test/executables/run_spec.rb +173 -0
- data/spec/hybrid_platforms_conductor_test/executables/ssh_config_spec.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/executables/test_spec.rb +41 -0
- data/spec/hybrid_platforms_conductor_test/helpers/actions_executor_helpers.rb +98 -0
- data/spec/hybrid_platforms_conductor_test/helpers/cmd_runner_helpers.rb +92 -0
- data/spec/hybrid_platforms_conductor_test/helpers/cmdb_helpers.rb +37 -0
- data/spec/hybrid_platforms_conductor_test/helpers/config_helpers.rb +20 -0
- data/spec/hybrid_platforms_conductor_test/helpers/connector_ssh_helpers.rb +130 -0
- data/spec/hybrid_platforms_conductor_test/helpers/deployer_helpers.rb +149 -0
- data/spec/hybrid_platforms_conductor_test/helpers/deployer_test_helpers.rb +812 -0
- data/spec/hybrid_platforms_conductor_test/helpers/executables_helpers.rb +96 -0
- data/spec/hybrid_platforms_conductor_test/helpers/nodes_handler_helpers.rb +20 -0
- data/spec/hybrid_platforms_conductor_test/helpers/platform_handler_helpers.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/helpers/platforms_handler_helpers.rb +127 -0
- data/spec/hybrid_platforms_conductor_test/helpers/plugins_helpers.rb +48 -0
- data/spec/hybrid_platforms_conductor_test/helpers/provisioner_proxmox_helpers.rb +789 -0
- data/spec/hybrid_platforms_conductor_test/helpers/reports_handler_helpers.rb +29 -0
- data/spec/hybrid_platforms_conductor_test/helpers/services_handler_helpers.rb +20 -0
- data/spec/hybrid_platforms_conductor_test/helpers/tests_runner_helpers.rb +38 -0
- data/spec/hybrid_platforms_conductor_test/mocked_lib/my_test_gem/hpc_plugins/test_plugin_type/test_plugin_id1.rb +22 -0
- data/spec/hybrid_platforms_conductor_test/mocked_lib/my_test_gem/hpc_plugins/test_plugin_type/test_plugin_id2.rb +22 -0
- data/spec/hybrid_platforms_conductor_test/mocked_lib/my_test_gem2/sub_dir/hpc_plugins/test_plugin_type/test_plugin_id3.rb +26 -0
- data/spec/hybrid_platforms_conductor_test/mocked_lib/my_test_gem2/sub_dir/hpc_plugins/test_plugin_type2/test_plugin_id4.rb +26 -0
- data/spec/hybrid_platforms_conductor_test/platform_handler_plugins/test.rb +225 -0
- data/spec/hybrid_platforms_conductor_test/platform_handler_plugins/test2.rb +11 -0
- data/spec/hybrid_platforms_conductor_test/report_plugin.rb +35 -0
- data/spec/hybrid_platforms_conductor_test/test_action.rb +66 -0
- data/spec/hybrid_platforms_conductor_test/test_connector.rb +151 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/global.rb +30 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/node.rb +53 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/node_check.rb +47 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/node_ssh.rb +42 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/platform.rb +50 -0
- data/spec/hybrid_platforms_conductor_test/test_plugins/several_checks.rb +50 -0
- data/spec/hybrid_platforms_conductor_test/test_provisioner.rb +95 -0
- data/spec/hybrid_platforms_conductor_test/tests_report_plugin.rb +49 -0
- data/spec/spec_helper.rb +111 -0
- metadata +566 -0
|
@@ -0,0 +1,141 @@
|
|
|
require 'hybrid_platforms_conductor/logger_helpers'
require 'hybrid_platforms_conductor/plugin'

module HybridPlatformsConductor

  # Common ancestor to any test class
  class Test < Plugin

    class << self

      # A NodesHandler instance that can be useful for test classes that need to access nodes information
      attr_accessor :nodes_handler

    end

    # Get errors encountered
    # Array<String>
    attr_reader :errors

    # Get the test name
    # String
    attr_reader :name

    # Get the platform being tested, or nil for global tests
    # PlatformHandler or nil
    attr_reader :platform

    # Get the node name being tested, or nil for global and platform tests
    # String or nil
    attr_reader :node

    # Expected failure, or nil if not expected to fail
    # String or nil
    attr_reader :expected_failure

    # Constructor
    #
    # Parameters::
    # * *logger* (Logger): Logger to be used
    # * *logger_stderr* (Logger): Logger to be used for stderr
    # * *config* (Config): Config to be used.
    # * *cmd_runner* (CmdRunner): CmdRunner that can be used by tests
    # * *nodes_handler* (NodesHandler): Nodes handler that can be used by tests
    # * *deployer* (Deployer): Deployer that can be used by tests
    # * *name* (String): Name of the test being instantiated [default: 'unknown_test']
    # * *platform* (PlatformHandler): Platform handler for which the test is instantiated, or nil if global or node specific [default: nil]
    # * *node* (String): Node name for which the test is instantiated, or nil if global or platform specific [default: nil]
    # * *expected_failure* (String or nil): Expected failure, or nil if not expected to fail [default: nil]
    def initialize(logger, logger_stderr, config, cmd_runner, nodes_handler, deployer, name: 'unknown_test', platform: nil, node: nil, expected_failure: nil)
      super(logger: logger, logger_stderr: logger_stderr, config: config)
      # Tooling made available to concrete test classes
      @cmd_runner = cmd_runner
      @nodes_handler = nodes_handler
      @deployer = deployer
      # Identity of this test instance (scope is given by platform/node being nil or not)
      @name = name
      @platform = platform
      @node = node
      @expected_failure = expected_failure
      # Start with a clean run state
      @errors = []
      @executed = false
    end

    # Get a String identifier of this test, useful for outputting messages
    #
    # Result::
    # * String: Identifier of this test
    def to_s
      # Most specific scope wins: node, then platform, then global
      test_desc = 'Global'
      test_desc = "Platform #{@platform.name}" unless platform.nil?
      test_desc = "Node #{@node}" unless node.nil?
      "#< Test #{name} - #{test_desc} >"
    end

    # Assert an equality
    #
    # Parameters::
    # * *tested_object* (Object): The object being tested
    # * *expected_object* (Object): The object being expected
    # * *error_msg* (String): Error message to associate in case of inequality
    # * *details* (String or nil): Additional details, or nil if none [default = nil]
    def assert_equal(tested_object, expected_object, error_msg, details = nil)
      return if tested_object == expected_object

      error error_msg, details
    end

    # Assert a String match
    #
    # Parameters::
    # * *tested_object* (String): The object being tested
    # * *expected_object* (Regex): The object being expected
    # * *error_msg* (String): Error message to associate in case of inequality
    # * *details* (String or nil): Additional details, or nil if none [default = nil]
    def assert_match(tested_object, expected_object, error_msg, details = nil)
      return if tested_object =~ expected_object

      error error_msg, details
    end

    # Register an error
    #
    # Parameters::
    # * *message* (String): The error message
    # * *details* (String or nil): Additional details, or nil if none [default = nil]
    def error(message, details = nil)
      # Expected failures are recorded but not logged as errors
      if @expected_failure.nil?
        details_suffix = details.nil? ? '' : "\n#{details}"
        log_error "[ #{self} ] - #{message}#{details_suffix}"
      end
      @errors << message
    end

    # Mark the test has being executed
    def executed
      @executed = true
    end

    # Has the test been executed?
    #
    # Result::
    # * Boolean: Has the test been executed?
    def executed?
      @executed
    end

    # Limit the list of platform types for these tests.
    #
    # Result::
    # * Array<Symbol> or nil: List of platform types allowed for this test, or nil for all
    def self.only_on_platforms
      nil
    end

    # Limit the list of nodes for these tests.
    #
    # Result::
    # * Array<String or Regex> or nil: List of nodes allowed for this test, or nil for all. Regular expressions matching node names can also be used.
    def self.only_on_nodes
      nil
    end

  end

end
|
@@ -0,0 +1,22 @@
|
|
|
module HybridPlatformsConductor

  # Ancestor for all tests that should be run just once per service
  class TestByService < Test

    # Limit the list of nodes for these tests.
    #
    # Result::
    # * Array<String or Regex> or nil: List of nodes allowed for this test, or nil for all. Regular expressions matching node names can also be used.
    def self.only_on_nodes
      # Just 1 node per service and platform
      nodes_handler = Test.nodes_handler
      nodes_handler.prefetch_metadata_of nodes_handler.known_nodes, :services
      # Group nodes (sorted for determinism) by their sorted services list, and keep 1 representative per group
      grouped_nodes = nodes_handler.known_nodes.sort.group_by { |node| nodes_handler.get_services_of(node).sort }
      grouped_nodes.values.map(&:first)
    end

  end

end
|
@@ -0,0 +1,282 @@
|
|
|
1
|
+
require 'hybrid_platforms_conductor/logger_helpers'
|
|
2
|
+
require 'hybrid_platforms_conductor/plugin'
|
|
3
|
+
|
|
4
|
+
module HybridPlatformsConductor
|
|
5
|
+
|
|
6
|
+
# Base class for test reports plugins
|
|
7
|
+
class TestReport < Plugin
|
|
8
|
+
|
|
9
|
+
# Constructor
|
|
10
|
+
#
|
|
11
|
+
# Parameters::
|
|
12
|
+
# * *logger* (Logger): Logger to be used
|
|
13
|
+
# * *logger_stderr* (Logger): Logger to be used for stderr
|
|
14
|
+
# * *config* (Config): Config to be used.
|
|
15
|
+
# * *nodes_handler* (NodesHandler): Nodes handler that has been used by tests.
|
|
16
|
+
# * *tested_nodes* (Array<String>): List of nodes tests were run on.
|
|
17
|
+
# * *tested_platforms* (Array<PlatformHandler>): List of platforms tests were run on.
|
|
18
|
+
# * *tests* (Array<Test>): List of tests.
|
|
19
|
+
def initialize(logger, logger_stderr, config, nodes_handler, tested_nodes, tested_platforms, tests)
|
|
20
|
+
super(logger: logger, logger_stderr: logger_stderr, config: config)
|
|
21
|
+
@nodes_handler = nodes_handler
|
|
22
|
+
@tested_nodes = tested_nodes.uniq.sort
|
|
23
|
+
@tested_platforms = tested_platforms
|
|
24
|
+
@tests = tests
|
|
25
|
+
# Set additional variables that might get handy for reports
|
|
26
|
+
@global_test_names = global_tests.map(&:name).uniq.sort
|
|
27
|
+
@platform_test_names = platform_tests.map(&:name).uniq.sort
|
|
28
|
+
@node_test_names = node_tests.map(&:name).uniq.sort
|
|
29
|
+
# Always put global first
|
|
30
|
+
[@node_test_names, @platform_test_names, @global_test_names].each do |names_list|
|
|
31
|
+
if names_list.include?(:global)
|
|
32
|
+
names_list.delete(:global)
|
|
33
|
+
names_list.insert(0, :global)
|
|
34
|
+
end
|
|
35
|
+
end
|
|
36
|
+
end
|
|
37
|
+
|
|
38
|
+
private
|
|
39
|
+
|
|
40
|
+
# Return global tests
|
|
41
|
+
#
|
|
42
|
+
# Result::
|
|
43
|
+
# * Array<Test>: Global tests
|
|
44
|
+
def global_tests
|
|
45
|
+
@tests.select { |test| test.platform.nil? && test.node.nil? }
|
|
46
|
+
end
|
|
47
|
+
|
|
48
|
+
# Return platform tests
|
|
49
|
+
#
|
|
50
|
+
# Result::
|
|
51
|
+
# * Array<Test>: List of platform tests
|
|
52
|
+
def platform_tests
|
|
53
|
+
@tests.select { |test| !test.platform.nil? }
|
|
54
|
+
end
|
|
55
|
+
|
|
56
|
+
# Return node tests
|
|
57
|
+
#
|
|
58
|
+
# Result::
|
|
59
|
+
# * Array<Test>: List of node tests
|
|
60
|
+
def node_tests
|
|
61
|
+
@tests.select { |test| !test.node.nil? }
|
|
62
|
+
end
|
|
63
|
+
|
|
64
|
+
# Select tests corresponding to a given criteria
|
|
65
|
+
#
|
|
66
|
+
# Parameters::
|
|
67
|
+
# * *name* (String): Test name
|
|
68
|
+
# * *node* (String or nil): Node name, or nil for global/platform tests [default = nil]
|
|
69
|
+
# * *platform* (String or nil): Platform repository name, or nil for global/node tests. Ignored if node is set. [default = nil]
|
|
70
|
+
# Result::
|
|
71
|
+
# * Array<Test>: List of selected tests
|
|
72
|
+
def select_tests(name, node: nil, platform: nil)
|
|
73
|
+
@tests.select do |search_test|
|
|
74
|
+
search_test.name == name &&
|
|
75
|
+
search_test.node == node &&
|
|
76
|
+
search_test.platform == platform
|
|
77
|
+
end
|
|
78
|
+
end
|
|
79
|
+
|
|
80
|
+
# Is a given test supposed to have run?
|
|
81
|
+
#
|
|
82
|
+
# Parameters::
|
|
83
|
+
# * *name* (String): Test name
|
|
84
|
+
# * *node* (String or nil): Node name, or nil for global/platform tests [default = nil]
|
|
85
|
+
# * *platform* (String or nil): Platform repository name, or nil for global/node tests. Ignored if node is set. [default = nil]
|
|
86
|
+
# Result::
|
|
87
|
+
# * Boolean: Is a given test supposed to have run?
|
|
88
|
+
def should_have_been_tested?(name, node: nil, platform: nil)
|
|
89
|
+
!select_tests(name, node: node, platform: platform).empty?
|
|
90
|
+
end
|
|
91
|
+
|
|
92
|
+
# Does a given test on a given node have tests that have not been executed?
|
|
93
|
+
#
|
|
94
|
+
# Parameters::
|
|
95
|
+
# * *name* (String): Test name
|
|
96
|
+
# * *node* (String or nil): Node name, or nil for global/platform tests [default = nil]
|
|
97
|
+
# * *platform* (String or nil): Platform repository name, or nil for global/node tests. Ignored if node is set. [default = nil]
|
|
98
|
+
# Result::
|
|
99
|
+
# * Boolean: Does a given test on a given node have tests that have not been executed?
|
|
100
|
+
def missing_tests_for(name, node: nil, platform: nil)
|
|
101
|
+
select_tests(name, node: node, platform: platform).any? { |test| !test.executed? }
|
|
102
|
+
end
|
|
103
|
+
|
|
104
|
+
# Get the errors for a given test on a given node
|
|
105
|
+
#
|
|
106
|
+
# Parameters::
|
|
107
|
+
# * *name* (String): Test name
|
|
108
|
+
# * *node* (String or nil): Node name, or nil for global/platform tests [default = nil]
|
|
109
|
+
# * *platform* (String or nil): Platform repository name, or nil for global/node tests. Ignored if node is set. [default = nil]
|
|
110
|
+
# Result::
|
|
111
|
+
# * Array<String>: List of errors
|
|
112
|
+
def errors_for(name, node: nil, platform: nil)
|
|
113
|
+
select_tests(name, node: node, platform: platform).inject([]) { |errors, test| errors + test.errors }
|
|
114
|
+
end
|
|
115
|
+
|
|
116
|
+
# Return errors grouped by a given criteria from a list of tests.
|
|
117
|
+
# Don't create groups having no errors.
|
|
118
|
+
# Sort group keys.
|
|
119
|
+
#
|
|
120
|
+
# Parameters::
|
|
121
|
+
# * *tests* (Array<Test>): List of tests to group errors from
|
|
122
|
+
# * *group_criterias* (Symbol or Proc or Array<Symbol or Proc>): Ordered list (or single item) of group by criterias. Each criteria applies on a list of tests and can be one of the following:
|
|
123
|
+
# * Symbol: Named criteria. Can be one of the following:
|
|
124
|
+
# * test_name: Group by test name
|
|
125
|
+
# * platform: Group by platform
|
|
126
|
+
# * node: Group by node
|
|
127
|
+
# * Proc: Code given directly to the group_by method of an Array<test>:
|
|
128
|
+
# * Parameters::
|
|
129
|
+
# * *test* (Test): Test to extract group by criteria from
|
|
130
|
+
# * Result::
|
|
131
|
+
# * Object: The group by criteria
|
|
132
|
+
# * *filter* (Symbol or nil): Filter errors to be returned, or nil for no filter. Values can be: [default: nil]
|
|
133
|
+
# * *only_as_expected*: Only report errors that were expected
|
|
134
|
+
# * *only_as_non_expected*: Only report errors that were not expected.
|
|
135
|
+
# Result::
|
|
136
|
+
# * Hash or Array<String>: Resulting tree structure, following the group by criterias, giving as leaves the grouped list of errors. If the criterias are empty, return the list of errors.
|
|
137
|
+
def group_errors(tests, *group_criterias, filter: nil)
|
|
138
|
+
if group_criterias.empty?
|
|
139
|
+
tests.inject([]) do |errors, test|
|
|
140
|
+
errors +
|
|
141
|
+
case filter
|
|
142
|
+
when nil
|
|
143
|
+
test.errors
|
|
144
|
+
when :only_as_expected
|
|
145
|
+
test.expected_failure ? test.errors : []
|
|
146
|
+
when :only_as_non_expected
|
|
147
|
+
!test.expected_failure ? test.errors : []
|
|
148
|
+
else
|
|
149
|
+
raise "Unknown errors filter: #{fiter}"
|
|
150
|
+
end
|
|
151
|
+
end
|
|
152
|
+
else
|
|
153
|
+
first_criteria = group_criterias.first
|
|
154
|
+
if first_criteria.is_a?(Symbol)
|
|
155
|
+
first_criteria =
|
|
156
|
+
case first_criteria
|
|
157
|
+
when :test_name
|
|
158
|
+
proc { |test| test.name }
|
|
159
|
+
when :platform
|
|
160
|
+
proc { |test| test.platform }
|
|
161
|
+
when :node
|
|
162
|
+
proc { |test| test.node }
|
|
163
|
+
else
|
|
164
|
+
raise "Unknown group criteria name: #{first_criteria}"
|
|
165
|
+
end
|
|
166
|
+
end
|
|
167
|
+
groups = {}
|
|
168
|
+
tests.group_by(&first_criteria).each do |first_group, grouped_tests|
|
|
169
|
+
next_grouped_errors = group_errors(grouped_tests, *group_criterias[1..-1], filter: filter)
|
|
170
|
+
groups[first_group] = next_grouped_errors unless next_grouped_errors.empty?
|
|
171
|
+
end
|
|
172
|
+
Hash[groups.sort]
|
|
173
|
+
end
|
|
174
|
+
end
|
|
175
|
+
|
|
176
|
+
    # Get nodes associated to nodes lists.
    # Also include 2 special lists: 'No list' and 'All'.
    #
    # Result::
    # * Hash< String, Hash<Symbol,Object> >: For each nodes list, we have the following properties:
    #   * *nodes* (Array<String>): Nodes in the list
    #   * *tested_nodes* (Array<String>): Tested nodes in the list
    #   * *tested_nodes_in_error* (Array<String>): Tested nodes in error in the list
    #   * *tested_nodes_in_error_as_expected* (Array<String>): Tested nodes in error in the list that are part of the expected failures
    def nodes_by_nodes_list
      # Start from all known nodes and remove every node found in a named list,
      # so that what remains at the end forms the special 'No list' entry.
      no_list_nodes = @nodes_handler.known_nodes
      Hash[(
        @nodes_handler.known_nodes_lists.sort.map do |nodes_list|
          nodes_from_list = @nodes_handler.nodes_from_list(nodes_list, ignore_unknowns: true)
          # Side effect: shrink the 'No list' accumulator as we visit each named list
          no_list_nodes -= nodes_from_list
          [nodes_list, nodes_from_list]
        end + [
          ['No list', no_list_nodes],
          ['All', @nodes_handler.known_nodes]
        ]
      ).map do |list_name, list_nodes|
        [
          list_name,
          {
            nodes: list_nodes,
            # Intersections with tested/errored node sets computed from the run results.
            # NOTE(review): node_tests is presumably a sibling accessor returning node-level tests — confirm.
            tested_nodes: list_nodes & @tested_nodes,
            tested_nodes_in_error: list_nodes & group_errors(node_tests, :node).keys,
            tested_nodes_in_error_as_expected: list_nodes & group_errors(node_tests, :node, filter: :only_as_expected).keys
          }
        ]
      end]
    end
|
|
208
|
+
|
|
209
|
+
# Flatten a tree hash.
|
|
210
|
+
# For example:
|
|
211
|
+
# flatten_hash(
|
|
212
|
+
# foo: 'bar',
|
|
213
|
+
# hello: {
|
|
214
|
+
# world: 'Hello World',
|
|
215
|
+
# bro: 'What's up dude?'
|
|
216
|
+
# },
|
|
217
|
+
# a: {
|
|
218
|
+
# b: {
|
|
219
|
+
# c: 'd'
|
|
220
|
+
# }
|
|
221
|
+
# }
|
|
222
|
+
# )
|
|
223
|
+
# will give
|
|
224
|
+
# {
|
|
225
|
+
# :foo => 'bar',
|
|
226
|
+
# :'hello.world' => 'Hello World',
|
|
227
|
+
# :'hello.bro' => 'What's up dude?',
|
|
228
|
+
# :'a.b.c' => 'd'
|
|
229
|
+
# }
|
|
230
|
+
#
|
|
231
|
+
# Parameters::
|
|
232
|
+
# * *hash* (Hash): The tree hash to flatten
|
|
233
|
+
# Result::
|
|
234
|
+
# * Hash: Flatten tree hash
|
|
235
|
+
def flatten_hash(hash)
|
|
236
|
+
hash.each_with_object({}) do |(k, v), h|
|
|
237
|
+
if v.is_a? Hash
|
|
238
|
+
flatten_hash(v).map { |h_k, h_v| h["#{k}.#{h_k}".to_sym] = h_v }
|
|
239
|
+
else
|
|
240
|
+
h[k] = v
|
|
241
|
+
end
|
|
242
|
+
end
|
|
243
|
+
end
|
|
244
|
+
|
|
245
|
+
# Classify a given list of tests by their statuses
|
|
246
|
+
#
|
|
247
|
+
# Parameters::
|
|
248
|
+
# * *tests* (Array<Test>): List of tests to group
|
|
249
|
+
# Result::
|
|
250
|
+
# * Hash<Symbol, Object >: Info for this list of tests. Properties are:
|
|
251
|
+
# * *success* (Array<Test>): Successful tests
|
|
252
|
+
# * *unexpected_error* (Array<Test>): Tests in unexpected error
|
|
253
|
+
# * *expected_error* (Array<Test>): Tests in expected error
|
|
254
|
+
# * *not_run* (Array<Test>): Tests that were not run
|
|
255
|
+
# * *status* (Symbol): The global status of those tests (only the first matching status from this ordered list is returned):
|
|
256
|
+
# * *success*: All tests are successful
|
|
257
|
+
# * *unexpected_error*: Some tests have unexpected errors
|
|
258
|
+
# * *expected_error*: Some tests have expected errors
|
|
259
|
+
# * *not_run*: All non-successful tests have not been run
|
|
260
|
+
def classify_tests(tests)
|
|
261
|
+
info = {
|
|
262
|
+
not_run: tests.select { |test| !test.executed? },
|
|
263
|
+
success: tests.select { |test| test.executed? && test.errors.empty? },
|
|
264
|
+
unexpected_error: tests.select { |test| test.executed? && !test.errors.empty? && test.expected_failure.nil? },
|
|
265
|
+
expected_error: tests.select { |test| test.executed? && !test.errors.empty? && !test.expected_failure.nil? }
|
|
266
|
+
}
|
|
267
|
+
info[:status] =
|
|
268
|
+
if info[:success].size == tests.size
|
|
269
|
+
:success
|
|
270
|
+
elsif !info[:unexpected_error].empty?
|
|
271
|
+
:unexpected_error
|
|
272
|
+
elsif !info[:expected_error].empty?
|
|
273
|
+
:expected_error
|
|
274
|
+
else
|
|
275
|
+
:not_run
|
|
276
|
+
end
|
|
277
|
+
info
|
|
278
|
+
end
|
|
279
|
+
|
|
280
|
+
end
|
|
281
|
+
|
|
282
|
+
end
|
|
@@ -0,0 +1,590 @@
|
|
|
1
|
+
require 'logger'
|
|
2
|
+
require 'hybrid_platforms_conductor/actions_executor'
|
|
3
|
+
require 'hybrid_platforms_conductor/logger_helpers'
|
|
4
|
+
require 'hybrid_platforms_conductor/nodes_handler'
|
|
5
|
+
require 'hybrid_platforms_conductor/parallel_threads'
|
|
6
|
+
require 'hybrid_platforms_conductor/plugins'
|
|
7
|
+
require 'hybrid_platforms_conductor/test'
|
|
8
|
+
require 'hybrid_platforms_conductor/test_report'
|
|
9
|
+
|
|
10
|
+
module HybridPlatformsConductor
|
|
11
|
+
|
|
12
|
+
# Class running tests
|
|
13
|
+
class TestsRunner
|
|
14
|
+
|
|
15
|
+
include LoggerHelpers, ParallelThreads
|
|
16
|
+
|
|
17
|
+
# List of tests to execute [default: []]
|
|
18
|
+
# Array<Symbol>
|
|
19
|
+
attr_accessor :tests
|
|
20
|
+
|
|
21
|
+
# List of reports to use [default: []]
|
|
22
|
+
# Array<Symbol>
|
|
23
|
+
attr_accessor :reports
|
|
24
|
+
|
|
25
|
+
# Do we skip running check-node? [default: false]
|
|
26
|
+
# Boolean
|
|
27
|
+
attr_accessor :skip_run
|
|
28
|
+
|
|
29
|
+
# Number of threads max to use for tests connecting to nodes [default: 64]
|
|
30
|
+
# Integer
|
|
31
|
+
attr_accessor :max_threads_connection_on_nodes
|
|
32
|
+
|
|
33
|
+
# Number of threads max to use for tests running at node level [default: 8]
|
|
34
|
+
# Integer
|
|
35
|
+
attr_accessor :max_threads_nodes
|
|
36
|
+
|
|
37
|
+
# Number of threads max to use for tests running at platform level [default: 8]
|
|
38
|
+
# Integer
|
|
39
|
+
attr_accessor :max_threads_platforms
|
|
40
|
+
|
|
41
|
+
    # Constructor
    #
    # Parameters::
    # * *logger* (Logger): Logger to be used [default: Logger.new(STDOUT)]
    # * *logger_stderr* (Logger): Logger to be used for stderr [default: Logger.new(STDERR)]
    # * *config* (Config): Config to be used. [default: Config.new]
    # * *cmd_runner* (Cmdrunner): CmdRunner to be used [default: CmdRunner.new]
    # * *platforms_handler* (PlatformsHandler): Platforms handler to be used [default: PlatformsHandler.new]
    # * *nodes_handler* (NodesHandler): Nodes handler to be used [default: NodesHandler.new]
    # * *actions_executor* (ActionsExecutor): Actions Executor to be used for the tests [default: ActionsExecutor.new]
    # * *deployer* (Deployer): Deployer to be used for the tests needed why-run deployments [default: Deployer.new]
    def initialize(
      logger: Logger.new(STDOUT),
      logger_stderr: Logger.new(STDERR),
      config: Config.new,
      cmd_runner: CmdRunner.new,
      platforms_handler: PlatformsHandler.new,
      nodes_handler: NodesHandler.new,
      actions_executor: ActionsExecutor.new,
      deployer: Deployer.new
    )
      init_loggers(logger, logger_stderr)
      @config = config
      @cmd_runner = cmd_runner
      @platforms_handler = platforms_handler
      @nodes_handler = nodes_handler
      @actions_executor = actions_executor
      @deployer = deployer
      # Wire the platforms handler to its collaborators before any platform is queried
      @platforms_handler.inject_dependencies(nodes_handler: @nodes_handler, actions_executor: @actions_executor)
      # NOTE(review): this sets a class-level attribute shared by all Test instances — confirm
      # that concurrent TestsRunner instances with different nodes handlers are not expected.
      Test.nodes_handler = nodes_handler
      # The list of tests plugins, with their associated class
      # Hash< Symbol, Class >
      @tests_plugins = Plugins.new(:test, logger: @logger, logger_stderr: @logger_stderr)
      # The list of tests reports plugins, with their associated class
      # Hash< Symbol, Class >
      @reports_plugins = Plugins.new(:test_report, logger: @logger, logger_stderr: @logger_stderr)
      # Register test classes from platforms
      @platforms_handler.known_platforms.each do |platform|
        if platform.respond_to?(:tests)
          platform.tests.each do |test_name, test_class|
            @tests_plugins[test_name] = test_class
          end
        end
      end
      # Do we skip running check-node?
      @skip_run = false
      # List of tests to be performed
      @tests = []
      # List of reports to be used
      @reports = []
      # Parallelism limits for the different test execution phases
      @max_threads_connection_on_nodes = 64
      @max_threads_nodes = 8
      @max_threads_platforms = 8
    end
|
|
93
|
+
|
|
94
|
+
    # Complete an option parser with options meant to control this tests runner
    #
    # Parameters::
    # * *options_parser* (OptionParser): The option parser to complete
    def options_parse(options_parser)
      options_parser.separator ''
      options_parser.separator 'Tests runner options:'
      options_parser.on('-i', '--tests-list FILE_NAME', 'Specify a tests file name. The file should contain a list of tests name (1 per line). Can be used several times.') do |file_name|
        # Read test names from the file, skipping blank lines and comment lines starting with #
        @tests.concat(
          File.read(file_name).
            split("\n").
            reject { |line| line.strip.empty? || line =~ /^#.+/ }.
            map(&:to_sym)
        )
      end
      options_parser.on('-k', '--skip-run', 'Skip running the check-node commands for real, and just analyze existing run logs.') do
        @skip_run = true
      end
      options_parser.on('-r', '--report REPORT', "Specify a report name. Can be used several times. Can be all for all reports. Possible values: #{@reports_plugins.keys.sort.join(', ')} (defaults to stdout).") do |report|
        @reports << report.to_sym
      end
      options_parser.on('-t', '--test TEST', "Specify a test name. Can be used several times. Can be all for all tests. Possible values: #{@tests_plugins.keys.sort.join(', ')} (defaults to all).") do |test_name|
        @tests << test_name.to_sym
      end
      # Integer() raises on malformed values instead of silently defaulting to 0 like to_i
      options_parser.on('--max-threads-connections NBR_THREADS', "Specify the max number of threads to parallelize tests connecting on nodes (defaults to #{@max_threads_connection_on_nodes}).") do |nbr_threads|
        @max_threads_connection_on_nodes = Integer(nbr_threads)
      end
      options_parser.on('--max-threads-nodes NBR_THREADS', "Specify the max number of threads to parallelize tests at node level (defaults to #{@max_threads_nodes}).") do |nbr_threads|
        @max_threads_nodes = Integer(nbr_threads)
      end
      options_parser.on('--max-threads-platforms NBR_THREADS', "Specify the max number of threads to parallelize tests at platform level (defaults to #{@max_threads_platforms}).") do |nbr_threads|
        @max_threads_platforms = Integer(nbr_threads)
      end
    end
|
|
128
|
+
|
|
129
|
+
    # Run the tests for a defined list of nodes selectors
    #
    # Parameters::
    # * *nodes_selectors* (Array<Object>): List of nodes selectors on which tests should be run
    # Result::
    # * Integer: An exit code:
    #   * 0: Successful.
    #   * 1: Some tests have failed.
    def run_tests(nodes_selectors)
      # Compute the resolved list of tests to perform
      @tests << :all if @tests.empty?
      @tests = @tests_plugins.keys if @tests.include?(:all)
      @tests.uniq!
      @tests.sort!
      @reports = [:stdout] if @reports.empty?
      @reports = @reports_plugins.keys if @reports.include?(:all)
      @reports.uniq!
      @reports.sort!
      unknown_tests = @tests - @tests_plugins.keys
      raise "Unknown test names: #{unknown_tests.join(', ')}" unless unknown_tests.empty?
      @nodes = @nodes_handler.select_nodes(nodes_selectors).uniq.sort

      # Resolve the expected failures from the config.
      # Expected failures at node level
      # Hash< Symbol, Hash< String, String > >
      # Hash< test_name, Hash< node, reason > >
      @node_expected_failures = {}
      @config.expected_failures.each do |expected_failure_info|
        selected_nodes = @nodes_handler.select_from_nodes_selector_stack(expected_failure_info[:nodes_selectors_stack])
        expected_failure_info[:tests].each do |test_name|
          @node_expected_failures[test_name] = {} unless @node_expected_failures.key?(test_name)
          selected_nodes.each do |node|
            # Several config rules can target the same node: concatenate the reasons
            if @node_expected_failures[test_name].key?(node)
              @node_expected_failures[test_name][node] += " + #{expected_failure_info[:reason]}"
            else
              @node_expected_failures[test_name][node] = expected_failure_info[:reason]
            end
          end
        end
      end
      # Expected failures at platform level
      # Hash< Symbol, Hash< String, String > >
      # Hash< test_name, Hash< platform, reason > >
      @platform_expected_failures = {}
      @platforms_handler.known_platforms.each do |platform|
        platform_nodes = platform.known_nodes
        @node_expected_failures.each do |test_name, expected_failures_for_test|
          # A platform-level failure is expected only when every node of the platform is expected to fail
          if (platform_nodes - expected_failures_for_test.keys).empty?
            # We have an expected failure for this test
            @platform_expected_failures[test_name] = {} unless @platform_expected_failures.key?(test_name)
            @platform_expected_failures[test_name][platform.name] = expected_failures_for_test.values.uniq.join(' + ')
          end
        end
      end

      # Keep a list of all tests that have run for the report
      # Array< Test >
      @tests_run = []

      # Execute the different categories of tests, from cheapest to most expensive
      run_tests_global
      run_tests_platform
      run_tests_for_nodes
      run_tests_connection_on_nodes
      run_tests_on_check_nodes

      @tested_platforms = @tests_run.map { |test| test.platform }.compact.uniq.sort

      # Check that tests that were expected to fail did not succeed.
      @tests_run.each do |test|
        if test.executed?
          expected_failure = test.expected_failure
          if expected_failure
            if test.errors.empty?
              # Should have failed
              error(
                "Test #{test} was marked to fail (#{expected_failure}) but it succeeded. Please remove it from the expected failures in case the issue has been resolved.",
                platform: test.platform,
                node: test.node,
                force_failure: true
              )
            else
              out "Expected failure for #{test} (#{expected_failure}):\n#{test.errors.map { |error| " - #{error}" }.join("\n")}".yellow
            end
          end
        end
      end
      # If all tests were executed, make sure that there are no expected failures that have not even been tested.
      # NOTE(review): `@tests_plugins.keys - @tests == []` could be written `(...).empty?` — left as-is (doc-only pass).
      if @tests_plugins.keys - @tests == []
        @node_expected_failures.each do |test_name, test_expected_failures|
          test_expected_failures.each do |node, expected_failure|
            # Check that a test has been run for this expected failure
            unless @tests_run.find do |test|
                test.name == test_name &&
                (
                  (test.node.nil? && node == '') ||
                  (!test.node.nil? && node == test.node)
                )
              end
              error("A test named #{test_name} for node #{node} was expected to fail (#{expected_failure}), but no test has been run. Please remove it from the expected failures if this expected failure is obsolete.")
            end
          end
        end
      end

      # Produce reports
      @reports.each do |report|
        begin
          @reports_plugins[report].new(@logger, @logger_stderr, @config, @nodes_handler, @nodes, @tested_platforms, @tests_run).report
        rescue
          # A failing report plugin should not abort the other reports
          log_error "Uncaught exception while producing report #{report}: #{$!}\n#{$!.backtrace.join("\n")}"
        end
      end

      out
      # Exit code: 0 only when every error found was an expected one
      if @tests_run.all? { |test| test.errors.empty? || !test.expected_failure.nil? }
        out '===== No unexpected errors ====='.green.bold
        0
      else
        out '===== Some errors were found. Check output. ====='.red.bold
        1
      end
    end
|
|
251
|
+
|
|
252
|
+
private
|
|
253
|
+
|
|
254
|
+
# Report an error, linked eventually to a given platform or node
|
|
255
|
+
#
|
|
256
|
+
# Parameters::
|
|
257
|
+
# * *message* (String): Error to be logged
|
|
258
|
+
# * *platform* (PlatformHandler or nil): PlatformHandler for a platform's test, or nil for a global or node test [default: nil]
|
|
259
|
+
# * *node* (String): Node for which the test is instantiated, or nil if global or platform [default: nil]
|
|
260
|
+
# * *force_failure* (Boolean): If true, then ignore expected failures for this error [default: false]
|
|
261
|
+
def error(message, platform: nil, node: nil, force_failure: false)
|
|
262
|
+
global_test = new_test(nil, platform: platform, node: node, ignore_expected_failure: force_failure)
|
|
263
|
+
global_test.errors << message
|
|
264
|
+
global_test.executed
|
|
265
|
+
@tests_run << global_test
|
|
266
|
+
end
|
|
267
|
+
|
|
268
|
+
# Instantiate a new test
|
|
269
|
+
#
|
|
270
|
+
# Parameters::
|
|
271
|
+
# * *test_name* (Symbol or nil): Test name to instantiate, or nil for unnamed tests
|
|
272
|
+
# * *platform* (PlatformHandler or nil): PlatformHandler for a platform's test, or nil for a global or node test [default: nil]
|
|
273
|
+
# * *node* (String or nil): Node for a node's test, or nil for a global or platform test [default: nil]
|
|
274
|
+
# * *ignore_expected_failure* (Boolean): If true, then ignore expected failures for this error [default: false]
|
|
275
|
+
# Result::
|
|
276
|
+
# * Test: Corresponding test
|
|
277
|
+
def new_test(test_name, platform: nil, node: nil, ignore_expected_failure: false)
|
|
278
|
+
(test_name.nil? ? Test : @tests_plugins[test_name]).new(
|
|
279
|
+
@logger,
|
|
280
|
+
@logger_stderr,
|
|
281
|
+
@config,
|
|
282
|
+
@cmd_runner,
|
|
283
|
+
@nodes_handler,
|
|
284
|
+
@deployer,
|
|
285
|
+
name: test_name.nil? ? :global : test_name,
|
|
286
|
+
platform: platform,
|
|
287
|
+
node: node,
|
|
288
|
+
expected_failure: if ignore_expected_failure
|
|
289
|
+
nil
|
|
290
|
+
elsif !node.nil?
|
|
291
|
+
# Node test
|
|
292
|
+
@node_expected_failures.dig(test_name.nil? ? 'global' : test_name, node)
|
|
293
|
+
elsif !platform.nil?
|
|
294
|
+
# Platform test
|
|
295
|
+
@platform_expected_failures.dig(test_name.nil? ? 'global' : test_name, platform.name)
|
|
296
|
+
else
|
|
297
|
+
# Global test
|
|
298
|
+
nil
|
|
299
|
+
end
|
|
300
|
+
)
|
|
301
|
+
end
|
|
302
|
+
|
|
303
|
+
    # Run a test method on a set of test subjects.
    # Provide harmonized logging, timings, exception handling...
    # Make sure the tests should be run before running it.
    #
    # Parameters::
    # * *title* (String): The title of such tests
    # * *test_method* (Symbol): The test method to run (defined in tests plugins)
    # * *test_subjects* (Array< Hash<Symbol,Object> >): List of test subjects. A test subject is defined as properties mapping the signature of the should_test_be_run_on and new_test methods.
    # * *nbr_threads_max* (Integer): If > 1 then run the tests in parallel (with a limit in number of threads fixed by the value). Only when debug mode is false. [default: 1]
    # * *tests_preparation* (Proc or nil): Code called to prepare tests, once test subjects have been selected, or nil if none [default: nil]
    #   * Parameters::
    #     * *selected_tests* (Array<Test>): List of selected tests.
    # * *test_execution* (Proc): Code called to execute a test. Defaults to calling the test_method method on the test instance
    #   * Parameters::
    #     * *test* (Test): The test instance to be executed
    def run_tests_on_subjects(
      title,
      test_method,
      test_subjects,
      nbr_threads_max: 1,
      tests_preparation: nil,
      test_execution: proc { |test| test.send(test_method) }
    )
      # Gather the list of tests to execute: only plugins implementing test_method,
      # and only subjects that should_test_be_run_on accepts
      tests_to_run = @tests.map do |test_name|
        if @tests_plugins[test_name].method_defined?(test_method)
          test_subjects.map do |test_subject|
            should_test_be_run_on(test_name, **test_subject) ? new_test(test_name, **test_subject) : nil
          end.compact
        else
          []
        end
      end.flatten
      unless tests_to_run.empty?
        section "Run #{tests_to_run.size} #{title}" do
          tests_preparation.call(tests_to_run) unless tests_preparation.nil?
          # Parallelism is disabled in debug mode to keep logs readable
          for_each_element_in(
            tests_to_run,
            parallel: !log_debug? && nbr_threads_max > 1,
            nbr_threads_max: nbr_threads_max,
            progress: "Run #{title}"
          ) do |test|
            # Human-readable scope of the test for log lines
            test_category =
              if test.platform.nil? && test.node.nil?
                'Global'
              elsif test.node.nil?
                "Platform #{test.platform.name}"
              elsif test.platform.nil?
                "Node #{test.node}"
              else
                "Platform #{test.platform.name} / Node #{test.node}"
              end
            out "[ #{Time.now.utc.strftime('%F %T')} ] - [ #{test_category} ] - [ #{test.name} ] - Start test..."
            begin_time = Time.now
            begin
              test_execution.call(test)
            rescue
              # An exception in one test must not abort the whole run: record it as a test error
              test.error "Uncaught exception during test: #{$!}", $!.backtrace.join("\n")
            end
            end_time = Time.now
            test.executed
            out "[ #{Time.now.utc.strftime('%F %T')} ] - [ #{test_category} ] - [ #{test.name} ] - Test finished in #{end_time - begin_time} seconds."
          end
          @tests_run.concat(tests_to_run)
        end
      end
    end
|
|
370
|
+
|
|
371
|
+
# Run tests that are global
|
|
372
|
+
def run_tests_global
|
|
373
|
+
run_tests_on_subjects(
|
|
374
|
+
'global tests',
|
|
375
|
+
:test,
|
|
376
|
+
[{}]
|
|
377
|
+
)
|
|
378
|
+
end
|
|
379
|
+
|
|
380
|
+
# Run tests that are platform specific
|
|
381
|
+
def run_tests_platform
|
|
382
|
+
run_tests_on_subjects(
|
|
383
|
+
'platform tests',
|
|
384
|
+
:test_on_platform,
|
|
385
|
+
@platforms_handler.known_platforms.map { |platform| { platform: platform } },
|
|
386
|
+
nbr_threads_max: @max_threads_platforms
|
|
387
|
+
)
|
|
388
|
+
end
|
|
389
|
+
|
|
390
|
+
# Timeout in seconds given to the connection itself
|
|
391
|
+
# Integer
|
|
392
|
+
CONNECTION_TIMEOUT = 20
|
|
393
|
+
|
|
394
|
+
# Timeout in seconds given to a command by default
|
|
395
|
+
# Integer
|
|
396
|
+
DEFAULT_CMD_TIMEOUT = 5
|
|
397
|
+
|
|
398
|
+
# Separator used to differentiate different commands executed in stdout.
|
|
399
|
+
# It's important that this separator could not be the result of any command output.
|
|
400
|
+
# String
|
|
401
|
+
CMD_SEPARATOR = '===== TEST COMMAND EXECUTION ===== Separator generated by Hybrid Platforms Conductor test framework ====='
|
|
402
|
+
|
|
403
|
+
    # Run tests that are node specific and require a connection to the node
    def run_tests_connection_on_nodes
      run_tests_on_subjects(
        'connected tests',
        :test_on_node,
        @nodes.map { |node| { node: node } },
        tests_preparation: proc do |selected_tests|
          # Gather the list of commands to be run on each node with their corresponding test info, per node
          # Hash< String, Array< [ String, Hash<Symbol,Object> ] > >
          @cmds_to_run = {}
          selected_tests.each do |test|
            begin
              test.test_on_node.each do |cmd, test_info|
                # test_info may be either a Hash of options or just a validator Proc: normalize to a Hash
                test_info_normalized = test_info.is_a?(Hash) ? test_info.clone : { validator: test_info }
                test_info_normalized[:timeout] = DEFAULT_CMD_TIMEOUT unless test_info_normalized.key?(:timeout)
                test_info_normalized[:test] = test
                @cmds_to_run[test.node] = [] unless @cmds_to_run.key?(test.node)
                @cmds_to_run[test.node] << [
                  cmd,
                  test_info_normalized
                ]
              end
            rescue
              # A failing preparation marks the test as executed with an error, without aborting others
              test.error "Uncaught exception during test preparation: #{$!}", $!.backtrace.join("\n")
              test.executed
            end
          end
          # Compute the timeout that will be applied, from the max timeout sum for every node that has tests to run
          timeout = CONNECTION_TIMEOUT + @cmds_to_run.map do |_node, cmds_list|
            cmds_list.inject(0) { |total_timeout, (_cmd, test_info)| test_info[:timeout] + total_timeout }
          end.max
          # Run commands on nodes, in grouped way to avoid too many connections, per node
          # Hash< String, Array<String> >
          @test_cmds = Hash[@cmds_to_run.map do |node, cmds_list|
            [
              node,
              {
                # Each command is wrapped with separator echoes (stdout and stderr) and an exit-code echo,
                # so the combined output can be split back per command afterwards
                remote_bash: cmds_list.map do |(cmd, _test_info)|
                  [
                    "echo '#{CMD_SEPARATOR}'",
                    ">&2 echo '#{CMD_SEPARATOR}'",
                    cmd,
                    "echo \"$?\""
                  ]
                end.flatten
              }
            ]
          end]
          section "Run test commands on #{@test_cmds.keys.size} connected nodes (timeout to #{timeout} secs)" do
            start_time = Time.now
            # NOTE(review): nbr_secs appears unused — confirm before removing
            nbr_secs = nil
            @actions_executor.max_threads = @max_threads_connection_on_nodes
            @actions_result = @actions_executor.execute_actions(
              @test_cmds,
              concurrent: !log_debug?,
              log_to_dir: nil,
              log_to_stdout: log_debug?,
              timeout: timeout
            )
            log_debug "----- Total commands executed in #{(Time.now - start_time).round(1)} secs"
          end
        end,
        test_execution: proc do |test|
          exit_status, stdout, stderr = @actions_result[test.node]
          # A Symbol exit status means the connector itself failed (e.g. timeout, connection error)
          if exit_status.is_a?(Symbol)
            test.error "Error while executing tests: #{exit_status}: #{stderr}"
          else
            log_debug <<~EOS
              ----- Commands for #{test.node}:
              #{@test_cmds[test.node][:remote_bash].join("\n")}
              ----- STDOUT:
              #{stdout}
              ----- STDERR:
              #{stderr}
              -----
            EOS
            # Skip the first section, as it can contain SSH banners
            cmd_stdouts = stdout.split("#{CMD_SEPARATOR}\n")[1..-1]
            cmd_stdouts = [] if cmd_stdouts.nil?
            cmd_stderrs = stderr.split("#{CMD_SEPARATOR}\n")[1..-1]
            cmd_stderrs = [] if cmd_stderrs.nil?
            @cmds_to_run[test.node].zip(cmd_stdouts, cmd_stderrs).each do |(cmd, test_info), cmd_stdout, cmd_stderr|
              # Find the section that corresponds to this test
              if test_info[:test] == test
                cmd_stdout = '' if cmd_stdout.nil?
                cmd_stderr = '' if cmd_stderr.nil?
                stdout_lines = cmd_stdout.split("\n")
                # Last line of stdout is the return code
                return_code = stdout_lines.empty? ? :command_cant_run : Integer(stdout_lines.last)
                test.error "Command '#{cmd}' returned error code #{return_code}", "----- STDOUT:\n#{stdout_lines[0..-2].join("\n")}\n----- STDERR:\n#{cmd_stderr}" unless return_code == 0
                # The validator receives stdout (without the trailing return-code line), stderr and the return code
                test_info[:validator].call(stdout_lines[0..-2], cmd_stderr.split("\n"), return_code)
              end
            end
          end
        end
      )
    end
|
|
500
|
+
|
|
501
|
+
# Run tests that are node specific
|
|
502
|
+
def run_tests_for_nodes
|
|
503
|
+
run_tests_on_subjects(
|
|
504
|
+
'node tests',
|
|
505
|
+
:test_for_node,
|
|
506
|
+
@nodes.map { |node| { node: node } },
|
|
507
|
+
nbr_threads_max: @max_threads_nodes
|
|
508
|
+
)
|
|
509
|
+
end
|
|
510
|
+
|
|
511
|
+
# Timeout in seconds given to a check-node run
|
|
512
|
+
# Integer
|
|
513
|
+
CHECK_NODE_TIMEOUT = 30 * 60 # 30 minutes
|
|
514
|
+
|
|
515
|
+
# Run tests that use check-node results.
#
# Preparation phase: unless @skip_run is set, perform a why-run deployment
# on every node under test and store the per-node outputs in @outputs
# (node => [exit_status, stdout, stderr]). When @skip_run is set, reuse the
# run logs previously saved under run_logs/ instead of deploying.
# Execution phase: feed each test plugin with its node's captured output.
def run_tests_on_check_nodes
  run_tests_on_subjects(
    'check-node tests',
    :test_on_check_node,
    @nodes.map { |node| { node: node } },
    tests_preparation: proc do |selected_tests|
      nodes_to_test = selected_tests.map { |test| test.node }.uniq.sort
      @outputs =
        if @skip_run
          # Reuse previously recorded run logs instead of deploying.
          Hash[nodes_to_test.map do |node|
            run_log_file_name = "#{@config.hybrid_platforms_dir}/run_logs/#{node}.stdout"
            [
              node,
              # TODO: Find a way to also save stderr and the status code
              # File.exist? replaces the deprecated File.exists? alias (removed in Ruby 3.2).
              [0, File.exist?(run_log_file_name) ? File.read(run_log_file_name) : nil, '']
            ]
          end]
        else
          # Why-run deploy on all nodes
          @deployer.concurrent_execution = !log_debug?
          @deployer.use_why_run = true
          @deployer.timeout = CHECK_NODE_TIMEOUT
          begin
            @deployer.deploy_on(nodes_to_test)
          rescue
            # If an exception occurred, make sure all concerned nodes are reporting the error
            nodes_to_test.each do |node|
              error "Error while checking check-node output: #{$!}#{log_debug? ? "\n#{$!.backtrace.join("\n")}" : ''}", node: node
            end
            {}
          end
        end
    end,
    test_execution: proc do |test|
      exit_status, stdout, stderr = @outputs[test.node]
      if stdout.nil?
        test.error 'No check-node log file found despite the run of check-node.'
      elsif stdout.is_a?(Symbol)
        # The deployer reports failures to run as a Symbol in place of stdout.
        test.error "Check-node run failed: #{stdout}."
      else
        test.error "Check-node returned error code #{exit_status}" unless exit_status == 0
        begin
          test.test_on_check_node(stdout, stderr, exit_status)
        rescue
          # Shield the runner from exceptions raised by individual test plugins.
          test.error "Uncaught exception during test: #{$!}", $!.backtrace.join("\n")
        end
      end
    end
  )
end
|
|
566
|
+
|
|
567
|
+
# Should the given test name be run on a given node or platform?
#
# Parameters::
# * *test_name* (String): The test name.
# * *node* (String or nil): Node name, or nil for a platform or global test. [default: nil]
# * *platform* (PlatformHandler or nil): Platform or nil for a node or global test. [default: nil]
# Result::
# * Boolean: Should the given test name be run on a given node or platform?
def should_test_be_run_on(test_name, node: nil, platform: nil)
  test_plugin = @tests_plugins[test_name]
  unless node.nil?
    restricted_nodes = test_plugin.only_on_nodes
    # No node restriction declared: any node is eligible.
    return true if restricted_nodes.nil?
    return restricted_nodes.any? do |node_selector|
      if node_selector.is_a?(String)
        node_selector == node
      else
        # Selector is a Regexp matched against the node name.
        !node.match(node_selector).nil?
      end
    end
  end
  unless platform.nil?
    restricted_platforms = test_plugin.only_on_platforms
    # No platform restriction declared: any platform type is eligible.
    return true if restricted_platforms.nil?
    return restricted_platforms.include?(platform.platform_type)
  end
  # Global tests should always be run
  true
end
|
|
587
|
+
|
|
588
|
+
end
|
|
589
|
+
|
|
590
|
+
end
|