runbooks 0.2.3__py3-none-any.whl → 0.6.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- conftest.py +26 -0
- jupyter-agent/.env.template +2 -0
- jupyter-agent/.gitattributes +35 -0
- jupyter-agent/README.md +16 -0
- jupyter-agent/app.py +256 -0
- jupyter-agent/cloudops-agent.png +0 -0
- jupyter-agent/ds-system-prompt.txt +154 -0
- jupyter-agent/jupyter-agent.png +0 -0
- jupyter-agent/llama3_template.jinja +123 -0
- jupyter-agent/requirements.txt +9 -0
- jupyter-agent/utils.py +409 -0
- runbooks/__init__.py +71 -3
- runbooks/__main__.py +13 -0
- runbooks/aws/ec2_describe_instances.py +1 -1
- runbooks/aws/ec2_run_instances.py +8 -2
- runbooks/aws/ec2_start_stop_instances.py +17 -4
- runbooks/aws/ec2_unused_volumes.py +5 -1
- runbooks/aws/s3_create_bucket.py +4 -2
- runbooks/aws/s3_list_objects.py +6 -1
- runbooks/aws/tagging_lambda_handler.py +13 -2
- runbooks/aws/tags.json +12 -0
- runbooks/base.py +353 -0
- runbooks/cfat/README.md +49 -0
- runbooks/cfat/__init__.py +74 -0
- runbooks/cfat/app.ts +644 -0
- runbooks/cfat/assessment/__init__.py +40 -0
- runbooks/cfat/assessment/asana-import.csv +39 -0
- runbooks/cfat/assessment/cfat-checks.csv +31 -0
- runbooks/cfat/assessment/cfat.txt +520 -0
- runbooks/cfat/assessment/collectors.py +200 -0
- runbooks/cfat/assessment/jira-import.csv +39 -0
- runbooks/cfat/assessment/runner.py +387 -0
- runbooks/cfat/assessment/validators.py +290 -0
- runbooks/cfat/cli.py +103 -0
- runbooks/cfat/docs/asana-import.csv +24 -0
- runbooks/cfat/docs/cfat-checks.csv +31 -0
- runbooks/cfat/docs/cfat.txt +335 -0
- runbooks/cfat/docs/checks-output.png +0 -0
- runbooks/cfat/docs/cloudshell-console-run.png +0 -0
- runbooks/cfat/docs/cloudshell-download.png +0 -0
- runbooks/cfat/docs/cloudshell-output.png +0 -0
- runbooks/cfat/docs/downloadfile.png +0 -0
- runbooks/cfat/docs/jira-import.csv +24 -0
- runbooks/cfat/docs/open-cloudshell.png +0 -0
- runbooks/cfat/docs/report-header.png +0 -0
- runbooks/cfat/models.py +1026 -0
- runbooks/cfat/package-lock.json +5116 -0
- runbooks/cfat/package.json +38 -0
- runbooks/cfat/report.py +496 -0
- runbooks/cfat/reporting/__init__.py +46 -0
- runbooks/cfat/reporting/exporters.py +337 -0
- runbooks/cfat/reporting/formatters.py +496 -0
- runbooks/cfat/reporting/templates.py +135 -0
- runbooks/cfat/run-assessment.sh +23 -0
- runbooks/cfat/runner.py +69 -0
- runbooks/cfat/src/actions/check-cloudtrail-existence.ts +43 -0
- runbooks/cfat/src/actions/check-config-existence.ts +37 -0
- runbooks/cfat/src/actions/check-control-tower.ts +37 -0
- runbooks/cfat/src/actions/check-ec2-existence.ts +46 -0
- runbooks/cfat/src/actions/check-iam-users.ts +50 -0
- runbooks/cfat/src/actions/check-legacy-cur.ts +30 -0
- runbooks/cfat/src/actions/check-org-cloudformation.ts +30 -0
- runbooks/cfat/src/actions/check-vpc-existence.ts +43 -0
- runbooks/cfat/src/actions/create-asanaimport.ts +14 -0
- runbooks/cfat/src/actions/create-backlog.ts +372 -0
- runbooks/cfat/src/actions/create-jiraimport.ts +15 -0
- runbooks/cfat/src/actions/create-report.ts +616 -0
- runbooks/cfat/src/actions/define-account-type.ts +51 -0
- runbooks/cfat/src/actions/get-enabled-org-policy-types.ts +40 -0
- runbooks/cfat/src/actions/get-enabled-org-services.ts +26 -0
- runbooks/cfat/src/actions/get-idc-info.ts +34 -0
- runbooks/cfat/src/actions/get-org-da-accounts.ts +34 -0
- runbooks/cfat/src/actions/get-org-details.ts +35 -0
- runbooks/cfat/src/actions/get-org-member-accounts.ts +44 -0
- runbooks/cfat/src/actions/get-org-ous.ts +35 -0
- runbooks/cfat/src/actions/get-regions.ts +22 -0
- runbooks/cfat/src/actions/zip-assessment.ts +27 -0
- runbooks/cfat/src/types/index.d.ts +147 -0
- runbooks/cfat/tests/__init__.py +141 -0
- runbooks/cfat/tests/test_cli.py +340 -0
- runbooks/cfat/tests/test_integration.py +290 -0
- runbooks/cfat/tests/test_models.py +505 -0
- runbooks/cfat/tests/test_reporting.py +354 -0
- runbooks/cfat/tsconfig.json +16 -0
- runbooks/cfat/webpack.config.cjs +27 -0
- runbooks/config.py +260 -0
- runbooks/finops/__init__.py +88 -0
- runbooks/finops/aws_client.py +245 -0
- runbooks/finops/cli.py +151 -0
- runbooks/finops/cost_processor.py +410 -0
- runbooks/finops/dashboard_runner.py +448 -0
- runbooks/finops/helpers.py +355 -0
- runbooks/finops/main.py +14 -0
- runbooks/finops/profile_processor.py +174 -0
- runbooks/finops/types.py +66 -0
- runbooks/finops/visualisations.py +80 -0
- runbooks/inventory/.gitignore +354 -0
- runbooks/inventory/ArgumentsClass.py +261 -0
- runbooks/inventory/Inventory_Modules.py +6130 -0
- runbooks/inventory/LandingZone/delete_lz.py +1075 -0
- runbooks/inventory/README.md +1320 -0
- runbooks/inventory/__init__.py +62 -0
- runbooks/inventory/account_class.py +532 -0
- runbooks/inventory/all_my_instances_wrapper.py +123 -0
- runbooks/inventory/aws_decorators.py +201 -0
- runbooks/inventory/cfn_move_stack_instances.py +1526 -0
- runbooks/inventory/check_cloudtrail_compliance.py +614 -0
- runbooks/inventory/check_controltower_readiness.py +1107 -0
- runbooks/inventory/check_landingzone_readiness.py +711 -0
- runbooks/inventory/cloudtrail.md +727 -0
- runbooks/inventory/collectors/__init__.py +20 -0
- runbooks/inventory/collectors/aws_compute.py +518 -0
- runbooks/inventory/collectors/aws_networking.py +275 -0
- runbooks/inventory/collectors/base.py +222 -0
- runbooks/inventory/core/__init__.py +19 -0
- runbooks/inventory/core/collector.py +303 -0
- runbooks/inventory/core/formatter.py +296 -0
- runbooks/inventory/delete_s3_buckets_objects.py +169 -0
- runbooks/inventory/discovery.md +81 -0
- runbooks/inventory/draw_org_structure.py +748 -0
- runbooks/inventory/ec2_vpc_utils.py +341 -0
- runbooks/inventory/find_cfn_drift_detection.py +272 -0
- runbooks/inventory/find_cfn_orphaned_stacks.py +719 -0
- runbooks/inventory/find_cfn_stackset_drift.py +733 -0
- runbooks/inventory/find_ec2_security_groups.py +669 -0
- runbooks/inventory/find_landingzone_versions.py +201 -0
- runbooks/inventory/find_vpc_flow_logs.py +1221 -0
- runbooks/inventory/inventory.sh +659 -0
- runbooks/inventory/list_cfn_stacks.py +558 -0
- runbooks/inventory/list_cfn_stackset_operation_results.py +252 -0
- runbooks/inventory/list_cfn_stackset_operations.py +734 -0
- runbooks/inventory/list_cfn_stacksets.py +453 -0
- runbooks/inventory/list_config_recorders_delivery_channels.py +681 -0
- runbooks/inventory/list_ds_directories.py +354 -0
- runbooks/inventory/list_ec2_availability_zones.py +286 -0
- runbooks/inventory/list_ec2_ebs_volumes.py +244 -0
- runbooks/inventory/list_ec2_instances.py +425 -0
- runbooks/inventory/list_ecs_clusters_and_tasks.py +562 -0
- runbooks/inventory/list_elbs_load_balancers.py +411 -0
- runbooks/inventory/list_enis_network_interfaces.py +526 -0
- runbooks/inventory/list_guardduty_detectors.py +568 -0
- runbooks/inventory/list_iam_policies.py +404 -0
- runbooks/inventory/list_iam_roles.py +518 -0
- runbooks/inventory/list_iam_saml_providers.py +359 -0
- runbooks/inventory/list_lambda_functions.py +882 -0
- runbooks/inventory/list_org_accounts.py +446 -0
- runbooks/inventory/list_org_accounts_users.py +354 -0
- runbooks/inventory/list_rds_db_instances.py +406 -0
- runbooks/inventory/list_route53_hosted_zones.py +318 -0
- runbooks/inventory/list_servicecatalog_provisioned_products.py +575 -0
- runbooks/inventory/list_sns_topics.py +360 -0
- runbooks/inventory/list_ssm_parameters.py +402 -0
- runbooks/inventory/list_vpc_subnets.py +433 -0
- runbooks/inventory/list_vpcs.py +422 -0
- runbooks/inventory/lockdown_cfn_stackset_role.py +224 -0
- runbooks/inventory/models/__init__.py +24 -0
- runbooks/inventory/models/account.py +192 -0
- runbooks/inventory/models/inventory.py +309 -0
- runbooks/inventory/models/resource.py +247 -0
- runbooks/inventory/recover_cfn_stack_ids.py +205 -0
- runbooks/inventory/requirements.txt +12 -0
- runbooks/inventory/run_on_multi_accounts.py +211 -0
- runbooks/inventory/tests/common_test_data.py +3661 -0
- runbooks/inventory/tests/common_test_functions.py +204 -0
- runbooks/inventory/tests/script_test_data.py +0 -0
- runbooks/inventory/tests/setup.py +24 -0
- runbooks/inventory/tests/src.py +18 -0
- runbooks/inventory/tests/test_cfn_describe_stacks.py +208 -0
- runbooks/inventory/tests/test_ec2_describe_instances.py +162 -0
- runbooks/inventory/tests/test_inventory_modules.py +55 -0
- runbooks/inventory/tests/test_lambda_list_functions.py +86 -0
- runbooks/inventory/tests/test_moto_integration_example.py +273 -0
- runbooks/inventory/tests/test_org_list_accounts.py +49 -0
- runbooks/inventory/update_aws_actions.py +173 -0
- runbooks/inventory/update_cfn_stacksets.py +1215 -0
- runbooks/inventory/update_cloudwatch_logs_retention_policy.py +294 -0
- runbooks/inventory/update_iam_roles_cross_accounts.py +478 -0
- runbooks/inventory/update_s3_public_access_block.py +539 -0
- runbooks/inventory/utils/__init__.py +23 -0
- runbooks/inventory/utils/aws_helpers.py +510 -0
- runbooks/inventory/utils/threading_utils.py +493 -0
- runbooks/inventory/utils/validation.py +682 -0
- runbooks/inventory/verify_ec2_security_groups.py +1430 -0
- runbooks/main.py +785 -0
- runbooks/organizations/__init__.py +12 -0
- runbooks/organizations/manager.py +374 -0
- runbooks/security_baseline/README.md +324 -0
- runbooks/security_baseline/checklist/alternate_contacts.py +8 -1
- runbooks/security_baseline/checklist/bucket_public_access.py +4 -1
- runbooks/security_baseline/checklist/cloudwatch_alarm_configuration.py +9 -2
- runbooks/security_baseline/checklist/guardduty_enabled.py +9 -2
- runbooks/security_baseline/checklist/multi_region_instance_usage.py +5 -1
- runbooks/security_baseline/checklist/root_access_key.py +6 -1
- runbooks/security_baseline/config-origin.json +1 -1
- runbooks/security_baseline/config.json +1 -1
- runbooks/security_baseline/permission.json +1 -1
- runbooks/security_baseline/report_generator.py +10 -2
- runbooks/security_baseline/report_template_en.html +8 -8
- runbooks/security_baseline/report_template_jp.html +8 -8
- runbooks/security_baseline/report_template_kr.html +13 -13
- runbooks/security_baseline/report_template_vn.html +8 -8
- runbooks/security_baseline/requirements.txt +7 -0
- runbooks/security_baseline/run_script.py +8 -2
- runbooks/security_baseline/security_baseline_tester.py +10 -2
- runbooks/security_baseline/utils/common.py +5 -1
- runbooks/utils/__init__.py +204 -0
- runbooks-0.6.1.dist-info/METADATA +373 -0
- runbooks-0.6.1.dist-info/RECORD +237 -0
- {runbooks-0.2.3.dist-info → runbooks-0.6.1.dist-info}/WHEEL +1 -1
- runbooks-0.6.1.dist-info/entry_points.txt +7 -0
- runbooks-0.6.1.dist-info/licenses/LICENSE +201 -0
- runbooks-0.6.1.dist-info/top_level.txt +3 -0
- runbooks/python101/calculator.py +0 -34
- runbooks/python101/config.py +0 -1
- runbooks/python101/exceptions.py +0 -16
- runbooks/python101/file_manager.py +0 -218
- runbooks/python101/toolkit.py +0 -153
- runbooks-0.2.3.dist-info/METADATA +0 -435
- runbooks-0.2.3.dist-info/RECORD +0 -61
- runbooks-0.2.3.dist-info/entry_points.txt +0 -3
- runbooks-0.2.3.dist-info/top_level.txt +0 -1
@@ -0,0 +1,659 @@
|
|
1
|
+
#!/bin/bash

# CloudOps AWS Inventory Scripts Comprehensive Testing Framework
#
# An advanced testing orchestrator for the CloudOps AWS inventory toolkit that provides
# autonomous testing, error analysis, and validation of all Python inventory scripts.
# Designed for enterprise-grade quality assurance and operational readiness validation.
#
# AUTONOMOUS TESTING CAPABILITIES:
# - Comprehensive test execution across all inventory scripts
# - Intelligent error detection and log analysis
# - Performance timing and resource utilization tracking
# - Test result aggregation and failure categorization
# - Automated retry logic for transient failures
# - Detailed reporting with actionable insights
#
# ENTERPRISE FEATURES:
# - Parallel test execution with controlled concurrency
# - Comprehensive logging and audit trails
# - Test isolation and resource cleanup
# - Configurable test parameters and timeouts
# - Integration with CI/CD pipelines
# - Detailed performance and reliability metrics
#
# Usage Examples:
#   Test one script:       ./inventory.sh list_ec2_instances.py --profile test-profile --regions us-east-1
#   Full test suite:       ./inventory.sh all --profile org-profile --verbose
#   Performance benchmark: ./inventory.sh all --profile test-profile --timing
#
# Author: AWS Cloud Foundations Team
# Version: 2024.12.20 - Enhanced Autonomous Testing

# Script to test out and time the various python shell scripts in this directory.
# Runs everything through `uv run` from the project root so the managed Python
# environment and dependencies resolve correctly.

# ============================================================================
# ENVIRONMENT SETUP AND CONFIGURATION
# ============================================================================

# Resolve the repository root relative to this script's location; uv only
# works from there.
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../" && pwd)"
cd "$PROJECT_ROOT" || {
    echo "ERROR: Cannot change to project root directory: $PROJECT_ROOT"
    exit 1
}

# Location of the inventory scripts, relative to the project root.
INVENTORY_DIR="src/runbooks/inventory"

# Fail fast if the expected layout is missing.
[[ -d "$INVENTORY_DIR" ]] || {
    echo "ERROR: Inventory directory not found: $INVENTORY_DIR"
    exit 1
}
|
62
|
+
|
63
|
+
# ============================================================================
# TESTING CONFIGURATION AND GLOBALS
# ============================================================================

# Test execution settings.
MAX_CONCURRENT_TESTS=5
TEST_TIMEOUT=300 # 5 minutes per test
RETRY_ATTEMPTS=2

# Capture the run timestamp ONCE so every artifact of this run shares the same
# suffix. Calling `date` separately for each filename (as before) could
# straddle a second boundary and produce mismatched log/summary/analysis names.
RUN_TIMESTAMP="$(date +%Y%m%d_%H%M%S)"

# Logging and output configuration (all derived from the shared timestamp).
TEST_LOG_DIR="test_logs_${RUN_TIMESTAMP}"
SUMMARY_FILE="test_summary_${RUN_TIMESTAMP}.json"
ERROR_ANALYSIS_FILE="error_analysis_${RUN_TIMESTAMP}.txt"

# Create test output directory.
mkdir -p "$TEST_LOG_DIR"

# Test result tracking. Parallel key/value arrays instead of associative
# arrays for compatibility with bash 3 (stock macOS).
TEST_RESULTS_KEYS=()
TEST_RESULTS_VALUES=()
TEST_TIMES_KEYS=()
TEST_TIMES_VALUES=()
TESTS_PASSED=0
TESTS_FAILED=0
TESTS_SKIPPED=0
|
88
|
+
|
89
|
+
# ============================================================================
# COMMAND LINE PROCESSING AND VALIDATION
# ============================================================================

echo "CloudOps Inventory Testing Framework - Starting comprehensive validation"
# "$*" (not "$@") inside a quoted string: we want one joined word here (SC2145).
echo "Command line arguments: $*"
echo "Test execution time: $(date)"
echo "Project root: $PROJECT_ROOT"
echo "Inventory directory: $INVENTORY_DIR"
echo "Test log directory: $TEST_LOG_DIR"
echo "============================================================================"

# First positional argument selects the script to test ("all" or empty runs
# the full suite); everything after it is forwarded to the tested script.
tool_to_test=$1
shift # Remove first argument
test_params="$*" # Remaining parameters as a single space-joined string

# Validate uv availability; nothing below works without it.
if ! command -v uv &> /dev/null; then
    echo "ERROR: uv command not found. Please install uv for Python environment management."
    exit 1
fi

# Validate Python environment.
echo "Validating Python environment..."
uv --version || {
    echo "ERROR: uv version check failed"
    exit 1
}

echo "Python environment validation successful"
|
120
|
+
|
121
|
+
# ============================================================================
|
122
|
+
# UTILITY FUNCTIONS FOR TEST FRAMEWORK
|
123
|
+
# ============================================================================
|
124
|
+
|
125
|
+
function exists_in_list() {
    # Return 0 if VALUE appears as an element of LIST when LIST is split on
    # DELIMITER, 1 otherwise. Used throughout the framework to decide which
    # scripts should be tested, skipped, or given special handling.
    #
    # Args:
    #   $1 - delimited list string
    #   $2 - delimiter character
    #   $3 - value to look for
    #
    # Returns:
    #   0 if value found, 1 if not found
    local haystack="$1"
    local sep="$2"
    local needle="$3"
    local item

    # Normalise the delimiter to whitespace so plain word-splitting
    # enumerates the elements.
    for item in $(echo "$haystack" | tr "$sep" " "); do
        [ "$item" = "$needle" ] && return 0
    done
    return 1
}
|
152
|
+
|
153
|
+
function get_special_params() {
    # Emit the extra CLI parameters a given script needs for autonomous
    # testing; prints an empty string for scripts with no special needs.
    #
    # Args:
    #   $1 - script file name to look up
    #
    # Outputs:
    #   Special parameter string (possibly empty) on stdout
    local target="$1"

    case "$target" in
        "all_my_instances_wrapper.py")     echo "--account-id 909135376185" ;;
        "lockdown_cfn_stackset_role.py")   echo "--region us-east-1" ;;
        "check_controltower_readiness.py") echo "--quick" ;;
        "run_on_multi_accounts.py")        echo "--help" ;;
        *)                                 echo "" ;;
    esac
}
|
182
|
+
|
183
|
+
function log_test_result() {
    # Record the outcome of one script test: updates the global pass/fail
    # counters, appends to the parallel result/timing arrays, writes an audit
    # line to the execution log, and triggers error analysis for failures.
    #
    # Args:
    #   $1 - name of the tested script
    #   $2 - test execution exit code
    #   $3 - start epoch seconds
    #   $4 - end epoch seconds
    #   $5 - path to the detailed per-test log
    local name="$1"
    local rc="$2"
    local started="$3"
    local finished="$4"
    local logfile="$5"
    local elapsed=$((finished - started))
    local status

    # Classify the outcome and update counters; failures get log analysis.
    if [ "$rc" -eq 0 ]; then
        status="PASSED"
        TESTS_PASSED=$((TESTS_PASSED + 1))
    else
        status="FAILED"
        TESTS_FAILED=$((TESTS_FAILED + 1))
        analyze_test_errors "$name" "$logfile"
    fi

    # Record in the parallel tracking arrays.
    TEST_RESULTS_KEYS+=("$name")
    TEST_RESULTS_VALUES+=("$status")
    TEST_TIMES_KEYS+=("$name")
    TEST_TIMES_VALUES+=("$elapsed")

    # Append one audit line per test to the run's execution log.
    echo "$(date '+%Y-%m-%d %H:%M:%S') | $name | $status | ${elapsed}s | Exit: $rc" >> "$TEST_LOG_DIR/test_execution.log"

    echo "Test completed: $name [$status] (${elapsed}s)"
}
|
228
|
+
|
229
|
+
function analyze_test_errors() {
    # Categorize a failed test's log against known error signatures and
    # append a human-readable analysis section to $ERROR_ANALYSIS_FILE.
    #
    # Args:
    #   $1 - name of the failed script
    #   $2 - path to the test log file
    #
    # Returns:
    #   1 (after a warning) when the log file is missing, 0 otherwise
    local script_name="$1"
    local log_file="$2"

    if [[ ! -f "$log_file" ]]; then
        echo "WARNING: Log file not found for error analysis: $log_file"
        return 1
    fi

    # printf, not `echo "\n..."`: plain bash echo does not interpret backslash
    # escapes, so the original printed a literal "\n" into the report.
    printf '\n=== ERROR ANALYSIS FOR %s ===\n' "$script_name" >> "$ERROR_ANALYSIS_FILE"
    echo "Timestamp: $(date)" >> "$ERROR_ANALYSIS_FILE"
    echo "Log file: $log_file" >> "$ERROR_ANALYSIS_FILE"

    # Index-matched parallel arrays: regex signature -> human-readable category.
    local error_patterns=(
        "AuthFailure|Authorization"
        "AccessDenied|Forbidden"
        "InvalidProfile|ProfileNotFound"
        "NoCredentialsError|CredentialRetrievalError"
        "Throttling|RequestLimitExceeded"
        "ImportError|ModuleNotFoundError"
        "ConnectionError|TimeoutError"
        "KeyError|AttributeError"
    )

    local error_categories=(
        "AWS Authentication/Authorization"
        "AWS Access Permissions"
        "AWS Profile Configuration"
        "AWS Credentials"
        "AWS API Throttling"
        "Python Dependencies"
        "Network Connectivity"
        "Script Logic/Data"
    )

    local i
    for i in "${!error_patterns[@]}"; do
        if grep -i -E "${error_patterns[$i]}" "$log_file" > /dev/null; then
            echo "ERROR CATEGORY: ${error_categories[$i]}" >> "$ERROR_ANALYSIS_FILE"
            echo "PATTERN MATCHED: ${error_patterns[$i]}" >> "$ERROR_ANALYSIS_FILE"
            # Include up to five matching lines for context.
            grep -i -E "${error_patterns[$i]}" "$log_file" | head -5 >> "$ERROR_ANALYSIS_FILE"
            echo "" >> "$ERROR_ANALYSIS_FILE"
        fi
    done

    # Tail of the log gives context even when no known pattern matched.
    echo "LAST 10 LINES OF OUTPUT:" >> "$ERROR_ANALYSIS_FILE"
    tail -10 "$log_file" >> "$ERROR_ANALYSIS_FILE"
    printf '\n' >> "$ERROR_ANALYSIS_FILE"
}
|
288
|
+
|
289
|
+
function execute_test() {
    # Run one inventory script with timeout control, full output capture, and
    # result logging. Parameter handling depends on the script's category:
    # profile-less scripts receive only their special parameters, no-verbose
    # scripts receive a fixed read-only profile, and everything else receives
    # the caller's parameters plus any script-specific extras.
    #
    # Args:
    #   $1 - script file name (relative to $INVENTORY_DIR)
    #   $2 - parameter string to pass to the script
    #
    # Returns:
    #   The script's exit code; 124 on timeout
    local script_name="$1"
    local test_parameters="$2"
    local output_file="$TEST_LOG_DIR/test_output_${script_name}.txt"
    local start_time end_time exit_code
    # Declared separately from assignment so a helper failure is not masked
    # by the always-zero exit status of `local` (SC2155).
    local special_params

    # Check script category to build the effective parameter list.
    if exists_in_list "$scripts_no_profile" " " "$script_name"; then
        # Scripts that don't support --profile: only special parameters.
        special_params=$(get_special_params "$script_name")
        test_parameters="$special_params"
        echo "Script $script_name doesn't support profile - using only special parameters: $special_params"
    elif exists_in_list "$scripts_no_verbose" " " "$script_name"; then
        # Scripts that support --profile but not --verbose.
        local profile_param="--profile ams-admin-ReadOnlyAccess-909135376185"
        special_params=$(get_special_params "$script_name")
        test_parameters="$profile_param $special_params"
        echo "Script $script_name supports profile but not verbose - using: $test_parameters"
    else
        # Default: caller's parameters plus any script-specific extras.
        special_params=$(get_special_params "$script_name")
        if [[ -n "$special_params" ]]; then
            test_parameters="$test_parameters $special_params"
            echo "Added special parameters for $script_name: $special_params"
        fi
    fi

    echo "Starting test: $script_name"
    echo "Parameters: $test_parameters"
    echo "Output file: $output_file"

    # Record test start and environment metadata in the per-test log.
    start_time=$(date +%s)
    {
        echo "Test started: $(date)"
        echo "Script: $script_name"
        echo "Parameters: $test_parameters"
        echo "Working directory: $(pwd)"
        echo "Environment: $(uv --version)"
        echo "========================================"
    } > "$output_file"

    # Launch in the background so we can implement a portable (macOS-friendly)
    # timeout without GNU `timeout`. PYTHONPATH includes the inventory dir for
    # local module imports. $test_parameters is intentionally unquoted: it is
    # a whitespace-separated parameter string that must word-split.
    (cd "$INVENTORY_DIR" && PYTHONPATH=".:$(pwd):$PYTHONPATH" uv run python "$script_name" $test_parameters) >> "$output_file" 2>&1 &
    local test_cmd_pid=$!

    # Poll for completion once per second up to the deadline. The `completed`
    # flag disambiguates "finished at/near the deadline" from "still running",
    # so a just-finished process is not misreported as a timeout.
    local elapsed=0
    local completed=0
    exit_code=0

    while [ $elapsed -lt $TEST_TIMEOUT ]; do
        if ! kill -0 "$test_cmd_pid" 2>/dev/null; then
            # Process has completed; collect its exit status.
            wait "$test_cmd_pid"
            exit_code=$?
            completed=1
            break
        fi
        sleep 1
        elapsed=$((elapsed + 1))
    done

    # Handle timeout: TERM first, then KILL after a grace period.
    if [ "$completed" -ne 1 ] && [ $elapsed -ge $TEST_TIMEOUT ]; then
        echo "TEST TIMEOUT: Execution exceeded ${TEST_TIMEOUT} seconds" >> "$output_file"
        echo "Test timed out: $script_name (${TEST_TIMEOUT}s)"
        kill -TERM "$test_cmd_pid" 2>/dev/null
        sleep 2
        kill -KILL "$test_cmd_pid" 2>/dev/null
        exit_code=124
    fi

    end_time=$(date +%s)

    # Record test completion in the per-test log.
    {
        echo "========================================"
        echo "Test completed: $(date)"
        echo "Exit code: $exit_code"
        echo "Duration: $((end_time - start_time)) seconds"
    } >> "$output_file"

    # Update counters, arrays, and the execution log.
    log_test_result "$script_name" "$exit_code" "$start_time" "$end_time" "$output_file"

    return "$exit_code"
}
|
385
|
+
|
386
|
+
# ============================================================================
# SCRIPT CATEGORIZATION AND FILTERING CONFIGURATION
# ============================================================================

# Support modules, setup helpers, and scripts that need special execution
# contexts -- never executed directly by the automated test runner.
scripts_to_not_test="Inventory_Modules.py cfn_recover_stack_ids.py cfn_lockdown_stackset_role.py ArgumentsClass.py \
account_class.py org_check_alz_readiness.py controltower_check_account_readiness.py s3_delete_objects.py cfn_enable_drift_detection.py \
org_describe_landingzone_versions.py cfn_move_stack_instances.py multi_account_runner.py iam_update_roles_cross_account.py ec2_vpc_utils.py \
cfn_recover_stack_ids.py setup.py aws_decorators.py cfn_list_stack_set_operation_results.py __pycache__ tests \
update_aws_actions.py update_iam_roles_cross_accounts.py run_on_multi_accounts.py"

# Scripts that prompt for manual input and therefore cannot run unattended.
scripts_that_require_response="cfn_enable_stackset_drift.py delete_s3_buckets_objects.py"

# Scripts able to modify or delete AWS resources -- handled with extra caution.
destructive_scripts="delete_s3_buckets_objects.py list_cfn_stacks.py list_iam_roles.py"

# Known-problematic or deprecated scripts, tracked separately (none today).
problematic_scripts=""

# Script-specific extra CLI flags live in get_special_params above.

# Simple wrappers that accept neither --profile nor --verbose.
scripts_no_profile=""

# Scripts that accept --profile but not --verbose.
scripts_no_verbose="all_my_instances_wrapper.py"

# Core inventory operations that receive priority test coverage.
core_scripts="list_ec2_instances.py list_ec2_ebs_volumes.py list_vpcs.py list_rds_db_instances.py \
list_lambda_functions.py list_sns_topics.py find_ec2_security_groups.py list_org_accounts.py list_cfn_stacks.py list_iam_roles.py"

# Execution tracking arrays.
arrScripts=()
failed_tests=()
passed_tests=()
skipped_tests=()

# Timing and performance tracking arrays.
test_durations=()
performance_metrics=()

echo "Script categorization complete:"
echo "- Scripts to exclude: $(echo "$scripts_to_not_test" | wc -w)"
echo "- Interactive scripts: $(echo "$scripts_that_require_response" | wc -w)"
echo "- Destructive scripts: $(echo "$destructive_scripts" | wc -w)"
echo "- Core scripts: $(echo "$core_scripts" | wc -w)"
|
439
|
+
|
440
|
+
# ============================================================================
|
441
|
+
# TEST SELECTION AND EXECUTION LOGIC
|
442
|
+
# ============================================================================
|
443
|
+
|
444
|
+
if [[ -n "$tool_to_test" && "$tool_to_test" != "all" ]]; then
    # --- Single-script testing mode ---------------------------------------
    echo "Single script testing mode: $tool_to_test"
    echo "Test parameters: $test_params"

    # Validate the requested script exists before anything else.
    if [[ ! -f "$INVENTORY_DIR/$tool_to_test" ]]; then
        echo "ERROR: Script not found: $INVENTORY_DIR/$tool_to_test"
        exit 1
    fi

    # Warn (but proceed) when the script is normally excluded or interactive.
    if exists_in_list "$scripts_to_not_test" " " "$tool_to_test"; then
        echo "WARNING: Script '$tool_to_test' is in exclusion list but will be tested as explicitly requested"
    fi

    if exists_in_list "$scripts_that_require_response" " " "$tool_to_test"; then
        echo "WARNING: Script '$tool_to_test' requires interactive input - test may hang"
    fi

    # Execute single test.
    execute_test "$tool_to_test" "$test_params"
    test_exit_code=$?

    echo "Single test execution completed with exit code: $test_exit_code"

else
    # --- Comprehensive mode: queue every eligible script ------------------
    echo "Comprehensive testing mode - scanning all inventory scripts"
    echo "Test parameters: $test_params"

    # Build the list of scripts to test, applying the exclusion filters.
    for file in "$INVENTORY_DIR"/*.py; do
        # Skip the literal glob pattern when the directory has no .py files.
        [[ ! -f "$file" ]] && continue

        filename=$(basename "$file")

        if exists_in_list "$scripts_to_not_test" " " "$filename"; then
            echo "Excluding: $filename (utility/module script)"
            skipped_tests+=("$filename")
            TESTS_SKIPPED=$((TESTS_SKIPPED + 1))
        elif exists_in_list "$scripts_that_require_response" " " "$filename"; then
            echo "Skipping: $filename (requires interactive input)"
            skipped_tests+=("$filename")
            TESTS_SKIPPED=$((TESTS_SKIPPED + 1))
        else
            echo "Queuing for test: $filename"
            arrScripts+=("$filename")
        fi
    done

    # printf, not `echo "\n..."`: bash echo prints the "\n" escape literally.
    printf '\nTest execution plan:\n'
    echo "- Scripts to test: ${#arrScripts[@]}"
    echo "- Scripts skipped: ${#skipped_tests[@]}"
    echo "- Total scripts found: $((${#arrScripts[@]} + ${#skipped_tests[@]}))"
fi
|
502
|
+
|
503
|
+
# BUG FIX: bash 'echo' does not expand "\n" without -e, so the original
# banner printed literal backslash-n text. printf produces real blank lines.
printf '\n============================================================================\n'
echo "Starting test execution phase"
printf '============================================================================\n\n'
# ============================================================================
# COMPREHENSIVE TEST EXECUTION WITH MONITORING
# ============================================================================

# Execute tests with controlled concurrency (bounded background jobs)
if [[ ${#arrScripts[@]} -gt 0 ]]; then
  echo "Executing ${#arrScripts[@]} tests with maximum $MAX_CONCURRENT_TESTS concurrent processes"

  # Initialize the summary file with run metadata (truncates any previous run)
  {
    echo "CloudOps Inventory Testing Framework - Test Execution Summary"
    echo "Generated: $(date)"
    echo "Project Root: $PROJECT_ROOT"
    echo "Test Parameters: $test_params"
    echo "Total Scripts: ${#arrScripts[@]}"
    echo "Max Concurrent: $MAX_CONCURRENT_TESTS"
    echo "Timeout: ${TEST_TIMEOUT}s"
    echo "============================================================================"
  } > "$TEST_LOG_DIR/$SUMMARY_FILE"

  # Track active background test processes
  active_pids=()
  active_count=0

  for item in "${arrScripts[@]}"; do
    # Throttle: wait until a slot frees up before launching another test
    while [[ $active_count -ge $MAX_CONCURRENT_TESTS ]]; do
      for i in "${!active_pids[@]}"; do
        if ! kill -0 "${active_pids[$i]}" 2>/dev/null; then
          # Process finished; drop it from tracking (array stays sparse,
          # which "${!active_pids[@]}" iteration handles correctly)
          unset "active_pids[$i]"
          active_count=$((active_count - 1))
        fi
      done

      # Brief pause to avoid busy waiting
      sleep 1
    done

    echo "Starting test: $item (Active: $active_count/$MAX_CONCURRENT_TESTS)"

    # Launch test in a background subshell.
    # NOTE(review): variables updated inside execute_test (e.g. TESTS_PASSED,
    # TESTS_FAILED, TEST_RESULTS_* arrays) are lost when this subshell exits.
    # Confirm execute_test persists its results to files under $TEST_LOG_DIR;
    # otherwise the parent's final summary counters will read as zero in this
    # comprehensive mode.
    (
      execute_test "$item" "$test_params"
    ) &

    # Track the new process
    test_pid=$!
    active_pids+=("$test_pid")
    active_count=$((active_count + 1))

    # Brief stagger between test starts
    sleep 0.5
  done

  # BUG FIX: 'echo "\n..."' prints a literal backslash-n in bash; use printf.
  printf '\nAll tests launched. Waiting for completion...\n'

  # Barrier: wait for every remaining background test to finish
  for pid in "${active_pids[@]}"; do
    if kill -0 "$pid" 2>/dev/null; then
      wait "$pid"
    fi
  done

  printf '\nAll tests completed. Generating final reports...\n'

else
  echo "No tests to execute based on current configuration."
fi
# ============================================================================
# COMPREHENSIVE TEST RESULTS ANALYSIS AND REPORTING
# ============================================================================

# BUG FIX: use printf instead of echo "\n..." — bash echo without -e
# prints a literal backslash-n rather than a blank line.
printf '\n============================================================================\n'
echo "FINAL TEST RESULTS AND ANALYSIS"
echo "============================================================================"
# Generate comprehensive test summary, mirrored to console and summary file.
# NOTE: assignments inside this brace group run in the pipeline's subshell,
# which is fine — total_time/avg_time etc. are only used within it.
{
  # BUG FIX: echo "\n..." emits a literal backslash-n in bash; use printf.
  printf '\nTEST EXECUTION SUMMARY\n'
  echo "======================"
  echo "Total Tests Executed: ${#arrScripts[@]}"
  echo "Tests Passed: $TESTS_PASSED"
  echo "Tests Failed: $TESTS_FAILED"
  echo "Tests Skipped: $TESTS_SKIPPED"
  # Integer success rate, guarded against divide-by-zero when nothing ran
  if [[ $((TESTS_PASSED + TESTS_FAILED)) -gt 0 ]]; then
    echo "Success Rate: $(( TESTS_PASSED * 100 / (TESTS_PASSED + TESTS_FAILED) ))%"
  else
    echo "Success Rate: 0%"
  fi
  echo ""

  if [[ $TESTS_FAILED -gt 0 ]]; then
    echo "FAILED TESTS REQUIRING ATTENTION:"
    echo "=================================="
    # TEST_RESULTS_KEYS / TEST_RESULTS_VALUES / TEST_TIMES_VALUES appear to be
    # parallel arrays sharing an index — presumably populated by execute_test;
    # verify they are filled in the parent shell (see execution phase note).
    for i in "${!TEST_RESULTS_KEYS[@]}"; do
      if [[ "${TEST_RESULTS_VALUES[$i]}" == "FAILED" ]]; then
        echo "- ${TEST_RESULTS_KEYS[$i]} (Duration: ${TEST_TIMES_VALUES[$i]}s)"
      fi
    done
    echo ""

    echo "Detailed error analysis available in: $ERROR_ANALYSIS_FILE"
    echo "Individual test logs available in: $TEST_LOG_DIR/"
  fi

  echo "PERFORMANCE METRICS:"
  echo "===================="
  total_time=0
  max_time=0
  min_time=999999   # sentinel larger than any realistic test duration
  fastest_script=""
  slowest_script=""

  # Single pass over the timing data: accumulate total and track extremes
  for i in "${!TEST_TIMES_KEYS[@]}"; do
    duration=${TEST_TIMES_VALUES[$i]}
    total_time=$((total_time + duration))

    if [[ $duration -gt $max_time ]]; then
      max_time=$duration
      slowest_script=${TEST_TIMES_KEYS[$i]}
    fi

    if [[ $duration -lt $min_time ]]; then
      min_time=$duration
      fastest_script=${TEST_TIMES_KEYS[$i]}
    fi
  done

  # Only print metrics when at least one timing entry exists
  # (also guards the average against division by zero)
  if [[ ${#TEST_TIMES_KEYS[@]} -gt 0 ]]; then
    avg_time=$((total_time / ${#TEST_TIMES_KEYS[@]}))
    echo "Total Execution Time: ${total_time}s"
    echo "Average Test Duration: ${avg_time}s"
    echo "Fastest Test: $fastest_script (${min_time}s)"
    echo "Slowest Test: $slowest_script (${max_time}s)"
  fi

  echo ""
  echo "All test logs and detailed results saved to: $TEST_LOG_DIR/"
  echo "Test execution completed at: $(date)"

} | tee -a "$TEST_LOG_DIR/$SUMMARY_FILE"
# Set the script's overall exit code based on aggregated test results.
if [[ $TESTS_FAILED -gt 0 ]]; then
  # BUG FIX: printf instead of echo "\n..." (bash echo prints a literal \n)
  printf '\nWARNING: %s tests failed. Review logs for details.\n' "$TESTS_FAILED" >&2
  exit 1
else
  printf '\nSUCCESS: All tests passed successfully.\n'
  exit 0
fi