runbooks 0.2.5-py3-none-any.whl → 0.6.1-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- conftest.py +26 -0
- jupyter-agent/.env.template +2 -0
- jupyter-agent/.gitattributes +35 -0
- jupyter-agent/README.md +16 -0
- jupyter-agent/app.py +256 -0
- jupyter-agent/cloudops-agent.png +0 -0
- jupyter-agent/ds-system-prompt.txt +154 -0
- jupyter-agent/jupyter-agent.png +0 -0
- jupyter-agent/llama3_template.jinja +123 -0
- jupyter-agent/requirements.txt +9 -0
- jupyter-agent/utils.py +409 -0
- runbooks/__init__.py +71 -3
- runbooks/__main__.py +13 -0
- runbooks/aws/ec2_describe_instances.py +1 -1
- runbooks/aws/ec2_run_instances.py +8 -2
- runbooks/aws/ec2_start_stop_instances.py +17 -4
- runbooks/aws/ec2_unused_volumes.py +5 -1
- runbooks/aws/s3_create_bucket.py +4 -2
- runbooks/aws/s3_list_objects.py +6 -1
- runbooks/aws/tagging_lambda_handler.py +13 -2
- runbooks/aws/tags.json +12 -0
- runbooks/base.py +353 -0
- runbooks/cfat/README.md +49 -0
- runbooks/cfat/__init__.py +74 -0
- runbooks/cfat/app.ts +644 -0
- runbooks/cfat/assessment/__init__.py +40 -0
- runbooks/cfat/assessment/asana-import.csv +39 -0
- runbooks/cfat/assessment/cfat-checks.csv +31 -0
- runbooks/cfat/assessment/cfat.txt +520 -0
- runbooks/cfat/assessment/collectors.py +200 -0
- runbooks/cfat/assessment/jira-import.csv +39 -0
- runbooks/cfat/assessment/runner.py +387 -0
- runbooks/cfat/assessment/validators.py +290 -0
- runbooks/cfat/cli.py +103 -0
- runbooks/cfat/docs/asana-import.csv +24 -0
- runbooks/cfat/docs/cfat-checks.csv +31 -0
- runbooks/cfat/docs/cfat.txt +335 -0
- runbooks/cfat/docs/checks-output.png +0 -0
- runbooks/cfat/docs/cloudshell-console-run.png +0 -0
- runbooks/cfat/docs/cloudshell-download.png +0 -0
- runbooks/cfat/docs/cloudshell-output.png +0 -0
- runbooks/cfat/docs/downloadfile.png +0 -0
- runbooks/cfat/docs/jira-import.csv +24 -0
- runbooks/cfat/docs/open-cloudshell.png +0 -0
- runbooks/cfat/docs/report-header.png +0 -0
- runbooks/cfat/models.py +1026 -0
- runbooks/cfat/package-lock.json +5116 -0
- runbooks/cfat/package.json +38 -0
- runbooks/cfat/report.py +496 -0
- runbooks/cfat/reporting/__init__.py +46 -0
- runbooks/cfat/reporting/exporters.py +337 -0
- runbooks/cfat/reporting/formatters.py +496 -0
- runbooks/cfat/reporting/templates.py +135 -0
- runbooks/cfat/run-assessment.sh +23 -0
- runbooks/cfat/runner.py +69 -0
- runbooks/cfat/src/actions/check-cloudtrail-existence.ts +43 -0
- runbooks/cfat/src/actions/check-config-existence.ts +37 -0
- runbooks/cfat/src/actions/check-control-tower.ts +37 -0
- runbooks/cfat/src/actions/check-ec2-existence.ts +46 -0
- runbooks/cfat/src/actions/check-iam-users.ts +50 -0
- runbooks/cfat/src/actions/check-legacy-cur.ts +30 -0
- runbooks/cfat/src/actions/check-org-cloudformation.ts +30 -0
- runbooks/cfat/src/actions/check-vpc-existence.ts +43 -0
- runbooks/cfat/src/actions/create-asanaimport.ts +14 -0
- runbooks/cfat/src/actions/create-backlog.ts +372 -0
- runbooks/cfat/src/actions/create-jiraimport.ts +15 -0
- runbooks/cfat/src/actions/create-report.ts +616 -0
- runbooks/cfat/src/actions/define-account-type.ts +51 -0
- runbooks/cfat/src/actions/get-enabled-org-policy-types.ts +40 -0
- runbooks/cfat/src/actions/get-enabled-org-services.ts +26 -0
- runbooks/cfat/src/actions/get-idc-info.ts +34 -0
- runbooks/cfat/src/actions/get-org-da-accounts.ts +34 -0
- runbooks/cfat/src/actions/get-org-details.ts +35 -0
- runbooks/cfat/src/actions/get-org-member-accounts.ts +44 -0
- runbooks/cfat/src/actions/get-org-ous.ts +35 -0
- runbooks/cfat/src/actions/get-regions.ts +22 -0
- runbooks/cfat/src/actions/zip-assessment.ts +27 -0
- runbooks/cfat/src/types/index.d.ts +147 -0
- runbooks/cfat/tests/__init__.py +141 -0
- runbooks/cfat/tests/test_cli.py +340 -0
- runbooks/cfat/tests/test_integration.py +290 -0
- runbooks/cfat/tests/test_models.py +505 -0
- runbooks/cfat/tests/test_reporting.py +354 -0
- runbooks/cfat/tsconfig.json +16 -0
- runbooks/cfat/webpack.config.cjs +27 -0
- runbooks/config.py +260 -0
- runbooks/finops/__init__.py +88 -0
- runbooks/finops/aws_client.py +245 -0
- runbooks/finops/cli.py +151 -0
- runbooks/finops/cost_processor.py +410 -0
- runbooks/finops/dashboard_runner.py +448 -0
- runbooks/finops/helpers.py +355 -0
- runbooks/finops/main.py +14 -0
- runbooks/finops/profile_processor.py +174 -0
- runbooks/finops/types.py +66 -0
- runbooks/finops/visualisations.py +80 -0
- runbooks/inventory/.gitignore +354 -0
- runbooks/inventory/ArgumentsClass.py +261 -0
- runbooks/inventory/Inventory_Modules.py +6130 -0
- runbooks/inventory/LandingZone/delete_lz.py +1075 -0
- runbooks/inventory/README.md +1320 -0
- runbooks/inventory/__init__.py +62 -0
- runbooks/inventory/account_class.py +532 -0
- runbooks/inventory/all_my_instances_wrapper.py +123 -0
- runbooks/inventory/aws_decorators.py +201 -0
- runbooks/inventory/cfn_move_stack_instances.py +1526 -0
- runbooks/inventory/check_cloudtrail_compliance.py +614 -0
- runbooks/inventory/check_controltower_readiness.py +1107 -0
- runbooks/inventory/check_landingzone_readiness.py +711 -0
- runbooks/inventory/cloudtrail.md +727 -0
- runbooks/inventory/collectors/__init__.py +20 -0
- runbooks/inventory/collectors/aws_compute.py +518 -0
- runbooks/inventory/collectors/aws_networking.py +275 -0
- runbooks/inventory/collectors/base.py +222 -0
- runbooks/inventory/core/__init__.py +19 -0
- runbooks/inventory/core/collector.py +303 -0
- runbooks/inventory/core/formatter.py +296 -0
- runbooks/inventory/delete_s3_buckets_objects.py +169 -0
- runbooks/inventory/discovery.md +81 -0
- runbooks/inventory/draw_org_structure.py +748 -0
- runbooks/inventory/ec2_vpc_utils.py +341 -0
- runbooks/inventory/find_cfn_drift_detection.py +272 -0
- runbooks/inventory/find_cfn_orphaned_stacks.py +719 -0
- runbooks/inventory/find_cfn_stackset_drift.py +733 -0
- runbooks/inventory/find_ec2_security_groups.py +669 -0
- runbooks/inventory/find_landingzone_versions.py +201 -0
- runbooks/inventory/find_vpc_flow_logs.py +1221 -0
- runbooks/inventory/inventory.sh +659 -0
- runbooks/inventory/list_cfn_stacks.py +558 -0
- runbooks/inventory/list_cfn_stackset_operation_results.py +252 -0
- runbooks/inventory/list_cfn_stackset_operations.py +734 -0
- runbooks/inventory/list_cfn_stacksets.py +453 -0
- runbooks/inventory/list_config_recorders_delivery_channels.py +681 -0
- runbooks/inventory/list_ds_directories.py +354 -0
- runbooks/inventory/list_ec2_availability_zones.py +286 -0
- runbooks/inventory/list_ec2_ebs_volumes.py +244 -0
- runbooks/inventory/list_ec2_instances.py +425 -0
- runbooks/inventory/list_ecs_clusters_and_tasks.py +562 -0
- runbooks/inventory/list_elbs_load_balancers.py +411 -0
- runbooks/inventory/list_enis_network_interfaces.py +526 -0
- runbooks/inventory/list_guardduty_detectors.py +568 -0
- runbooks/inventory/list_iam_policies.py +404 -0
- runbooks/inventory/list_iam_roles.py +518 -0
- runbooks/inventory/list_iam_saml_providers.py +359 -0
- runbooks/inventory/list_lambda_functions.py +882 -0
- runbooks/inventory/list_org_accounts.py +446 -0
- runbooks/inventory/list_org_accounts_users.py +354 -0
- runbooks/inventory/list_rds_db_instances.py +406 -0
- runbooks/inventory/list_route53_hosted_zones.py +318 -0
- runbooks/inventory/list_servicecatalog_provisioned_products.py +575 -0
- runbooks/inventory/list_sns_topics.py +360 -0
- runbooks/inventory/list_ssm_parameters.py +402 -0
- runbooks/inventory/list_vpc_subnets.py +433 -0
- runbooks/inventory/list_vpcs.py +422 -0
- runbooks/inventory/lockdown_cfn_stackset_role.py +224 -0
- runbooks/inventory/models/__init__.py +24 -0
- runbooks/inventory/models/account.py +192 -0
- runbooks/inventory/models/inventory.py +309 -0
- runbooks/inventory/models/resource.py +247 -0
- runbooks/inventory/recover_cfn_stack_ids.py +205 -0
- runbooks/inventory/requirements.txt +12 -0
- runbooks/inventory/run_on_multi_accounts.py +211 -0
- runbooks/inventory/tests/common_test_data.py +3661 -0
- runbooks/inventory/tests/common_test_functions.py +204 -0
- runbooks/inventory/tests/script_test_data.py +0 -0
- runbooks/inventory/tests/setup.py +24 -0
- runbooks/inventory/tests/src.py +18 -0
- runbooks/inventory/tests/test_cfn_describe_stacks.py +208 -0
- runbooks/inventory/tests/test_ec2_describe_instances.py +162 -0
- runbooks/inventory/tests/test_inventory_modules.py +55 -0
- runbooks/inventory/tests/test_lambda_list_functions.py +86 -0
- runbooks/inventory/tests/test_moto_integration_example.py +273 -0
- runbooks/inventory/tests/test_org_list_accounts.py +49 -0
- runbooks/inventory/update_aws_actions.py +173 -0
- runbooks/inventory/update_cfn_stacksets.py +1215 -0
- runbooks/inventory/update_cloudwatch_logs_retention_policy.py +294 -0
- runbooks/inventory/update_iam_roles_cross_accounts.py +478 -0
- runbooks/inventory/update_s3_public_access_block.py +539 -0
- runbooks/inventory/utils/__init__.py +23 -0
- runbooks/inventory/utils/aws_helpers.py +510 -0
- runbooks/inventory/utils/threading_utils.py +493 -0
- runbooks/inventory/utils/validation.py +682 -0
- runbooks/inventory/verify_ec2_security_groups.py +1430 -0
- runbooks/main.py +785 -0
- runbooks/organizations/__init__.py +12 -0
- runbooks/organizations/manager.py +374 -0
- runbooks/security_baseline/README.md +324 -0
- runbooks/security_baseline/checklist/alternate_contacts.py +8 -1
- runbooks/security_baseline/checklist/bucket_public_access.py +4 -1
- runbooks/security_baseline/checklist/cloudwatch_alarm_configuration.py +9 -2
- runbooks/security_baseline/checklist/guardduty_enabled.py +9 -2
- runbooks/security_baseline/checklist/multi_region_instance_usage.py +5 -1
- runbooks/security_baseline/checklist/root_access_key.py +6 -1
- runbooks/security_baseline/config-origin.json +1 -1
- runbooks/security_baseline/config.json +1 -1
- runbooks/security_baseline/permission.json +1 -1
- runbooks/security_baseline/report_generator.py +10 -2
- runbooks/security_baseline/report_template_en.html +7 -7
- runbooks/security_baseline/report_template_jp.html +7 -7
- runbooks/security_baseline/report_template_kr.html +12 -12
- runbooks/security_baseline/report_template_vn.html +7 -7
- runbooks/security_baseline/requirements.txt +7 -0
- runbooks/security_baseline/run_script.py +8 -2
- runbooks/security_baseline/security_baseline_tester.py +10 -2
- runbooks/security_baseline/utils/common.py +5 -1
- runbooks/utils/__init__.py +204 -0
- runbooks-0.6.1.dist-info/METADATA +373 -0
- runbooks-0.6.1.dist-info/RECORD +237 -0
- {runbooks-0.2.5.dist-info → runbooks-0.6.1.dist-info}/WHEEL +1 -1
- runbooks-0.6.1.dist-info/entry_points.txt +7 -0
- runbooks-0.6.1.dist-info/licenses/LICENSE +201 -0
- runbooks-0.6.1.dist-info/top_level.txt +3 -0
- runbooks/python101/calculator.py +0 -34
- runbooks/python101/config.py +0 -1
- runbooks/python101/exceptions.py +0 -16
- runbooks/python101/file_manager.py +0 -218
- runbooks/python101/toolkit.py +0 -153
- runbooks-0.2.5.dist-info/METADATA +0 -439
- runbooks-0.2.5.dist-info/RECORD +0 -61
- runbooks-0.2.5.dist-info/entry_points.txt +0 -3
- runbooks-0.2.5.dist-info/top_level.txt +0 -1
runbooks/cfat/models.py
ADDED
@@ -0,0 +1,1026 @@
````python
"""
Enhanced Data Models for Cloud Foundations Assessment Tool (CFAT).

This module provides enterprise-grade Pydantic models for representing
assessment results, checks, and reports with comprehensive type hints,
validation, and documentation suitable for MkDocs generation.

The models follow AWS Cloud Foundations best practices and provide
structured data for assessment reporting, compliance tracking, and
remediation guidance.

Example:
    ```python
    from runbooks.cfat.models import AssessmentResult, Severity, CheckStatus

    # Create an assessment result
    result = AssessmentResult(
        finding_id="IAM-001",
        check_name="root_mfa_enabled",
        check_category="iam",
        status=CheckStatus.FAIL,
        severity=Severity.CRITICAL,
        message="Root account MFA is not enabled",
        remediation="Enable MFA for the root account"
    )

    # Generate report
    report = AssessmentReport(
        account_id="123456789012",
        region="us-east-1",
        profile="default",
        results=[result]
    )
    ```

Todo:
    - Add custom validators for AWS-specific formats
    - Implement result aggregation methods
    - Add export format configurations
"""

from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union

from loguru import logger
from pydantic import BaseModel, ConfigDict, Field, field_validator


class Severity(str, Enum):
    """Assessment result severity levels."""

    INFO = "INFO"
    WARNING = "WARNING"
    CRITICAL = "CRITICAL"


class CheckStatus(str, Enum):
    """Assessment check status."""

    PASS = "PASS"
    FAIL = "FAIL"
    SKIP = "SKIP"
    ERROR = "ERROR"


class AssessmentResult(BaseModel):
    """
    Individual Cloud Foundations Assessment Result.

    Represents a single assessment check result with finding details,
    compliance status, and remediation recommendations following
    AWS Cloud Foundations best practices.

    This model provides comprehensive assessment information including:
    - Unique finding identification
    - Compliance status and severity
    - AWS resource details
    - Remediation guidance
    - Execution metrics

    Attributes:
        finding_id: Unique identifier for the assessment finding
        check_name: Name of the assessment check performed
        check_category: Category grouping (iam, vpc, cloudtrail, etc.)
        status: Compliance status (PASS, FAIL, WARNING, INFO)
        severity: Criticality level (CRITICAL, HIGH, MEDIUM, LOW, INFO)
        message: Human-readable finding description
        details: Additional structured details about the finding
        resource_arn: AWS resource ARN being assessed (if applicable)
        recommendations: List of recommended remediation steps
        execution_time: Check execution time in seconds
        timestamp: When the check was performed

    Example:
        ```python
        result = AssessmentResult(
            finding_id="IAM-001",
            check_name="root_mfa_enabled",
            check_category="iam",
            status=CheckStatus.FAIL,
            severity=Severity.CRITICAL,
            message="Root account MFA is not enabled",
            resource_arn="arn:aws:iam::123456789012:root",
            recommendations=[
                "Enable MFA for the root account",
                "Follow AWS IAM best practices documentation"
            ],
            execution_time=0.5
        )
        ```
    """

    model_config = ConfigDict(
        str_strip_whitespace=True, validate_assignment=True, extra="forbid", frozen=False, validate_default=True
    )

    # Core identification
    finding_id: str = Field(
        ..., description="Unique finding identifier (e.g., IAM-001, VPC-002)", min_length=1, max_length=50
    )
    check_name: str = Field(..., description="Name of the assessment check performed", min_length=1, max_length=100)
    check_category: str = Field(
        ..., description="Category grouping (iam, vpc, cloudtrail, config, etc.)", min_length=1, max_length=50
    )

    # Assessment results
    status: CheckStatus = Field(..., description="Compliance check status")
    severity: Severity = Field(..., description="Finding severity level")
    message: str = Field(..., description="Human-readable finding description", min_length=1, max_length=500)

    # Additional details
    details: Optional[Dict[str, Any]] = Field(
        default=None, description="Additional structured details about the finding"
    )
    resource_arn: Optional[str] = Field(default=None, description="AWS resource ARN being assessed (if applicable)")
    recommendations: List[str] = Field(default_factory=list, description="List of recommended remediation steps")

    # Metadata
    execution_time: float = Field(..., description="Check execution time in seconds", ge=0.0)
    timestamp: datetime = Field(default_factory=datetime.utcnow, description="When the check was performed")

    @field_validator("resource_arn")
    @classmethod
    def validate_arn_format(cls, v: Optional[str]) -> Optional[str]:
        """Validate AWS ARN format if provided."""
        if v is None:
            return v

        if not v.startswith("arn:aws:"):
            logger.warning(f"Invalid ARN format: {v}")
            # Don't fail validation, just log warning

        return v

    @field_validator("finding_id")
    @classmethod
    def validate_finding_id_format(cls, v: str) -> str:
        """Validate finding ID follows expected pattern."""
        if not v or len(v.strip()) == 0:
            raise ValueError("Finding ID cannot be empty")

        # Expected format: CATEGORY-XXX (e.g., IAM-001, VPC-002)
        if "-" not in v:
            logger.warning(f"Finding ID should follow CATEGORY-XXX format: {v}")

        return v.strip().upper()

    # Properties for convenience
    @property
    def passed(self) -> bool:
        """Check if assessment result passed."""
        return self.status == CheckStatus.PASS

    @property
    def failed(self) -> bool:
        """Check if assessment result failed."""
        return self.status == CheckStatus.FAIL

    @property
    def is_critical(self) -> bool:
        """Check if finding is critical severity."""
        return self.severity == Severity.CRITICAL

    @property
    def is_warning(self) -> bool:
        """Check if finding is warning severity."""
        return self.severity == Severity.WARNING

    @property
    def category_prefix(self) -> str:
        """Get the category prefix from finding ID."""
        return self.finding_id.split("-")[0] if "-" in self.finding_id else self.check_category.upper()

    def add_recommendation(self, recommendation: str) -> None:
        """Add a recommendation to the result."""
        if recommendation and recommendation not in self.recommendations:
            self.recommendations.append(recommendation)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return self.model_dump(exclude_none=True)


class AssessmentSummary(BaseModel):
    """
    Cloud Foundations Assessment Summary Statistics.

    Provides comprehensive statistics and metrics for assessment results,
    including pass rates, severity breakdowns, and execution performance
    data for management reporting and compliance tracking.

    This summary enables quick assessment of AWS account compliance
    status and prioritization of remediation efforts based on
    severity and impact analysis.

    Attributes:
        total_checks: Total number of assessment checks performed
        passed_checks: Number of checks that passed successfully
        failed_checks: Number of checks that failed compliance
        skipped_checks: Number of checks that were skipped
        error_checks: Number of checks that encountered errors
        warnings: Count of warning-level findings
        critical_issues: Count of critical severity findings
        info_issues: Count of informational findings
        total_execution_time: Total time for all checks in seconds
        compliance_score: Overall compliance score (0-100)

    Example:
        ```python
        summary = AssessmentSummary(
            total_checks=25,
            passed_checks=20,
            failed_checks=3,
            skipped_checks=1,
            error_checks=1,
            warnings=2,
            critical_issues=1,
            total_execution_time=45.5
        )

        print(f"Pass rate: {summary.pass_rate:.1f}%")
        print(f"Compliance score: {summary.compliance_score}")
        ```
    """

    model_config = ConfigDict(validate_assignment=True, extra="forbid", frozen=False)

    # Check counts
    total_checks: int = Field(..., description="Total number of assessment checks performed", ge=0)
    passed_checks: int = Field(..., description="Number of checks that passed successfully", ge=0)
    failed_checks: int = Field(..., description="Number of checks that failed compliance", ge=0)
    skipped_checks: int = Field(..., description="Number of checks that were skipped", ge=0)
    error_checks: int = Field(..., description="Number of checks that encountered errors", ge=0)

    # Severity breakdowns
    warnings: int = Field(..., description="Count of warning-level findings", ge=0)
    critical_issues: int = Field(..., description="Count of critical severity findings", ge=0)
    info_issues: int = Field(default=0, description="Count of informational findings", ge=0)

    # Performance metrics
    total_execution_time: float = Field(..., description="Total execution time for all checks in seconds", ge=0.0)

    @field_validator("total_checks")
    @classmethod
    def validate_total_checks(cls, v: int, info) -> int:
        """Validate that total checks is consistent with individual counts."""
        # This validator will run before the other fields are available
        # So we can't validate consistency here - we'll do it in model_post_init
        return v

    def model_post_init(self, __context) -> None:
        """Validate consistency after all fields are set."""
        calculated_total = self.passed_checks + self.failed_checks + self.skipped_checks + self.error_checks

        if calculated_total != self.total_checks:
            logger.warning(
                f"Total checks ({self.total_checks}) doesn't match sum of individual counts ({calculated_total})"
            )

    @property
    def pass_rate(self) -> float:
        """Calculate pass rate percentage (0-100)."""
        if self.total_checks == 0:
            return 0.0
        return (self.passed_checks / self.total_checks) * 100

    @property
    def failure_rate(self) -> float:
        """Calculate failure rate percentage (0-100)."""
        if self.total_checks == 0:
            return 0.0
        return (self.failed_checks / self.total_checks) * 100

    @property
    def compliance_score(self) -> int:
        """
        Calculate overall compliance score (0-100).

        Score is weighted by severity:
        - Critical failures: -10 points each
        - Warning failures: -5 points each
        - Passed checks: +4 points each
        - Maximum score: 100
        """
        if self.total_checks == 0:
            return 100

        base_score = 100

        # Penalize critical issues more heavily
        critical_penalty = self.critical_issues * 10
        warning_penalty = (self.failed_checks - self.critical_issues) * 5

        # Reward passed checks
        pass_bonus = self.passed_checks * 4

        # Calculate score relative to total possible points
        max_possible_points = self.total_checks * 4
        actual_points = max(0, pass_bonus - critical_penalty - warning_penalty)

        if max_possible_points == 0:
            return 100

        score = int((actual_points / max_possible_points) * 100)
        return min(100, max(0, score))

    @property
    def risk_level(self) -> str:
        """Determine risk level based on compliance score and critical issues."""
        if self.critical_issues > 0:
            return "HIGH"
        elif self.compliance_score < 70:
            return "MEDIUM"
        elif self.compliance_score < 90:
            return "LOW"
        else:
            return "MINIMAL"

    @property
    def execution_summary(self) -> str:
        """Generate human-readable execution summary."""
        return (
            f"{self.total_checks} checks completed in {self.total_execution_time:.1f}s "
            f"(avg: {self.avg_execution_time:.2f}s per check)"
        )

    @property
    def avg_execution_time(self) -> float:
        """Calculate average execution time per check."""
        if self.total_checks == 0:
            return 0.0
        return self.total_execution_time / self.total_checks


class AssessmentReport(BaseModel):
    """
    Complete Cloud Foundations Assessment Report.

    Comprehensive assessment report containing metadata, configuration,
    results, and analysis for AWS account compliance evaluation.

    This report serves as the primary output of CFAT assessments,
    providing detailed findings, summary statistics, and export
    capabilities for various formats (HTML, CSV, JSON).

    The report includes:
    - Account and environment metadata
    - Assessment configuration details
    - Individual check results
    - Summary statistics and scoring
    - Risk analysis and prioritization
    - Export and reporting capabilities

    Attributes:
        timestamp: When the assessment was performed
        account_id: AWS account ID being assessed
        region: Primary AWS region for the assessment
        profile: AWS CLI profile used for the assessment
        version: CFAT tool version
        included_checks: List of check categories included
        excluded_checks: List of specific checks excluded
        severity_threshold: Minimum severity level reported
        results: Individual assessment check results
        summary: Statistical summary of all results
        metadata: Additional metadata and configuration

    Example:
        ```python
        report = AssessmentReport(
            account_id="123456789012",
            region="us-east-1",
            profile="default",
            version="0.5.0",
            included_checks=["iam", "vpc", "cloudtrail"],
            results=[result1, result2, result3],
            summary=summary,
            severity_threshold=Severity.WARNING
        )

        # Export in different formats
        report.to_html("report.html")
        report.to_json("report.json")
        report.to_csv("findings.csv")
        ```
    """

    model_config = ConfigDict(
        validate_assignment=True,
        extra="allow",  # Allow additional metadata
        frozen=False,
        json_encoders={datetime: lambda dt: dt.isoformat()},
    )

    # Core metadata
    timestamp: datetime = Field(default_factory=datetime.utcnow, description="When the assessment was performed")
    account_id: str = Field(..., description="AWS account ID being assessed", min_length=12, max_length=12)
    region: str = Field(..., description="Primary AWS region for the assessment")
    profile: str = Field(..., description="AWS CLI profile used for the assessment")
    version: str = Field(..., description="CFAT tool version")

    # Assessment configuration
    included_checks: List[str] = Field(..., description="List of check categories included in assessment")
    excluded_checks: List[str] = Field(
        default_factory=list, description="List of specific checks excluded from assessment"
    )
    severity_threshold: Severity = Field(..., description="Minimum severity level reported")

    # Core results
    results: List[AssessmentResult] = Field(..., description="Individual assessment check results")
    summary: AssessmentSummary = Field(..., description="Statistical summary of assessment results")

    # Additional data
    metadata: Dict[str, Any] = Field(default_factory=dict, description="Additional metadata and configuration details")

    @field_validator("account_id")
    @classmethod
    def validate_account_id(cls, v: str) -> str:
        """Validate AWS account ID format."""
        if not v.isdigit() or len(v) != 12:
            raise ValueError("AWS account ID must be 12 digits")
        return v

    def model_post_init(self, __context) -> None:
        """Validate report consistency after initialization."""
        # Ensure summary is consistent with results
        actual_total = len(self.results)
        if hasattr(self.summary, "total_checks") and self.summary.total_checks != actual_total:
            logger.warning(
                f"Summary total_checks ({self.summary.total_checks}) doesn't match "
                f"actual results count ({actual_total})"
            )

    # Query methods
    def get_results_by_category(self, category: str) -> List[AssessmentResult]:
        """
        Get assessment results filtered by category.

        Args:
            category: Category to filter by (e.g., 'iam', 'vpc', 'cloudtrail')

        Returns:
            List of results matching the specified category
        """
        return [result for result in self.results if result.check_category.lower() == category.lower()]

    def get_results_by_severity(self, severity: Severity) -> List[AssessmentResult]:
        """
        Get assessment results filtered by severity level.

        Args:
            severity: Severity level to filter by

        Returns:
            List of results matching the specified severity
        """
        return [result for result in self.results if result.severity == severity]

    def get_failed_results(self) -> List[AssessmentResult]:
        """
        Get all failed assessment results.

        Returns:
            List of results with FAIL status
        """
        return [result for result in self.results if result.failed]

    def get_critical_results(self) -> List[AssessmentResult]:
        """
        Get all critical severity results.

        Returns:
            List of results with CRITICAL severity
        """
        return self.get_results_by_severity(Severity.CRITICAL)

    def get_passed_results(self) -> List[AssessmentResult]:
        """
        Get all passed assessment results.

        Returns:
            List of results with PASS status
        """
        return [result for result in self.results if result.passed]

    def get_categories(self) -> List[str]:
        """
        Get unique categories from all results.

        Returns:
            Sorted list of unique categories
        """
        categories = {result.check_category for result in self.results}
        return sorted(categories)

    def get_category_summary(self) -> Dict[str, Dict[str, int]]:
        """
        Get summary statistics by category.

        Returns:
            Dictionary mapping categories to their pass/fail counts
        """
        category_stats = {}
        for category in self.get_categories():
            category_results = self.get_results_by_category(category)
            category_stats[category] = {
                "total": len(category_results),
                "passed": len([r for r in category_results if r.passed]),
                "failed": len([r for r in category_results if r.failed]),
                "critical": len([r for r in category_results if r.is_critical]),
            }
        return category_stats

    # Export methods
    def to_html(self, file_path: Union[str, Path]) -> None:
        """
        Generate comprehensive HTML assessment report.

        Creates a styled, interactive HTML report with charts,
        filtering capabilities, and detailed findings.

        Args:
            file_path: Output file path for HTML report

        Raises:
            ImportError: If required HTML generation dependencies are missing
        """
        from runbooks.cfat.report import HTMLReportGenerator

        output_path = Path(file_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        generator = HTMLReportGenerator(self)
        generator.generate(output_path)

        logger.info(f"HTML report generated: {output_path}")

    def to_csv(self, file_path: Union[str, Path]) -> None:
        """
        Generate CSV export of assessment findings.

        Creates a comma-separated values file suitable for
        import into spreadsheet applications or project
        management tools.

        Args:
            file_path: Output file path for CSV report

        Note:
            CSV format is optimized for import into Jira, Asana,
            and other project management systems.
        """
        import csv

        output_path = Path(file_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Define CSV headers
        headers = [
            "finding_id",
            "check_name",
            "category",
            "status",
            "severity",
            "message",
            "resource_arn",
            "execution_time",
            "timestamp",
            "recommendations",
        ]

        with open(output_path, "w", newline="", encoding="utf-8") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=headers)
            writer.writeheader()

            for result in self.results:
                row = {
                    "finding_id": result.finding_id,
                    "check_name": result.check_name,
                    "category": result.check_category,
                    "status": result.status.value,
                    "severity": result.severity.value,
                    "message": result.message,
                    "resource_arn": result.resource_arn or "",
                    "execution_time": result.execution_time,
                    "timestamp": result.timestamp.isoformat(),
                    "recommendations": "; ".join(result.recommendations),
                }
                writer.writerow(row)

        logger.info(f"CSV report generated: {output_path}")

    def to_json(self, file_path: Union[str, Path]) -> None:
        """
        Generate JSON export of complete assessment data.

        Creates a structured JSON file containing all assessment
        data including metadata, results, and summary statistics.
        Suitable for programmatic processing and API integration.

        Args:
            file_path: Output file path for JSON report
        """
        import json

        output_path = Path(file_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Custom JSON encoder for datetime and other types
        def json_encoder(obj):
            if isinstance(obj, datetime):
                return obj.isoformat()
            elif hasattr(obj, "value"):  # Handle Enum types
                return obj.value
            return str(obj)

        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(self.model_dump(exclude_none=True), f, indent=2, default=json_encoder, ensure_ascii=False)

        logger.info(f"JSON report generated: {output_path}")

    def to_markdown(self, file_path: Union[str, Path]) -> None:
        """
        Generate Markdown assessment report.

        Creates a structured Markdown document suitable for
        documentation systems, GitHub, and technical reviews.

        Args:
            file_path: Output file path for Markdown report
        """
        output_path = Path(file_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate markdown content
        md_content = self._generate_markdown_content()

        with open(output_path, "w", encoding="utf-8") as f:
            f.write(md_content)

        logger.info(f"Markdown report generated: {output_path}")

    def _generate_markdown_content(self) -> str:
        """Generate markdown content for the report."""
        lines = []

        # Header
        lines.append(f"# Cloud Foundations Assessment Report")
        lines.append(f"")
        lines.append(f"**Account:** {self.account_id}")
        lines.append(f"**Region:** {self.region}")
        lines.append(f"**Assessment Date:** {self.timestamp.strftime('%Y-%m-%d %H:%M:%S')}")
        lines.append(f"**Tool Version:** {self.version}")
        lines.append(f"")

        # Summary
        lines.append("## Executive Summary")
        lines.append("")
        lines.append(f"- **Total Checks:** {self.summary.total_checks}")
        lines.append(f"- **Pass Rate:** {self.summary.pass_rate:.1f}%")
        lines.append(f"- **Compliance Score:** {self.summary.compliance_score}/100")
        lines.append(f"- **Risk Level:** {self.summary.risk_level}")
        lines.append(f"- **Critical Issues:** {self.summary.critical_issues}")
        lines.append(f"- **Execution Time:** {self.summary.total_execution_time:.1f}s")
        lines.append("")

        # Critical findings
        critical_results = self.get_critical_results()
        if critical_results:
            lines.append("## 🚨 Critical Findings")
            lines.append("")
            for result in critical_results:
                lines.append(f"### {result.finding_id}: {result.check_name}")
                lines.append(f"**Status:** {result.status.value}")
                lines.append(f"**Message:** {result.message}")
                if result.recommendations:
                    lines.append("**Recommendations:**")
                    for rec in result.recommendations:
                        lines.append(f"- {rec}")
                lines.append("")

        # Category breakdown
        lines.append("## Assessment Results by Category")
        lines.append("")
        category_summary = self.get_category_summary()
        for category, stats in category_summary.items():
            pass_rate = (stats["passed"] / stats["total"] * 100) if stats["total"] > 0 else 0
            lines.append(f"### {category.upper()}")
            lines.append(f"- Total: {stats['total']}")
            lines.append(f"- Passed: {stats['passed']} ({pass_rate:.1f}%)")
            lines.append(f"- Failed: {stats['failed']}")
            lines.append(f"- Critical: {stats['critical']}")
            lines.append("")

        return "\n".join(lines)


class CheckConfig(BaseModel):
    """
    Configuration for Individual Assessment Checks.

    Defines configuration parameters for specific assessment checks,
    including execution settings, severity overrides, and custom
    parameters for check behavior customization.

    This allows fine-grained control over assessment execution,
    enabling organizations to customize checks based on their
    specific requirements and compliance frameworks.

    Attributes:
        name: Unique identifier for the check
        enabled: Whether the check should be executed
        severity: Default severity level for this check
        timeout: Maximum execution time in seconds
        parameters: Check-specific configuration parameters
        description: Human-readable description of the check
        category: Category grouping for organization

    Example:
        ```python
        check_config = CheckConfig(
            name="iam_root_mfa",
            enabled=True,
            severity=Severity.CRITICAL,
            timeout=30,
            description="Verify root account MFA is enabled",
            category="iam",
            parameters={
                "enforce_hardware_mfa": True,
                "check_all_regions": False
            }
        )
        ```
    """

    model_config = ConfigDict(
        validate_assignment=True,
        extra="allow",  # Allow custom parameters
        frozen=False,
    )

    # Core identification
    name: str = Field(..., description="Unique identifier for the check", min_length=1, max_length=100)
    enabled: bool = Field(default=True, description="Whether the check should be executed")

    # Execution settings
    severity: Severity = Field(
        default=Severity.WARNING, description="Default severity level for findings from this check"
    )
    timeout: int = Field(
        default=60,
        description="Maximum execution time in seconds",
        gt=0,
        le=3600,  # Max 1 hour
    )

    # Additional metadata
    description: Optional[str] = Field(
        default=None, description="Human-readable description of what this check validates"
    )
    category: Optional[str] = Field(default=None, description="Category grouping for organization (iam, vpc, etc.)")

    # Custom parameters
    parameters: Dict[str, Any] = Field(default_factory=dict, description="Check-specific configuration parameters")

    @field_validator("name")
    @classmethod
    def validate_check_name(cls, v: str) -> str:
        """Validate check name format."""
        # Convert to standard format
        name = v.strip().lower().replace(" ", "_").replace("-", "_")

        # Basic validation
        if not name.replace("_", "").isalnum():
            raise ValueError("Check name must contain only alphanumeric characters and underscores")

        return name

    def get_parameter(self, key: str, default: Any = None) -> Any:
        """Get a specific parameter value with default fallback."""
        return self.parameters.get(key, default)

    def set_parameter(self, key: str, value: Any) -> None:
        """Set a specific parameter value."""
        self.parameters[key] = value


class AssessmentConfig(BaseModel):
    """
    Comprehensive Configuration for Cloud Foundations Assessment Execution.

    Defines all aspects of assessment execution including check selection,
    parallel execution settings, reporting preferences, and individual
    check configurations. This provides enterprise-grade control over
    assessment behavior and customization capabilities.

    The configuration supports:
    - Flexible check filtering by category or specific checks
    - Performance tuning through parallel execution controls
    - Reporting customization for different audiences
    - Per-check configuration overrides
    - Compliance framework alignment

    Attributes:
        included_categories: Categories to include in assessment
        excluded_categories: Categories to exclude from assessment
        included_checks: Specific checks to include (overrides categories)
        excluded_checks: Specific checks to exclude
        parallel_execution: Enable parallel check execution
        max_workers: Maximum number of parallel worker threads
        timeout: Overall assessment timeout in seconds
        severity_threshold: Minimum severity level to include in reports
        include_passed: Include passed checks in detailed reports
        include_skipped: Include skipped checks in detailed reports
        check_configs: Per-check configuration overrides
        compliance_framework: Target compliance framework alignment

    Example:
        ```python
        config = AssessmentConfig(
            included_categories=["iam", "cloudtrail", "config"],
            excluded_checks=["iam_unused_credentials"],
            parallel_execution=True,
            max_workers=5,
            severity_threshold=Severity.WARNING,
            compliance_framework="SOC2"
        )

        # Add custom check configuration
        config.add_check_config("iam_root_mfa", {
            "severity": Severity.CRITICAL,
            "timeout": 30
        })
        ```
    """

    model_config = ConfigDict(validate_assignment=True, extra="forbid", frozen=False)

    # Check selection configuration
    included_categories: List[str] = Field(
        default_factory=lambda: ["iam", "vpc", "ec2", "cloudtrail", "config", "organizations"],
        description="Assessment categories to include (default: all core categories)",
    )
    excluded_categories: List[str] = Field(
        default_factory=list, description="Assessment categories to exclude from execution"
    )
    included_checks: List[str] = Field(
        default_factory=list, description="Specific checks to include (when specified, overrides category selection)"
    )
    excluded_checks: List[str] = Field(default_factory=list, description="Specific checks to exclude from execution")

    # Execution performance configuration
    parallel_execution: bool = Field(default=True, description="Enable parallel execution of assessment checks")
    max_workers: int = Field(
        default=10,
        description="Maximum number of parallel worker threads",
        gt=0,
        le=50,  # Reasonable upper limit
    )
    timeout: int = Field(
        default=300,
        description="Overall assessment timeout in seconds",
        gt=0,
        le=7200,  # Max 2 hours
    )

    # Reporting and output configuration
    severity_threshold: Severity = Field(
        default=Severity.WARNING, description="Minimum severity level to include in reports"
    )
    include_passed: bool = Field(default=True, description="Include passed checks in detailed reports")
    include_skipped: bool = Field(default=False, description="Include skipped checks in detailed reports")

    # Advanced configuration
    check_configs: Dict[str, CheckConfig] = Field(default_factory=dict, description="Per-check configuration overrides")
    compliance_framework: Optional[str] = Field(
        default=None, description="Target compliance framework (SOC2, PCI-DSS, HIPAA, etc.)"
    )
    custom_metadata: Dict[str, Any] = Field(default_factory=dict, description="Custom metadata for assessment tracking")

    @field_validator("max_workers")
    @classmethod
    def validate_max_workers(cls, v: int) -> int:
        """Validate max_workers is reasonable."""
        if v > 50:
            logger.warning(f"max_workers ({v}) is very high, consider reducing for stability")
        elif v < 1:
            raise ValueError("max_workers must be at least 1")
        return v

    @field_validator("included_categories", "excluded_categories")
    @classmethod
    def validate_categories(cls, v: List[str]) -> List[str]:
        """Validate and normalize category names."""
        if not v:
            return v

        # Normalize category names to lowercase
        normalized = [cat.strip().lower() for cat in v if cat.strip()]

        # Define valid categories
        valid_categories = {
            "iam",
            "vpc",
            "ec2",
            "cloudtrail",
            "config",
            "organizations",
            "cloudformation",
            "s3",
            "rds",
            "lambda",
            "kms",
            "backup",
            "guardduty",
            "securityhub",
            "accessanalyzer",
        }

        # Log warnings for unknown categories
        for cat in normalized:
            if cat not in valid_categories:
                logger.warning(f"Unknown assessment category: {cat}")

        return normalized

    def get_check_config(self, check_name: str) -> CheckConfig:
        """
        Get configuration for a specific check.

        Args:
            check_name: Name of the check to get configuration for

        Returns:
            CheckConfig object with default or custom configuration
        """
        return self.check_configs.get(check_name, CheckConfig(name=check_name))

    def add_check_config(self, check_name: str, config: Union[CheckConfig, Dict[str, Any]]) -> None:
        """
        Add or update configuration for a specific check.

        Args:
            check_name: Name of the check to configure
            config: CheckConfig object or dictionary of configuration parameters
        """
        if isinstance(config, dict):
            config = CheckConfig(name=check_name, **config)
        elif not isinstance(config, CheckConfig):
            raise ValueError("config must be CheckConfig instance or dictionary")

        self.check_configs[check_name] = config

    def remove_check_config(self, check_name: str) -> bool:
        """
        Remove configuration for a specific check.

        Args:
            check_name: Name of the check to remove configuration for

        Returns:
            True if configuration was removed, False if it didn't exist
        """
        return self.check_configs.pop(check_name, None) is not None

    def get_effective_checks(self, available_checks: List[str]) -> List[str]:
        """
        Determine which checks should be executed based on configuration.

        Args:
            available_checks: List of all available check names

        Returns:
            List of check names that should be executed
        """
        # Start with available checks
        effective_checks = set(available_checks)

        # Apply category filtering if no specific checks are included
        if not self.included_checks:
            if self.included_categories:
                category_checks = set()
                for check in available_checks:
                    for category in self.included_categories:
                        if check.startswith(category.lower()):
                            category_checks.add(check)
                effective_checks = category_checks

            # Remove excluded categories
            if self.excluded_categories:
                for category in self.excluded_categories:
                    effective_checks = {check for check in effective_checks if not check.startswith(category.lower())}
        else:
            # Use only specifically included checks
            effective_checks = set(self.included_checks) & effective_checks

        # Remove specifically excluded checks
        if self.excluded_checks:
            effective_checks = effective_checks - set(self.excluded_checks)

        return sorted(list(effective_checks))

    def to_dict(self) -> Dict[str, Any]:
        """Convert configuration to dictionary for serialization."""
        return self.model_dump(exclude_none=True)
````
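The sketch below is not part of the published wheel; it is a minimal illustration of how the models added in this module compose, assuming the import path `runbooks.cfat.models` shown in the module docstring. The finding values, summary counts, and output paths are hypothetical, and `get_effective_checks` is shown with made-up check names to illustrate the prefix-based category filtering.

```python
# Illustrative only — not shipped in runbooks 0.6.1. Assumes the models defined above.
from runbooks.cfat.models import (
    AssessmentConfig, AssessmentReport, AssessmentResult,
    AssessmentSummary, CheckStatus, Severity,
)

# Two hypothetical findings (field values are invented for the example).
results = [
    AssessmentResult(
        finding_id="IAM-001", check_name="root_mfa_enabled", check_category="iam",
        status=CheckStatus.FAIL, severity=Severity.CRITICAL,
        message="Root account MFA is not enabled", execution_time=0.4,
    ),
    AssessmentResult(
        finding_id="CLOUDTRAIL-001", check_name="cloudtrail_enabled", check_category="cloudtrail",
        status=CheckStatus.PASS, severity=Severity.INFO,
        message="An organization trail is active", execution_time=0.7,
    ),
]

# Summary counts are supplied by the caller; model_post_init only warns on mismatch.
summary = AssessmentSummary(
    total_checks=2, passed_checks=1, failed_checks=1, skipped_checks=0,
    error_checks=0, warnings=0, critical_issues=1, total_execution_time=1.1,
)
print(summary.pass_rate)         # 50.0
print(summary.compliance_score)  # max(0, 1*4 - 1*10 - 0*5) / (2*4) -> 0
print(summary.risk_level)        # "HIGH" because critical_issues > 0

report = AssessmentReport(
    account_id="123456789012", region="us-east-1", profile="default", version="0.6.1",
    included_checks=["iam", "cloudtrail"], severity_threshold=Severity.WARNING,
    results=results, summary=summary,
)
report.to_markdown("artifacts/cfat.md")  # Markdown summary with category breakdown
report.to_csv("artifacts/findings.csv")  # one row per finding, Jira/Asana friendly

# Check selection: categories match check names by prefix, then exclusions apply.
config = AssessmentConfig(included_categories=["iam"], excluded_checks=["iam_unused_credentials"])
print(config.get_effective_checks(["iam_root_mfa", "iam_unused_credentials", "vpc_flow_logs"]))
# -> ['iam_root_mfa']
```

Note that the worked `compliance_score` above follows the weighting described in the `AssessmentSummary` docstring: passed checks earn 4 points, critical failures cost 10, other failures cost 5, and the result is scaled against the 4-points-per-check maximum and clamped to 0-100.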