runbooks 0.2.5__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- conftest.py +26 -0
- jupyter-agent/.env +2 -0
- jupyter-agent/.env.template +2 -0
- jupyter-agent/.gitattributes +35 -0
- jupyter-agent/.gradio/certificate.pem +31 -0
- jupyter-agent/README.md +16 -0
- jupyter-agent/__main__.log +8 -0
- jupyter-agent/app.py +256 -0
- jupyter-agent/cloudops-agent.png +0 -0
- jupyter-agent/ds-system-prompt.txt +154 -0
- jupyter-agent/jupyter-agent.png +0 -0
- jupyter-agent/llama3_template.jinja +123 -0
- jupyter-agent/requirements.txt +9 -0
- jupyter-agent/tmp/4ojbs8a02ir/jupyter-agent.ipynb +68 -0
- jupyter-agent/tmp/cm5iasgpm3p/jupyter-agent.ipynb +91 -0
- jupyter-agent/tmp/crqbsseag5/jupyter-agent.ipynb +91 -0
- jupyter-agent/tmp/hohanq1u097/jupyter-agent.ipynb +57 -0
- jupyter-agent/tmp/jns1sam29wm/jupyter-agent.ipynb +53 -0
- jupyter-agent/tmp/jupyter-agent.ipynb +27 -0
- jupyter-agent/utils.py +409 -0
- runbooks/__init__.py +71 -3
- runbooks/__main__.py +13 -0
- runbooks/aws/ec2_describe_instances.py +1 -1
- runbooks/aws/ec2_run_instances.py +8 -2
- runbooks/aws/ec2_start_stop_instances.py +17 -4
- runbooks/aws/ec2_unused_volumes.py +5 -1
- runbooks/aws/s3_create_bucket.py +4 -2
- runbooks/aws/s3_list_objects.py +6 -1
- runbooks/aws/tagging_lambda_handler.py +13 -2
- runbooks/aws/tags.json +12 -0
- runbooks/base.py +353 -0
- runbooks/cfat/README.md +49 -0
- runbooks/cfat/__init__.py +74 -0
- runbooks/cfat/app.ts +644 -0
- runbooks/cfat/assessment/__init__.py +40 -0
- runbooks/cfat/assessment/asana-import.csv +39 -0
- runbooks/cfat/assessment/cfat-checks.csv +31 -0
- runbooks/cfat/assessment/cfat.txt +520 -0
- runbooks/cfat/assessment/collectors.py +200 -0
- runbooks/cfat/assessment/jira-import.csv +39 -0
- runbooks/cfat/assessment/runner.py +387 -0
- runbooks/cfat/assessment/validators.py +290 -0
- runbooks/cfat/cli.py +103 -0
- runbooks/cfat/docs/asana-import.csv +24 -0
- runbooks/cfat/docs/cfat-checks.csv +31 -0
- runbooks/cfat/docs/cfat.txt +335 -0
- runbooks/cfat/docs/checks-output.png +0 -0
- runbooks/cfat/docs/cloudshell-console-run.png +0 -0
- runbooks/cfat/docs/cloudshell-download.png +0 -0
- runbooks/cfat/docs/cloudshell-output.png +0 -0
- runbooks/cfat/docs/downloadfile.png +0 -0
- runbooks/cfat/docs/jira-import.csv +24 -0
- runbooks/cfat/docs/open-cloudshell.png +0 -0
- runbooks/cfat/docs/report-header.png +0 -0
- runbooks/cfat/models.py +1026 -0
- runbooks/cfat/package-lock.json +5116 -0
- runbooks/cfat/package.json +38 -0
- runbooks/cfat/report.py +496 -0
- runbooks/cfat/reporting/__init__.py +46 -0
- runbooks/cfat/reporting/exporters.py +337 -0
- runbooks/cfat/reporting/formatters.py +496 -0
- runbooks/cfat/reporting/templates.py +135 -0
- runbooks/cfat/run-assessment.sh +23 -0
- runbooks/cfat/runner.py +69 -0
- runbooks/cfat/src/actions/check-cloudtrail-existence.ts +43 -0
- runbooks/cfat/src/actions/check-config-existence.ts +37 -0
- runbooks/cfat/src/actions/check-control-tower.ts +37 -0
- runbooks/cfat/src/actions/check-ec2-existence.ts +46 -0
- runbooks/cfat/src/actions/check-iam-users.ts +50 -0
- runbooks/cfat/src/actions/check-legacy-cur.ts +30 -0
- runbooks/cfat/src/actions/check-org-cloudformation.ts +30 -0
- runbooks/cfat/src/actions/check-vpc-existence.ts +43 -0
- runbooks/cfat/src/actions/create-asanaimport.ts +14 -0
- runbooks/cfat/src/actions/create-backlog.ts +372 -0
- runbooks/cfat/src/actions/create-jiraimport.ts +15 -0
- runbooks/cfat/src/actions/create-report.ts +616 -0
- runbooks/cfat/src/actions/define-account-type.ts +51 -0
- runbooks/cfat/src/actions/get-enabled-org-policy-types.ts +40 -0
- runbooks/cfat/src/actions/get-enabled-org-services.ts +26 -0
- runbooks/cfat/src/actions/get-idc-info.ts +34 -0
- runbooks/cfat/src/actions/get-org-da-accounts.ts +34 -0
- runbooks/cfat/src/actions/get-org-details.ts +35 -0
- runbooks/cfat/src/actions/get-org-member-accounts.ts +44 -0
- runbooks/cfat/src/actions/get-org-ous.ts +35 -0
- runbooks/cfat/src/actions/get-regions.ts +22 -0
- runbooks/cfat/src/actions/zip-assessment.ts +27 -0
- runbooks/cfat/src/types/index.d.ts +147 -0
- runbooks/cfat/tests/__init__.py +141 -0
- runbooks/cfat/tests/test_cli.py +340 -0
- runbooks/cfat/tests/test_integration.py +290 -0
- runbooks/cfat/tests/test_models.py +505 -0
- runbooks/cfat/tests/test_reporting.py +354 -0
- runbooks/cfat/tsconfig.json +16 -0
- runbooks/cfat/webpack.config.cjs +27 -0
- runbooks/config.py +260 -0
- runbooks/finops/README.md +337 -0
- runbooks/finops/__init__.py +86 -0
- runbooks/finops/aws_client.py +245 -0
- runbooks/finops/cli.py +151 -0
- runbooks/finops/cost_processor.py +410 -0
- runbooks/finops/dashboard_runner.py +448 -0
- runbooks/finops/helpers.py +355 -0
- runbooks/finops/main.py +14 -0
- runbooks/finops/profile_processor.py +174 -0
- runbooks/finops/types.py +66 -0
- runbooks/finops/visualisations.py +80 -0
- runbooks/inventory/.gitignore +354 -0
- runbooks/inventory/ArgumentsClass.py +261 -0
- runbooks/inventory/FAILED_SCRIPTS_TROUBLESHOOTING.md +619 -0
- runbooks/inventory/Inventory_Modules.py +6130 -0
- runbooks/inventory/LandingZone/delete_lz.py +1075 -0
- runbooks/inventory/PASSED_SCRIPTS_GUIDE.md +738 -0
- runbooks/inventory/README.md +1320 -0
- runbooks/inventory/__init__.py +62 -0
- runbooks/inventory/account_class.py +532 -0
- runbooks/inventory/all_my_instances_wrapper.py +123 -0
- runbooks/inventory/aws_decorators.py +201 -0
- runbooks/inventory/aws_organization.png +0 -0
- runbooks/inventory/cfn_move_stack_instances.py +1526 -0
- runbooks/inventory/check_cloudtrail_compliance.py +614 -0
- runbooks/inventory/check_controltower_readiness.py +1107 -0
- runbooks/inventory/check_landingzone_readiness.py +711 -0
- runbooks/inventory/cloudtrail.md +727 -0
- runbooks/inventory/collectors/__init__.py +20 -0
- runbooks/inventory/collectors/aws_compute.py +518 -0
- runbooks/inventory/collectors/aws_networking.py +275 -0
- runbooks/inventory/collectors/base.py +222 -0
- runbooks/inventory/core/__init__.py +19 -0
- runbooks/inventory/core/collector.py +303 -0
- runbooks/inventory/core/formatter.py +296 -0
- runbooks/inventory/delete_s3_buckets_objects.py +169 -0
- runbooks/inventory/discovery.md +81 -0
- runbooks/inventory/draw_org_structure.py +748 -0
- runbooks/inventory/ec2_vpc_utils.py +341 -0
- runbooks/inventory/find_cfn_drift_detection.py +272 -0
- runbooks/inventory/find_cfn_orphaned_stacks.py +719 -0
- runbooks/inventory/find_cfn_stackset_drift.py +733 -0
- runbooks/inventory/find_ec2_security_groups.py +669 -0
- runbooks/inventory/find_landingzone_versions.py +201 -0
- runbooks/inventory/find_vpc_flow_logs.py +1221 -0
- runbooks/inventory/inventory.sh +659 -0
- runbooks/inventory/list_cfn_stacks.py +558 -0
- runbooks/inventory/list_cfn_stackset_operation_results.py +252 -0
- runbooks/inventory/list_cfn_stackset_operations.py +734 -0
- runbooks/inventory/list_cfn_stacksets.py +453 -0
- runbooks/inventory/list_config_recorders_delivery_channels.py +681 -0
- runbooks/inventory/list_ds_directories.py +354 -0
- runbooks/inventory/list_ec2_availability_zones.py +286 -0
- runbooks/inventory/list_ec2_ebs_volumes.py +244 -0
- runbooks/inventory/list_ec2_instances.py +425 -0
- runbooks/inventory/list_ecs_clusters_and_tasks.py +562 -0
- runbooks/inventory/list_elbs_load_balancers.py +411 -0
- runbooks/inventory/list_enis_network_interfaces.py +526 -0
- runbooks/inventory/list_guardduty_detectors.py +568 -0
- runbooks/inventory/list_iam_policies.py +404 -0
- runbooks/inventory/list_iam_roles.py +518 -0
- runbooks/inventory/list_iam_saml_providers.py +359 -0
- runbooks/inventory/list_lambda_functions.py +882 -0
- runbooks/inventory/list_org_accounts.py +446 -0
- runbooks/inventory/list_org_accounts_users.py +354 -0
- runbooks/inventory/list_rds_db_instances.py +406 -0
- runbooks/inventory/list_route53_hosted_zones.py +318 -0
- runbooks/inventory/list_servicecatalog_provisioned_products.py +575 -0
- runbooks/inventory/list_sns_topics.py +360 -0
- runbooks/inventory/list_ssm_parameters.py +402 -0
- runbooks/inventory/list_vpc_subnets.py +433 -0
- runbooks/inventory/list_vpcs.py +422 -0
- runbooks/inventory/lockdown_cfn_stackset_role.py +224 -0
- runbooks/inventory/models/__init__.py +24 -0
- runbooks/inventory/models/account.py +192 -0
- runbooks/inventory/models/inventory.py +309 -0
- runbooks/inventory/models/resource.py +247 -0
- runbooks/inventory/recover_cfn_stack_ids.py +205 -0
- runbooks/inventory/requirements.txt +12 -0
- runbooks/inventory/run_on_multi_accounts.py +211 -0
- runbooks/inventory/tests/common_test_data.py +3661 -0
- runbooks/inventory/tests/common_test_functions.py +204 -0
- runbooks/inventory/tests/setup.py +24 -0
- runbooks/inventory/tests/src.py +18 -0
- runbooks/inventory/tests/test_cfn_describe_stacks.py +208 -0
- runbooks/inventory/tests/test_ec2_describe_instances.py +162 -0
- runbooks/inventory/tests/test_inventory_modules.py +55 -0
- runbooks/inventory/tests/test_lambda_list_functions.py +86 -0
- runbooks/inventory/tests/test_moto_integration_example.py +273 -0
- runbooks/inventory/tests/test_org_list_accounts.py +49 -0
- runbooks/inventory/update_aws_actions.py +173 -0
- runbooks/inventory/update_cfn_stacksets.py +1215 -0
- runbooks/inventory/update_cloudwatch_logs_retention_policy.py +294 -0
- runbooks/inventory/update_iam_roles_cross_accounts.py +478 -0
- runbooks/inventory/update_s3_public_access_block.py +539 -0
- runbooks/inventory/utils/__init__.py +23 -0
- runbooks/inventory/utils/aws_helpers.py +510 -0
- runbooks/inventory/utils/threading_utils.py +493 -0
- runbooks/inventory/utils/validation.py +682 -0
- runbooks/inventory/verify_ec2_security_groups.py +1430 -0
- runbooks/main.py +1004 -0
- runbooks/organizations/__init__.py +12 -0
- runbooks/organizations/manager.py +374 -0
- runbooks/security/README.md +447 -0
- runbooks/security/__init__.py +71 -0
- runbooks/{security_baseline → security}/checklist/alternate_contacts.py +8 -1
- runbooks/{security_baseline → security}/checklist/bucket_public_access.py +4 -1
- runbooks/{security_baseline → security}/checklist/cloudwatch_alarm_configuration.py +9 -2
- runbooks/{security_baseline → security}/checklist/guardduty_enabled.py +9 -2
- runbooks/{security_baseline → security}/checklist/multi_region_instance_usage.py +5 -1
- runbooks/{security_baseline → security}/checklist/root_access_key.py +6 -1
- runbooks/{security_baseline → security}/config-origin.json +1 -1
- runbooks/{security_baseline → security}/config.json +1 -1
- runbooks/{security_baseline → security}/permission.json +1 -1
- runbooks/{security_baseline → security}/report_generator.py +10 -2
- runbooks/{security_baseline → security}/report_template_en.html +7 -7
- runbooks/{security_baseline → security}/report_template_jp.html +7 -7
- runbooks/{security_baseline → security}/report_template_kr.html +12 -12
- runbooks/{security_baseline → security}/report_template_vn.html +7 -7
- runbooks/{security_baseline → security}/run_script.py +8 -2
- runbooks/{security_baseline → security}/security_baseline_tester.py +12 -4
- runbooks/{security_baseline → security}/utils/common.py +5 -1
- runbooks/utils/__init__.py +204 -0
- runbooks-0.7.0.dist-info/METADATA +375 -0
- runbooks-0.7.0.dist-info/RECORD +249 -0
- {runbooks-0.2.5.dist-info → runbooks-0.7.0.dist-info}/WHEEL +1 -1
- runbooks-0.7.0.dist-info/entry_points.txt +7 -0
- runbooks-0.7.0.dist-info/licenses/LICENSE +201 -0
- runbooks-0.7.0.dist-info/top_level.txt +3 -0
- runbooks/python101/calculator.py +0 -34
- runbooks/python101/config.py +0 -1
- runbooks/python101/exceptions.py +0 -16
- runbooks/python101/file_manager.py +0 -218
- runbooks/python101/toolkit.py +0 -153
- runbooks-0.2.5.dist-info/METADATA +0 -439
- runbooks-0.2.5.dist-info/RECORD +0 -61
- runbooks-0.2.5.dist-info/entry_points.txt +0 -3
- runbooks-0.2.5.dist-info/top_level.txt +0 -1
- /runbooks/{security_baseline/__init__.py → inventory/tests/script_test_data.py} +0 -0
- /runbooks/{security_baseline → security}/checklist/__init__.py +0 -0
- /runbooks/{security_baseline → security}/checklist/account_level_bucket_public_access.py +0 -0
- /runbooks/{security_baseline → security}/checklist/direct_attached_policy.py +0 -0
- /runbooks/{security_baseline → security}/checklist/iam_password_policy.py +0 -0
- /runbooks/{security_baseline → security}/checklist/iam_user_mfa.py +0 -0
- /runbooks/{security_baseline → security}/checklist/multi_region_trail.py +0 -0
- /runbooks/{security_baseline → security}/checklist/root_mfa.py +0 -0
- /runbooks/{security_baseline → security}/checklist/root_usage.py +0 -0
- /runbooks/{security_baseline → security}/checklist/trail_enabled.py +0 -0
- /runbooks/{security_baseline → security}/checklist/trusted_advisor.py +0 -0
- /runbooks/{security_baseline → security}/utils/__init__.py +0 -0
- /runbooks/{security_baseline → security}/utils/enums.py +0 -0
- /runbooks/{security_baseline → security}/utils/language.py +0 -0
- /runbooks/{security_baseline → security}/utils/level_const.py +0 -0
- /runbooks/{security_baseline → security}/utils/permission_list.py +0 -0
@@ -0,0 +1,1430 @@
|
|
1
|
+
# © 2024 Amazon Web Services, Inc. or its affiliates. All Rights Reserved.
|
2
|
+
#
|
3
|
+
# This AWS Content is provided subject to the terms of the AWS Customer Agreement available at
|
4
|
+
# http://aws.amazon.com/agreement or other written agreement between Customer and either
|
5
|
+
# Amazon Web Services, Inc. or Amazon Web Services EMEA SARL or both.
|
6
|
+
|
7
|
+
import csv
|
8
|
+
import logging
|
9
|
+
import os
|
10
|
+
from typing import Any, Dict, List
|
11
|
+
|
12
|
+
import boto3
|
13
|
+
import botocore
|
14
|
+
import jmespath
|
15
|
+
from Inventory_Modules import (
|
16
|
+
find_account_ecs_clusters_services_and_tasks2,
|
17
|
+
find_account_instances2,
|
18
|
+
find_account_rds_instances2,
|
19
|
+
find_lambda_functions2,
|
20
|
+
find_load_balancers2,
|
21
|
+
)
|
22
|
+
|
23
|
+
__version__ = "2024.09.25"

# import time

# Global Variables
# All knobs are environment-driven so the script can run unmodified in
# shared environments (CloudShell, CI, etc.).
#
# Path to the input CSV of resource ARNs and requested security groups.
CSV_FILE = os.getenv("CSV_FILE", "./all.csv")
# NOTE(review): when the env var is set this is a *string* (e.g. "INFO");
# when unset it is the int logging.ERROR. logging.basicConfig accepts both
# level names and ints, but a numeric string ("10") would not work — confirm
# callers export level names.
LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", logging.ERROR)
# Output filename passed to display_results() in main().
FILENAME_TO_SAVE_TO = os.getenv("VERIFY_FILENAME", "results.csv")
# Feature flags: env vars arrive as strings, hence the ["True", "true", True]
# membership tests at the call sites in main().
VERIFICATION = os.getenv("VERIFICATION", False)
FIND_EVERYTHING = os.getenv("FIND_EVERYTHING", False)
# Optional tag value handed to find_all_arns() to filter discovered resources.
TAG_VALUE_TO_FILTER = os.getenv("TAG_VALUE_TO_FILTER", None)
|
33
|
+
|
34
|
+
|
35
|
+
##################
|
36
|
+
# Functions
|
37
|
+
##################
|
38
|
+
|
39
|
+
# TODO:
|
40
|
+
# Can we add a verification for the script to validate that the security groups applied to the resources are applied to the proper resources?
|
41
|
+
|
42
|
+
|
43
|
+
def main(CSV_FILE):
    """
    Validate security-group attachments for the resources listed in a CSV file.

    Responsible for:
        1. Identifying the current account id and region.
        2. Importing the CSV file and loading it into state.
        3. Comparing each CSV ARN entry with the current account id and region.
        4. Resolving requested Security Group names to IDs.
        5. Validating the Security Groups attached to the matching ARNs.

    Args:
        CSV_FILE (str): Path to the CSV file of ARNs and security groups.

    Returns:
        None. Results are written via display_results() and logged/printed.
    """
    logging.basicConfig(level=LOGGING_LEVEL, format="[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
    # Quieten chatty third-party / stdlib loggers regardless of our own level.
    logging_minimum = logging.ERROR
    for noisy_logger in ("boto3", "botocore", "csv", "jmespath", "typing", "os", "connectionpool"):
        logging.getLogger(noisy_logger).setLevel(logging_minimum)
    logging.info("Initializing Security Group attachment script.")

    # Fix: keep csv_data bound even if the import fails — the original left it
    # undefined after a swallowed exception, causing a NameError further down.
    csv_data = []
    try:
        csv_data = csv_import(CSV_FILE)
    except Exception as e:
        logging.error(f"ERROR: Unable to import CSV file.\nError Message: {e}")

    try:
        account_id, region = current_contextual_identity()
        if account_id is None or region is None:
            raise ValueError("Unable to determine current account id and region.")
        logging.info(f"Found that we're working in account {account_id} in region {region}")
    except Exception as e:
        # Fix: without a resolved account/region nothing below can match; exit
        # instead of continuing into a NameError on the unbound account_id
        # (the original used a bare except and fell through).
        logging.error(f"ERROR: Unable to get AWS IAM contextual identity. Error Message: {e}")
        print("Finalized script. Exiting")
        return

    matching_entries = []
    try:
        matching_entries = get_arns_for_current_account(csv_data, account_id, region)
        logging.info(f"Found {len(matching_entries)} matching entries")
    except Exception as e:
        logging.error(f"ERROR: Unable to get matching CSV entries. Error Message: {e}")

    if VERIFICATION in ["True", "true", True]:
        try:
            display_dict = {
                "arn": {"DisplayOrder": 1, "Heading": "ARN"},
                "Success": {"DisplayOrder": 2, "Heading": "Success", "Condition": ["False", "false", False]},
                "Compliant": {"DisplayOrder": 3, "Heading": "Compliance", "Condition": ["False", "false", False]},
                "SecurityGroupsAttached": {"DisplayOrder": 4, "Heading": "SecGrps Attached"},
                "security_group_name": {"DisplayOrder": 5, "Heading": "Requested Sec Grp Name"},
                "security_group": {"DisplayOrder": 6, "Heading": "Requested Sec Grp ID"},
                "ErrorMessage": {"DisplayOrder": 7, "Heading": "Error Message"},
            }
            # Script only supports validation, and not attachment.
            results = validate_security_groups(matching_entries)
            logging.debug(results)
            successful_results = 0
            compliant_results = 0
            for result in results:
                # Fix: use .get() — "unsupported resource type" responses may
                # lack these keys, and result["Compliant"] raised KeyError.
                if result.get("Success"):
                    successful_results += 1
                if result.get("Compliant"):
                    compliant_results += 1
            display_results(results, display_dict, None, FILENAME_TO_SAVE_TO)
            print(
                f"Finished validation with {successful_results} successful checks and {compliant_results} compliant resources out of a total of {len(matching_entries)} requested resources."
            )
        except Exception as e:
            logging.error(f"ERROR: Unable to validate security groups. Error Message: {e}")

    if FIND_EVERYTHING in ["True", "true", True]:
        try:
            all_arns = find_all_arns(account_id, region, TAG_VALUE_TO_FILTER)
            display_dict = {
                "ARN": {"DisplayOrder": 1, "Heading": "ARN"},
                "Success": {"DisplayOrder": 2, "Heading": "Success", "Condition": ["False", "false", False]},
                "Account": {"DisplayOrder": 3, "Heading": "Account"},
                "Region": {"DisplayOrder": 4, "Heading": "Region"},
                "ResourceType": {"DisplayOrder": 5, "Heading": "Resource Type"},
                "ErrorMessage": {"DisplayOrder": 6, "Heading": "Error Message"},
            }
            display_results(all_arns, display_dict, None, FILENAME_TO_SAVE_TO)
        except Exception as e:
            logging.error(f"ERROR: Unable to find all ARNs. Error Message: {e}")

    print("Finalized script. Exiting")
|
132
|
+
|
133
|
+
|
134
|
+
def current_contextual_identity():
    """
    Resolve the AWS account id and region of the current credentials.

    Returns:
        tuple[str | None, str | None]: (account_id, region), or (None, None)
        when the STS call or session lookup fails.
    """
    try:
        caller = boto3.client("sts").get_caller_identity()
        session_region = boto3.Session().region_name
        logging.info(f"Account: {caller['Account']} | Region: {session_region}")
        return caller["Account"], session_region

    except Exception as e:
        logging.error(f"ERROR: Unable to determine current account id and region: {e}")
        return None, None
|
153
|
+
|
154
|
+
|
155
|
+
def csv_import(csv_file_path: str) -> List[Dict[str, Any]]:
    """
    Import a CSV file and return the data as a list of dictionaries.

    Args:
        csv_file_path (str): The path to the CSV file.
    Returns:
        List[Dict[str, Any]]: One dict per data row, keyed by the header row.
        An empty list is returned when the file cannot be read or parsed.
    """
    try:
        # Fix: the csv module requires newline="" so embedded newlines inside
        # quoted fields round-trip correctly (see csv module docs).
        with open(csv_file_path, "r", newline="") as csv_file:
            reader = csv.DictReader(csv_file, delimiter=",")
            return list(reader)
    except Exception as e:
        logging.error(f"Error importing CSV file: {e}")
        return []
|
172
|
+
|
173
|
+
|
174
|
+
def get_arns_for_current_account(csv_data: List[Dict[str, Any]], account_id: str, region: str) -> List[Dict[str, Any]]:
    """
    Compare the CSV ARN entries with the current account id and region, and return a list of matching entries.

    An entry is kept when:
      1. its ARN's account id and region match the current context, and
      2. its security group name resolves to exactly one SG id (multiple SGs
         named e.g. "default" can exist across VPCs and are rejected).
    Kept entries are whitespace-stripped and the name is lower-cased.

    Args:
        csv_data (List[Dict[str, Any]]): The CSV data imported as a list of dictionaries.
        account_id (str): The current account id.
        region (str): The current region.
    Returns:
        List[Dict[str, Any]]: A list of dictionaries representing the matching CSV entries.
    """
    matching_entries = []
    all_security_groups = get_security_groups()
    for entry in csv_data:
        logging.info(f"Entry: {entry}")
        try:
            # ARN layout: arn:partition:service:region:account-id:resource
            target_account_id = entry["arn"].strip().split(":")[4]
            target_region = entry["arn"].strip().split(":")[3]
            security_group_name = str(entry["security_group"].strip()).lower()
            security_group_id = get_security_group_id_from_name(security_group_name, all_security_groups)

            if (target_account_id == account_id and target_region == region) and security_group_id != "":
                clean_entry = {
                    "arn": entry["arn"].strip(),
                    "security_group_name": security_group_name,
                    "security_group": security_group_id,
                }
                matching_entries.append(clean_entry)
        except Exception as e:
            # A malformed row should not abort processing of the others.
            logging.error(f"Error processing entry: {e}")

    # Fix: the original ran this check inside the per-entry loop, which logged
    # once per non-matching row and never fired at all for an empty CSV.
    if matching_entries == []:
        logging.info("No matching entries found, returning empty list")

    return matching_entries
|
213
|
+
|
214
|
+
|
215
|
+
def check_security_group_validity(security_group_name: str) -> bool:
    """
    Report whether a security group with the given name exists in the
    current account/region.

    Args:
        security_group_name (str): The security group name to look up.
    Returns:
        bool: True when the name appears in DescribeSecurityGroups, else False.
    """
    try:
        described = boto3.client("ec2").describe_security_groups()
        existing_names = jmespath.search("SecurityGroups[].GroupName", described)
        if security_group_name in existing_names:
            return True
    except Exception as e:
        logging.error(f"Security Group is not valid: {e}")
    return False
|
231
|
+
|
232
|
+
|
233
|
+
def get_security_groups() -> Dict[str, Any]:
    """
    Describe all security groups in the current account/region, lower-casing
    every string *value* (keys untouched) so later name lookups are
    case-insensitive.

    NOTE(review): describe_security_groups is not paginated here, so accounts
    with more SGs than one page returns will be truncated — confirm whether
    pagination is needed for target accounts.

    Returns:
        Dict[str, Any]: The lower-cased DescribeSecurityGroups response. On
        failure an empty response ({"SecurityGroups": []}) is returned so
        callers can still search it.
    """
    try:
        security_group_response = boto3.client("ec2").describe_security_groups()
        security_group_response2 = dict_lower(security_group_response.copy())
        return security_group_response2
    except Exception as e:
        logging.error(f"Had a problem retrieving security groups: {e}")
        # Fix: the original fell through and implicitly returned None, which
        # made the downstream name lookup fail with a confusing TypeError.
        return {"SecurityGroups": []}
|
247
|
+
|
248
|
+
|
249
|
+
def get_security_group_id_from_name(security_group_name: str, security_group_response: dict) -> str:
    """
    Resolve a Security Group name to its Security Group ID.

    Args:
        security_group_name (str): The (lower-cased) security group name to resolve.
        security_group_response (Dict[str, Any]): A DescribeSecurityGroups-shaped
            response dictionary (string values lower-cased by dict_lower).
    Returns:
        str: The Security Group ID, or "" when the name is missing, ambiguous,
        or the lookup fails.
    """
    try:
        # Fix: a plain comprehension instead of an interpolated jmespath query —
        # the f-string backtick-literal form broke on names containing
        # backticks/quotes, and this needs no query-language round trip.
        matching_security_group_ids = [
            group["GroupId"]
            for group in security_group_response.get("SecurityGroups", [])
            if group.get("GroupName") == security_group_name
        ]
        if len(matching_security_group_ids) == 1:
            return matching_security_group_ids[0]
        elif len(matching_security_group_ids) > 1:
            # Names like "default" can exist once per VPC, so a bare name
            # cannot identify a single SG.
            logging.error(
                f'Security Group name "{security_group_name}" represents more than one specific SG.\n'
                f"This script can only validate uniquely named Security Groups.\n"
            )
            return ""
        else:
            logging.error(f'Security Group name "{security_group_name}" wasn\'t found in account.')
            return ""
    except Exception as e:
        logging.error(f"Security Group doesn't exist: {e}")
        # Fix: the original implicitly returned None here, violating the str
        # contract and letting bad entries slip past the caller's
        # `security_group_id != ""` check.
        return ""
|
277
|
+
|
278
|
+
|
279
|
+
def dict_lower(dict_object: dict) -> dict:
    """
    Recursively lower-case every string *value* in a dictionary; keys are untouched.

    The top-level dict is modified in place and also returned; nested dicts
    are replaced with lower-cased copies. Ints and any other non-str/dict/list
    values pass through unchanged.

    Args:
        dict_object (dict): The dictionary to convert.
    Returns:
        dict: The dictionary with all string values converted to lowercase.
    """

    def handle_list(items: list) -> list:
        # Fix: the original located elements with list.index(element), which
        # returns the FIRST equal position and can rewrite the wrong slot when
        # a list contains duplicate values. enumerate() is positionally exact.
        for position, element in enumerate(items):
            if isinstance(element, str):
                items[position] = element.lower()
            elif isinstance(element, dict):
                items[position] = dict_lower(element)
            # ints (and anything else) are left as-is
        return items

    for k in dict_object:
        v = dict_object[k]
        logging.info(f"Pre change - Key: {k}, Value: {v}")
        value_type = type(v)
        # isinstance() instead of type(x) == T: correct and idiomatic.
        if isinstance(v, str):
            dict_object[k] = v.lower()
        elif isinstance(v, dict):
            logging.info(f"Recursive dict - {v}")
            # .copy() preserves the original nested mapping for the caller.
            dict_object[k] = dict_lower(v.copy())
        elif isinstance(v, list):
            logging.info(f"List - {v}")
            dict_object[k] = handle_list(v)
        logging.info(f"Post change {value_type} - Value: {dict_object[k]}")
    return dict_object
|
320
|
+
|
321
|
+
|
322
|
+
def get_resource_type_from_arn(arn) -> str:
    """
    Extract the service portion of an Amazon Resource Name (ARN).

    ARN layout is arn:partition:service:region:account-id:resource, so for
    "arn:aws:ec2:us-east-1:123456789012:instance/i-abc" this yields "ec2".

    Args:
        arn (str): The ARN string.

    Returns:
        str: The service (resource type) extracted from the ARN.
    """
    service_field = arn.split(":")[2]
    # Defensive: strip anything after a "/" in the service field.
    return service_field.split("/")[0]
|
339
|
+
|
340
|
+
|
341
|
+
def validate_security_groups(matching_entries: List[Dict[str, Any]]) -> list:
    """
    Validate the security-group attachment of every matching entry.

    Each entry is dispatched to a resource-type-specific validator based on
    the service portion of its ARN.

    Args:
        matching_entries (List[Dict[str, Any]]): A list of dictionaries representing the matching CSV entries.
    Returns:
        list: One response dict per entry (Success / Compliant / ErrorMessage, ...).
    Raises:
        Nothing: per-entry failures are captured in that entry's response.
    """
    multiple_responses = []
    for entry in matching_entries:
        # Fix: bind entry_type before the risky work — the original's handler
        # sat outside the loop, referenced a possibly-unbound entry_type
        # (raising NameError inside the except), and aborted every remaining
        # entry on the first failure. Handle failures per entry instead.
        entry_type = None
        try:
            entry_type = get_resource_type_from_arn(entry["arn"])
            logging.info(f"*********** ARN: {entry['arn']}")
            logging.info(f"Security Group Name: {entry['security_group_name']}")
            logging.info(f"Security Group ID: {entry['security_group']}")
            # Dispatch table keeps the per-service routing in one place.
            validators = {
                "ec2": validate_security_groups_to_ec2,
                "elasticloadbalancing": validate_security_groups_to_elasticloadbalancing,
                "ecs": validate_security_groups_to_ecs_task,
                "rds": validate_security_groups_to_rds,
                "lambda": validate_security_groups_to_lambda,
            }
            validator = validators.get(entry_type)
            if validator is None:
                error_message = f"Unsupported resource type: {entry_type}"
                logging.info(error_message)
                # Include "Compliant" so downstream tallies don't KeyError.
                single_response = {"Success": False, "Compliant": False, "ErrorMessage": error_message}
            else:
                single_response = validator(entry)
        except Exception as e:
            error_message = (
                f"ERROR: Validating security groups: {e}\n"
                f"\tResource Type: {entry_type}\n"
                f"\tARN: {entry.get('arn')}\n"
                f"\tSecurity Group: {entry.get('security_group')}"
            )
            logging.error(error_message)
            single_response = {"Success": False, "Compliant": False, "ErrorMessage": error_message}
        multiple_responses.append(single_response)

    return multiple_responses
|
385
|
+
|
386
|
+
|
387
|
+
def validate_security_groups_to_elasticloadbalancing(matching_entry: Dict[str, Any]) -> dict:
    """
    Validate that the requested security group is attached to the matching ELBv2 ARN.

    Args:
        matching_entry (Dict[str, Any]): A dictionary representing the matching CSV entry.
    Returns:
        Compliance Status (Dict[str, Any]): The submitted dictionary, along with whether the security group specified is attached or not.
    """
    return_response = matching_entry.copy()
    return_response.update({"Compliant": False, "Success": False, "SecurityGroupsAttached": None, "ErrorMessage": ""})
    try:
        elbv2_arn = matching_entry["arn"]
        elbv2_response = boto3.client("elbv2").describe_load_balancers(LoadBalancerArns=[elbv2_arn])

        # Fix: check for an empty LoadBalancers list BEFORE indexing into it.
        # The original evaluated jmespath.search(...)[0] first, so an empty
        # response raised IndexError and this branch was unreachable.
        if elbv2_response["LoadBalancers"] == []:
            error_message = f"ELBv2 {elbv2_arn} not found"
            logging.error(error_message)
            return_response.update({"ErrorMessage": error_message, "Success": False, "Compliant": False})
        else:
            elbv2_security_groups = jmespath.search("LoadBalancers[].SecurityGroups", elbv2_response)[0]

            # The three cases below are exhaustive (the original's trailing
            # "did not meet use cases" else was dead code).
            if not elbv2_security_groups:
                # Lookup succeeded but nothing is attached: success, not compliant.
                error_message = f"No security groups applied to resource: {matching_entry['arn']}"
                logging.error(error_message)
                return_response.update(
                    {
                        "ErrorMessage": error_message,
                        "SecurityGroupsAttached": elbv2_security_groups,
                        "Success": True,
                        "Compliant": False,
                    }
                )
            elif matching_entry["security_group"] in elbv2_security_groups:
                error_message = (
                    f"Security group {matching_entry['security_group']} found attached to {matching_entry['arn']}"
                )
                logging.info(error_message)
                return_response.update(
                    {
                        "ErrorMessage": error_message,
                        "SecurityGroupsAttached": elbv2_security_groups,
                        "Success": True,
                        "Compliant": True,
                    }
                )
            else:
                error_message = (
                    f"Security group {matching_entry['security_group']} is not attached to {matching_entry['arn']}"
                )
                logging.info(error_message)
                return_response.update(
                    {
                        "ErrorMessage": error_message,
                        "SecurityGroupsAttached": elbv2_security_groups,
                        "Success": True,
                        "Compliant": False,
                    }
                )

    except botocore.exceptions.ClientError as e:
        # A non-existent ARN typically surfaces here (LoadBalancerNotFound).
        error_message = (
            f"Error validating security group {matching_entry['security_group']} to ELBv2 {matching_entry['arn']}:"
            f"Error: {e}"
        )
        logging.error(error_message)
        return_response.update({"ErrorMessage": error_message, "Success": False, "Compliant": False})
    except Exception as e:
        error_message = f"Problem finding security groups attached to {matching_entry['arn']}Error: {e}"
        logging.error(error_message)
        return_response.update({"ErrorMessage": error_message, "Success": False, "Compliant": False})
    return return_response
|
464
|
+
|
465
|
+
|
466
|
+
def validate_security_groups_to_ec2(matching_entry: Dict[str, Any]) -> dict:
    """
    Validate that the expected security group is attached to the matching EC2 instance.

    Args:
        matching_entry (Dict[str, Any]): A dictionary representing the matching CSV entry.
            Must contain 'arn' (instance ARN, e.g. arn:aws:ec2:...:instance/i-0abc...)
            and 'security_group' (the group id expected to be attached).

    Returns:
        dict: A copy of ``matching_entry`` augmented with 'Compliant', 'Success',
        'SecurityGroupsAttached' and 'ErrorMessage' keys.
    """
    return_response = matching_entry.copy()
    # Include SecurityGroupsAttached up front so the key exists on every exit
    # path, matching the sibling validators.
    return_response.update({"Compliant": False, "Success": False, "SecurityGroupsAttached": None, "ErrorMessage": ""})
    try:
        # The instance id is the resource portion of the ARN: .../instance/<id>
        ec2_id = matching_entry["arn"].split("/")[1]
        ec2_response = boto3.client("ec2").describe_instances(InstanceIds=[ec2_id])
        ec2_security_groups = jmespath.search(
            "Reservations[].Instances[].SecurityGroups[].GroupId",
            ec2_response,
        )

        # Primary Case: the lookup succeeded but no security groups are attached.
        # (This also covers an empty 'Reservations' list, which yields [] here,
        # so the old unreachable "instance not found" branch was dropped.)
        if ec2_security_groups == []:
            error_message = f"No security groups applied to resource: {matching_entry['arn']}"
            logging.error(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": ec2_security_groups,
                    "Success": True,
                    "Compliant": False,
                }
            )
        elif matching_entry["security_group"] in ec2_security_groups:
            error_message = (
                f"Security group {matching_entry['security_group']} found attached to {matching_entry['arn']}"
            )
            logging.info(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": ec2_security_groups,
                    "Success": True,
                    "Compliant": True,
                }
            )
        else:
            # Security groups exist on the instance, but not the expected one.
            error_message = (
                f"Security group {matching_entry['security_group']} is not attached to {matching_entry['arn']}"
            )
            logging.error(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": ec2_security_groups,
                    "Success": True,
                    "Compliant": False,
                }
            )

    except botocore.exceptions.ClientError as e:
        error_message = (
            f"Error attaching security group {matching_entry['security_group']} to EC2 {matching_entry['arn']}. \n"
            f"Error: {e}"
        )
        logging.error(error_message)
        # BUG FIX: a ClientError means the validation call itself failed, so
        # report Success/Compliant as False (previously both were True).
        return_response.update(
            {"ErrorMessage": error_message, "SecurityGroupsAttached": None, "Success": False, "Compliant": False}
        )
    return return_response
|
551
|
+
|
552
|
+
def validate_security_groups_to_ecs_task(matching_entry: Dict[str, Any]) -> dict:
    """
    Validate that the expected security group is attached to the matching ECS service.

    Args:
        matching_entry (Dict[str, Any]): A dictionary representing the matching CSV entry.
            Must contain 'arn' (service ARN, arn:aws:ecs:...:service/<cluster>/<service>)
            and 'security_group' (the group id expected to be attached).

    Returns:
        dict: A copy of ``matching_entry`` augmented with 'Compliant', 'Success',
        'SecurityGroupsAttached' and 'ErrorMessage' keys.
    """
    return_response = matching_entry.copy()
    return_response.update({"Compliant": False, "Success": False, "SecurityGroupsAttached": None, "ErrorMessage": ""})
    try:
        # Service ARN format: arn:aws:ecs:<region>:<account>:service/<cluster>/<service>
        cluster_name = matching_entry["arn"].split("/")[1]
        service_name = matching_entry["arn"].split("/")[2]
        ecs_service_response = boto3.client("ecs").describe_services(cluster=cluster_name, services=[service_name])

        service_sg_lists = jmespath.search(
            "services[].networkConfiguration.awsvpcConfiguration.securityGroups", ecs_service_response
        )
        # BUG FIX: a missing service yields {'services': [], 'failures': [...]},
        # which is a truthy dict; the old code indexed [0] unconditionally and
        # raised IndexError. Treat an empty result as "service not found".
        if not service_sg_lists:
            error_message = f"ECS {matching_entry['arn']} not found"
            logging.info(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": None,
                    "Success": False,
                    "Compliant": False,
                }
            )
        else:
            ecs_security_group_ids = service_sg_lists[0]
            # Primary Case: service exists but has no security groups attached.
            if not ecs_security_group_ids:
                error_message = f"No security groups applied to resource: {matching_entry['arn']}"
                logging.info(error_message)
                return_response.update(
                    {
                        "ErrorMessage": error_message,
                        "SecurityGroupsAttached": ecs_security_group_ids,
                        "Success": True,
                        "Compliant": False,
                    }
                )
            elif matching_entry["security_group"] in ecs_security_group_ids:
                error_message = (
                    f"Security group {matching_entry['security_group']} found attached to {matching_entry['arn']}"
                )
                logging.info(error_message)
                return_response.update(
                    {
                        "ErrorMessage": error_message,
                        "SecurityGroupsAttached": ecs_security_group_ids,
                        "Success": True,
                        "Compliant": True,
                    }
                )
            else:
                # Security groups exist on the service, but not the expected one.
                error_message = (
                    f"Security group {matching_entry['security_group']} is not attached to {matching_entry['arn']}"
                )
                logging.info(error_message)
                return_response.update(
                    {
                        "ErrorMessage": error_message,
                        "SecurityGroupsAttached": ecs_security_group_ids,
                        "Success": True,
                        "Compliant": False,
                    }
                )

    except botocore.exceptions.ClientError as e:
        # BUG FIX: error message previously said "EC2" in this ECS validator.
        error_message = (
            f"Error validating security group {matching_entry['security_group']} to ECS {matching_entry['arn']}. \n"
            f"Error: {e}"
        )
        logging.info(error_message)
        return_response.update(
            {"ErrorMessage": error_message, "SecurityGroupsAttached": None, "Success": False, "Compliant": False}
        )
    return return_response
|
653
|
+
|
654
|
+
def validate_security_groups_to_rds(matching_entry: Dict[str, Any]) -> dict:
    """
    Validate that the expected security group is attached to the matching RDS instance.

    Args:
        matching_entry (Dict[str, Any]): A dictionary representing the matching CSV entry.
            Must contain 'arn' (the DB instance identifier/ARN passed to
            ``describe_db_instances``) and 'security_group' (the VPC security
            group id expected to be attached).

    Returns:
        dict: A copy of ``matching_entry`` augmented with 'Compliant', 'Success',
        'SecurityGroupsAttached' and 'ErrorMessage' keys.
    """
    return_response = matching_entry.copy()
    return_response.update({"Compliant": False, "Success": False, "SecurityGroupsAttached": None, "ErrorMessage": ""})
    try:
        rds_response = boto3.client("rds").describe_db_instances(DBInstanceIdentifier=matching_entry["arn"])
        rds_security_groups = jmespath.search("DBInstances[].VpcSecurityGroups[].VpcSecurityGroupId", rds_response)
        # NOTE: the unused `rds_instance_id = ...[0]` lookup was removed — it
        # raised IndexError when 'DBInstances' was empty, before any branch ran.

        # Primary Case: lookup succeeded but no security groups are attached.
        # (An empty 'DBInstances' list also yields [] here, so the old
        # unreachable "RDS not found" branch was dropped.)
        if not rds_security_groups:
            error_message = f"No security groups applied to resource: {matching_entry['arn']}"
            logging.info(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": rds_security_groups,
                    "Success": True,
                    "Compliant": False,
                }
            )
        elif matching_entry["security_group"] in rds_security_groups:
            error_message = (
                f"Security group {matching_entry['security_group']} found attached to {matching_entry['arn']}"
            )
            logging.info(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": rds_security_groups,
                    "Success": True,
                    "Compliant": True,
                }
            )
        else:
            # Security groups exist on the instance, but not the expected one.
            error_message = (
                f"Security group {matching_entry['security_group']} not found attached to {matching_entry['arn']}"
            )
            logging.info(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": rds_security_groups,
                    "Success": True,
                    "Compliant": False,
                }
            )

    except botocore.exceptions.ClientError as e:
        error_message = (
            f"Error attaching security group {matching_entry['security_group']} to RDS {matching_entry['arn']}: \n"
            f"Error: {e}"
        )
        logging.debug(error_message)
        return_response.update(
            {"ErrorMessage": error_message, "SecurityGroupsAttached": None, "Success": False, "Compliant": False}
        )
    return return_response
|
736
|
+
|
737
|
+
def validate_security_groups_to_lambda(matching_entry: Dict[str, Any]) -> dict:
    """
    Validate that the expected security group is attached to the matching Lambda function.

    Args:
        matching_entry (Dict[str, Any]): A dictionary representing the matching CSV entry.
            Must contain 'arn' (the function ARN passed to ``get_function``) and
            'security_group' (the group id expected in the function's VpcConfig).

    Returns:
        dict: A copy of ``matching_entry`` augmented with 'Compliant', 'Success',
        'SecurityGroupsAttached' and 'ErrorMessage' keys.
    """
    return_response = matching_entry.copy()
    return_response.update({"Compliant": False, "Success": False, "SecurityGroupsAttached": None, "ErrorMessage": ""})
    try:
        lambda_arn = matching_entry["arn"]
        # get_function raises ClientError for a missing function, so the old
        # unreachable "Lambda not found" branch was dropped.
        lambda_response = boto3.client("lambda").get_function(FunctionName=lambda_arn)
        lambda_security_groups = jmespath.search("Configuration.VpcConfig.SecurityGroupIds", lambda_response)

        # Primary Case: function exists but has no security groups (not in a VPC,
        # or VpcConfig has an empty SecurityGroupIds list).
        if not lambda_security_groups:
            error_message = f"No security groups applied to resource: {matching_entry['arn']}"
            logging.error(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": lambda_security_groups,
                    "Success": True,
                    "Compliant": False,
                }
            )
        elif matching_entry["security_group"] in lambda_security_groups:
            error_message = (
                f"Security group {matching_entry['security_group']} found attached to {matching_entry['arn']}"
            )
            logging.info(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": lambda_security_groups,
                    "Success": True,
                    "Compliant": True,
                }
            )
        else:
            # Security groups exist on the function, but not the expected one.
            error_message = (
                f"Security group {matching_entry['security_group']} is not attached to {matching_entry['arn']}"
            )
            logging.info(error_message)
            return_response.update(
                {
                    "ErrorMessage": error_message,
                    "SecurityGroupsAttached": lambda_security_groups,
                    "Success": True,
                    "Compliant": False,
                }
            )

    except botocore.exceptions.ClientError as e:
        # BUG FIX: error message previously said "EC2" in this Lambda validator.
        error_message = (
            f"Error attaching security group {matching_entry['security_group']} to Lambda {matching_entry['arn']}. \n"
            f"Error: {e}"
        )
        logging.error(error_message)
        return_response.update(
            {"ErrorMessage": error_message, "SecurityGroupsAttached": None, "Success": False, "Compliant": False}
        )
    return return_response
|
817
|
+
|
818
|
+
def find_all_arns(account_id, region, tag_value_to_filter: str = None) -> list:
    """
    @Description: This function will find the arns in the account (and region) so that QA can validate they've covered all resources expected.
        Resource Types covered thus far:
        - EC2
        - Lambda
        - ECS
        - Load Balancers
        - RDS
    @account_id: AWS Account ID
    @region: AWS Region
    @tag_value_to_filter: tag value to filter on (only applied to Lambda lookups).
    @return: list of dicts, with arns and types
    """

    def _arn_record(resource_type: str, arn: str) -> dict:
        # One success record per discovered ARN.
        return {
            "Success": True,
            "Account": account_id,
            "Region": region,
            "ResourceType": resource_type,
            "ARN": arn,
            "ErrorMessage": None,
        }

    # Dispatch table replaces five copy-pasted if/elif record-building loops.
    finders = {
        "ec2": lambda: find_all_ec2_arns(account_id, region),
        "ecs": lambda: find_all_ecs_service_arns(account_id, region),
        "rds": lambda: find_all_rds_arns(account_id, region),
        "elasticloadbalancing": lambda: find_all_elasticloadbalancing_arns(account_id, region),
        "lambda": lambda: find_all_lambda_arns(account_id, region, tag_value_to_filter),
    }

    multiple_responses = []
    # BUG FIX: bind resource_type before the try so the except handler can't
    # hit a NameError if the failure happens before the loop assigns it.
    resource_type = None
    try:
        for resource_type, finder in finders.items():
            arns = finder()
            logging.info(f"Found {len(arns)} {resource_type} arns")
            multiple_responses.extend(_arn_record(resource_type, arn) for arn in arns)
    except Exception as e:
        error_message = f"ERROR: Finding all arns: {e} | Resource Type: {resource_type}"
        # BUG FIX: error records now use the same "ARN" key as success records
        # (previously "ARNs"), so consumers can rely on a uniform schema.
        return [
            {
                "Success": False,
                "Account": account_id,
                "Region": region,
                "ResourceType": resource_type,
                "ARN": None,
                "ErrorMessage": error_message,
            }
        ]

    return multiple_responses
|
935
|
+
|
936
|
+
def find_all_ec2_arns(account_id: str, region: str) -> list:
    """
    @Description: This function will find all EC2 ARNs in the account (and region)
    @account_id: The account ID to search for instances in
    @region: The region to search for instances in
    @return: list of the ec2_arns found in the account/region (empty on failure)
    """

    try:
        ec2_response = find_account_instances2()
        ec2_instances = jmespath.search("Reservations[].Instances[].InstanceId", ec2_response)
        # Build one ARN per discovered instance id.
        return [f"arn:aws:ec2:{region}:{account_id}:instance/{instance}" for instance in ec2_instances]
    except Exception as e:
        logging.error(f"Failed to get instances from account {account_id} and region {region} | {e}")
        # BUG FIX: return [] (not None) so the declared list contract holds and
        # callers that do len()/iteration don't crash — matches sibling finders.
        return []
|
956
|
+
|
957
|
+
def find_all_ecs_service_arns(account_id: str, region: str) -> list:
    """
    @Description: This function will find all ECS service ARNs in the account (and region)
    @account_id: The account ID to search for instances in
    @region: The region to search for instances in
    @return: list of the ecs_arns found in the account/region (empty on failure)
    """

    try:
        ecs_response = find_account_ecs_clusters_services_and_tasks2()
        # Idiom fix: membership test directly on the dict, not on .keys().
        ecs_arns = [resource["ServiceArn"] for resource in ecs_response if "ServiceArn" in resource]
        logging.info(f"Found {len(ecs_arns)} ECS services")
        return ecs_arns
    except Exception as e:
        error_message = f"Failed to get instances from account {account_id} and region {region} | {e}"
        logging.error(error_message)
        return []
|
975
|
+
|
976
|
+
def find_all_rds_arns(account_id: str, region: str) -> list:
    """
    @Description: This function will find all RDS ARNs in the account (and region)
    @account_id: The account ID to search for instances in
    @region: The region to search for instances in
    @return: list of the rds_arns found in the account/region (empty on failure)
    """

    try:
        rds_response = find_account_rds_instances2()
        # Should look like this: arn:aws:rds:us-west-2:513645610340:db:test-rds
        rds_arns = [
            f"arn:aws:rds:{region}:{account_id}:db:{db['DBInstanceIdentifier']}" for db in rds_response["DBInstances"]
        ]
        # BUG FIX: log message previously said "ECS services" in the RDS finder.
        logging.info(f"Found {len(rds_arns)} RDS instances")
        return rds_arns
    except Exception as e:
        error_message = f"Failed to get RDS instances from account {account_id} and region {region} | {e}"
        logging.error(error_message)
        return []
|
997
|
+
|
998
|
+
def find_all_elasticloadbalancing_arns(account_id: str, region: str) -> list:
    """
    @Description: This function will find all Elastic Load Balancer ARNs in the account (and region)
    @account_id: The account ID to search for instances in
    @region: The region to search for instances in
    @return: list of the elasticloadbalancing_arns found in the account/region (empty on failure)
    """

    try:
        list_of_elbs = find_load_balancers2()
        # Should look like this: 'arn:aws:elasticloadbalancing:us-west-2:513645610340:loadbalancer/app/test-lb-tf/17a80f4fa92cedaf'
        elb_arns = [lb["LoadBalancerArn"] for lb in list_of_elbs]
        logging.info(f"Successfully found {len(elb_arns)} elb arns in account {account_id} and region {region}")
        return elb_arns
    except Exception as e:
        error_message = f"Failed to get ELB instances from account {account_id} and region {region} | {e}"
        logging.error(error_message)
        return []
|
1017
|
+
|
1018
|
+
def find_all_lambda_arns(account_id, region, tag_value_to_filter: str = None) -> list:
    """
    @Description: This function will find all Lambda ARNs in the account (and region)
    @account_id: The account ID to search for instances in
    @region: The region to search for instances in
    @tag_value_to_filter: optional tag value passed through to the Lambda lookup
    @return: list of the lambdas found in the account/region (empty on failure)
    """

    try:
        list_of_lambdas = find_lambda_functions2(fTagValueToFilter=tag_value_to_filter)
        # Should look like this: 'arn:aws:lambda:us-west-2:513645610340:function:my-function'
        lambda_arns = [function["FunctionArn"] for function in list_of_lambdas]
        # BUG FIX: log message previously said "elb arns" in the Lambda finder.
        logging.info(f"Successfully found {len(lambda_arns)} lambda arns in account {account_id} and region {region}")
        return lambda_arns
    except Exception as e:
        error_message = f"Failed to get Lambda functions from account {account_id} and region {region} | {e}"
        logging.error(error_message)
        return []
|
1037
|
+
|
1038
|
+
def display_results(
|
1039
|
+
results_list, fdisplay_dict: dict, defaultAction=None, file_to_save: str = None, subdisplay: bool = False
|
1040
|
+
):
|
1041
|
+
from datetime import datetime
|
1042
|
+
|
1043
|
+
from colorama import Fore, init
|
1044
|
+
|
1045
|
+
init()
|
1046
|
+
"""
|
1047
|
+
Note that this function simply formats the output of the data within the list provided
|
1048
|
+
@param: results_list: This should be a list of dictionaries, matching to the fields in fdisplay_dict
|
1049
|
+
@param: fdisplay_dict: Should look like the below. It's simply a list of fields and formats
|
1050
|
+
@param: defaultAction: this is a default string or type to assign to fields that (for some reason) don't exist within the results_list.
|
1051
|
+
@param: file_to_save: If you want to save the output to a file, specify the filename here.
|
1052
|
+
display_dict = {'ParentProfile': {'DisplayOrder': 1, 'Heading': 'Parent Profile'},
|
1053
|
+
'MgmtAccount' : {'DisplayOrder': 2, 'Heading': 'Mgmt Acct'},
|
1054
|
+
'AccountId' : {'DisplayOrder': 3, 'Heading': 'Acct Number'},
|
1055
|
+
'Region' : {'DisplayOrder': 4, 'Heading': 'Region', 'Condition': ['us-east-2']},
|
1056
|
+
'Retention' : {'DisplayOrder': 5, 'Heading': 'Days Retention', 'Condition': ['Never']},
|
1057
|
+
'Name' : {'DisplayOrder': 7, 'Heading': 'CW Log Name'},
|
1058
|
+
'Size' : {'DisplayOrder': 6, 'Heading': 'Size (Bytes)'}}
|
1059
|
+
- The first field ("MgmtAccount") should match the field name within the list of dictionaries you're passing in (results_list)
|
1060
|
+
- The first field within the nested dictionary is the SortOrder you want the results to show up in
|
1061
|
+
- The second field within the nested dictionary is the heading you want to display at the top of the column (which allows spaces)
|
1062
|
+
- The third field ('Condition') is new, and allows to highlight a special value within the output. This can be used multiple times.
|
1063
|
+
The dictionary doesn't have to be ordered, as long as the 'SortOrder' field is correct.
|
1064
|
+
|
1065
|
+
Enhancements:
|
1066
|
+
- How to create a break between rows, like after every account, or Management Org, or region, or whatever...
|
1067
|
+
- How to do sub-sections, where there is more data to show per row...
|
1068
|
+
"""
|
1069
|
+
|
1070
|
+
def handle_list() -> None:
    """
    Render ``results_list`` (a list of dicts) as an aligned text table on stdout,
    and optionally write a pipe-delimited copy to disk when ``file_to_save`` is set.

    Closure over the enclosing function's parameters:
      results_list, fdisplay_dict, defaultAction, file_to_save, subdisplay.
    Column order comes from each field's 'DisplayOrder'; column width is the max
    of the heading width and the widest value seen for that field.
    """
    # If no results were passed, print nothing and just return
    if len(results_list) == 0:
        logging.warning("There were no results passed in to display")
        return

    # TODO:
    # Probably have to do a pre-emptive error-check to ensure the SortOrder is unique within the Dictionary
    # Also need to enclose this whole thing in a try...except to trap errors.
    # Decided not to try to order the data passed in, as that should be done within the original function

    # Columns are ordered by each field's 'DisplayOrder' value.
    sorted_display_dict = dict(sorted(fdisplay_dict.items(), key=lambda x: x[1]["DisplayOrder"]))

    # This is an effort to find the right size spaces for the dictionary to properly show the results
    print()
    needed_space = {}
    for field, value in sorted_display_dict.items():
        needed_space[field] = 0
    try:
        for result in results_list:
            for field, value in sorted_display_dict.items():
                if field not in result:
                    # Field missing from this record - the heading alone sets the width.
                    needed_space[field] = max(len(value["Heading"]), needed_space[field])
                    continue
                elif isinstance(result[field], bool):
                    # Recognizes the field as a Boolean, and finds the necessary amount of space to show that data, and assigns the length to "needed_space"
                    # I use "5" as the minimum space, to show that displaying "False" would take up 5 spaces...
                    needed_space[field] = max(5, len(value["Heading"]), needed_space[field])
                elif isinstance(result[field], int):
                    # This section is to compensate for the fact that the len of numbers in string format doesn't include the commas.
                    # I know - I've been very US-centric here, since I haven't figured out how to achieve this in a locale-agnostic way
                    num_width = len(str(result[field]))
                    if len(str(result[field])) % 3 == 0:
                        num_width += (len(str(result[field])) // 3) - 1
                    else:
                        num_width += len(str(result[field])) // 3
                    needed_space[field] = max(num_width, len(value["Heading"]), needed_space[field])
                elif isinstance(result[field], float):
                    # This section is to compensate for the fact that the len of numbers in string format doesn't include the commas.
                    # I know - I've been very US-centric here, since I haven't figured out how to achieve this in a locale-agnostic way
                    num_width = len(str(result[field]))
                    if len(str(result[field])) % 3 == 0:
                        num_width += (len(str(result[field])) // 3) - 1
                    else:
                        num_width += len(str(result[field])) // 3
                    needed_space[field] = max(num_width, len(value["Heading"]), needed_space[field])
                elif isinstance(result[field], str):
                    # Recognizes the field as a string, and finds the necessary amount of space to show that data, and assigns the length to "needed_space"
                    needed_space[field] = max(len(result[field]), len(value["Heading"]), needed_space[field])
                elif isinstance(result[field], datetime):
                    # Recognizes the field as a date, and finds the necessary amount of string space to show that date, and assigns the length to "needed_space"
                    # NOTE(review): unlike the other branches, this max() omits the running
                    # needed_space[field] value - confirm that is intentional.
                    needed_space[field] = max(len(datetime.now().strftime("%x %X")), len(value["Heading"]))
                else:
                    # In case the field is a list or dict - for a subdisplay...
                    needed_space[field] = max(len(value["Heading"]), needed_space[field])
    except KeyError as my_Error:
        logging.error(f"Error: {my_Error}")

    # This writes out the headings
    print("\t", end="") if subdisplay else None
    for field, value in sorted_display_dict.items():
        # If this is a sub-display field, there's no need to write out the heading above
        if "SubDisplay" in value.keys():
            continue
        header_format = needed_space[field]
        print(f"{value['Heading']:{header_format}s} ", end="")
    # Newline at the end of the headings
    print()
    # This writes out the dashes (separators)
    print("\t", end="") if subdisplay else None
    for field, value in sorted_display_dict.items():
        # If this is a sub-display field, there's no need to write out the heading above
        if "SubDisplay" in value.keys():
            continue
        repeatvalue = needed_space[field]
        print(f"{'-' * repeatvalue} ", end="")
    # Newline after the dashes
    print()

    # This writes out the data
    for result in results_list:
        print("\t", end="") if subdisplay else None
        for field, value in sorted_display_dict.items():
            # This determines whether this row provided is supposed to be displayed as a sub-report of the main row
            if "SubDisplay" in value.keys():
                SubDisplay = True
            else:
                SubDisplay = False
            # This assigns the proper space for the output
            data_format = needed_space[field]
            # NOTE(review): this writes the default back into the caller's dict,
            # i.e. it mutates the records inside results_list.
            if field not in result.keys():
                result[field] = defaultAction
            # This allows for a condition to highlight a specific value
            highlight = False
            if "Condition" in value and result[field] in value["Condition"]:
                highlight = True
            if result[field] is None:
                print(f"{'':{data_format}} ", end="")
            elif isinstance(result[field], str):
                print(
                    f"{Fore.RED if highlight else ''}{result[field]:{data_format}s}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(result[field], bool):
                # This is needed, otherwise it prints "0" for False and "1" for True... Essentially treating the bool like an integer.
                if result[field]:
                    display_text = "True"
                else:
                    display_text = "False"
                print(
                    f"{Fore.RED if highlight else ''}{display_text:{data_format}s}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(result[field], int):
                # Left-aligned; a thousands separator is applied only when the field's
                # display spec carries a truthy 'Delimiter' key.
                print(
                    f"{Fore.RED if highlight else ''}{result[field]:<{data_format}{',' if 'Delimiter' in value.keys() and value['Delimiter'] else ''}}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(result[field], float):
                print(
                    f"{Fore.RED if highlight else ''}{result[field]:{data_format}f}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(result[field], datetime):
                print(
                    f"{Fore.RED if highlight else ''}{result[field].strftime('%x %X')}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(result[field], list) and SubDisplay:
                # Re-use this same function - but with the sub-data used for display, while passing in that this is a "sub-display" to indent the new records.
                display_results(result[field], value["SubDisplay"], None, subdisplay=SubDisplay)
            elif isinstance(result[field], list):
                # This is a cheat, since I'm using this function for a specific use for the "find_security_groups.py" script
                for item in result[field]:
                    if isinstance(item, dict):
                        logging.debug(f"Item is a dictionary - {item}")
                        if "CidrIp" in item.keys() and "Description" in item.keys():
                            print(
                                f"{Fore.RED if highlight else ''}{item['CidrIp']} ({item['Description']}){Fore.RESET if highlight else ''}, ",
                                end="",
                            )
                        elif "CidrIp" in item.keys():
                            print(
                                f"{Fore.RED if highlight else ''}{item['CidrIp']}{Fore.RESET if highlight else ''}, ",
                                end="",
                            )
                        elif "GroupId" in item.keys() and "Description" in item.keys():
                            print(
                                f"{Fore.RED if highlight else ''}{item['GroupId']} ({item['Description']}){Fore.RESET if highlight else ''}, ",
                                end="",
                            )
                        elif "GroupId" in item.keys():
                            print(
                                f"{Fore.RED if highlight else ''}{item['GroupId']}{Fore.RESET if highlight else ''}, ",
                                end="",
                            )
                        elif "PrefixListId" in item.keys() and "Description" in item.keys():
                            print(
                                f"{Fore.RED if highlight else ''}{item['PrefixListId']} ({item['Description']}){Fore.RESET if highlight else ''}, ",
                                end="",
                            )
                        elif "PrefixListId" in item.keys():
                            print(
                                f"{Fore.RED if highlight else ''}{item['PrefixListId']}{Fore.RESET if highlight else ''}, ",
                                end="",
                            )
                    else:
                        print(f"{Fore.RED if highlight else ''}{item}{Fore.RESET if highlight else ''}, ", end="")
        print()  # This is the end of line character needed at the end of every line
    print()  # This is the new line needed at the end of the script.
    # TODO: We need to add some analytics here... Trying to come up with what would make sense across all displays.
    # Possibly we can have a setting where this data is written to a csv locally. We could create separate analytics once the data was saved.

    # This is where the data is written to a file
    if file_to_save is not None:
        Heading = ""
        # Timestamp the filename so repeated runs don't overwrite each other.
        my_filename = f"{file_to_save.split('.')[0]}-{datetime.now().strftime('%y-%m-%d--%H-%M-%S')}.csv"
        logging.info(f"Writing your data to: {my_filename}")
        try:
            with open(my_filename, "w") as savefile:
                # Pipe-delimited header row built from the column headings.
                for field, value in sorted_display_dict.items():
                    Heading += f"{value['Heading']}|"
                Heading += "\n"
                savefile.write(Heading)
                logging.debug(f"Writing {len(results_list)} rows of the result to the savefile")
                for result in results_list:
                    row = ""
                    for field, value in sorted_display_dict.items():
                        data_format = 0
                        if field not in result.keys():
                            result[field] = defaultAction
                        if result[field] is None:
                            row += "|"
                        elif isinstance(result[field], str):
                            # row += f"{result[field]:{data_format}s}|"
                            row += f"{result[field]:s}|"
                        elif isinstance(result[field], bool):
                            if result[field]:
                                row += f"True|"
                            else:
                                row += f"False|"
                        elif isinstance(result[field], int):
                            row += f"{result[field]:<{data_format},}|"
                        elif isinstance(result[field], float):
                            row += f"{result[field]:{data_format}f}|"
                        elif isinstance(result[field], datetime):
                            row += f"{result[field].strftime('%c')}|"
                    row += "\n"
                    savefile.write(row)
            print(f"Data written to {my_filename}")
        except Exception as e:
            logging.error(f"Error writing to file: {e}")
|
1282
|
+
|
1283
|
+
def handle_dict() -> None:
    """
    Render ``results_list`` (a dict mapping a row key to a row-data dict) as an
    aligned text table on stdout, optionally writing a pipe-delimited copy when
    ``file_to_save`` is set.

    Closure over the enclosing function's parameters:
      results_list, fdisplay_dict, defaultAction, file_to_save.
    Still experimental (the dict path is marked as not fully working by its caller).
    """
    # If no results were passed, print nothing and just return
    if len(results_list) == 0:
        logging.warning("There were no results passed in to display")
        return

    # TODO:
    # Probably have to do a pre-emptive error-check to ensure the SortOrder is unique within the Dictionary
    # Also need to enclose this whole thing in a try...except to trap errors.
    # Also need to find a way to order the data within this function.

    # Columns are ordered by each field's 'DisplayOrder' value.
    sorted_display_dict = dict(sorted(fdisplay_dict.items(), key=lambda x: x[1]["DisplayOrder"]))

    # This is an effort to find the right size spaces for the dictionary to properly show the results
    print()
    needed_space = {}
    for field, value in sorted_display_dict.items():
        needed_space[field] = 0
    try:
        for row, row_data in results_list.items():
            for field, value in sorted_display_dict.items():
                if field == row:
                    # The field matches the row key itself - heading alone sets the width.
                    needed_space[field] = max(len(value["Heading"]), needed_space[field])
                    continue
                elif field not in row_data.keys():
                    needed_space[field] = max(len(value["Heading"]), needed_space[field])
                    continue
                elif isinstance(row_data[field], bool):
                    # Recognizes the field as a Boolean, and finds the necessary amount of space to show that data, and assigns the length to "needed_space"
                    # I use "5" as the minimum space, to show that displaying "False" would take up 5 spaces...
                    needed_space[field] = max(5, len(value["Heading"]), needed_space[field])
                elif isinstance(row_data[field], int):
                    # This section is to compensate for the fact that the len of numbers in string format doesn't include the commas.
                    # I know - I've been very US-centric here, since I haven't figured out how to achieve this in a locale-agnostic way
                    num_width = len(str(row_data[field]))
                    if len(str(row_data[field])) % 3 == 0:
                        num_width += (len(str(row_data[field])) // 3) - 1
                    else:
                        num_width += len(str(row_data[field])) // 3
                    needed_space[field] = max(num_width, len(value["Heading"]), needed_space[field])
                elif isinstance(row_data[field], str):
                    # Recognizes the field as a string, and finds the necessary amount of space to show that data, and assigns the length to "needed_space"
                    needed_space[field] = max(len(row_data[field]), len(value["Heading"]), needed_space[field])
                elif isinstance(row_data[field], datetime):
                    # Recognizes the field as a date, and finds the necessary amount of string space to show that date, and assigns the length to "needed_space"
                    # needed_space[field] = max(len(result[field]), len(datetime.now().strftime('%x %X')))
                    needed_space[field] = max(len(datetime.now().strftime("%x %X")), len(value["Heading"]))
    except KeyError as my_Error:
        logging.error(f"Error: {my_Error}")

    # This writes out the headings
    for field, value in sorted_display_dict.items():
        header_format = needed_space[field]
        print(f"{value['Heading']:{header_format}s} ", end="")
    print()
    # This writes out the dashes (separators)
    for field, value in sorted_display_dict.items():
        repeatvalue = needed_space[field]
        print(f"{'-' * repeatvalue} ", end="")
    print()

    # This writes out the data
    for row, row_data in results_list.items():
        for field, value in sorted_display_dict.items():
            # This assigns the proper space for the output
            data_format = needed_space[field]
            # NOTE(review): writes the default back into the caller's dict (mutates results_list).
            if field not in row_data.keys():
                row_data[field] = defaultAction
            # This allows for a condition to highlight a specific value
            highlight = False
            if "Condition" in value and row_data[field] in value["Condition"]:
                highlight = True
            if row_data[field] is None:
                print(f"{'':{data_format}} ", end="")
            elif isinstance(row_data[field], str):
                print(
                    f"{Fore.RED if highlight else ''}{row_data[field]:{data_format}s}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(row_data[field], bool):
                # This is needed, otherwise it prints "0" for False and "1" for True... Essentially treating the bool like an integer.
                if row_data[field]:
                    display_text = "True"
                else:
                    display_text = "False"
                print(
                    f"{Fore.RED if highlight else ''}{display_text:{data_format}s}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(row_data[field], int):
                print(
                    f"{Fore.RED if highlight else ''}{row_data[field]:<{data_format},}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(row_data[field], float):
                print(
                    f"{Fore.RED if highlight else ''}{row_data[field]:{data_format}f}{Fore.RESET if highlight else ''} ",
                    end="",
                )
            elif isinstance(row_data[field], datetime):
                print(
                    f"{Fore.RED if highlight else ''}{row_data[field].strftime('%x %X')}{Fore.RESET if highlight else ''} ",
                    end="",
                )
        print()  # This is the end of line character needed at the end of every line
    print()  # This is the new line needed at the end of the script.
    # TODO: We need to add some analytics here... Trying to come up with what would make sense across all displays.
    # Possibly we can have a setting where this data is written to a csv locally. We could create separate analytics once the data was saved.
    if file_to_save is not None:
        Heading = ""
        # NOTE(review): unlike the list path, no ".csv" extension is appended here - confirm intended.
        my_filename = f"{file_to_save}-{datetime.now().strftime('%y-%m-%d--%H-%M-%S')}"
        logging.info(f"Writing your data to: {my_filename}")
        with open(my_filename, "w") as savefile:
            # Pipe-delimited header row built from the column headings.
            for field, value in sorted_display_dict.items():
                Heading += f"{value['Heading']}|"
            Heading += "\n"
            savefile.write(Heading)
            for row, row_data in results_list.items():
                # NOTE(review): `row` is rebound here, shadowing the dict key bound by the loop.
                row = ""
                for field, value in sorted_display_dict.items():
                    data_format = 0
                    if field not in row_data.keys():
                        row_data[field] = defaultAction
                    if row_data[field] is None:
                        row += "|"
                    elif isinstance(row_data[field], str):
                        row += f"{row_data[field]:{data_format}s}|"
                    elif isinstance(row_data[field], int):
                        row += f"{row_data[field]:<{data_format},}|"
                    elif isinstance(row_data[field], float):
                        row += f"{row_data[field]:{data_format}f}|"
                row += "\n"
                savefile.write(row)
        print(f"\nData written to {my_filename}\n")
|
1417
|
+
|
1418
|
+
# Dispatch on the container type of the results: a list of dicts gets the
# tabular list display; a dict of dicts gets the (still experimental) dict display.
if isinstance(results_list, list):
    handle_list()
elif isinstance(results_list, dict):
    # This doesn't work really yet, but it's a start
    handle_dict()
|
1423
|
+
|
1424
|
+
|
1425
|
+
##################
# Main
##################

# Script entry point.
# NOTE(review): `main` and `CSV_FILE` are defined earlier in this file (outside this view);
# presumably CSV_FILE is the default input/output file for the script - confirm against the header.
if __name__ == "__main__":
    main(CSV_FILE)
|