cartography 0.107.0rc3__py3-none-any.whl → 0.108.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- cartography/_version.py +2 -2
- cartography/cli.py +10 -0
- cartography/config.py +5 -0
- cartography/data/indexes.cypher +0 -10
- cartography/data/jobs/cleanup/github_repos_cleanup.json +2 -0
- cartography/intel/aws/__init__.py +1 -0
- cartography/intel/aws/cloudtrail.py +17 -4
- cartography/intel/aws/cloudtrail_management_events.py +560 -16
- cartography/intel/aws/cloudwatch.py +73 -4
- cartography/intel/aws/ec2/security_groups.py +140 -122
- cartography/intel/aws/ec2/snapshots.py +47 -84
- cartography/intel/aws/ec2/subnets.py +37 -63
- cartography/intel/aws/ecr.py +55 -80
- cartography/intel/aws/elasticache.py +102 -79
- cartography/intel/aws/guardduty.py +275 -0
- cartography/intel/aws/resources.py +2 -0
- cartography/intel/aws/secretsmanager.py +62 -44
- cartography/intel/github/repos.py +370 -28
- cartography/models/aws/cloudtrail/management_events.py +95 -6
- cartography/models/aws/cloudtrail/trail.py +21 -0
- cartography/models/aws/cloudwatch/metric_alarm.py +53 -0
- cartography/models/aws/ec2/security_group_rules.py +109 -0
- cartography/models/aws/ec2/security_groups.py +90 -0
- cartography/models/aws/ec2/snapshots.py +58 -0
- cartography/models/aws/ec2/subnets.py +65 -0
- cartography/models/aws/ec2/volumes.py +20 -0
- cartography/models/aws/ecr/__init__.py +0 -0
- cartography/models/aws/ecr/image.py +41 -0
- cartography/models/aws/ecr/repository.py +72 -0
- cartography/models/aws/ecr/repository_image.py +95 -0
- cartography/models/aws/elasticache/__init__.py +0 -0
- cartography/models/aws/elasticache/cluster.py +65 -0
- cartography/models/aws/elasticache/topic.py +67 -0
- cartography/models/aws/guardduty/__init__.py +1 -0
- cartography/models/aws/guardduty/findings.py +102 -0
- cartography/models/aws/secretsmanager/secret.py +106 -0
- cartography/models/github/dependencies.py +74 -0
- cartography/models/github/manifests.py +49 -0
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/METADATA +3 -3
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/RECORD +44 -29
- cartography/data/jobs/cleanup/aws_import_ec2_security_groupinfo_cleanup.json +0 -24
- cartography/data/jobs/cleanup/aws_import_secrets_cleanup.json +0 -8
- cartography/data/jobs/cleanup/aws_import_snapshots_cleanup.json +0 -30
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/WHEEL +0 -0
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/entry_points.txt +0 -0
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/licenses/LICENSE +0 -0
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/top_level.txt +0 -0
cartography/intel/aws/ec2/subnets.py
CHANGED

@@ -1,17 +1,17 @@
 import logging
-from typing import Dict
-from typing import List
+from typing import Any

 import boto3
 import neo4j

+from cartography.client.core.tx import load
 from cartography.graph.job import GraphJob
 from cartography.models.aws.ec2.auto_scaling_groups import (
     EC2SubnetAutoScalingGroupSchema,
 )
 from cartography.models.aws.ec2.subnet_instance import EC2SubnetInstanceSchema
+from cartography.models.aws.ec2.subnets import EC2SubnetSchema
 from cartography.util import aws_handle_regions
-from cartography.util import run_cleanup_job
 from cartography.util import timeit

 from .util import get_botocore_config
@@ -21,86 +21,53 @@ logger = logging.getLogger(__name__)

 @timeit
 @aws_handle_regions
-def get_subnet_data(boto3_session: boto3.session.Session, region: str) -> List[Dict]:
+def get_subnet_data(
+    boto3_session: boto3.session.Session, region: str
+) -> list[dict[str, Any]]:
     client = boto3_session.client(
         "ec2",
         region_name=region,
         config=get_botocore_config(),
     )
     paginator = client.get_paginator("describe_subnets")
-    subnets: List[Dict] = []
+    subnets: list[dict[str, Any]] = []
     for page in paginator.paginate():
         subnets.extend(page["Subnets"])
     return subnets


+def transform_subnet_data(subnets: list[dict[str, Any]]) -> list[dict[str, Any]]:
+    """Transform subnet data into a loadable format."""
+    transformed: list[dict[str, Any]] = []
+    for subnet in subnets:
+        transformed.append(subnet.copy())
+    return transformed
+
+
 @timeit
 def load_subnets(
     neo4j_session: neo4j.Session,
-    data: List[Dict],
+    data: list[dict[str, Any]],
     region: str,
     aws_account_id: str,
     aws_update_tag: int,
 ) -> None:
-
-
-
-
-
-
-
-    snet.map_customer_owned_ip_on_launch = subnet.MapCustomerOwnedIpOnLaunch,
-    snet.state = subnet.State, snet.assignipv6addressoncreation = subnet.AssignIpv6AddressOnCreation,
-    snet.map_public_ip_on_launch = subnet.MapPublicIpOnLaunch, snet.subnet_arn = subnet.SubnetArn,
-    snet.availability_zone = subnet.AvailabilityZone, snet.availability_zone_id = subnet.AvailabilityZoneId,
-    snet.subnet_id = subnet.SubnetId
-    """
-
-    ingest_subnet_vpc_relations = """
-    UNWIND $subnets as subnet
-    MATCH (snet:EC2Subnet{subnetid: subnet.SubnetId}), (vpc:AWSVpc{id: subnet.VpcId})
-    MERGE (snet)-[r:MEMBER_OF_AWS_VPC]->(vpc)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $aws_update_tag
-    """
-
-    ingest_subnet_aws_account_relations = """
-    UNWIND $subnets as subnet
-    MATCH (snet:EC2Subnet{subnetid: subnet.SubnetId}), (aws:AWSAccount{id: $aws_account_id})
-    MERGE (aws)-[r:RESOURCE]->(snet)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $aws_update_tag
-    """
-
-    neo4j_session.run(
-        ingest_subnets,
-        subnets=data,
-        aws_update_tag=aws_update_tag,
-        region=region,
-        aws_account_id=aws_account_id,
-    )
-    neo4j_session.run(
-        ingest_subnet_vpc_relations,
-        subnets=data,
-        aws_update_tag=aws_update_tag,
-        region=region,
-        aws_account_id=aws_account_id,
-    )
-    neo4j_session.run(
-        ingest_subnet_aws_account_relations,
-        subnets=data,
-        aws_update_tag=aws_update_tag,
-        region=region,
-        aws_account_id=aws_account_id,
+    load(
+        neo4j_session,
+        EC2SubnetSchema(),
+        data,
+        Region=region,
+        AWS_ID=aws_account_id,
+        lastupdated=aws_update_tag,
     )


 @timeit
-def cleanup_subnets(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
-
-
+def cleanup_subnets(
+    neo4j_session: neo4j.Session, common_job_parameters: dict[str, Any]
+) -> None:
+    GraphJob.from_node_schema(EC2SubnetSchema(), common_job_parameters).run(
         neo4j_session,
-        common_job_parameters,
     )
     GraphJob.from_node_schema(EC2SubnetInstanceSchema(), common_job_parameters).run(
         neo4j_session,
@@ -115,10 +82,10 @@ def cleanup_subnets(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
 def sync_subnets(
     neo4j_session: neo4j.Session,
     boto3_session: boto3.session.Session,
-    regions: List[str],
+    regions: list[str],
     current_aws_account_id: str,
     update_tag: int,
-    common_job_parameters: Dict,
+    common_job_parameters: dict[str, Any],
 ) -> None:
     for region in regions:
         logger.info(
@@ -127,5 +94,12 @@ def sync_subnets(
             current_aws_account_id,
         )
         data = get_subnet_data(boto3_session, region)
-
+        transformed = transform_subnet_data(data)
+        load_subnets(
+            neo4j_session,
+            transformed,
+            region,
+            current_aws_account_id,
+            update_tag,
+        )
     cleanup_subnets(neo4j_session, common_job_parameters)

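The subnets module now follows cartography's schema-driven ingestion pattern: the raw describe_subnets output is transformed, handed to load() with EC2SubnetSchema, and stale nodes are removed by GraphJob.from_node_schema() instead of a hand-written cleanup job. Below is a minimal sketch (not part of the diff) of how the new sync_subnets entry point could be exercised; the Neo4j URI, AWS profile, account ID, and the common_job_parameters keys are illustrative assumptions.

# Minimal sketch, not from the package: driving the new schema-based subnet sync.
# Connection details, account ID, and common_job_parameters keys are assumptions.
import time

import boto3
import neo4j

from cartography.intel.aws.ec2.subnets import sync_subnets

update_tag = int(time.time())
driver = neo4j.GraphDatabase.driver("bolt://localhost:7687")   # assumed local Neo4j
boto3_session = boto3.Session(profile_name="default")          # assumed AWS profile

with driver.session() as neo4j_session:
    sync_subnets(
        neo4j_session,
        boto3_session,
        ["us-east-1"],                   # regions to sync
        "123456789012",                  # current_aws_account_id (example value)
        update_tag,
        {"UPDATE_TAG": update_tag, "AWS_ID": "123456789012"},  # assumed cleanup params
    )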
cartography/intel/aws/ecr.py
CHANGED

@@ -6,9 +6,12 @@ from typing import List
 import boto3
 import neo4j

+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
+from cartography.models.aws.ecr.image import ECRImageSchema
+from cartography.models.aws.ecr.repository import ECRRepositorySchema
+from cartography.models.aws.ecr.repository_image import ECRRepositoryImageSchema
 from cartography.util import aws_handle_regions
-from cartography.util import batch
-from cartography.util import run_cleanup_job
 from cartography.util import timeit
 from cartography.util import to_asynchronous
 from cartography.util import to_synchronous
@@ -74,33 +77,17 @@ def load_ecr_repositories(
     current_aws_account_id: str,
     aws_update_tag: int,
 ) -> None:
-    query = """
-    UNWIND $Repositories as ecr_repo
-    MERGE (repo:ECRRepository{id: ecr_repo.repositoryArn})
-    ON CREATE SET repo.firstseen = timestamp(),
-    repo.arn = ecr_repo.repositoryArn,
-    repo.name = ecr_repo.repositoryName,
-    repo.region = $Region,
-    repo.created_at = ecr_repo.createdAt
-    SET repo.lastupdated = $aws_update_tag,
-    repo.uri = ecr_repo.repositoryUri
-    WITH repo
-
-    MATCH (owner:AWSAccount{id: $AWS_ACCOUNT_ID})
-    MERGE (owner)-[r:RESOURCE]->(repo)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $aws_update_tag
-    """
     logger.info(
         f"Loading {len(repos)} ECR repositories for region {region} into graph.",
     )
-
-
-
+    load(
+        neo4j_session,
+        ECRRepositorySchema(),
+        repos,
+        lastupdated=aws_update_tag,
         Region=region,
-
-
-    ).consume()  # See issue #440
+        AWS_ID=current_aws_account_id,
+    )


 @timeit
@@ -114,8 +101,13 @@ def transform_ecr_repository_images(repo_data: Dict) -> List[Dict]:
     for repo_uri in sorted(repo_data.keys()):
         repo_images = repo_data[repo_uri]
         for img in repo_images:
-
+            digest = img.get("imageDigest")
+            if digest:
+                tag = img.get("imageTag")
+                uri = repo_uri + (f":{tag}" if tag else "")
                 img["repo_uri"] = repo_uri
+                img["uri"] = uri
+                img["id"] = uri
                 repo_images_list.append(img)
             else:
                 logger.warning(
@@ -127,74 +119,51 @@ def transform_ecr_repository_images(repo_data: Dict) -> List[Dict]:
     return repo_images_list


-def _load_ecr_repo_img_tx(
-    tx: neo4j.Transaction,
-    repo_images_list: List[Dict],
-    aws_update_tag: int,
-    region: str,
-) -> None:
-    query = """
-    UNWIND $RepoList as repo_img
-    MERGE (ri:ECRRepositoryImage{id: repo_img.repo_uri + COALESCE(":" + repo_img.imageTag, '')})
-    ON CREATE SET ri.firstseen = timestamp()
-    SET ri.lastupdated = $aws_update_tag,
-    ri.tag = repo_img.imageTag,
-    ri.uri = repo_img.repo_uri + COALESCE(":" + repo_img.imageTag, ''),
-    ri.image_size_bytes = repo_img.imageSizeInBytes,
-    ri.image_pushed_at = repo_img.imagePushedAt,
-    ri.image_manifest_media_type = repo_img.imageManifestMediaType,
-    ri.artifact_media_type = repo_img.artifactMediaType,
-    ri.last_recorded_pull_time = repo_img.lastRecordedPullTime
-    WITH ri, repo_img
-
-    MERGE (img:ECRImage{id: repo_img.imageDigest})
-    ON CREATE SET img.firstseen = timestamp(),
-    img.digest = repo_img.imageDigest
-    SET img.lastupdated = $aws_update_tag,
-    img.region = $Region
-    WITH ri, img, repo_img
-
-    MERGE (ri)-[r1:IMAGE]->(img)
-    ON CREATE SET r1.firstseen = timestamp()
-    SET r1.lastupdated = $aws_update_tag
-    WITH ri, repo_img
-
-    MATCH (repo:ECRRepository{uri: repo_img.repo_uri})
-    MERGE (repo)-[r2:REPO_IMAGE]->(ri)
-    ON CREATE SET r2.firstseen = timestamp()
-    SET r2.lastupdated = $aws_update_tag
-    """
-    tx.run(
-        query,
-        RepoList=repo_images_list,
-        Region=region,
-        aws_update_tag=aws_update_tag,
-    )
-
-
 @timeit
 def load_ecr_repository_images(
     neo4j_session: neo4j.Session,
     repo_images_list: List[Dict],
     region: str,
+    current_aws_account_id: str,
     aws_update_tag: int,
 ) -> None:
     logger.info(
         f"Loading {len(repo_images_list)} ECR repository images in {region} into graph.",
     )
-    for
-
-
-
-
-
-
+    image_digests = {img["imageDigest"] for img in repo_images_list}
+    ecr_images = [{"imageDigest": d} for d in image_digests]
+
+    load(
+        neo4j_session,
+        ECRImageSchema(),
+        ecr_images,
+        lastupdated=aws_update_tag,
+        Region=region,
+        AWS_ID=current_aws_account_id,
+    )
+
+    load(
+        neo4j_session,
+        ECRRepositoryImageSchema(),
+        repo_images_list,
+        lastupdated=aws_update_tag,
+        Region=region,
+        AWS_ID=current_aws_account_id,
+    )


 @timeit
 def cleanup(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
     logger.debug("Running ECR cleanup job.")
-
+    GraphJob.from_node_schema(ECRRepositorySchema(), common_job_parameters).run(
+        neo4j_session
+    )
+    GraphJob.from_node_schema(ECRRepositoryImageSchema(), common_job_parameters).run(
+        neo4j_session
+    )
+    GraphJob.from_node_schema(ECRImageSchema(), common_job_parameters).run(
+        neo4j_session
+    )


 def _get_image_data(
@@ -251,5 +220,11 @@ def sync(
         update_tag,
     )
     repo_images_list = transform_ecr_repository_images(image_data)
-    load_ecr_repository_images(
+    load_ecr_repository_images(
+        neo4j_session,
+        repo_images_list,
+        region,
+        current_aws_account_id,
+        update_tag,
+    )
     cleanup(neo4j_session, common_job_parameters)

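The rewritten ECR transform is the subtle part of this file: each image record gains a uri and id derived from the repository URI plus the tag when one exists, and the loader then splits the list into deduplicated ECRImage digests and per-tag ECRRepositoryImage records. A small worked illustration of that logic follows; the repository URI, digests, and tags are hypothetical.

# Illustration only; repository URI, digests, and tags below are made up.
repo_uri = "000000000000.dkr.ecr.us-east-1.amazonaws.com/example-repo"
repo_images = [
    {"imageDigest": "sha256:aaa", "imageTag": "latest"},
    {"imageDigest": "sha256:bbb"},  # untagged image
]

repo_images_list = []
for img in repo_images:
    digest = img.get("imageDigest")
    if digest:
        tag = img.get("imageTag")
        uri = repo_uri + (f":{tag}" if tag else "")
        img["repo_uri"] = repo_uri
        img["uri"] = uri
        img["id"] = uri  # ECRRepositoryImage id: repo URI plus ":tag" when tagged
        repo_images_list.append(img)

# ECRImage nodes are loaded once per unique digest, however many tags point at it.
image_digests = {img["imageDigest"] for img in repo_images_list}
ecr_images = [{"imageDigest": d} for d in image_digests]

print([img["id"] for img in repo_images_list])
# ['...example-repo:latest', '...example-repo']  (untagged images fall back to the bare repo URI)
print(ecr_images)
# [{'imageDigest': 'sha256:aaa'}, {'imageDigest': 'sha256:bbb'}]  (order may vary)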
cartography/intel/aws/elasticache.py
CHANGED

@@ -1,118 +1,132 @@
 import logging
-from typing import Dict
-from typing import List
-from typing import Set
+from typing import Any

 import boto3
 import neo4j

+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
+from cartography.models.aws.elasticache.cluster import ElasticacheClusterSchema
+from cartography.models.aws.elasticache.topic import ElasticacheTopicSchema
 from cartography.stats import get_stats_client
 from cartography.util import aws_handle_regions
 from cartography.util import merge_module_sync_metadata
-from cartography.util import run_cleanup_job
 from cartography.util import timeit

 logger = logging.getLogger(__name__)
 stat_handler = get_stats_client(__name__)


-def _get_topic(cluster: Dict) -> Dict:
-    return cluster["NotificationConfiguration"]
-
-
-def transform_elasticache_topics(cluster_data: List[Dict]) -> List[Dict]:
-    """
-    Collect unique TopicArns from the cluster data
-    """
-    seen: Set[str] = set()
-    topics: List[Dict] = []
-    for cluster in cluster_data:
-        topic = _get_topic(cluster)
-        topic_arn = topic["TopicArn"]
-        if topic_arn not in seen:
-            seen.add(topic_arn)
-            topics.append(topic)
-    return topics
-
-
 @timeit
 @aws_handle_regions
 def get_elasticache_clusters(
     boto3_session: boto3.session.Session,
     region: str,
-) -> List[Dict]:
-    logger.debug(f"Getting ElastiCache Clusters in region '{region}'.")
+) -> list[dict[str, Any]]:
     client = boto3_session.client("elasticache", region_name=region)
     paginator = client.get_paginator("describe_cache_clusters")
-    clusters: List[Dict] = []
+    clusters: list[dict[str, Any]] = []
     for page in paginator.paginate():
-        clusters.extend(page["CacheClusters"])
+        clusters.extend(page.get("CacheClusters", []))
     return clusters


+def transform_elasticache_clusters(
+    clusters: list[dict[str, Any]], region: str
+) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
+    cluster_data: list[dict[str, Any]] = []
+    topics: dict[str, dict[str, Any]] = {}
+
+    for cluster in clusters:
+        notification = cluster.get("NotificationConfiguration", {})
+        topic_arn = notification.get("TopicArn")
+        cluster_record = {
+            "ARN": cluster["ARN"],
+            "CacheClusterId": cluster["CacheClusterId"],
+            "CacheNodeType": cluster.get("CacheNodeType"),
+            "Engine": cluster.get("Engine"),
+            "EngineVersion": cluster.get("EngineVersion"),
+            "CacheClusterStatus": cluster.get("CacheClusterStatus"),
+            "NumCacheNodes": cluster.get("NumCacheNodes"),
+            "PreferredAvailabilityZone": cluster.get("PreferredAvailabilityZone"),
+            "PreferredMaintenanceWindow": cluster.get("PreferredMaintenanceWindow"),
+            "CacheClusterCreateTime": cluster.get("CacheClusterCreateTime"),
+            "CacheSubnetGroupName": cluster.get("CacheSubnetGroupName"),
+            "AutoMinorVersionUpgrade": cluster.get("AutoMinorVersionUpgrade"),
+            "ReplicationGroupId": cluster.get("ReplicationGroupId"),
+            "SnapshotRetentionLimit": cluster.get("SnapshotRetentionLimit"),
+            "SnapshotWindow": cluster.get("SnapshotWindow"),
+            "AuthTokenEnabled": cluster.get("AuthTokenEnabled"),
+            "TransitEncryptionEnabled": cluster.get("TransitEncryptionEnabled"),
+            "AtRestEncryptionEnabled": cluster.get("AtRestEncryptionEnabled"),
+            "TopicArn": topic_arn,
+            "Region": region,
+        }
+        cluster_data.append(cluster_record)
+
+        if topic_arn:
+            topics.setdefault(
+                topic_arn,
+                {
+                    "TopicArn": topic_arn,
+                    "TopicStatus": notification.get("TopicStatus"),
+                    "cluster_arns": [],
+                },
+            )["cluster_arns"].append(cluster["ARN"])
+
+    return cluster_data, list(topics.values())
+
+
 @timeit
 def load_elasticache_clusters(
     neo4j_session: neo4j.Session,
-    clusters: List[Dict],
+    clusters: list[dict[str, Any]],
     region: str,
     aws_account_id: str,
     update_tag: int,
 ) -> None:
-    query = """
-    UNWIND $clusters as elasticache_cluster
-    MERGE (cluster:ElasticacheCluster{id:elasticache_cluster.ARN})
-    ON CREATE SET cluster.firstseen = timestamp(),
-    cluster.arn = elasticache_cluster.ARN,
-    cluster.topic_arn = elasticache_cluster.NotificationConfiguration.TopicArn,
-    cluster.id = elasticache_cluster.CacheClusterId,
-    cluster.region = $region
-    SET cluster.lastupdated = $aws_update_tag
-
-    WITH cluster, elasticache_cluster
-    MATCH (owner:AWSAccount{id: $aws_account_id})
-    MERGE (owner)-[r3:RESOURCE]->(cluster)
-    ON CREATE SET r3.firstseen = timestamp()
-    SET r3.lastupdated = $aws_update_tag
-
-    WITH elasticache_cluster, owner
-    WHERE NOT elasticache_cluster.NotificationConfiguration IS NULL
-    MERGE (topic:ElasticacheTopic{id: elasticache_cluster.NotificationConfiguration.TopicArn})
-    ON CREATE SET topic.firstseen = timestamp(),
-    topic.arn = elasticache_cluster.NotificationConfiguration.TopicArn
-    SET topic.lastupdated = $aws_update_tag,
-    topic.status = elasticache_cluster.NotificationConfiguration.Status
-
-    MERGE (topic)-[r:CACHE_CLUSTER]->(cluster)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $aws_update_tag
-    WITH cluster, topic
-
-    MERGE (owner)-[r2:RESOURCE]->(topic)
-    ON CREATE SET r2.firstseen = timestamp()
-    SET r2.lastupdated = $aws_update_tag
-    """
     logger.info(
-        f"Loading
+        f"Loading {len(clusters)} ElastiCache clusters for region '{region}' into graph."
     )
-
-
-
-
-
-
+    load(
+        neo4j_session,
+        ElasticacheClusterSchema(),
+        clusters,
+        lastupdated=update_tag,
+        Region=region,
+        AWS_ID=aws_account_id,
     )


 @timeit
-def
+def load_elasticache_topics(
     neo4j_session: neo4j.Session,
-
+    topics: list[dict[str, Any]],
+    aws_account_id: str,
     update_tag: int,
 ) -> None:
-
-
+    if not topics:
+        return
+    logger.info(f"Loading {len(topics)} ElastiCache topics into graph.")
+    load(
         neo4j_session,
-
+        ElasticacheTopicSchema(),
+        topics,
+        lastupdated=update_tag,
+        AWS_ID=aws_account_id,
+    )
+
+
+@timeit
+def cleanup(
+    neo4j_session: neo4j.Session,
+    common_job_parameters: dict[str, Any],
+) -> None:
+    GraphJob.from_node_schema(ElasticacheClusterSchema(), common_job_parameters).run(
+        neo4j_session
+    )
+    GraphJob.from_node_schema(ElasticacheTopicSchema(), common_job_parameters).run(
+        neo4j_session
     )


@@ -120,24 +134,33 @@ def cleanup(
 def sync(
     neo4j_session: neo4j.Session,
     boto3_session: boto3.session.Session,
-    regions: List[str],
+    regions: list[str],
     current_aws_account_id: str,
     update_tag: int,
-    common_job_parameters: Dict,
+    common_job_parameters: dict[str, Any],
 ) -> None:
     for region in regions:
         logger.info(
-
+            "Syncing ElastiCache clusters for region '%s' in account '%s'.",
+            region,
+            current_aws_account_id,
         )
-
+        raw_clusters = get_elasticache_clusters(boto3_session, region)
+        cluster_data, topic_data = transform_elasticache_clusters(raw_clusters, region)
         load_elasticache_clusters(
             neo4j_session,
-
+            cluster_data,
             region,
             current_aws_account_id,
             update_tag,
         )
-
+        load_elasticache_topics(
+            neo4j_session,
+            topic_data,
+            current_aws_account_id,
+            update_tag,
+        )
+    cleanup(neo4j_session, common_job_parameters)
     merge_module_sync_metadata(
         neo4j_session,
         group_type="AWSAccount",
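The new transform_elasticache_clusters separates the describe output into flat cluster records and a deduplicated topic list; the dict.setdefault(...)["cluster_arns"].append(...) idiom keeps one record per TopicArn while tracking every cluster that notifies it. A worked illustration follows; the cluster and topic ARNs are made up.

# Illustration only; ARNs below are made up.
from typing import Any

clusters = [
    {
        "ARN": "arn:aws:elasticache:us-east-1:000000000000:cluster:cache-a",
        "NotificationConfiguration": {
            "TopicArn": "arn:aws:sns:us-east-1:000000000000:cache-alerts",
            "TopicStatus": "active",
        },
    },
    {
        "ARN": "arn:aws:elasticache:us-east-1:000000000000:cluster:cache-b",
        "NotificationConfiguration": {
            "TopicArn": "arn:aws:sns:us-east-1:000000000000:cache-alerts",
            "TopicStatus": "active",
        },
    },
]

topics: dict[str, dict[str, Any]] = {}
for cluster in clusters:
    notification = cluster.get("NotificationConfiguration", {})
    topic_arn = notification.get("TopicArn")
    if topic_arn:
        # setdefault returns the existing record on later passes, so the topic is
        # recorded once while accumulating every cluster ARN that uses it.
        topics.setdefault(
            topic_arn,
            {
                "TopicArn": topic_arn,
                "TopicStatus": notification.get("TopicStatus"),
                "cluster_arns": [],
            },
        )["cluster_arns"].append(cluster["ARN"])

print(list(topics.values()))
# [{'TopicArn': 'arn:aws:sns:us-east-1:000000000000:cache-alerts',
#   'TopicStatus': 'active',
#   'cluster_arns': ['...cluster:cache-a', '...cluster:cache-b']}]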