cartography 0.107.0rc3__py3-none-any.whl → 0.108.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cartography has been flagged for review; see the registry's advisory page for details.
- cartography/_version.py +2 -2
- cartography/cli.py +10 -0
- cartography/config.py +5 -0
- cartography/data/indexes.cypher +0 -10
- cartography/data/jobs/cleanup/github_repos_cleanup.json +2 -0
- cartography/intel/aws/__init__.py +1 -0
- cartography/intel/aws/cloudtrail.py +17 -4
- cartography/intel/aws/cloudtrail_management_events.py +560 -16
- cartography/intel/aws/cloudwatch.py +73 -4
- cartography/intel/aws/ec2/security_groups.py +140 -122
- cartography/intel/aws/ec2/snapshots.py +47 -84
- cartography/intel/aws/ec2/subnets.py +37 -63
- cartography/intel/aws/ecr.py +55 -80
- cartography/intel/aws/elasticache.py +102 -79
- cartography/intel/aws/guardduty.py +275 -0
- cartography/intel/aws/resources.py +2 -0
- cartography/intel/aws/secretsmanager.py +62 -44
- cartography/intel/github/repos.py +370 -28
- cartography/models/aws/cloudtrail/management_events.py +95 -6
- cartography/models/aws/cloudtrail/trail.py +21 -0
- cartography/models/aws/cloudwatch/metric_alarm.py +53 -0
- cartography/models/aws/ec2/security_group_rules.py +109 -0
- cartography/models/aws/ec2/security_groups.py +90 -0
- cartography/models/aws/ec2/snapshots.py +58 -0
- cartography/models/aws/ec2/subnets.py +65 -0
- cartography/models/aws/ec2/volumes.py +20 -0
- cartography/models/aws/ecr/__init__.py +0 -0
- cartography/models/aws/ecr/image.py +41 -0
- cartography/models/aws/ecr/repository.py +72 -0
- cartography/models/aws/ecr/repository_image.py +95 -0
- cartography/models/aws/elasticache/__init__.py +0 -0
- cartography/models/aws/elasticache/cluster.py +65 -0
- cartography/models/aws/elasticache/topic.py +67 -0
- cartography/models/aws/guardduty/__init__.py +1 -0
- cartography/models/aws/guardduty/findings.py +102 -0
- cartography/models/aws/secretsmanager/secret.py +106 -0
- cartography/models/github/dependencies.py +74 -0
- cartography/models/github/manifests.py +49 -0
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/METADATA +3 -3
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/RECORD +44 -29
- cartography/data/jobs/cleanup/aws_import_ec2_security_groupinfo_cleanup.json +0 -24
- cartography/data/jobs/cleanup/aws_import_secrets_cleanup.json +0 -8
- cartography/data/jobs/cleanup/aws_import_snapshots_cleanup.json +0 -30
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/WHEEL +0 -0
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/entry_points.txt +0 -0
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/licenses/LICENSE +0 -0
- {cartography-0.107.0rc3.dist-info → cartography-0.108.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Any
|
|
3
|
+
from typing import Dict
|
|
4
|
+
from typing import List
|
|
5
|
+
|
|
6
|
+
import boto3
|
|
7
|
+
import boto3.session
|
|
8
|
+
import neo4j
|
|
9
|
+
|
|
10
|
+
from cartography.client.core.tx import load
|
|
11
|
+
from cartography.graph.job import GraphJob
|
|
12
|
+
from cartography.models.aws.guardduty.findings import GuardDutyFindingSchema
|
|
13
|
+
from cartography.stats import get_stats_client
|
|
14
|
+
from cartography.util import aws_handle_regions
|
|
15
|
+
from cartography.util import aws_paginate
|
|
16
|
+
from cartography.util import merge_module_sync_metadata
|
|
17
|
+
from cartography.util import timeit
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)
|
|
20
|
+
stat_handler = get_stats_client(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _get_severity_range_for_threshold(
|
|
24
|
+
severity_threshold: str | None,
|
|
25
|
+
) -> List[str] | None:
|
|
26
|
+
"""
|
|
27
|
+
Convert severity threshold string to GuardDuty numeric severity range.
|
|
28
|
+
|
|
29
|
+
GuardDuty severity mappings:
|
|
30
|
+
- LOW: 1.0-3.9
|
|
31
|
+
- MEDIUM: 4.0-6.9
|
|
32
|
+
- HIGH: 7.0-8.9
|
|
33
|
+
- CRITICAL: 9.0-10.0
|
|
34
|
+
|
|
35
|
+
:param severity_threshold: Severity threshold (LOW, MEDIUM, HIGH, CRITICAL)
|
|
36
|
+
:return: List of numeric severity ranges to include, or None for no filtering
|
|
37
|
+
"""
|
|
38
|
+
if not severity_threshold:
|
|
39
|
+
return None
|
|
40
|
+
|
|
41
|
+
threshold_upper = severity_threshold.upper().strip()
|
|
42
|
+
|
|
43
|
+
# Map threshold to numeric ranges - include threshold level and above
|
|
44
|
+
if threshold_upper == "LOW":
|
|
45
|
+
return ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"] # All severities
|
|
46
|
+
elif threshold_upper == "MEDIUM":
|
|
47
|
+
return ["4", "5", "6", "7", "8", "9", "10"] # MEDIUM and above
|
|
48
|
+
elif threshold_upper == "HIGH":
|
|
49
|
+
return ["7", "8", "9", "10"] # HIGH and CRITICAL only
|
|
50
|
+
elif threshold_upper == "CRITICAL":
|
|
51
|
+
return ["9", "10"] # CRITICAL only
|
|
52
|
+
else:
|
|
53
|
+
return None
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@aws_handle_regions
@timeit
def get_detectors(
    boto3_session: boto3.session.Session,
    region: str,
) -> List[str]:
    """
    Get GuardDuty detector IDs for all detectors in a region.

    :param boto3_session: Boto3 session used to create the GuardDuty client.
    :param region: AWS region to query.
    :return: List of detector IDs; empty when GuardDuty has no detectors
        in the region.
    """
    client = boto3_session.client("guardduty", region_name=region)

    # list_detectors is a paginated API. Use aws_paginate (as get_findings
    # does for list_findings) so every page of detector IDs is collected,
    # instead of only the first page.
    detector_ids = list(aws_paginate(client, "list_detectors", "DetectorIds"))

    if not detector_ids:
        logger.info(f"No GuardDuty detectors found in region {region}")
        return []

    logger.info(f"Found {len(detector_ids)} GuardDuty detectors in region {region}")
    return detector_ids
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
@aws_handle_regions
@timeit
def get_findings(
    boto3_session: boto3.session.Session,
    region: str,
    detector_id: str,
    severity_threshold: str | None = None,
) -> List[Dict[str, Any]]:
    """
    Get GuardDuty findings for a specific detector.

    Only fetches unarchived findings to avoid including closed/resolved
    findings. Optionally filters by severity threshold.

    :param boto3_session: Boto3 session used to create the GuardDuty client.
    :param region: AWS region of the detector.
    :param detector_id: Detector whose findings are fetched.
    :param severity_threshold: Optional LOW/MEDIUM/HIGH/CRITICAL floor.
    :return: List of raw finding dicts from the GuardDuty API.
    """
    client = boto3_session.client("guardduty", region_name=region)

    # Always restrict the query to unarchived findings.
    criterion = {"service.archived": {"Equals": ["false"]}}

    # Apply an optional severity floor derived from the threshold.
    severity_range = _get_severity_range_for_threshold(severity_threshold)
    if severity_range:
        # The range is contiguous, so the filter reduces to its lower bound.
        floor = min(float(value) for value in severity_range)
        # type ignored because the AWS API mixes value types across criteria
        criterion["severity"] = {"GreaterThanOrEqual": int(floor)}  # type: ignore

    # Collect every matching finding ID for this detector.
    finding_ids = list(
        aws_paginate(
            client,
            "list_findings",
            "FindingIds",
            DetectorId=detector_id,
            FindingCriteria={"Criterion": criterion},
        )
    )

    if not finding_ids:
        logger.info(f"No findings found for detector {detector_id} in region {region}")
        return []

    # get_findings accepts at most 50 IDs per call, so fetch in batches.
    findings_data: List[Dict[str, Any]] = []
    batch_size = 50
    for start in range(0, len(finding_ids), batch_size):
        response = client.get_findings(
            DetectorId=detector_id,
            FindingIds=finding_ids[start : start + batch_size],
        )
        findings_data.extend(response.get("Findings", []))

    logger.info(
        f"Retrieved {len(findings_data)} findings for detector {detector_id} in region {region}"
    )
    return findings_data
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def transform_findings(findings: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Transform GuardDuty findings from API response to schema format.

    :param findings: Raw finding dicts as returned by GuardDuty get_findings.
    :return: Flattened dicts matching the finding schema. Every returned dict
        carries a "resource_id" key (None when no ID can be extracted), so all
        rows share a uniform shape.
    """
    transformed: List[Dict[str, Any]] = []
    for f in findings:
        item: Dict[str, Any] = {
            "id": f["Id"],
            "arn": f.get("Arn"),
            "type": f.get("Type"),
            "severity": f.get("Severity"),
            "title": f.get("Title"),
            "description": f.get("Description"),
            "confidence": f.get("Confidence"),
            "eventfirstseen": f.get("EventFirstSeen"),
            "eventlastseen": f.get("EventLastSeen"),
            "accountid": f.get("AccountId"),
            "region": f.get("Region"),
            "detectorid": f.get("DetectorId"),
            "archived": f.get("Archived"),
        }

        # Handle nested resource information
        resource = f.get("Resource", {})
        item["resource_type"] = resource.get("ResourceType")

        # Extract resource ID based on resource type. Default to None first so
        # the key is always present — the original code left "resource_id"
        # unset for resource types other than Instance/S3Bucket (e.g.
        # AccessKey), producing rows with inconsistent keys.
        item["resource_id"] = None
        if item["resource_type"] == "Instance":
            details = resource.get("InstanceDetails", {})
            item["resource_id"] = details.get("InstanceId")
        elif item["resource_type"] == "S3Bucket":
            buckets = resource.get("S3BucketDetails") or []
            if buckets:
                item["resource_id"] = buckets[0].get("Name")

        transformed.append(item)

    return transformed
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
@timeit
def load_guardduty_findings(
    neo4j_session: neo4j.Session,
    data: List[Dict[str, Any]],
    region: str,
    aws_account_id: str,
    update_tag: int,
) -> None:
    """
    Load GuardDuty findings information into the graph.

    :param neo4j_session: Neo4j session to write to.
    :param data: Findings already transformed by transform_findings().
    :param region: AWS region the findings belong to.
    :param aws_account_id: Account that owns the findings.
    :param update_tag: Timestamp tag applied to all written nodes.
    """
    logger.info(
        f"Loading {len(data)} GuardDuty findings for region {region} into graph."
    )

    finding_schema = GuardDutyFindingSchema()
    load(
        neo4j_session,
        finding_schema,
        data,
        lastupdated=update_tag,
        Region=region,
        AWS_ID=aws_account_id,
    )
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
@timeit
def cleanup_guardduty(
    neo4j_session: neo4j.Session, common_job_parameters: Dict
) -> None:
    """
    Run GuardDuty cleanup job.

    Removes GuardDutyFinding nodes whose lastupdated tag no longer matches
    the current sync, via the schema-generated cleanup job.
    """
    logger.debug("Running GuardDuty cleanup job.")
    GraphJob.from_node_schema(GuardDutyFindingSchema(), common_job_parameters).run(
        neo4j_session
    )
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
@timeit
def sync(
    neo4j_session: neo4j.Session,
    boto3_session: boto3.session.Session,
    regions: List[str],
    current_aws_account_id: str,
    update_tag: int,
    common_job_parameters: Dict,
) -> None:
    """
    Sync GuardDuty findings for all regions.
    Severity threshold filter is obtained from common_job_parameters.
    """
    # Optional severity floor (LOW/MEDIUM/HIGH/CRITICAL) for finding retrieval.
    severity_threshold = common_job_parameters.get("aws_guardduty_severity_threshold")

    for region in regions:
        logger.info(
            f"Syncing GuardDuty findings for {region} in account {current_aws_account_id}"
        )

        detector_ids = get_detectors(boto3_session, region)
        if not detector_ids:
            logger.info(f"No GuardDuty detectors found in region {region}, skipping.")
            continue

        # Accumulate findings across every detector in this region before loading.
        region_findings: List[Dict[str, Any]] = []
        for detector_id in detector_ids:
            region_findings.extend(
                get_findings(boto3_session, region, detector_id, severity_threshold)
            )

        load_guardduty_findings(
            neo4j_session,
            transform_findings(region_findings),
            region,
            current_aws_account_id,
            update_tag,
        )

    # Cleanup and metadata update run once, after all regions are processed.
    cleanup_guardduty(neo4j_session, common_job_parameters)

    merge_module_sync_metadata(
        neo4j_session,
        group_type="AWSAccount",
        group_id=current_aws_account_id,
        synced_type="GuardDutyFinding",
        update_tag=update_tag,
        stat_handler=stat_handler,
    )
|
|
@@ -18,6 +18,7 @@ from . import eks
|
|
|
18
18
|
from . import elasticache
|
|
19
19
|
from . import elasticsearch
|
|
20
20
|
from . import emr
|
|
21
|
+
from . import guardduty
|
|
21
22
|
from . import iam
|
|
22
23
|
from . import identitycenter
|
|
23
24
|
from . import inspector
|
|
@@ -111,5 +112,6 @@ RESOURCE_FUNCTIONS: Dict[str, Callable[..., None]] = {
|
|
|
111
112
|
"cloudtrail_management_events": cloudtrail_management_events.sync,
|
|
112
113
|
"cloudwatch": cloudwatch.sync,
|
|
113
114
|
"efs": efs.sync,
|
|
115
|
+
"guardduty": guardduty.sync,
|
|
114
116
|
"codebuild": codebuild.sync,
|
|
115
117
|
}
|
|
@@ -7,6 +7,7 @@ import neo4j
|
|
|
7
7
|
|
|
8
8
|
from cartography.client.core.tx import load
|
|
9
9
|
from cartography.graph.job import GraphJob
|
|
10
|
+
from cartography.models.aws.secretsmanager.secret import SecretsManagerSecretSchema
|
|
10
11
|
from cartography.models.aws.secretsmanager.secret_version import (
|
|
11
12
|
SecretsManagerSecretVersionSchema,
|
|
12
13
|
)
|
|
@@ -14,7 +15,6 @@ from cartography.stats import get_stats_client
|
|
|
14
15
|
from cartography.util import aws_handle_regions
|
|
15
16
|
from cartography.util import dict_date_to_epoch
|
|
16
17
|
from cartography.util import merge_module_sync_metadata
|
|
17
|
-
from cartography.util import run_cleanup_job
|
|
18
18
|
from cartography.util import timeit
|
|
19
19
|
|
|
20
20
|
logger = logging.getLogger(__name__)
|
|
@@ -32,6 +32,37 @@ def get_secret_list(boto3_session: boto3.session.Session, region: str) -> List[D
|
|
|
32
32
|
return secrets
|
|
33
33
|
|
|
34
34
|
|
|
35
|
+
def transform_secrets(
    secrets: List[Dict],
) -> List[Dict]:
    """
    Transform AWS Secrets Manager Secrets to match the data model.

    :param secrets: Raw secret dicts from the Secrets Manager list API.
    :return: Copies of the input dicts with date fields converted to epoch
        timestamps and RotationRules.AutomaticallyAfterDays flattened.
    """
    date_fields = (
        "CreatedDate",
        "LastRotatedDate",
        "LastChangedDate",
        "LastAccessedDate",
        "DeletedDate",
    )

    transformed_data = []
    for secret in secrets:
        # Work on a shallow copy so the caller's dicts are untouched.
        transformed = dict(secret)

        # Convert each date field to an epoch timestamp.
        for field in date_fields:
            transformed[field] = dict_date_to_epoch(secret, field)

        # Flatten nested RotationRules.AutomaticallyAfterDays property.
        rotation_rules = secret.get("RotationRules")
        if rotation_rules and "AutomaticallyAfterDays" in rotation_rules:
            transformed["RotationRulesAutomaticallyAfterDays"] = rotation_rules[
                "AutomaticallyAfterDays"
            ]

        transformed_data.append(transformed)

    return transformed_data
|
|
64
|
+
|
|
65
|
+
|
|
35
66
|
@timeit
|
|
36
67
|
def load_secrets(
|
|
37
68
|
neo4j_session: neo4j.Session,
|
|
@@ -40,48 +71,33 @@ def load_secrets(
|
|
|
40
71
|
current_aws_account_id: str,
|
|
41
72
|
aws_update_tag: int,
|
|
42
73
|
) -> None:
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
s.lastupdated = $aws_update_tag
|
|
56
|
-
WITH s
|
|
57
|
-
MATCH (owner:AWSAccount{id: $AWS_ACCOUNT_ID})
|
|
58
|
-
MERGE (owner)-[r:RESOURCE]->(s)
|
|
59
|
-
ON CREATE SET r.firstseen = timestamp()
|
|
60
|
-
SET r.lastupdated = $aws_update_tag
|
|
61
|
-
"""
|
|
62
|
-
for secret in data:
|
|
63
|
-
secret["LastRotatedDate"] = dict_date_to_epoch(secret, "LastRotatedDate")
|
|
64
|
-
secret["LastChangedDate"] = dict_date_to_epoch(secret, "LastChangedDate")
|
|
65
|
-
secret["LastAccessedDate"] = dict_date_to_epoch(secret, "LastAccessedDate")
|
|
66
|
-
secret["DeletedDate"] = dict_date_to_epoch(secret, "DeletedDate")
|
|
67
|
-
secret["CreatedDate"] = dict_date_to_epoch(secret, "CreatedDate")
|
|
68
|
-
|
|
69
|
-
neo4j_session.run(
|
|
70
|
-
ingest_secrets,
|
|
71
|
-
Secrets=data,
|
|
74
|
+
"""
|
|
75
|
+
Load transformed secrets into Neo4j using the data model.
|
|
76
|
+
Expects data to already be transformed by transform_secrets().
|
|
77
|
+
"""
|
|
78
|
+
logger.info(f"Loading {len(data)} Secrets for region {region} into graph.")
|
|
79
|
+
|
|
80
|
+
# Load using the schema-based approach
|
|
81
|
+
load(
|
|
82
|
+
neo4j_session,
|
|
83
|
+
SecretsManagerSecretSchema(),
|
|
84
|
+
data,
|
|
85
|
+
lastupdated=aws_update_tag,
|
|
72
86
|
Region=region,
|
|
73
|
-
|
|
74
|
-
aws_update_tag=aws_update_tag,
|
|
87
|
+
AWS_ID=current_aws_account_id,
|
|
75
88
|
)
|
|
76
89
|
|
|
77
90
|
|
|
78
91
|
@timeit
def cleanup_secrets(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
    """
    Run Secrets cleanup job using the data model.

    Removes secret nodes whose lastupdated tag no longer matches the
    current sync, via the schema-generated cleanup job.
    """
    logger.debug("Running Secrets cleanup job.")
    GraphJob.from_node_schema(SecretsManagerSecretSchema(), common_job_parameters).run(
        neo4j_session
    )
|
|
85
101
|
|
|
86
102
|
|
|
87
103
|
@timeit
|
|
@@ -121,8 +137,6 @@ def get_secret_versions(
|
|
|
121
137
|
|
|
122
138
|
def transform_secret_versions(
|
|
123
139
|
versions: List[Dict],
|
|
124
|
-
region: str,
|
|
125
|
-
aws_account_id: str,
|
|
126
140
|
) -> List[Dict]:
|
|
127
141
|
"""
|
|
128
142
|
Transform AWS Secrets Manager Secret Versions to match the data model.
|
|
@@ -203,7 +217,15 @@ def sync(
|
|
|
203
217
|
)
|
|
204
218
|
secrets = get_secret_list(boto3_session, region)
|
|
205
219
|
|
|
206
|
-
|
|
220
|
+
transformed_secrets = transform_secrets(secrets)
|
|
221
|
+
|
|
222
|
+
load_secrets(
|
|
223
|
+
neo4j_session,
|
|
224
|
+
transformed_secrets,
|
|
225
|
+
region,
|
|
226
|
+
current_aws_account_id,
|
|
227
|
+
update_tag,
|
|
228
|
+
)
|
|
207
229
|
|
|
208
230
|
all_versions = []
|
|
209
231
|
for secret in secrets:
|
|
@@ -216,11 +238,7 @@ def sync(
|
|
|
216
238
|
)
|
|
217
239
|
all_versions.extend(versions)
|
|
218
240
|
|
|
219
|
-
transformed_data = transform_secret_versions(
|
|
220
|
-
all_versions,
|
|
221
|
-
region,
|
|
222
|
-
current_aws_account_id,
|
|
223
|
-
)
|
|
241
|
+
transformed_data = transform_secret_versions(all_versions)
|
|
224
242
|
|
|
225
243
|
load_secret_versions(
|
|
226
244
|
neo4j_session,
|