cartography-0.101.1rc2-py3-none-any.whl → cartography-0.102.0rc2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cartography/_version.py +2 -2
- cartography/cli.py +38 -0
- cartography/config.py +12 -0
- cartography/data/indexes.cypher +0 -3
- cartography/intel/aws/ec2/launch_templates.py +27 -32
- cartography/intel/aws/ec2/load_balancers.py +126 -148
- cartography/intel/aws/ec2/route_tables.py +287 -0
- cartography/intel/aws/resources.py +2 -0
- cartography/intel/entra/__init__.py +43 -0
- cartography/intel/entra/users.py +205 -0
- cartography/models/aws/ec2/load_balancer_listeners.py +68 -0
- cartography/models/aws/ec2/load_balancers.py +102 -0
- cartography/models/aws/ec2/route_table_associations.py +87 -0
- cartography/models/aws/ec2/route_tables.py +121 -0
- cartography/models/aws/ec2/routes.py +77 -0
- cartography/models/entra/__init__.py +0 -0
- cartography/models/entra/tenant.py +33 -0
- cartography/models/entra/user.py +83 -0
- cartography/sync.py +2 -0
- {cartography-0.101.1rc2.dist-info → cartography-0.102.0rc2.dist-info}/METADATA +4 -1
- {cartography-0.101.1rc2.dist-info → cartography-0.102.0rc2.dist-info}/RECORD +25 -14
- {cartography-0.101.1rc2.dist-info → cartography-0.102.0rc2.dist-info}/WHEEL +0 -0
- {cartography-0.101.1rc2.dist-info → cartography-0.102.0rc2.dist-info}/entry_points.txt +0 -0
- {cartography-0.101.1rc2.dist-info → cartography-0.102.0rc2.dist-info}/licenses/LICENSE +0 -0
- {cartography-0.101.1rc2.dist-info → cartography-0.102.0rc2.dist-info}/top_level.txt +0 -0
cartography/_version.py
CHANGED
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE

-__version__ = version = '0.101.1rc2'
-__version_tuple__ = version_tuple = (0,
+__version__ = version = '0.102.0rc2'
+__version_tuple__ = version_tuple = (0, 102, 0)
cartography/cli.py
CHANGED
@@ -211,6 +211,30 @@ class CLI:
                 'The name of environment variable containing Azure Client Secret for Service Principal Authentication.'
             ),
         )
+        parser.add_argument(
+            '--entra-tenant-id',
+            type=str,
+            default=None,
+            help=(
+                'Entra Tenant Id for Service Principal Authentication.'
+            ),
+        )
+        parser.add_argument(
+            '--entra-client-id',
+            type=str,
+            default=None,
+            help=(
+                'Entra Client Id for Service Principal Authentication.'
+            ),
+        )
+        parser.add_argument(
+            '--entra-client-secret-env-var',
+            type=str,
+            default=None,
+            help=(
+                'The name of environment variable containing Entra Client Secret for Service Principal Authentication.'
+            ),
+        )
         parser.add_argument(
             '--aws-requested-syncs',
             type=str,
@@ -615,6 +639,16 @@ class CLI:
         else:
             config.azure_client_secret = None

+        # Entra config
+        if config.entra_tenant_id and config.entra_client_id and config.entra_client_secret_env_var:
+            logger.debug(
+                "Reading Client Secret for Entra Authentication from environment variable %s",
+                config.entra_client_secret_env_var,
+            )
+            config.entra_client_secret = os.environ.get(config.entra_client_secret_env_var)
+        else:
+            config.entra_client_secret = None
+
         # Okta config
         if config.okta_org_id and config.okta_api_key_env_var:
             logger.debug(f"Reading API key for Okta from environment variable {config.okta_api_key_env_var}")
@@ -798,5 +832,9 @@ def main(argv=None):
     logging.getLogger('botocore').setLevel(logging.WARNING)
     logging.getLogger('googleapiclient').setLevel(logging.WARNING)
     logging.getLogger('neo4j').setLevel(logging.WARNING)
+    logging.getLogger('azure.identity').setLevel(logging.WARNING)
+    logging.getLogger('httpx').setLevel(logging.WARNING)
+    logging.getLogger('azure.core.pipeline.policies.http_logging_policy').setLevel(logging.WARNING)
+
     argv = argv if argv is not None else sys.argv[1:]
     sys.exit(CLI(prog='cartography').main(argv))
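The new --entra-tenant-id, --entra-client-id, and --entra-client-secret-env-var flags mirror the existing Azure Service Principal flags: the client secret itself is never passed on the command line, only the name of an environment variable that holds it. A minimal sketch of driving the CLI entry point with these flags; the Neo4j URI, the IDs, and the ENTRA_CLIENT_SECRET variable name are placeholders, not values taken from this release:

# Illustrative sketch only: exercises the new Entra flags through cartography's
# CLI entry point. All concrete values below are made up.
import os
import sys

from cartography.cli import CLI

os.environ["ENTRA_CLIENT_SECRET"] = "<secret goes here>"  # hypothetical variable name

argv = [
    "--neo4j-uri", "bolt://localhost:7687",
    "--entra-tenant-id", "00000000-0000-0000-0000-000000000000",
    "--entra-client-id", "11111111-1111-1111-1111-111111111111",
    "--entra-client-secret-env-var", "ENTRA_CLIENT_SECRET",
]
sys.exit(CLI(prog="cartography").main(argv))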
cartography/config.py
CHANGED
@@ -41,6 +41,12 @@ class Config:
     :param azure_client_id: Client Id for connecting in a Service Principal Authentication approach. Optional.
     :type azure_client_secret: str
     :param azure_client_secret: Client Secret for connecting in a Service Principal Authentication approach. Optional.
+    :type entra_tenant_id: str
+    :param entra_tenant_id: Tenant Id for connecting in a Service Principal Authentication approach. Optional.
+    :type entra_client_id: str
+    :param entra_client_id: Client Id for connecting in a Service Principal Authentication approach. Optional.
+    :type entra_client_secret: str
+    :param entra_client_secret: Client Secret for connecting in a Service Principal Authentication approach. Optional.
     :type aws_requested_syncs: str
     :param aws_requested_syncs: Comma-separated list of AWS resources to sync. Optional.
     :type analysis_job_directory: str
@@ -133,6 +139,9 @@ class Config:
         azure_tenant_id=None,
         azure_client_id=None,
         azure_client_secret=None,
+        entra_tenant_id=None,
+        entra_client_id=None,
+        entra_client_secret=None,
         aws_requested_syncs=None,
         analysis_job_directory=None,
         oci_sync_all_profiles=None,
@@ -191,6 +200,9 @@ class Config:
         self.azure_tenant_id = azure_tenant_id
         self.azure_client_id = azure_client_id
         self.azure_client_secret = azure_client_secret
+        self.entra_tenant_id = entra_tenant_id
+        self.entra_client_id = entra_client_id
+        self.entra_client_secret = entra_client_secret
         self.aws_requested_syncs = aws_requested_syncs
         self.analysis_job_directory = analysis_job_directory
         self.oci_sync_all_profiles = oci_sync_all_profiles
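For reference, a small sketch of how the three new Config fields line up with the resolution step in cli.py above; the values are placeholders and the secret is read from a hypothetical environment variable rather than hard-coded:

import os

from cartography.config import Config

# Sketch only: Config just stores what it is given. In the CLI path it is
# cli.py that turns --entra-client-secret-env-var into the actual secret.
config = Config(
    neo4j_uri="bolt://localhost:7687",
    entra_tenant_id="00000000-0000-0000-0000-000000000000",   # placeholder
    entra_client_id="11111111-1111-1111-1111-111111111111",   # placeholder
    entra_client_secret=os.environ.get("ENTRA_CLIENT_SECRET"),  # hypothetical env var
)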
cartography/data/indexes.cypher
CHANGED
@@ -191,9 +191,6 @@ CREATE INDEX IF NOT EXISTS FOR (n:KMSGrant) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:LaunchConfiguration) ON (n.id);
 CREATE INDEX IF NOT EXISTS FOR (n:LaunchConfiguration) ON (n.name);
 CREATE INDEX IF NOT EXISTS FOR (n:LaunchConfiguration) ON (n.lastupdated);
-CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancer) ON (n.dnsname);
-CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancer) ON (n.id);
-CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancer) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancerV2) ON (n.dnsname);
 CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancerV2) ON (n.id);
 CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancerV2) ON (n.lastupdated);
cartography/intel/aws/ec2/launch_templates.py
CHANGED
@@ -37,37 +37,18 @@ def get_launch_template_versions(
     boto3_session: boto3.session.Session,
     region: str,
     launch_templates: list[dict[str, Any]],
-) ->
-
-    found_templates: list[dict[str, Any]] = []
+) -> list[dict[str, Any]]:
+    template_versions: list[dict[str, Any]] = []
     for template in launch_templates:
         launch_template_id = template['LaunchTemplateId']
-
-
-
-
-                region,
-            )
-            # If the call succeeded, the template still exists.
-            # Add it and its versions (list might be empty if no versions exist).
-            found_templates.append(template)
-            found_versions.extend(versions)
-        except botocore.exceptions.ClientError as e:
-            if e.response['Error']['Code'] == 'InvalidLaunchTemplateId.NotFound':
-                logger.warning(
-                    "Launch template %s no longer exists in region %s, skipping.",
-                    launch_template_id, region,
-                )
-                # Skip this template, don't add it or its versions
-                continue
-            else:
-                # Re-raise any other client error
-                raise
-
-    return found_versions, found_templates
+        versions = get_launch_template_versions_by_template(boto3_session, launch_template_id, region)
+        template_versions.extend(versions)
+
+    return template_versions


 @timeit
+@aws_handle_regions
 def get_launch_template_versions_by_template(
     boto3_session: boto3.session.Session,
     launch_template_id: str,
@@ -76,15 +57,27 @@ def get_launch_template_versions_by_template(
     client = boto3_session.client('ec2', region_name=region, config=get_botocore_config())
     v_paginator = client.get_paginator('describe_launch_template_versions')
     template_versions = []
-
-
+    try:
+        for versions in v_paginator.paginate(LaunchTemplateId=launch_template_id):
+            template_versions.extend(versions['LaunchTemplateVersions'])
+    except botocore.exceptions.ClientError as e:
+        error_code = e.response['Error']['Code']
+        if error_code == 'InvalidLaunchTemplateId.NotFound':
+            logger.warning("Launch template %s no longer exists in region %s", launch_template_id, region)
+        else:
+            raise
     return template_versions


-def transform_launch_templates(templates: list[dict[str, Any]]) -> list[dict[str, Any]]:
+def transform_launch_templates(templates: list[dict[str, Any]], versions: list[dict[str, Any]]) -> list[dict[str, Any]]:
+    valid_template_ids = {v['LaunchTemplateId'] for v in versions}
     result: list[dict[str, Any]] = []
     for template in templates:
+        if template['LaunchTemplateId'] not in valid_template_ids:
+            continue
+
         current = template.copy()
+        # Convert CreateTime to timestamp string
         current['CreateTime'] = str(int(current['CreateTime'].timestamp()))
         result.append(current)
     return result
@@ -176,11 +169,13 @@ def sync_ec2_launch_templates(
     for region in regions:
         logger.info(f"Syncing launch templates for region '{region}' in account '{current_aws_account_id}'.")
         templates = get_launch_templates(boto3_session, region)
-        versions
+        versions = get_launch_template_versions(boto3_session, region, templates)

-        # Transform and load
-        transformed_templates = transform_launch_templates(
+        # Transform and load the templates that have versions
+        transformed_templates = transform_launch_templates(templates, versions)
         load_launch_templates(neo4j_session, transformed_templates, region, current_aws_account_id, update_tag)
+
+        # Transform and load the versions
         transformed_versions = transform_launch_template_versions(versions)
         load_launch_template_versions(neo4j_session, transformed_versions, region, current_aws_account_id, update_tag)

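The net effect of the launch template changes is that templates whose versions can no longer be fetched (for example, templates deleted mid-sync) are dropped before loading. A small sketch of the new filtering behaviour, using made-up template IDs:

from datetime import datetime, timezone

from cartography.intel.aws.ec2.launch_templates import transform_launch_templates

# Made-up data: two templates were described, but versions could only be
# retrieved for lt-111, so only lt-111 survives the transform.
templates = [
    {"LaunchTemplateId": "lt-111", "CreateTime": datetime(2024, 1, 1, tzinfo=timezone.utc)},
    {"LaunchTemplateId": "lt-222", "CreateTime": datetime(2024, 1, 2, tzinfo=timezone.utc)},
]
versions = [{"LaunchTemplateId": "lt-111", "VersionNumber": 1}]

transformed = transform_launch_templates(templates, versions)
assert [t["LaunchTemplateId"] for t in transformed] == ["lt-111"]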
cartography/intel/aws/ec2/load_balancers.py
CHANGED
@@ -1,190 +1,168 @@
 import logging
-from typing import Dict
-from typing import List

 import boto3
 import neo4j

 from .util import get_botocore_config
+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
+from cartography.models.aws.ec2.load_balancer_listeners import ELBListenerSchema
+from cartography.models.aws.ec2.load_balancers import LoadBalancerSchema
 from cartography.util import aws_handle_regions
-from cartography.util import run_cleanup_job
 from cartography.util import timeit

 logger = logging.getLogger(__name__)


+def _get_listener_id(load_balancer_id: str, port: int, protocol: str) -> str:
+    """
+    Generate a unique ID for a load balancer listener.
+
+    Args:
+        load_balancer_id: The ID of the load balancer
+        port: The listener port
+        protocol: The listener protocol
+
+    Returns:
+        A unique ID string for the listener
+    """
+    return f"{load_balancer_id}{port}{protocol}"
+
+
+def transform_load_balancer_listener_data(load_balancer_id: str, listener_data: list[dict]) -> list[dict]:
+    """
+    Transform load balancer listener data into a format suitable for cartography ingestion.
+
+    Args:
+        load_balancer_id: The ID of the load balancer
+        listener_data: List of listener data from AWS API
+
+    Returns:
+        List of transformed listener data
+    """
+    transformed = []
+    for listener in listener_data:
+        listener_info = listener['Listener']
+        transformed_listener = {
+            'id': _get_listener_id(load_balancer_id, listener_info['LoadBalancerPort'], listener_info['Protocol']),
+            'port': listener_info.get('LoadBalancerPort'),
+            'protocol': listener_info.get('Protocol'),
+            'instance_port': listener_info.get('InstancePort'),
+            'instance_protocol': listener_info.get('InstanceProtocol'),
+            'policy_names': listener.get('PolicyNames', []),
+            'LoadBalancerId': load_balancer_id,
+        }
+        transformed.append(transformed_listener)
+    return transformed
+
+
+def transform_load_balancer_data(load_balancers: list[dict]) -> tuple[list[dict], list[dict]]:
+    """
+    Transform load balancer data into a format suitable for cartography ingestion.
+
+    Args:
+        load_balancers: List of load balancer data from AWS API
+
+    Returns:
+        Tuple of (transformed load balancer data, transformed listener data)
+    """
+    transformed = []
+    listener_data = []
+
+    for lb in load_balancers:
+        load_balancer_id = lb['DNSName']
+        transformed_lb = {
+            'id': load_balancer_id,
+            'name': lb['LoadBalancerName'],
+            'dnsname': lb['DNSName'],
+            'canonicalhostedzonename': lb.get('CanonicalHostedZoneName'),
+            'canonicalhostedzonenameid': lb.get('CanonicalHostedZoneNameID'),
+            'scheme': lb.get('Scheme'),
+            'createdtime': str(lb['CreatedTime']),
+            'GROUP_NAME': lb.get('SourceSecurityGroup', {}).get('GroupName'),
+            'GROUP_IDS': [str(group) for group in lb.get('SecurityGroups', [])],
+            'INSTANCE_IDS': [instance['InstanceId'] for instance in lb.get('Instances', [])],
+            'LISTENER_IDS': [
+                _get_listener_id(
+                    load_balancer_id,
+                    listener['Listener']['LoadBalancerPort'],
+                    listener['Listener']['Protocol'],
+                ) for listener in lb.get('ListenerDescriptions', [])
+            ],
+        }
+        transformed.append(transformed_lb)
+
+        # Classic ELB listeners are not returned anywhere else in AWS, so we must parse them out
+        # of the describe_load_balancers response.
+        if lb.get('ListenerDescriptions'):
+            listener_data.extend(
+                transform_load_balancer_listener_data(
+                    load_balancer_id,
+                    lb.get('ListenerDescriptions', []),
+                ),
+            )
+
+    return transformed, listener_data
+
+
 @timeit
 @aws_handle_regions
-def get_loadbalancer_data(boto3_session: boto3.session.Session, region: str) ->
+def get_loadbalancer_data(boto3_session: boto3.session.Session, region: str) -> list[dict]:
     client = boto3_session.client('elb', region_name=region, config=get_botocore_config())
     paginator = client.get_paginator('describe_load_balancers')
-    elbs:
+    elbs: list[dict] = []
     for page in paginator.paginate():
         elbs.extend(page['LoadBalancerDescriptions'])
     return elbs


 @timeit
-def
-    neo4j_session: neo4j.Session,
+def load_load_balancers(
+    neo4j_session: neo4j.Session, data: list[dict], region: str, current_aws_account_id: str,
     update_tag: int,
 ) -> None:
-
-
-
-
-
-
-
-        l.firstseen = timestamp()
-    SET l.instance_port = data.Listener.InstancePort, l.instance_protocol = data.Listener.InstanceProtocol,
-    l.policy_names = data.PolicyNames,
-    l.lastupdated = $update_tag
-    WITH l, elb
-    MERGE (elb)-[r:ELB_LISTENER]->(l)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    neo4j_session.run(
-        ingest_listener,
-        LoadBalancerId=load_balancer_id,
-        Listeners=listener_data,
-        update_tag=update_tag,
+    load(
+        neo4j_session,
+        LoadBalancerSchema(),
+        data,
+        Region=region,
+        AWS_ID=current_aws_account_id,
+        lastupdated=update_tag,
     )


 @timeit
-def
-    neo4j_session: neo4j.Session,
-    update_tag: int,
-) -> None:
-    ingest_load_balancer_subnet = """
-    MATCH (elb:LoadBalancer{id: $ID}), (subnet:EC2Subnet{subnetid: $SUBNET_ID})
-    MERGE (elb)-[r:SUBNET]->(subnet)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    for subnet_id in subnets_data:
-        neo4j_session.run(
-            ingest_load_balancer_subnet,
-            ID=load_balancer_id,
-            SUBNET_ID=subnet_id,
-            update_tag=update_tag,
-        )
-
-
-@timeit
-def load_load_balancers(
-    neo4j_session: neo4j.Session, data: List[Dict], region: str, current_aws_account_id: str,
+def load_load_balancer_listeners(
+    neo4j_session: neo4j.Session, data: list[dict], region: str, current_aws_account_id: str,
     update_tag: int,
 ) -> None:
-
-
-
-
-
-
-
-
-    MERGE (aa)-[r:RESOURCE]->(elb)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    ingest_load_balancersource_security_group = """
-    MATCH (elb:LoadBalancer{id: $ID}),
-    (group:EC2SecurityGroup{name: $GROUP_NAME})
-    MERGE (elb)-[r:SOURCE_SECURITY_GROUP]->(group)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    ingest_load_balancer_security_group = """
-    MATCH (elb:LoadBalancer{id: $ID}),
-    (group:EC2SecurityGroup{groupid: $GROUP_ID})
-    MERGE (elb)-[r:MEMBER_OF_EC2_SECURITY_GROUP]->(group)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    ingest_instances = """
-    MATCH (elb:LoadBalancer{id: $ID}), (instance:EC2Instance{instanceid: $INSTANCE_ID})
-    MERGE (elb)-[r:EXPOSE]->(instance)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    WITH instance
-    MATCH (aa:AWSAccount{id: $AWS_ACCOUNT_ID})
-    MERGE (aa)-[r:RESOURCE]->(instance)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    for lb in data:
-        load_balancer_id = lb["DNSName"]
-
-        neo4j_session.run(
-            ingest_load_balancer,
-            ID=load_balancer_id,
-            CREATED_TIME=str(lb["CreatedTime"]),
-            NAME=lb["LoadBalancerName"],
-            DNS_NAME=load_balancer_id,
-            HOSTED_ZONE_NAME=lb.get("CanonicalHostedZoneName"),
-            HOSTED_ZONE_NAME_ID=lb.get("CanonicalHostedZoneNameID"),
-            SCHEME=lb.get("Scheme", ""),
-            AWS_ACCOUNT_ID=current_aws_account_id,
-            Region=region,
-            update_tag=update_tag,
-        )
-
-        if lb["Subnets"]:
-            load_load_balancer_subnets(neo4j_session, load_balancer_id, lb["Subnets"], update_tag)
-
-        if lb["SecurityGroups"]:
-            for group in lb["SecurityGroups"]:
-                neo4j_session.run(
-                    ingest_load_balancer_security_group,
-                    ID=load_balancer_id,
-                    GROUP_ID=str(group),
-                    update_tag=update_tag,
-                )
-
-        if lb["SourceSecurityGroup"]:
-            source_group = lb["SourceSecurityGroup"]
-            neo4j_session.run(
-                ingest_load_balancersource_security_group,
-                ID=load_balancer_id,
-                GROUP_NAME=source_group["GroupName"],
-                update_tag=update_tag,
-            )
-
-        if lb["Instances"]:
-            for instance in lb["Instances"]:
-                neo4j_session.run(
-                    ingest_instances,
-                    ID=load_balancer_id,
-                    INSTANCE_ID=instance["InstanceId"],
-                    AWS_ACCOUNT_ID=current_aws_account_id,
-                    update_tag=update_tag,
-                )
-
-        if lb["ListenerDescriptions"]:
-            load_load_balancer_listeners(neo4j_session, load_balancer_id, lb["ListenerDescriptions"], update_tag)
+    load(
+        neo4j_session,
+        ELBListenerSchema(),
+        data,
+        Region=region,
+        AWS_ID=current_aws_account_id,
+        lastupdated=update_tag,
+    )


 @timeit
-def cleanup_load_balancers(neo4j_session: neo4j.Session, common_job_parameters:
-
+def cleanup_load_balancers(neo4j_session: neo4j.Session, common_job_parameters: dict) -> None:
+    GraphJob.from_node_schema(ELBListenerSchema(), common_job_parameters).run(neo4j_session)
+    GraphJob.from_node_schema(LoadBalancerSchema(), common_job_parameters).run(neo4j_session)


 @timeit
 def sync_load_balancers(
-    neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions:
-    update_tag: int, common_job_parameters:
+    neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: list[str], current_aws_account_id: str,
+    update_tag: int, common_job_parameters: dict,
 ) -> None:
     for region in regions:
         logger.info("Syncing EC2 load balancers for region '%s' in account '%s'.", region, current_aws_account_id)
         data = get_loadbalancer_data(boto3_session, region)
-
+        transformed_data, listener_data = transform_load_balancer_data(data)
+
+        load_load_balancers(neo4j_session, transformed_data, region, current_aws_account_id, update_tag)
+        load_load_balancer_listeners(neo4j_session, listener_data, region, current_aws_account_id, update_tag)
+
     cleanup_load_balancers(neo4j_session, common_job_parameters)