cartography 0.101.1rc1__py3-none-any.whl → 0.102.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

cartography/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE

-__version__ = version = '0.101.1rc1'
-__version_tuple__ = version_tuple = (0, 101, 1)
+__version__ = version = '0.102.0rc1'
+__version_tuple__ = version_tuple = (0, 102, 0)
cartography/cli.py CHANGED
@@ -211,6 +211,30 @@ class CLI:
                 'The name of environment variable containing Azure Client Secret for Service Principal Authentication.'
             ),
         )
+        parser.add_argument(
+            '--entra-tenant-id',
+            type=str,
+            default=None,
+            help=(
+                'Entra Tenant Id for Service Principal Authentication.'
+            ),
+        )
+        parser.add_argument(
+            '--entra-client-id',
+            type=str,
+            default=None,
+            help=(
+                'Entra Client Id for Service Principal Authentication.'
+            ),
+        )
+        parser.add_argument(
+            '--entra-client-secret-env-var',
+            type=str,
+            default=None,
+            help=(
+                'The name of environment variable containing Entra Client Secret for Service Principal Authentication.'
+            ),
+        )
         parser.add_argument(
             '--aws-requested-syncs',
             type=str,
@@ -615,6 +639,16 @@ class CLI:
         else:
             config.azure_client_secret = None

+        # Entra config
+        if config.entra_tenant_id and config.entra_client_id and config.entra_client_secret_env_var:
+            logger.debug(
+                "Reading Client Secret for Entra Authentication from environment variable %s",
+                config.entra_client_secret_env_var,
+            )
+            config.entra_client_secret = os.environ.get(config.entra_client_secret_env_var)
+        else:
+            config.entra_client_secret = None
+
         # Okta config
         if config.okta_org_id and config.okta_api_key_env_var:
             logger.debug(f"Reading API key for Okta from environment variable {config.okta_api_key_env_var}")
@@ -798,5 +832,9 @@ def main(argv=None):
     logging.getLogger('botocore').setLevel(logging.WARNING)
     logging.getLogger('googleapiclient').setLevel(logging.WARNING)
     logging.getLogger('neo4j').setLevel(logging.WARNING)
+    logging.getLogger('azure.identity').setLevel(logging.WARNING)
+    logging.getLogger('httpx').setLevel(logging.WARNING)
+    logging.getLogger('azure.core.pipeline.policies.http_logging_policy').setLevel(logging.WARNING)
+
     argv = argv if argv is not None else sys.argv[1:]
     sys.exit(CLI(prog='cartography').main(argv))
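The three new flags only take effect together: cli.py resolves the client secret from the environment variable named by --entra-client-secret-env-var, and only when the tenant id, client id, and that variable name are all supplied. A minimal sketch of a programmatic invocation, assuming default Neo4j settings and a secret exported by you; the IDs and the ENTRA_CLIENT_SECRET variable name are placeholders, not part of this release:

import os

from cartography.cli import CLI

# Placeholder secret; in practice export this outside the process.
os.environ['ENTRA_CLIENT_SECRET'] = 'example-secret'

CLI(prog='cartography').main([
    '--entra-tenant-id', '11111111-2222-3333-4444-555555555555',    # hypothetical tenant
    '--entra-client-id', '66666666-7777-8888-9999-000000000000',    # hypothetical app registration
    '--entra-client-secret-env-var', 'ENTRA_CLIENT_SECRET',
])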
cartography/config.py CHANGED
@@ -41,6 +41,12 @@ class Config:
     :param azure_client_id: Client Id for connecting in a Service Principal Authentication approach. Optional.
     :type azure_client_secret: str
     :param azure_client_secret: Client Secret for connecting in a Service Principal Authentication approach. Optional.
+    :type entra_tenant_id: str
+    :param entra_tenant_id: Tenant Id for connecting in a Service Principal Authentication approach. Optional.
+    :type entra_client_id: str
+    :param entra_client_id: Client Id for connecting in a Service Principal Authentication approach. Optional.
+    :type entra_client_secret: str
+    :param entra_client_secret: Client Secret for connecting in a Service Principal Authentication approach. Optional.
     :type aws_requested_syncs: str
     :param aws_requested_syncs: Comma-separated list of AWS resources to sync. Optional.
     :type analysis_job_directory: str
@@ -133,6 +139,9 @@ class Config:
         azure_tenant_id=None,
         azure_client_id=None,
         azure_client_secret=None,
+        entra_tenant_id=None,
+        entra_client_id=None,
+        entra_client_secret=None,
         aws_requested_syncs=None,
         analysis_job_directory=None,
         oci_sync_all_profiles=None,
@@ -191,6 +200,9 @@ class Config:
         self.azure_tenant_id = azure_tenant_id
         self.azure_client_id = azure_client_id
         self.azure_client_secret = azure_client_secret
+        self.entra_tenant_id = entra_tenant_id
+        self.entra_client_id = entra_client_id
+        self.entra_client_secret = entra_client_secret
         self.aws_requested_syncs = aws_requested_syncs
         self.analysis_job_directory = analysis_job_directory
         self.oci_sync_all_profiles = oci_sync_all_profiles
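For library-style use, the same three values can be passed straight to Config, leaving the remaining keyword arguments at their defaults. A minimal sketch, assuming the secret has already been resolved (the CLI path resolves it from an environment variable, as shown above); the URI and credential strings are placeholders:

from cartography.config import Config

config = Config(
    neo4j_uri='bolt://localhost:7687',   # placeholder connection string
    entra_tenant_id='your-tenant-id',
    entra_client_id='your-client-id',
    entra_client_secret='your-client-secret',
)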
@@ -65,9 +65,6 @@ CREATE INDEX IF NOT EXISTS FOR (n:AccountAccessKey) ON (n.accesskeyid);
 CREATE INDEX IF NOT EXISTS FOR (n:AccountAccessKey) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:AutoScalingGroup) ON (n.arn);
 CREATE INDEX IF NOT EXISTS FOR (n:AutoScalingGroup) ON (n.lastupdated);
-CREATE INDEX IF NOT EXISTS FOR (n:CrowdstrikeHost) ON (n.id);
-CREATE INDEX IF NOT EXISTS FOR (n:CrowdstrikeHost) ON (n.instance_id);
-CREATE INDEX IF NOT EXISTS FOR (n:CrowdstrikeHost) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:CVE) ON (n.id);
 CREATE INDEX IF NOT EXISTS FOR (n:CVE) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:Dependency) ON (n.id);
@@ -194,9 +191,6 @@ CREATE INDEX IF NOT EXISTS FOR (n:KMSGrant) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:LaunchConfiguration) ON (n.id);
 CREATE INDEX IF NOT EXISTS FOR (n:LaunchConfiguration) ON (n.name);
 CREATE INDEX IF NOT EXISTS FOR (n:LaunchConfiguration) ON (n.lastupdated);
-CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancer) ON (n.dnsname);
-CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancer) ON (n.id);
-CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancer) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancerV2) ON (n.dnsname);
 CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancerV2) ON (n.id);
 CREATE INDEX IF NOT EXISTS FOR (n:LoadBalancerV2) ON (n.lastupdated);
@@ -5,11 +5,6 @@
             "iterative": true,
             "iterationsize": 100
         },
-        {
-            "query": "MATCH (h:CrowdstrikeHost) WHERE h.lastupdated <> $UPDATE_TAG WITH h LIMIT $LIMIT_SIZE DETACH DELETE (h)",
-            "iterative": true,
-            "iterationsize": 100
-        },
         {
             "query": "MATCH (:CrowdstrikeFinding)<-[hc:HAS_CVE]-(:SpotlightVulnerability) WHERE hc.lastupdated <> $UPDATE_TAG WITH hc LIMIT $LIMIT_SIZE DELETE (hc)",
             "iterative": true,
@@ -69,10 +69,15 @@ def get_launch_template_versions_by_template(
     return template_versions


-def transform_launch_templates(templates: list[dict[str, Any]]) -> list[dict[str, Any]]:
+def transform_launch_templates(templates: list[dict[str, Any]], versions: list[dict[str, Any]]) -> list[dict[str, Any]]:
+    valid_template_ids = {v['LaunchTemplateId'] for v in versions}
     result: list[dict[str, Any]] = []
     for template in templates:
+        if template['LaunchTemplateId'] not in valid_template_ids:
+            continue
+
         current = template.copy()
+        # Convert CreateTime to timestamp string
         current['CreateTime'] = str(int(current['CreateTime'].timestamp()))
         result.append(current)
     return result
@@ -165,9 +170,13 @@ def sync_ec2_launch_templates(
         logger.info(f"Syncing launch templates for region '{region}' in account '{current_aws_account_id}'.")
         templates = get_launch_templates(boto3_session, region)
         versions = get_launch_template_versions(boto3_session, region, templates)
-        templates = transform_launch_templates(templates)
-        load_launch_templates(neo4j_session, templates, region, current_aws_account_id, update_tag)
-        versions = transform_launch_template_versions(versions)
-        load_launch_template_versions(neo4j_session, versions, region, current_aws_account_id, update_tag)
+
+        # Transform and load the templates that have versions
+        transformed_templates = transform_launch_templates(templates, versions)
+        load_launch_templates(neo4j_session, transformed_templates, region, current_aws_account_id, update_tag)
+
+        # Transform and load the versions
+        transformed_versions = transform_launch_template_versions(versions)
+        load_launch_template_versions(neo4j_session, transformed_versions, region, current_aws_account_id, update_tag)

     cleanup(neo4j_session, common_job_parameters)
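The reworked transform drops any template that has no corresponding version record before anything is loaded, so LaunchTemplate nodes are only created when at least one version was fetched. A small illustration of that filtering with invented IDs (only the keys read by the code above are shown):

from datetime import datetime, timezone

templates = [
    {'LaunchTemplateId': 'lt-111', 'CreateTime': datetime(2024, 1, 1, tzinfo=timezone.utc)},
    {'LaunchTemplateId': 'lt-222', 'CreateTime': datetime(2024, 1, 2, tzinfo=timezone.utc)},
]
versions = [{'LaunchTemplateId': 'lt-111', 'VersionNumber': 1}]

transformed = transform_launch_templates(templates, versions)
# Only lt-111 survives, and its CreateTime becomes an epoch-seconds string:
# [{'LaunchTemplateId': 'lt-111', 'CreateTime': '1704067200'}]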
@@ -1,190 +1,168 @@
 import logging
-from typing import Dict
-from typing import List

 import boto3
 import neo4j

 from .util import get_botocore_config
+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
+from cartography.models.aws.ec2.load_balancer_listeners import ELBListenerSchema
+from cartography.models.aws.ec2.load_balancers import LoadBalancerSchema
 from cartography.util import aws_handle_regions
-from cartography.util import run_cleanup_job
 from cartography.util import timeit

 logger = logging.getLogger(__name__)


+def _get_listener_id(load_balancer_id: str, port: int, protocol: str) -> str:
+    """
+    Generate a unique ID for a load balancer listener.
+
+    Args:
+        load_balancer_id: The ID of the load balancer
+        port: The listener port
+        protocol: The listener protocol
+
+    Returns:
+        A unique ID string for the listener
+    """
+    return f"{load_balancer_id}{port}{protocol}"
+
+
+def transform_load_balancer_listener_data(load_balancer_id: str, listener_data: list[dict]) -> list[dict]:
+    """
+    Transform load balancer listener data into a format suitable for cartography ingestion.
+
+    Args:
+        load_balancer_id: The ID of the load balancer
+        listener_data: List of listener data from AWS API
+
+    Returns:
+        List of transformed listener data
+    """
+    transformed = []
+    for listener in listener_data:
+        listener_info = listener['Listener']
+        transformed_listener = {
+            'id': _get_listener_id(load_balancer_id, listener_info['LoadBalancerPort'], listener_info['Protocol']),
+            'port': listener_info.get('LoadBalancerPort'),
+            'protocol': listener_info.get('Protocol'),
+            'instance_port': listener_info.get('InstancePort'),
+            'instance_protocol': listener_info.get('InstanceProtocol'),
+            'policy_names': listener.get('PolicyNames', []),
+            'LoadBalancerId': load_balancer_id,
+        }
+        transformed.append(transformed_listener)
+    return transformed
+
+
+def transform_load_balancer_data(load_balancers: list[dict]) -> tuple[list[dict], list[dict]]:
+    """
+    Transform load balancer data into a format suitable for cartography ingestion.
+
+    Args:
+        load_balancers: List of load balancer data from AWS API
+
+    Returns:
+        Tuple of (transformed load balancer data, transformed listener data)
+    """
+    transformed = []
+    listener_data = []
+
+    for lb in load_balancers:
+        load_balancer_id = lb['DNSName']
+        transformed_lb = {
+            'id': load_balancer_id,
+            'name': lb['LoadBalancerName'],
+            'dnsname': lb['DNSName'],
+            'canonicalhostedzonename': lb.get('CanonicalHostedZoneName'),
+            'canonicalhostedzonenameid': lb.get('CanonicalHostedZoneNameID'),
+            'scheme': lb.get('Scheme'),
+            'createdtime': str(lb['CreatedTime']),
+            'GROUP_NAME': lb.get('SourceSecurityGroup', {}).get('GroupName'),
+            'GROUP_IDS': [str(group) for group in lb.get('SecurityGroups', [])],
+            'INSTANCE_IDS': [instance['InstanceId'] for instance in lb.get('Instances', [])],
+            'LISTENER_IDS': [
+                _get_listener_id(
+                    load_balancer_id,
+                    listener['Listener']['LoadBalancerPort'],
+                    listener['Listener']['Protocol'],
+                ) for listener in lb.get('ListenerDescriptions', [])
+            ],
+        }
+        transformed.append(transformed_lb)
+
+        # Classic ELB listeners are not returned anywhere else in AWS, so we must parse them out
+        # of the describe_load_balancers response.
+        if lb.get('ListenerDescriptions'):
+            listener_data.extend(
+                transform_load_balancer_listener_data(
+                    load_balancer_id,
+                    lb.get('ListenerDescriptions', []),
+                ),
+            )
+
+    return transformed, listener_data
+
+
 @timeit
 @aws_handle_regions
-def get_loadbalancer_data(boto3_session: boto3.session.Session, region: str) -> List[Dict]:
+def get_loadbalancer_data(boto3_session: boto3.session.Session, region: str) -> list[dict]:
     client = boto3_session.client('elb', region_name=region, config=get_botocore_config())
     paginator = client.get_paginator('describe_load_balancers')
-    elbs: List[Dict] = []
+    elbs: list[dict] = []
     for page in paginator.paginate():
         elbs.extend(page['LoadBalancerDescriptions'])
     return elbs


 @timeit
-def load_load_balancer_listeners(
-    neo4j_session: neo4j.Session, load_balancer_id: str, listener_data: List[Dict],
+def load_load_balancers(
+    neo4j_session: neo4j.Session, data: list[dict], region: str, current_aws_account_id: str,
     update_tag: int,
 ) -> None:
-    ingest_listener = """
-    MATCH (elb:LoadBalancer{id: $LoadBalancerId})
-    WITH elb
-    UNWIND $Listeners as data
-    MERGE (l:Endpoint:ELBListener{id: elb.id + toString(data.Listener.LoadBalancerPort) +
-    toString(data.Listener.Protocol)})
-    ON CREATE SET l.port = data.Listener.LoadBalancerPort, l.protocol = data.Listener.Protocol,
-    l.firstseen = timestamp()
-    SET l.instance_port = data.Listener.InstancePort, l.instance_protocol = data.Listener.InstanceProtocol,
-    l.policy_names = data.PolicyNames,
-    l.lastupdated = $update_tag
-    WITH l, elb
-    MERGE (elb)-[r:ELB_LISTENER]->(l)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    neo4j_session.run(
-        ingest_listener,
-        LoadBalancerId=load_balancer_id,
-        Listeners=listener_data,
-        update_tag=update_tag,
+    load(
+        neo4j_session,
+        LoadBalancerSchema(),
+        data,
+        Region=region,
+        AWS_ID=current_aws_account_id,
+        lastupdated=update_tag,
     )


 @timeit
-def load_load_balancer_subnets(
-    neo4j_session: neo4j.Session, load_balancer_id: str, subnets_data: List[Dict],
-    update_tag: int,
-) -> None:
-    ingest_load_balancer_subnet = """
-    MATCH (elb:LoadBalancer{id: $ID}), (subnet:EC2Subnet{subnetid: $SUBNET_ID})
-    MERGE (elb)-[r:SUBNET]->(subnet)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    for subnet_id in subnets_data:
-        neo4j_session.run(
-            ingest_load_balancer_subnet,
-            ID=load_balancer_id,
-            SUBNET_ID=subnet_id,
-            update_tag=update_tag,
-        )
-
-
-@timeit
-def load_load_balancers(
-    neo4j_session: neo4j.Session, data: List[Dict], region: str, current_aws_account_id: str,
+def load_load_balancer_listeners(
+    neo4j_session: neo4j.Session, data: list[dict], region: str, current_aws_account_id: str,
     update_tag: int,
 ) -> None:
-    ingest_load_balancer = """
-    MERGE (elb:LoadBalancer{id: $ID})
-    ON CREATE SET elb.firstseen = timestamp(), elb.createdtime = $CREATED_TIME
-    SET elb.lastupdated = $update_tag, elb.name = $NAME, elb.dnsname = $DNS_NAME,
-    elb.canonicalhostedzonename = $HOSTED_ZONE_NAME, elb.canonicalhostedzonenameid = $HOSTED_ZONE_NAME_ID,
-    elb.scheme = $SCHEME, elb.region = $Region
-    WITH elb
-    MATCH (aa:AWSAccount{id: $AWS_ACCOUNT_ID})
-    MERGE (aa)-[r:RESOURCE]->(elb)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    ingest_load_balancersource_security_group = """
-    MATCH (elb:LoadBalancer{id: $ID}),
-    (group:EC2SecurityGroup{name: $GROUP_NAME})
-    MERGE (elb)-[r:SOURCE_SECURITY_GROUP]->(group)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    ingest_load_balancer_security_group = """
-    MATCH (elb:LoadBalancer{id: $ID}),
-    (group:EC2SecurityGroup{groupid: $GROUP_ID})
-    MERGE (elb)-[r:MEMBER_OF_EC2_SECURITY_GROUP]->(group)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    ingest_instances = """
-    MATCH (elb:LoadBalancer{id: $ID}), (instance:EC2Instance{instanceid: $INSTANCE_ID})
-    MERGE (elb)-[r:EXPOSE]->(instance)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    WITH instance
-    MATCH (aa:AWSAccount{id: $AWS_ACCOUNT_ID})
-    MERGE (aa)-[r:RESOURCE]->(instance)
-    ON CREATE SET r.firstseen = timestamp()
-    SET r.lastupdated = $update_tag
-    """
-
-    for lb in data:
-        load_balancer_id = lb["DNSName"]
-
-        neo4j_session.run(
-            ingest_load_balancer,
-            ID=load_balancer_id,
-            CREATED_TIME=str(lb["CreatedTime"]),
-            NAME=lb["LoadBalancerName"],
-            DNS_NAME=load_balancer_id,
-            HOSTED_ZONE_NAME=lb.get("CanonicalHostedZoneName"),
-            HOSTED_ZONE_NAME_ID=lb.get("CanonicalHostedZoneNameID"),
-            SCHEME=lb.get("Scheme", ""),
-            AWS_ACCOUNT_ID=current_aws_account_id,
-            Region=region,
-            update_tag=update_tag,
-        )
-
-        if lb["Subnets"]:
-            load_load_balancer_subnets(neo4j_session, load_balancer_id, lb["Subnets"], update_tag)
-
-        if lb["SecurityGroups"]:
-            for group in lb["SecurityGroups"]:
-                neo4j_session.run(
-                    ingest_load_balancer_security_group,
-                    ID=load_balancer_id,
-                    GROUP_ID=str(group),
-                    update_tag=update_tag,
-                )
-
-        if lb["SourceSecurityGroup"]:
-            source_group = lb["SourceSecurityGroup"]
-            neo4j_session.run(
-                ingest_load_balancersource_security_group,
-                ID=load_balancer_id,
-                GROUP_NAME=source_group["GroupName"],
-                update_tag=update_tag,
-            )
-
-        if lb["Instances"]:
-            for instance in lb["Instances"]:
-                neo4j_session.run(
-                    ingest_instances,
-                    ID=load_balancer_id,
-                    INSTANCE_ID=instance["InstanceId"],
-                    AWS_ACCOUNT_ID=current_aws_account_id,
-                    update_tag=update_tag,
-                )
-
-        if lb["ListenerDescriptions"]:
-            load_load_balancer_listeners(neo4j_session, load_balancer_id, lb["ListenerDescriptions"], update_tag)
+    load(
+        neo4j_session,
+        ELBListenerSchema(),
+        data,
+        Region=region,
+        AWS_ID=current_aws_account_id,
+        lastupdated=update_tag,
+    )


 @timeit
-def cleanup_load_balancers(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
-    run_cleanup_job('aws_ingest_load_balancers_cleanup.json', neo4j_session, common_job_parameters)
+def cleanup_load_balancers(neo4j_session: neo4j.Session, common_job_parameters: dict) -> None:
+    GraphJob.from_node_schema(ELBListenerSchema(), common_job_parameters).run(neo4j_session)
+    GraphJob.from_node_schema(LoadBalancerSchema(), common_job_parameters).run(neo4j_session)


 @timeit
 def sync_load_balancers(
-    neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: List[str], current_aws_account_id: str,
-    update_tag: int, common_job_parameters: Dict,
+    neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: list[str], current_aws_account_id: str,
+    update_tag: int, common_job_parameters: dict,
 ) -> None:
     for region in regions:
         logger.info("Syncing EC2 load balancers for region '%s' in account '%s'.", region, current_aws_account_id)
         data = get_loadbalancer_data(boto3_session, region)
-        load_load_balancers(neo4j_session, data, region, current_aws_account_id, update_tag)
+        transformed_data, listener_data = transform_load_balancer_data(data)
+
+        load_load_balancers(neo4j_session, transformed_data, region, current_aws_account_id, update_tag)
+        load_load_balancer_listeners(neo4j_session, listener_data, region, current_aws_account_id, update_tag)
+
     cleanup_load_balancers(neo4j_session, common_job_parameters)
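The hand-written Cypher that previously created the LoadBalancer node and its relationships is replaced by a pure dict-shaping transform plus two schema-driven load() calls. A rough sketch of what the transform emits for a single classic ELB description; the values are invented and only the keys read by the code above are included:

from datetime import datetime, timezone

elb_description = {
    'DNSName': 'test-lb.us-east-1.elb.amazonaws.com',
    'LoadBalancerName': 'test-lb',
    'CreatedTime': datetime(2024, 1, 1, tzinfo=timezone.utc),
    'Scheme': 'internet-facing',
    'SecurityGroups': ['sg-0123456789abcdef0'],
    'SourceSecurityGroup': {'GroupName': 'default'},
    'Instances': [{'InstanceId': 'i-0123456789abcdef0'}],
    'ListenerDescriptions': [
        {'Listener': {'LoadBalancerPort': 443, 'Protocol': 'HTTPS', 'InstancePort': 8443, 'InstanceProtocol': 'HTTPS'}},
    ],
}

lbs, listeners = transform_load_balancer_data([elb_description])
# lbs[0]['id'] == 'test-lb.us-east-1.elb.amazonaws.com' (classic ELBs keep their DNS name as the node id)
# listeners[0]['id'] == 'test-lb.us-east-1.elb.amazonaws.com443HTTPS'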
@@ -1,11 +1,14 @@
 import logging
+from typing import Any

 import neo4j

 from cartography.config import Config
+from cartography.graph.job import GraphJob
 from cartography.intel.crowdstrike.endpoints import sync_hosts
 from cartography.intel.crowdstrike.spotlight import sync_vulnerabilities
 from cartography.intel.crowdstrike.util import get_authorization
+from cartography.models.crowdstrike.hosts import CrowdstrikeHostSchema
 from cartography.stats import get_stats_client
 from cartography.util import merge_module_sync_metadata
 from cartography.util import run_cleanup_job
@@ -50,11 +53,7 @@ def start_crowdstrike_ingestion(
         config.update_tag,
         authorization,
     )
-    run_cleanup_job(
-        "crowdstrike_import_cleanup.json",
-        neo4j_session,
-        common_job_parameters,
-    )
+    cleanup(neo4j_session, common_job_parameters)

     group_id = "public"
     if config.crowdstrike_api_url:
@@ -67,3 +66,16 @@ def start_crowdstrike_ingestion(
         update_tag=config.update_tag,
         stat_handler=stat_handler,
     )
+
+
+@timeit
+def cleanup(neo4j_session: neo4j.Session, common_job_parameters: dict[str, Any]) -> None:
+    logger.info("Running Crowdstrike cleanup")
+    GraphJob.from_node_schema(CrowdstrikeHostSchema(), common_job_parameters).run(neo4j_session)
+
+    # Cleanup other crowdstrike assets not handled by the data model
+    run_cleanup_job(
+        "crowdstrike_import_cleanup.json",
+        neo4j_session,
+        common_job_parameters,
+    )
@@ -6,6 +6,8 @@ import neo4j
 from falconpy.hosts import Hosts
 from falconpy.oauth2 import OAuth2

+from cartography.client.core.tx import load
+from cartography.models.crowdstrike.hosts import CrowdstrikeHostSchema
 from cartography.util import timeit

 logger = logging.getLogger(__name__)
@@ -24,55 +26,21 @@ def sync_hosts(
     load_host_data(neo4j_session, host_data, update_tag)


+@timeit
 def load_host_data(
-    neo4j_session: neo4j.Session, data: List[Dict], update_tag: int,
+    neo4j_session: neo4j.Session,
+    data: List[Dict],
+    update_tag: int,
 ) -> None:
     """
-    Transform and load scan information
-    """
-    ingestion_cypher_query = """
-    UNWIND $Hosts AS host
-    MERGE (h:CrowdstrikeHost{id: host.device_id})
-    ON CREATE SET h.cid = host.cid,
-        h.instance_id = host.instance_id,
-        h.serial_number = host.serial_number,
-        h.firstseen = timestamp()
-    SET h.status = host.status,
-        h.hostname = host.hostname,
-        h.machine_domain = host.machine_domain,
-        h.crowdstrike_first_seen = host.first_seen,
-        h.crowdstrike_last_seen = host.last_seen,
-        h.local_ip = host.local_ip,
-        h.external_ip = host.external_ip,
-        h.cpu_signature = host.cpu_signature,
-        h.bios_manufacturer = host.bios_manufacturer,
-        h.bios_version = host.bios_version,
-        h.mac_address = host.mac_address,
-        h.os_version = host.os_version,
-        h.os_build = host.os_build,
-        h.platform_id = host.platform_id,
-        h.platform_name = host.platform_name,
-        h.service_provider = host.service_provider,
-        h.service_provider_account_id = host.service_provider_account_id,
-        h.agent_version = host.agent_version,
-        h.system_manufacturer = host.system_manufacturer,
-        h.system_product_name = host.system_product_name,
-        h.product_type = host.product_type,
-        h.product_type_desc = host.product_type_desc,
-        h.provision_status = host.provision_status,
-        h.reduced_functionality_mode = host.reduced_functionality_mode,
-        h.kernel_version = host.kernel_version,
-        h.major_version = host.major_version,
-        h.minor_version = host.minor_version,
-        h.tags = host.tags,
-        h.modified_timestamp = host.modified_timestamp,
-        h.lastupdated = $update_tag
+    Load Crowdstrike host data into Neo4j.
     """
     logger.info(f"Loading {len(data)} crowdstrike hosts.")
-    neo4j_session.run(
-        ingestion_cypher_query,
-        Hosts=data,
-        update_tag=update_tag,
+    load(
+        neo4j_session,
+        CrowdstrikeHostSchema(),
+        data,
+        lastupdated=update_tag,
     )

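load_host_data now hands property mapping to CrowdstrikeHostSchema rather than a hand-written MERGE; the dicts it receives are still the raw device records from the Falcon Hosts API. A hedged usage sketch with invented values and only a few of the fields named in the old Cypher above; the authoritative property list lives in cartography.models.crowdstrike.hosts, which is not part of this diff:

host_data = [{
    'device_id': 'abc123',       # was mapped to the CrowdstrikeHost id by the old query
    'cid': 'customer-1',
    'hostname': 'web-01',
    'status': 'normal',
    'platform_name': 'Linux',
}]
load_host_data(neo4j_session, host_data, update_tag)   # assumes an open neo4j_session and an int update_tag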
@@ -0,0 +1,43 @@
+import asyncio
+import logging
+
+import neo4j
+
+from cartography.config import Config
+from cartography.intel.entra.users import sync_entra_users
+from cartography.util import timeit
+
+logger = logging.getLogger(__name__)
+
+
+@timeit
+def start_entra_ingestion(neo4j_session: neo4j.Session, config: Config) -> None:
+    """
+    If this module is configured, perform ingestion of Entra data. Otherwise warn and exit
+    :param neo4j_session: Neo4J session for database interface
+    :param config: A cartography.config object
+    :return: None
+    """
+
+    if not config.entra_tenant_id or not config.entra_client_id or not config.entra_client_secret:
+        logger.info(
+            'Entra import is not configured - skipping this module. '
+            'See docs to configure.',
+        )
+        return
+
+    common_job_parameters = {
+        "UPDATE_TAG": config.update_tag,
+        "TENANT_ID": config.entra_tenant_id,
+    }
+
+    asyncio.run(
+        sync_entra_users(
+            neo4j_session,
+            config.entra_tenant_id,
+            config.entra_client_id,
+            config.entra_client_secret,
+            config.update_tag,
+            common_job_parameters,
+        ),
+    )
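This new entry point is a no-op unless all three Entra settings are populated on the Config object; when they are, it drives the async user sync via asyncio.run. A minimal sketch of calling it directly, assuming this new file is importable as cartography.intel.entra and using placeholder credentials and connection details:

from neo4j import GraphDatabase

from cartography.config import Config
from cartography.intel.entra import start_entra_ingestion

config = Config(
    neo4j_uri='bolt://localhost:7687',
    update_tag=1700000000,               # placeholder sync tag
    entra_tenant_id='your-tenant-id',
    entra_client_id='your-client-id',
    entra_client_secret='your-client-secret',
)

driver = GraphDatabase.driver('bolt://localhost:7687', auth=('neo4j', 'password'))
with driver.session() as session:
    start_entra_ingestion(session, config)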