cartography-0.94.0rc2-py3-none-any.whl → cartography-0.95.0rc1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This release of cartography has been flagged as potentially problematic.
- cartography/cli.py +42 -24
- cartography/config.py +12 -8
- cartography/data/indexes.cypher +0 -2
- cartography/data/jobs/scoped_analysis/semgrep_sca_risk_analysis.json +13 -13
- cartography/driftdetect/cli.py +1 -1
- cartography/graph/job.py +8 -1
- cartography/intel/aws/permission_relationships.py +6 -2
- cartography/intel/gcp/__init__.py +110 -23
- cartography/intel/kandji/__init__.py +1 -1
- cartography/intel/semgrep/findings.py +106 -59
- cartography/intel/snipeit/__init__.py +30 -0
- cartography/intel/snipeit/asset.py +74 -0
- cartography/intel/snipeit/user.py +75 -0
- cartography/intel/snipeit/util.py +35 -0
- cartography/models/semgrep/findings.py +3 -1
- cartography/models/snipeit/__init__.py +0 -0
- cartography/models/snipeit/asset.py +81 -0
- cartography/models/snipeit/tenant.py +17 -0
- cartography/models/snipeit/user.py +49 -0
- cartography/sync.py +2 -2
- {cartography-0.94.0rc2.dist-info → cartography-0.95.0rc1.dist-info}/LICENSE +1 -1
- {cartography-0.94.0rc2.dist-info → cartography-0.95.0rc1.dist-info}/METADATA +3 -5
- {cartography-0.94.0rc2.dist-info → cartography-0.95.0rc1.dist-info}/RECORD +26 -22
- {cartography-0.94.0rc2.dist-info → cartography-0.95.0rc1.dist-info}/WHEEL +1 -1
- cartography/data/jobs/cleanup/crxcavator_import_cleanup.json +0 -18
- cartography/intel/crxcavator/__init__.py +0 -44
- cartography/intel/crxcavator/crxcavator.py +0 -329
- cartography-0.94.0rc2.dist-info/NOTICE +0 -4
- {cartography-0.94.0rc2.dist-info → cartography-0.95.0rc1.dist-info}/entry_points.txt +0 -0
- {cartography-0.94.0rc2.dist-info → cartography-0.95.0rc1.dist-info}/top_level.txt +0 -0
cartography/cli.py
CHANGED
@@ -220,23 +220,6 @@ class CLI:
             ' If not specified, cartography by default will run all AWS sync modules available.'
         ),
     )
-    parser.add_argument(
-        '--crxcavator-api-base-uri',
-        type=str,
-        default='https://api.crxcavator.io/v1',
-        help=(
-            'Base URI for the CRXcavator API. Defaults to public API endpoint.'
-        ),
-    )
-    parser.add_argument(
-        '--crxcavator-api-key-env-var',
-        type=str,
-        default=None,
-        help=(
-            'The name of an environment variable containing a key with which to auth to the CRXcavator API. '
-            'Required if you are using the CRXcavator intel module. Ignored otherwise.'
-        ),
-    )
     parser.add_argument(
         '--analysis-job-directory',
         type=str,
@@ -541,6 +524,28 @@ class CLI:
             'Required if you are using the Semgrep intel module. Ignored otherwise.'
         ),
     )
+    parser.add_argument(
+        '--snipeit-base-uri',
+        type=str,
+        default=None,
+        help=(
+            'Your SnipeIT base URI'
+            'Required if you are using the SnipeIT intel module. Ignored otherwise.'
+        ),
+    )
+    parser.add_argument(
+        '--snipeit-token-env-var',
+        type=str,
+        default=None,
+        help='The name of an environment variable containing token with which to authenticate to SnipeIT.',
+    )
+    parser.add_argument(
+        '--snipeit-tenant-id',
+        type=str,
+        default=None,
+        help='An ID for the SnipeIT tenant.',
+    )
+
     return parser

 def main(self, argv: str) -> int:
@@ -604,13 +609,6 @@ class CLI:
         else:
             config.okta_api_key = None

-        # CRXcavator config
-        if config.crxcavator_api_base_uri and config.crxcavator_api_key_env_var:
-            logger.debug(f"Reading API key for CRXcavator from env variable {config.crxcavator_api_key_env_var}.")
-            config.crxcavator_api_key = os.environ.get(config.crxcavator_api_key_env_var)
-        else:
-            config.crxcavator_api_key = None
-
         # GitHub config
         if config.github_config_env_var:
             logger.debug(f"Reading config string for GitHub from environment variable {config.github_config_env_var}")
@@ -744,6 +742,26 @@ class CLI:
         else:
             config.cve_api_key = None

+        # SnipeIT config
+        if config.snipeit_base_uri:
+            if config.snipeit_token_env_var:
+                logger.debug(
+                    "Reading SnipeIT API token from environment variable '%s'.",
+                    config.snipeit_token_env_var,
+                )
+                config.snipeit_token = os.environ.get(config.snipeit_token_env_var)
+            elif os.environ.get('SNIPEIT_TOKEN'):
+                logger.debug(
+                    "Reading SnipeIT API token from environment variable 'SNIPEIT_TOKEN'.",
+                )
+                config.snipeit_token = os.environ.get('SNIPEIT_TOKEN')
+            else:
+                logger.warning("A SnipeIT base URI was provided but a token was not.")
+                config.kandji_token = None
+        else:
+            logger.warning("A SnipeIT base URI was not provided.")
+            config.snipeit_base_uri = None
+
         # Run cartography
         try:
             return cartography.sync.run_with_config(self.sync, config)
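The new SnipeIT block resolves the API token in priority order: an explicitly named environment variable first, then a SNIPEIT_TOKEN fallback. Note that the no-token branch assigns config.kandji_token = None, which reads like a line carried over from the Kandji block. A self-contained sketch of the resolution order as written (the helper name is illustrative, not part of cartography):

import logging
import os
from typing import Optional

logger = logging.getLogger(__name__)


def resolve_snipeit_token(token_env_var: Optional[str]) -> Optional[str]:
    # Mirrors the CLI logic above: an explicitly named env var wins,
    # then the SNIPEIT_TOKEN fallback; otherwise warn and return None.
    if token_env_var:
        logger.debug("Reading SnipeIT API token from environment variable '%s'.", token_env_var)
        return os.environ.get(token_env_var)
    if os.environ.get('SNIPEIT_TOKEN'):
        logger.debug("Reading SnipeIT API token from environment variable 'SNIPEIT_TOKEN'.")
        return os.environ.get('SNIPEIT_TOKEN')
    logger.warning("A SnipeIT base URI was provided but a token was not.")
    return None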
cartography/config.py
CHANGED
@@ -43,10 +43,6 @@ class Config:
     :param azure_client_secret: Client Secret for connecting in a Service Principal Authentication approach. Optional.
     :type aws_requested_syncs: str
     :param aws_requested_syncs: Comma-separated list of AWS resources to sync. Optional.
-    :type crxcavator_api_base_uri: str
-    :param crxcavator_api_base_uri: URI for CRXcavator API. Optional.
-    :type crxcavator_api_key: str
-    :param crxcavator_api_key: Auth key for CRXcavator API. Optional.
     :type analysis_job_directory: str
     :param analysis_job_directory: Path to a directory tree containing analysis jobs to run. Optional.
     :type oci_sync_all_profiles: bool
@@ -111,6 +107,12 @@ class Config:
     :param duo_api_hostname: The Duo api hostname, e.g. "api-abc123.duosecurity.com". Optional.
     :param semgrep_app_token: The Semgrep api token. Optional.
     :type semgrep_app_token: str
+    :type snipeit_base_uri: string
+    :param snipeit_base_uri: SnipeIT data provider base URI. Optional.
+    :type snipeit_token: string
+    :param snipeit_token: Token used to authenticate to the SnipeIT data provider. Optional.
+    :type snipeit_tenant_id: string
+    :param snipeit_tenant_id: Token used to authenticate to the SnipeIT data provider. Optional.
     """

     def __init__(
@@ -131,8 +133,6 @@ class Config:
         azure_client_secret=None,
         aws_requested_syncs=None,
         analysis_job_directory=None,
-        crxcavator_api_base_uri=None,
-        crxcavator_api_key=None,
         oci_sync_all_profiles=None,
         okta_org_id=None,
         okta_api_key=None,
@@ -170,6 +170,9 @@ class Config:
         duo_api_secret=None,
         duo_api_hostname=None,
         semgrep_app_token=None,
+        snipeit_base_uri=None,
+        snipeit_token=None,
+        snipeit_tenant_id=None,
     ):
         self.neo4j_uri = neo4j_uri
         self.neo4j_user = neo4j_user
@@ -187,8 +190,6 @@ class Config:
         self.azure_client_secret = azure_client_secret
         self.aws_requested_syncs = aws_requested_syncs
         self.analysis_job_directory = analysis_job_directory
-        self.crxcavator_api_base_uri = crxcavator_api_base_uri
-        self.crxcavator_api_key = crxcavator_api_key
        self.oci_sync_all_profiles = oci_sync_all_profiles
         self.okta_org_id = okta_org_id
         self.okta_api_key = okta_api_key
@@ -226,3 +227,6 @@ class Config:
         self.duo_api_secret = duo_api_secret
         self.duo_api_hostname = duo_api_hostname
         self.semgrep_app_token = semgrep_app_token
+        self.snipeit_base_uri = snipeit_base_uri
+        self.snipeit_token = snipeit_token
+        self.snipeit_tenant_id = snipeit_tenant_id
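With these additions, a Config can be constructed with the SnipeIT settings alongside the existing ones. (The new :param snipeit_tenant_id: docstring repeats the token description; the CLI help above calls it 'An ID for the SnipeIT tenant.') A minimal usage sketch with placeholder values:

from cartography.config import Config

# All values below are illustrative placeholders.
config = Config(
    neo4j_uri='bolt://localhost:7687',
    snipeit_base_uri='https://snipeit.example.com',
    snipeit_token='example-token',
    snipeit_tenant_id='example-tenant',
)

assert config.snipeit_tenant_id == 'example-tenant'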
cartography/data/indexes.cypher
CHANGED
@@ -65,8 +65,6 @@ CREATE INDEX IF NOT EXISTS FOR (n:AccountAccessKey) ON (n.accesskeyid);
 CREATE INDEX IF NOT EXISTS FOR (n:AccountAccessKey) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:AutoScalingGroup) ON (n.arn);
 CREATE INDEX IF NOT EXISTS FOR (n:AutoScalingGroup) ON (n.lastupdated);
-CREATE INDEX IF NOT EXISTS FOR (n:ChromeExtension) ON (n.id);
-CREATE INDEX IF NOT EXISTS FOR (n:ChromeExtension) ON (n.lastupdated);
 CREATE INDEX IF NOT EXISTS FOR (n:CrowdstrikeHost) ON (n.id);
 CREATE INDEX IF NOT EXISTS FOR (n:CrowdstrikeHost) ON (n.instance_id);
 CREATE INDEX IF NOT EXISTS FOR (n:CrowdstrikeHost) ON (n.lastupdated);
cartography/data/jobs/scoped_analysis/semgrep_sca_risk_analysis.json
CHANGED
@@ -13,47 +13,47 @@
     },
     {
         "__comment__": "not possible to identify if reachable && version specifier is the only flag of the vulnerability (likelihood = rare) && severity in [low, medium, high] -> Risk = Info",
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'UNREACHABLE', reachability_check:'NO REACHABILITY ANALYSIS', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) WHERE s.severity IN ['LOW', 'MEDIUM', 'HIGH'] SET s.reachability_risk = 'INFO' return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
         "__comment__": "not possible to identify if reachable && version specifier is the only flag of the vulnerability (likelihood = rare) && severity = critical -> Risk = Low",
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'UNREACHABLE', reachability_check:'NO REACHABILITY ANALYSIS', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) WHERE s.severity = 'CRITICAL' SET s.reachability_risk = 'LOW' return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
-        "__comment__": "manual review required to confirm
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'
+        "__comment__": "manual review required to confirm exploitation when conditions met && identified version is vulnerable (likelihood = possible) && severity in [low, medium] -> Risk = Low",
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'CONDITIONALLY REACHABLE', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) WHERE s.severity IN ['LOW', 'MEDIUM'] SET s.reachability_risk = 'LOW' return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
-        "__comment__": "manual review required to confirm
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'
+        "__comment__": "manual review required to confirm exploitation when conditions met && identified version is vulnerable (likelihood = possible) && severity = high -> Risk = Medium",
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'CONDITIONALLY REACHABLE', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) WHERE s.severity = 'HIGH' SET s.reachability_risk = 'MEDIUM' return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
-        "__comment__": "manual review required to confirm
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'
+        "__comment__": "manual review required to confirm exploitation when conditions met && identified version is vulnerable (likelihood = possible) && severity = critical -> Risk = High",
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'CONDITIONALLY REACHABLE', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) WHERE s.severity = 'CRITICAL' SET s.reachability_risk = 'HIGH' return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
         "__comment__": "adding the vulnerable version flags it reachable (likelihood = likely) && severity in [low, medium] -> Risk = Low",
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'ALWAYS REACHABLE', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) WHERE s.severity IN ['LOW','MEDIUM'] SET s.reachability_risk = 'LOW' return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
-        "__comment__": "adding the vulnerable version flags it reachable (likelihood = likely) && severity = high -> Risk =
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'
+        "__comment__": "adding the vulnerable version flags it reachable (likelihood = likely) && severity = high -> Risk = Medium",
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'ALWAYS REACHABLE', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) WHERE s.severity = 'HIGH' SET s.reachability_risk = 'MEDIUM' return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
         "__comment__": "adding the vulnerable version flags it reachable (special case for critical, if something is so critical that needs to be fixed, likelihood = likely)) && severity = critical -> Risk = Critical",
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'ALWAYS REACHABLE', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) WHERE s.severity = 'CRITICAL' SET s.reachability_risk = 'CRITICAL' return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
         "__comment__": "if reachability analysis confirmed that is rechable (likelihood = certain) -> Risk = Severity",
-        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'
+        "query": "MATCH (g:GitHubRepository{archived:false})<-[:FOUND_IN]-(s:SemgrepSCAFinding{reachability:'REACHABLE', reachability_check:'REACHABLE', lastupdated:$UPDATE_TAG})<-[:RESOURCE]-(:SemgrepDeployment{id:$DEPLOYMENT_ID}) SET s.reachability_risk = s.severity return COUNT(*) as TotalCompleted",
         "iterative": false
     },
     {
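Taken together, the updated queries encode a likelihood-times-severity matrix: the (reachability, reachability_check) pair fixes the likelihood bucket, and the finding's severity then maps to reachability_risk. A compact Python sketch of that mapping, derived from the comments and queries above (not code that ships with cartography):

# (reachability, reachability_check) -> {severity -> reachability_risk},
# as encoded in the scoped-analysis job's queries.
RISK_MATRIX = {
    ('UNREACHABLE', 'NO REACHABILITY ANALYSIS'): {
        'LOW': 'INFO', 'MEDIUM': 'INFO', 'HIGH': 'INFO', 'CRITICAL': 'LOW',
    },
    ('REACHABLE', 'CONDITIONALLY REACHABLE'): {
        'LOW': 'LOW', 'MEDIUM': 'LOW', 'HIGH': 'MEDIUM', 'CRITICAL': 'HIGH',
    },
    ('REACHABLE', 'ALWAYS REACHABLE'): {
        'LOW': 'LOW', 'MEDIUM': 'LOW', 'HIGH': 'MEDIUM', 'CRITICAL': 'CRITICAL',
    },
}


def reachability_risk(reachability: str, check: str, severity: str) -> str:
    # Confirmed-reachable findings take their severity as the risk.
    if (reachability, check) == ('REACHABLE', 'REACHABLE'):
        return severity
    return RISK_MATRIX[(reachability, check)][severity]


assert reachability_risk('REACHABLE', 'ALWAYS REACHABLE', 'CRITICAL') == 'CRITICAL'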
cartography/driftdetect/cli.py
CHANGED
@@ -30,7 +30,7 @@ class CLI:
             'graph database and reports the deviations.'
         ),
         epilog='For more documentation please visit: '
-               'https://github.
+               'https://cartography-cncf.github.io/cartography/usage/drift-detect.html',
     )
     parser.add_argument(
         '-v',
cartography/graph/job.py
CHANGED
@@ -150,7 +150,14 @@ class GraphJob:
         )

         statements: List[GraphStatement] = [
-            GraphStatement(
+            GraphStatement(
+                query,
+                parameters=parameters,
+                iterative=True,
+                iterationsize=100,
+                parent_job_name=node_schema.label,
+                parent_job_sequence_num=idx,
+            ) for idx, query in enumerate(queries, start=1)
         ]

         return cls(
@@ -322,8 +322,12 @@ def cleanup_rpr(
     )

     statement = GraphStatement(
-        cleanup_rpr_query_template,
-
+        cleanup_rpr_query_template,
+        {'UPDATE_TAG': update_tag, 'AWS_ID': current_aws_id},
+        True,
+        1000,
+        parent_job_name=f"{relationship_name}:{node_label}",
+        parent_job_sequence_num=1,
     )
     statement.run(neo4j_session)
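Both hunks thread job provenance (parent_job_name and parent_job_sequence_num) through every GraphStatement so iterative cleanup statements can be attributed in logs. A minimal standalone sketch of the enumeration pattern from the first hunk (the Statement dataclass is a stand-in; GraphStatement's full constructor is not shown in this diff):

from dataclasses import dataclass
from typing import Dict, List, Optional


@dataclass
class Statement:
    # Stand-in for cartography.graph.statement.GraphStatement.
    query: str
    parameters: Dict
    iterative: bool
    iterationsize: int
    parent_job_name: Optional[str] = None
    parent_job_sequence_num: Optional[int] = None


queries: List[str] = ['MATCH (n:Foo) ...', 'MATCH (n:Bar) ...']
parameters: Dict = {'UPDATE_TAG': 12345}

# enumerate(..., start=1) yields a 1-based sequence number per statement,
# matching the parent_job_sequence_num values set in the diff.
statements: List[Statement] = [
    Statement(
        query,
        parameters=parameters,
        iterative=True,
        iterationsize=100,
        parent_job_name='FooNode',
        parent_job_sequence_num=idx,
    ) for idx, query in enumerate(queries, start=1)
]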
cartography/intel/gcp/__init__.py
CHANGED
@@ -120,11 +120,11 @@ def _initialize_resources(credentials: GoogleCredentials) -> Resource:
     return Resources(
         crm_v1=_get_crm_resource_v1(credentials),
         crm_v2=_get_crm_resource_v2(credentials),
-        compute=_get_compute_resource(credentials),
-        storage=_get_storage_resource(credentials),
-        container=_get_container_resource(credentials),
         serviceusage=_get_serviceusage_resource(credentials),
-        dns=_get_dns_resource(credentials),
+        compute=None,
+        container=None,
+        dns=None,
+        storage=None,
     )


@@ -159,12 +159,12 @@ def _services_enabled_on_project(serviceusage: Resource, project_id: str) -> Set
     return set()


-def _sync_single_project(
+def _sync_single_project_compute(
     neo4j_session: neo4j.Session, resources: Resource, project_id: str, gcp_update_tag: int,
     common_job_parameters: Dict,
 ) -> None:
     """
-    Handles graph sync for a single GCP project.
+    Handles graph sync for a single GCP project on Compute resources.
     :param neo4j_session: The Neo4j session
     :param resources: namedtuple of the GCP resource objects
     :param project_id: The project ID number to sync. See the `projectId` field in
@@ -175,14 +175,72 @@ def _sync_single_project(
     """
     # Determine the resources available on the project.
     enabled_services = _services_enabled_on_project(resources.serviceusage, project_id)
+    compute_cred = _get_compute_resource(get_gcp_credentials())
     if service_names.compute in enabled_services:
-        compute.sync(neo4j_session,
+        compute.sync(neo4j_session, compute_cred, project_id, gcp_update_tag, common_job_parameters)
+
+
+def _sync_single_project_storage(
+    neo4j_session: neo4j.Session, resources: Resource, project_id: str, gcp_update_tag: int,
+    common_job_parameters: Dict,
+) -> None:
+    """
+    Handles graph sync for a single GCP project on Storage resources.
+    :param neo4j_session: The Neo4j session
+    :param resources: namedtuple of the GCP resource objects
+    :param project_id: The project ID number to sync. See the `projectId` field in
+    https://cloud.google.com/resource-manager/reference/rest/v1/projects
+    :param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
+    :param common_job_parameters: Other parameters sent to Neo4j
+    :return: Nothing
+    """
+    # Determine the resources available on the project.
+    enabled_services = _services_enabled_on_project(resources.serviceusage, project_id)
+    storage_cred = _get_storage_resource(get_gcp_credentials())
     if service_names.storage in enabled_services:
-        storage.sync_gcp_buckets(neo4j_session,
+        storage.sync_gcp_buckets(neo4j_session, storage_cred, project_id, gcp_update_tag, common_job_parameters)
+
+
+def _sync_single_project_gke(
+    neo4j_session: neo4j.Session, resources: Resource, project_id: str, gcp_update_tag: int,
+    common_job_parameters: Dict,
+) -> None:
+    """
+    Handles graph sync for a single GCP project GKE resources.
+    :param neo4j_session: The Neo4j session
+    :param resources: namedtuple of the GCP resource objects
+    :param project_id: The project ID number to sync. See the `projectId` field in
+    https://cloud.google.com/resource-manager/reference/rest/v1/projects
+    :param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
+    :param common_job_parameters: Other parameters sent to Neo4j
+    :return: Nothing
+    """
+    # Determine the resources available on the project.
+    enabled_services = _services_enabled_on_project(resources.serviceusage, project_id)
+    container_cred = _get_container_resource(get_gcp_credentials())
     if service_names.gke in enabled_services:
-        gke.sync_gke_clusters(neo4j_session,
+        gke.sync_gke_clusters(neo4j_session, container_cred, project_id, gcp_update_tag, common_job_parameters)
+
+
+def _sync_single_project_dns(
+    neo4j_session: neo4j.Session, resources: Resource, project_id: str, gcp_update_tag: int,
+    common_job_parameters: Dict,
+) -> None:
+    """
+    Handles graph sync for a single GCP project DNS resources.
+    :param neo4j_session: The Neo4j session
+    :param resources: namedtuple of the GCP resource objects
+    :param project_id: The project ID number to sync. See the `projectId` field in
+    https://cloud.google.com/resource-manager/reference/rest/v1/projects
+    :param gcp_update_tag: The timestamp value to set our new Neo4j nodes with
+    :param common_job_parameters: Other parameters sent to Neo4j
+    :return: Nothing
+    """
+    # Determine the resources available on the project.
+    enabled_services = _services_enabled_on_project(resources.serviceusage, project_id)
+    dns_cred = _get_dns_resource(get_gcp_credentials())
     if service_names.dns in enabled_services:
-        dns.sync(neo4j_session,
+        dns.sync(neo4j_session, dns_cred, project_id, gcp_update_tag, common_job_parameters)


 def _sync_multiple_projects(
@@ -203,26 +261,38 @@ def _sync_multiple_projects(
     """
     logger.info("Syncing %d GCP projects.", len(projects))
     crm.sync_gcp_projects(neo4j_session, projects, gcp_update_tag, common_job_parameters)
+    # Compute data sync
+    for project in projects:
+        project_id = project['projectId']
+        logger.info("Syncing GCP project %s for Compute.", project_id)
+        _sync_single_project_compute(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)

+    # Storage data sync
     for project in projects:
         project_id = project['projectId']
-        logger.info("Syncing GCP project %s
-
+        logger.info("Syncing GCP project %s for Storage", project_id)
+        _sync_single_project_storage(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
+
+    # GKE data sync
+    for project in projects:
+        project_id = project['projectId']
+        logger.info("Syncing GCP project %s for GKE", project_id)
+        _sync_single_project_gke(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)
+
+    # DNS data sync
+    for project in projects:
+        project_id = project['projectId']
+        logger.info("Syncing GCP project %s for DNS", project_id)
+        _sync_single_project_dns(neo4j_session, resources, project_id, gcp_update_tag, common_job_parameters)


 @timeit
-def start_gcp_ingestion(neo4j_session: neo4j.Session, config: Config) -> None:
+def get_gcp_credentials() -> GoogleCredentials:
     """
-    Starts the GCP ingestion process by initializing Google Application Default Credentials, creating the necessary
-    resource objects, listing all GCP organizations and projects available to the GCP identity, and supplying that
-    context to all intel modules.
-    :param neo4j_session: The Neo4j session
-    :param config: A `cartography.config` object
-    :return: Nothing
+    Gets access tokens for GCP API access.
+    :param: None
+    :return: GoogleCredentials
     """
-    common_job_parameters = {
-        "UPDATE_TAG": config.update_tag,
-    }
     try:
         # Explicitly use Application Default Credentials.
         # See https://oauth2client.readthedocs.io/en/latest/source/
@@ -239,7 +309,24 @@ def start_gcp_ingestion(neo4j_session: neo4j.Session, config: Config) -> None:
         ),
         e,
     )
-    return
+    return credentials
+
+
+@timeit
+def start_gcp_ingestion(neo4j_session: neo4j.Session, config: Config) -> None:
+    """
+    Starts the GCP ingestion process by initializing Google Application Default Credentials, creating the necessary
+    resource objects, listing all GCP organizations and projects available to the GCP identity, and supplying that
+    context to all intel modules.
+    :param neo4j_session: The Neo4j session
+    :param config: A `cartography.config` object
+    :return: Nothing
+    """
+    common_job_parameters = {
+        "UPDATE_TAG": config.update_tag,
+    }
+
+    credentials = get_gcp_credentials()

     resources = _initialize_resources(credentials)
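The net effect of this refactor: _sync_multiple_projects now makes one pass over the project list per service (Compute, then Storage, then GKE, then DNS), and each per-project helper rebuilds its Google API client from freshly fetched credentials instead of reusing clients built once in _initialize_resources. A condensed sketch of that control flow (service names and no-op sync functions are illustrative):

from typing import Callable, Dict, List


def sync_all(projects: List[Dict], per_project_syncs: Dict[str, Callable[[str], None]]) -> None:
    # One full pass over the project list per service, mirroring the
    # Compute -> Storage -> GKE -> DNS ordering in the diff.
    for service, sync_one in per_project_syncs.items():
        for project in projects:
            project_id = project['projectId']
            print(f"Syncing GCP project {project_id} for {service}")
            sync_one(project_id)


# Usage with hypothetical no-op sync functions:
sync_all(
    [{'projectId': 'demo-project'}],
    {
        'Compute': lambda pid: None,
        'Storage': lambda pid: None,
        'GKE': lambda pid: None,
        'DNS': lambda pid: None,
    },
)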
cartography/intel/kandji/__init__.py
CHANGED
@@ -21,7 +21,7 @@ def start_kandji_ingestion(neo4j_session: neo4j.Session, config: Config) -> None
     """
     if config.kandji_base_uri is None or config.kandji_token is None or config.kandji_tenant_id is None:
         logger.warning(
-            'Required parameter
+            'Required parameter missing. Skipping sync. '
             'See docs to configure.',
         )
         return
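This release also adds a new SnipeIT intel module (cartography/intel/snipeit/*, listed above but not expanded in this view). Judging from the Kandji guard shown here and the SnipeIT config fields added earlier, its entry point plausibly follows the same skip-if-unconfigured pattern; the sketch below is illustrative, not the module's actual source:

import logging

import neo4j

from cartography.config import Config

logger = logging.getLogger(__name__)


def start_snipeit_ingestion(neo4j_session: neo4j.Session, config: Config) -> None:
    # Hypothetical guard clause modeled on the Kandji module above.
    if config.snipeit_base_uri is None or config.snipeit_token is None or config.snipeit_tenant_id is None:
        logger.warning('Required parameter missing. Skipping sync. See docs to configure.')
        return
    # ... fetch SnipeIT assets and users here, then load them into Neo4j ...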