cartography-0.96.2-py3-none-any.whl → cartography-0.97.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cartography might be problematic.

@@ -56,6 +56,7 @@ def load_identity_center_instances(
 
 
 @timeit
+@aws_handle_regions
 def get_permission_sets(boto3_session: boto3.session.Session, instance_arn: str, region: str) -> List[Dict]:
     """
     Get all permission sets for a given Identity Center instance
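Context: `@aws_handle_regions` comes from cartography.util and its body is not shown in this diff. As a rough, hypothetical sketch of the pattern such a decorator implements (the error-code list and names here are illustrative, not cartography's actual ones), it catches region-level access errors from boto3 and returns an empty result so that one unavailable region does not abort the whole sync:

```python
import functools
import logging
from typing import Any, Callable, List

from botocore.exceptions import ClientError

logger = logging.getLogger(__name__)

# Hypothetical stand-in for cartography.util.aws_handle_regions.
SKIP_CODES = {"AccessDenied", "AccessDeniedException", "UnrecognizedClientException"}

def aws_handle_regions(func: Callable[..., List[Any]]) -> Callable[..., List[Any]]:
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> List[Any]:
        try:
            return func(*args, **kwargs)
        except ClientError as e:
            # Treat "service not usable in this region" as an empty result.
            if e.response["Error"]["Code"] in SKIP_CODES:
                logger.warning("Skipping region for %s: %s", func.__name__, e)
                return []
            raise
    return wrapper
```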
@@ -2,6 +2,9 @@ import logging
 from datetime import datetime
 
 import neo4j
+from requests import Session
+from requests.adapters import HTTPAdapter
+from urllib3 import Retry
 
 from cartography.config import Config
 from cartography.intel.cve import feed
@@ -13,28 +16,34 @@ logger = logging.getLogger(__name__)
 stat_handler = get_stats_client(__name__)
 
 
-@timeit
-def start_cve_ingestion(
-    neo4j_session: neo4j.Session, config: Config,
-) -> None:
-    """
-    Perform ingestion of CVE data from NIST APIs.
-    :param neo4j_session: Neo4J session for database interface
-    :param config: A cartography.config object
-    :return: None
-    """
-    if not config.cve_enabled:
-        return
-    cve_api_key: str | None = config.cve_api_key if config.cve_api_key else None
+def _retryable_session() -> Session:
+    session = Session()
+    retry_policy = Retry(
+        total=8,
+        connect=1,
+        backoff_factor=1,
+        status_forcelist=[429, 500, 502, 503, 504],
+        allowed_methods=["GET"],
+    )
+    session.mount("https://", HTTPAdapter(max_retries=retry_policy))
+    logger.info(f"Configured session with retry policy: {retry_policy}")
+    return session
 
-    # sync CVE year archives, if not yet synced
+
+def _sync_year_archives(
+    http_session: Session,
+    neo4j_session: neo4j.Session,
+    config: Config,
+    cve_api_key: str | None,
+) -> None:
     existing_years = feed.get_cve_sync_metadata(neo4j_session)
     current_year = datetime.now().year
-    for year in range(2002, current_year + 1):
+    logger.info(f"Syncing CVE data for year archives. Existing years: {existing_years}. Current year: {current_year}")
+    for year in range(1999, current_year + 1):
         if year in existing_years:
             continue
         logger.info(f"Syncing CVE data for year {year}")
-        cves = feed.get_published_cves_per_year(config.nist_cve_url, str(year), cve_api_key)
+        cves = feed.get_published_cves_per_year(http_session, config.nist_cve_url, str(year), cve_api_key)
         feed_metadata = feed.transform_cve_feed(cves)
         feed.load_cve_feed(neo4j_session, [feed_metadata], config.update_tag)
         published_cves = feed.transform_cves(cves)
@@ -48,10 +57,16 @@ def start_cve_ingestion(
         stat_handler=stat_handler,
     )
 
-    # sync modified data
+
+def _sync_modified_data(
+    http_session: Session,
+    neo4j_session: neo4j.Session,
+    config: Config,
+    cve_api_key: str | None,
+) -> None:
     logger.info("Syncing CVE data for modified data")
     last_modified_date = feed.get_last_modified_cve_date(neo4j_session)
-    cves = feed.get_modified_cves(config.nist_cve_url, last_modified_date, cve_api_key)
+    cves = feed.get_modified_cves(http_session, config.nist_cve_url, last_modified_date, cve_api_key)
     feed_metadata = feed.transform_cve_feed(cves)
     feed.load_cve_feed(neo4j_session, [feed_metadata], config.update_tag)
     modified_cves = feed.transform_cves(cves)
@@ -65,4 +80,21 @@ def start_cve_ingestion(
         stat_handler=stat_handler,
     )
 
-    # CVEs are never deleted, so we don't need to run a cleanup job
+
+@timeit
+def start_cve_ingestion(
+    neo4j_session: neo4j.Session, config: Config,
+) -> None:
+    """
+    Perform ingestion of CVE data from NIST APIs.
+    :param neo4j_session: Neo4J session for database interface
+    :param config: A cartography.config object
+    :return: None
+    """
+    if not config.cve_enabled:
+        return
+    cve_api_key: str | None = config.cve_api_key if config.cve_api_key else None
+    with _retryable_session() as http_session:
+        _sync_year_archives(http_session, neo4j_session=neo4j_session, config=config, cve_api_key=cve_api_key)
+        _sync_modified_data(http_session, neo4j_session=neo4j_session, config=config, cve_api_key=cve_api_key)
+    # CVEs are never deleted, so we don't need to run a cleanup job
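The effect of `_retryable_session()` is to move HTTP retry handling out of hand-rolled loops (see the `_call_cves_api` rewrite in feed.py below) and into urllib3. A minimal standalone sketch of the same pattern; the NVD URL and timeout tuple mirror values appearing elsewhere in this diff:

```python
import requests
from requests.adapters import HTTPAdapter
from urllib3 import Retry

session = requests.Session()
session.mount(
    "https://",
    HTTPAdapter(
        max_retries=Retry(
            total=8,                                     # at most 8 retries overall
            connect=1,                                   # but only 1 retry on connect errors
            backoff_factor=1,                            # exponentially growing sleeps between attempts
            status_forcelist=[429, 500, 502, 503, 504],  # retry rate limits and server errors
            allowed_methods=["GET"],                     # idempotent reads only
        ),
    ),
)
# Every https:// GET through this session now retries transparently; the
# caller sees either a final response or a raised retry-exhausted error.
resp = session.get("https://services.nvd.nist.gov/rest/json/cves/2.0", timeout=(30, 120))
resp.raise_for_status()
```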
@@ -11,7 +11,7 @@ from typing import List
 from typing import Optional
 
 import neo4j
-import requests
+from requests import Session
 
 from cartography.client.core.tx import load
 from cartography.client.core.tx import read_list_of_values_tx
@@ -22,7 +22,6 @@ from cartography.util import timeit
 
 logger = logging.getLogger(__name__)
 
-MAX_RETRIES = 8
 # Connect and read timeouts of 120 seconds each; see https://requests.readthedocs.io/en/master/user/advanced/#timeouts
 CONNECT_AND_READ_TIMEOUT = (30, 120)
 CVE_FEED_ID = "NIST_NVD"
@@ -68,53 +67,36 @@ def _map_cve_dict(cve_dict: Dict[Any, Any], data: Dict[Any, Any]) -> None:
     cve_dict["startIndex"] = data["startIndex"]
 
 
-def _call_cves_api(url: str, api_key: str | None, params: Dict[str, Any]) -> Dict[Any, Any]:
-    totalResults = 0
-    sleep_time = DEFAULT_SLEEP_TIME
-    retries = 0
+def _call_cves_api(http_session: Session, url: str, api_key: str | None, params: Dict[str, Any]) -> Dict[Any, Any]:
+    total_results = 0
     params["startIndex"] = 0
     params["resultsPerPage"] = RESULTS_PER_PAGE
-    headers = {}
-    headers["Content-Type"] = "application/json"
+    headers = {"Content-Type": "application/json"}
     if api_key:
+        sleep_between_requests = DEFAULT_SLEEP_TIME
         headers["apiKey"] = api_key
     else:
-        sleep_time = DELAYED_SLEEP_TIME  # Sleep for 6 seconds between each request to avoid rate limiting
+        sleep_between_requests = DELAYED_SLEEP_TIME
         logger.warning(
-            f"No NIST NVD API key provided. Increasing sleep time to {sleep_time}.",
+            f"No NIST NVD API key provided. Increasing sleep time to {sleep_between_requests}.",
         )
     results: Dict[Any, Any] = dict()
 
-    with requests.Session() as session:
-        while params["resultsPerPage"] > 0 or params["startIndex"] < totalResults:
-            logger.info(f"Calling NIST NVD API at {url} with params {params}")
-            try:
-                res = session.get(
-                    url, params=params, headers=headers, timeout=CONNECT_AND_READ_TIMEOUT,
-                )
-                res.raise_for_status()
-                data = res.json()
-            except requests.exceptions.HTTPError:
-                logger.error(
-                    f"Failed to get CVE data from NIST NVD API {res.status_code} : {res.text}",
-                )
-                retries += 1
-                if retries >= MAX_RETRIES:
-                    raise
-                # Exponential backoff
-                sleep_time *= 2
-                time.sleep(sleep_time)
-                continue
-            _map_cve_dict(results, data)
-            totalResults = data["totalResults"]
-            params["resultsPerPage"] = data["resultsPerPage"]
-            params["startIndex"] += data["resultsPerPage"]
-            retries = 0
-            time.sleep(sleep_time)
+    while params["resultsPerPage"] > 0 or params["startIndex"] < total_results:
+        logger.info(f"Calling NIST NVD API at {url} with params {params}")
+        res = http_session.get(url, params=params, headers=headers, timeout=CONNECT_AND_READ_TIMEOUT)
+        res.raise_for_status()
+        data = res.json()
+        _map_cve_dict(results, data)
+        total_results = data["totalResults"]
+        params["resultsPerPage"] = data["resultsPerPage"]
+        params["startIndex"] += data["resultsPerPage"]
+        time.sleep(sleep_between_requests)
     return results
 
 
 def get_cves_in_batches(
+    http_session: Session,
     nist_cve_url: str,
     start_date: datetime,
     end_date: datetime,
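The rewritten loop follows the NVD 2.0 pagination protocol: every response carries `totalResults`, `resultsPerPage`, and `startIndex`, and the client advances `startIndex` until a request past the end returns a page size of 0. A self-contained toy sketch of that protocol, with a fake page fetcher standing in for `http_session.get(...).json()`:

```python
from typing import Any, Dict, List

def paginate(fetch_page, page_size: int = 2000) -> List[Any]:
    # Same loop shape as _call_cves_api: keep requesting until the server
    # reports resultsPerPage == 0 and startIndex has passed totalResults.
    params: Dict[str, int] = {"startIndex": 0, "resultsPerPage": page_size}
    items: List[Any] = []
    total = 0
    while params["resultsPerPage"] > 0 or params["startIndex"] < total:
        page = fetch_page(params["startIndex"])
        items.extend(page["vulnerabilities"])
        total = page["totalResults"]
        params["resultsPerPage"] = page["resultsPerPage"]
        params["startIndex"] += page["resultsPerPage"]
    return items

# Fake feed: 5 items served 2 per page, then an empty page past the end.
DATA = list(range(5))

def fake_page(start: int) -> Dict[str, Any]:
    chunk = DATA[start:start + 2]
    return {"vulnerabilities": chunk, "totalResults": len(DATA), "resultsPerPage": len(chunk)}

assert paginate(fake_page) == DATA
```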
@@ -147,7 +129,7 @@ def get_cves_in_batches(
         logger.info(
             f"Querying CVE data between {current_start_date} and {current_end_date}",
         )
-        batch_cves = _call_cves_api(nist_cve_url, api_key, params)
+        batch_cves = _call_cves_api(http_session, nist_cve_url, api_key, params)
         _map_cve_dict(cves, batch_cves)
         current_start_date = current_end_date
         new_end_date = current_start_date + batch_size
@@ -158,9 +140,8 @@ def get_cves_in_batches(
 
 
 def get_modified_cves(
-    nist_cve_url: str, last_modified_date: str, api_key: str | None,
+    http_session: Session, nist_cve_url: str, last_modified_date: str, api_key: str | None,
 ) -> Dict[Any, Any]:
-    cves = dict()
     end_date = datetime.now(tz=timezone.utc)
     start_date = datetime.strptime(last_modified_date, "%Y-%m-%dT%H:%M:%S").replace(
         tzinfo=timezone.utc,
@@ -170,15 +151,14 @@ def get_modified_cves(
         "end": "lastModEndDate",
     }
     cves = get_cves_in_batches(
-        nist_cve_url, start_date, end_date, date_param_names, api_key,
+        http_session, nist_cve_url, start_date, end_date, date_param_names, api_key,
     )
     return cves
 
 
 def get_published_cves_per_year(
-    nist_cve_url: str, year: str, api_key: str | None,
+    http_session: Session, nist_cve_url: str, year: str, api_key: str | None,
 ) -> Dict[Any, Any]:
-    cves = {}
     start_of_year = datetime.strptime(f"{year}-01-01", "%Y-%m-%d")
     next_year = int(year) + 1
     end_of_next_year = datetime.strptime(f"{next_year}-01-01", "%Y-%m-%d")
@@ -187,7 +167,7 @@ def get_published_cves_per_year(
         "end": "pubEndDate",
     }
     cves = get_cves_in_batches(
-        nist_cve_url, start_of_year, end_of_next_year, date_param_names, api_key,
+        http_session, nist_cve_url, start_of_year, end_of_next_year, date_param_names, api_key,
     )
     return cves
 
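`get_cves_in_batches` (only partially visible here) walks a long date span window by window, since the NVD 2.0 API caps the date range of a single query at 120 days. A hypothetical sketch of that windowing; `date_windows` and the window size are illustrative, not cartography's actual helper:

```python
from datetime import datetime, timedelta
from typing import Iterator, Tuple

def date_windows(start: datetime, end: datetime, days: int = 120) -> Iterator[Tuple[datetime, datetime]]:
    # Yield consecutive [lo, hi) windows of at most `days` days covering [start, end).
    step = timedelta(days=days)
    current = start
    while current < end:
        yield current, min(current + step, end)
        current += step

for lo, hi in date_windows(datetime(2024, 1, 1), datetime(2025, 1, 1)):
    # Each window becomes one pubStartDate/pubEndDate (or lastMod*) query.
    print(f"start={lo.isoformat()} end={hi.isoformat()}")
```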
@@ -21,6 +21,9 @@ logger = logging.getLogger(__name__)
 RepoPermission = namedtuple('RepoPermission', ['repo_url', 'permission'])
 # A team member's role: https://docs.github.com/en/graphql/reference/enums#teammemberrole
 UserRole = namedtuple('UserRole', ['user_url', 'role'])
+# Unlike the other tuples here, there is no qualification (like 'role' or 'permission') to the relationship.
+# A child team is just a child team: https://docs.github.com/en/graphql/reference/objects#teamconnection
+ChildTeam = namedtuple('ChildTeam', ['team_url'])
 
 
 def backoff_handler(details: Dict) -> None:
@@ -53,6 +56,9 @@ def get_teams(org: str, api_url: str, token: str) -> Tuple[PaginatedGraphqlData,
                 members(membership: IMMEDIATE) {
                     totalCount
                 }
+                childTeams {
+                    totalCount
+                }
             }
             pageInfo{
                 endCursor
@@ -166,6 +172,21 @@ def _get_team_repos(org: str, api_url: str, token: str, team: str) -> PaginatedGraphqlData:
     return team_repos
 
 
+def _get_teams_users_inner_func(
+    org: str, api_url: str, token: str, team_name: str,
+    user_urls: List[str], user_roles: List[str],
+) -> None:
+    logger.info(f"Loading team users for {team_name}.")
+    team_users = _get_team_users(org, api_url, token, team_name)
+    # The `or []` is because `.nodes` can be None. See:
+    # https://docs.github.com/en/graphql/reference/objects#teammemberconnection
+    for user in team_users.nodes or []:
+        user_urls.append(user['url'])
+    # The `or []` is because `.edges` can be None.
+    for edge in team_users.edges or []:
+        user_roles.append(edge['role'])
+
+
 def _get_team_users_for_multiple_teams(
     team_raw_data: list[dict[str, Any]],
     org: str,
@@ -185,21 +206,7 @@ def _get_team_users_for_multiple_teams(
     user_urls: List[str] = []
     user_roles: List[str] = []
 
-    def get_teams_users_inner_func(
-        org: str, api_url: str, token: str, team_name: str,
-        user_urls: List[str], user_roles: List[str],
-    ) -> None:
-        logger.info(f"Loading team users for {team_name}.")
-        team_users = _get_team_users(org, api_url, token, team_name)
-        # The `or []` is because `.nodes` can be None. See:
-        # https://docs.github.com/en/graphql/reference/objects#teammemberconnection
-        for user in team_users.nodes or []:
-            user_urls.append(user['url'])
-        # The `or []` is because `.edges` can be None.
-        for edge in team_users.edges or []:
-            user_roles.append(edge['role'])
-
-    retries_with_backoff(get_teams_users_inner_func, TypeError, 5, backoff_handler)(
+    retries_with_backoff(_get_teams_users_inner_func, TypeError, 5, backoff_handler)(
         org=org, api_url=api_url, token=token, team_name=team_name, user_urls=user_urls, user_roles=user_roles,
     )
 
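`retries_with_backoff` comes from cartography.util and its implementation is not shown in this diff, but the call shape, `retries_with_backoff(func, exception, max_tries, handler)(**kwargs)`, suggests a wrapper roughly like the hypothetical sketch below. Retrying on `TypeError` matches the `or []` guards above: GitHub's GraphQL API occasionally returns null where a list is expected.

```python
import time
from typing import Any, Callable, Dict, Type

def retries_with_backoff(
    func: Callable[..., Any],
    exception_type: Type[Exception],
    max_tries: int,
    on_backoff: Callable[[Dict], None],
) -> Callable[..., Any]:
    # Hypothetical reconstruction: retry func on exception_type with
    # exponential sleeps, calling on_backoff before each retry.
    def wrapper(**kwargs: Any) -> Any:
        for attempt in range(1, max_tries + 1):
            try:
                return func(**kwargs)
            except exception_type:
                if attempt == max_tries:
                    raise
                wait = 2 ** attempt
                on_backoff({"tries": attempt, "wait": wait})
                time.sleep(wait)
    return wrapper
```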
@@ -252,11 +259,90 @@ def _get_team_users(org: str, api_url: str, token: str, team: str) -> PaginatedGraphqlData:
     return team_users
 
 
+def _get_child_teams_inner_func(
+    org: str, api_url: str, token: str, team_name: str, team_urls: List[str],
+) -> None:
+    logger.info(f"Loading child teams for {team_name}.")
+    child_teams = _get_child_teams(org, api_url, token, team_name)
+    # The `or []` is because `.nodes` can be None. See:
+    # https://docs.github.com/en/graphql/reference/objects#teammemberconnection
+    for cteam in child_teams.nodes or []:
+        team_urls.append(cteam['url'])
+    # No edges to process here, the GitHub response for child teams has no relevant info in edges.
+
+
+def _get_child_teams_for_multiple_teams(
+    team_raw_data: list[dict[str, Any]],
+    org: str,
+    api_url: str,
+    token: str,
+) -> dict[str, list[ChildTeam]]:
+    result: dict[str, list[ChildTeam]] = {}
+    for team in team_raw_data:
+        team_name = team['slug']
+        team_count = team['childTeams']['totalCount']
+
+        if team_count == 0:
+            # This team has no child teams so let's move on
+            result[team_name] = []
+            continue
+
+        team_urls: List[str] = []
+
+        retries_with_backoff(_get_child_teams_inner_func, TypeError, 5, backoff_handler)(
+            org=org, api_url=api_url, token=token, team_name=team_name, team_urls=team_urls,
+        )
+
+        result[team_name] = [ChildTeam(url) for url in team_urls]
+    return result
+
+
+def _get_child_teams(org: str, api_url: str, token: str, team: str) -> PaginatedGraphqlData:
+    team_users_gql = """
+    query($login: String!, $team: String!, $cursor: String) {
+        organization(login: $login) {
+            url
+            login
+            team(slug: $team) {
+                slug
+                childTeams(first: 100, after: $cursor) {
+                    totalCount
+                    nodes {
+                        url
+                    }
+                    pageInfo {
+                        endCursor
+                        hasNextPage
+                    }
+                }
+            }
+        }
+        rateLimit {
+            limit
+            cost
+            remaining
+            resetAt
+        }
+    }
+    """
+    team_users, _ = fetch_all(
+        token,
+        api_url,
+        org,
+        team_users_gql,
+        'team',
+        resource_inner_type='childTeams',
+        team=team,
+    )
+    return team_users
+
+
 def transform_teams(
     team_paginated_data: PaginatedGraphqlData,
     org_data: Dict[str, Any],
     team_repo_data: dict[str, list[RepoPermission]],
     team_user_data: dict[str, list[UserRole]],
+    team_child_team_data: dict[str, list[ChildTeam]],
 ) -> list[dict[str, Any]]:
     result = []
     for team in team_paginated_data.nodes:
@@ -267,13 +353,15 @@ def transform_teams(
             'description': team['description'],
             'repo_count': team['repositories']['totalCount'],
             'member_count': team['members']['totalCount'],
+            'child_team_count': team['childTeams']['totalCount'],
             'org_url': org_data['url'],
             'org_login': org_data['login'],
         }
         repo_permissions = team_repo_data[team_name]
         user_roles = team_user_data[team_name]
+        child_teams = team_child_team_data[team_name]
 
-        if not repo_permissions and not user_roles:
+        if not repo_permissions and not user_roles and not child_teams:
             result.append(repo_info)
             continue
 
@@ -289,6 +377,15 @@ def transform_teams(
                 repo_info_copy = repo_info.copy()
                 repo_info_copy[role] = user_url
                 result.append(repo_info_copy)
+        if child_teams:
+            for (team_url,) in child_teams:
+                repo_info_copy = repo_info.copy()
+                # GitHub speaks of team-team relationships as 'child teams', as in the graphql query
+                # or here: https://docs.github.com/en/graphql/reference/enums#teammembershiptype
+                # We label the relationship as 'MEMBER_OF_TEAM' here because it is in line with
+                # other similar relationships in Cartography.
+                repo_info_copy['MEMBER_OF_TEAM'] = team_url
+                result.append(repo_info_copy)
     return result
 
 
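`transform_teams` fans each team out into one row per relationship: one per repo permission, one per user role, and now one per child team, with each row carrying exactly one relationship column for the loader to match on. A toy illustration with hypothetical values:

```python
base = {'id': 'https://github.com/orgs/example-org/teams/platform', 'org_url': 'https://github.com/example-org'}
user_roles = [('https://github.com/example-user', 'MAINTAINER')]
child_teams = [('https://github.com/orgs/example-org/teams/platform-oncall',)]

rows = []
for user_url, role in user_roles:
    rows.append({**base, role: user_url})              # one row per user role
for (team_url,) in child_teams:
    rows.append({**base, 'MEMBER_OF_TEAM': team_url})  # one row per child team

# rows now holds two dicts, each with the base team fields plus a single
# relationship column, which is the shape the schema definitions expect.
```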
@@ -325,7 +422,8 @@ def sync_github_teams(
     teams_paginated, org_data = get_teams(organization, github_url, github_api_key)
     team_repos = _get_team_repos_for_multiple_teams(teams_paginated.nodes, organization, github_url, github_api_key)
     team_users = _get_team_users_for_multiple_teams(teams_paginated.nodes, organization, github_url, github_api_key)
-    processed_data = transform_teams(teams_paginated, org_data, team_repos, team_users)
+    team_children = _get_child_teams_for_multiple_teams(teams_paginated.nodes, organization, github_url, github_api_key)
+    processed_data = transform_teams(teams_paginated, org_data, team_repos, team_users, team_children)
     load_team_repos(neo4j_session, processed_data, common_job_parameters['UPDATE_TAG'], org_data['url'])
     common_job_parameters['org_url'] = org_data['url']
     cleanup(neo4j_session, common_job_parameters)
@@ -144,10 +144,13 @@ def transform_users(user_data: List[Dict], owners_data: List[Dict], org_data: Di
 
     users_dict = {}
     for user in user_data:
+        # all members get the 'MEMBER_OF' relationship
         processed_user = deepcopy(user['node'])
-        processed_user['role'] = user['role']
         processed_user['hasTwoFactorEnabled'] = user['hasTwoFactorEnabled']
         processed_user['MEMBER_OF'] = org_data['url']
+        # admins get a second relationship expressing them as such
+        if user['role'] == 'ADMIN':
+            processed_user['ADMIN_OF'] = org_data['url']
         users_dict[processed_user['url']] = processed_user
 
     owners_dict = {}
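Replacing the `role` property with an explicit `ADMIN_OF` edge means org admins are found by traversal rather than property filtering. A hypothetical read-back using the neo4j driver:

```python
import neo4j

def get_org_admins(session: neo4j.Session, org_url: str) -> list:
    # Admins now carry both MEMBER_OF and ADMIN_OF edges to the
    # GitHubOrganization node, so no 'role' property filter is needed.
    query = """
    MATCH (u:GitHubUser)-[:ADMIN_OF]->(:GitHubOrganization {id: $org_url})
    RETURN u.url AS url
    """
    return [record["url"] for record in session.run(query, org_url=org_url)]
```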
@@ -1,7 +1,7 @@
 """
 This schema does not handle the org's relationships. Those are handled by other schemas, for example:
 * GitHubTeamSchema defines (GitHubOrganization)-[RESOURCE]->(GitHubTeam)
-* GitHubUserSchema defines (GitHubUser)-[MEMBER_OF|UNAFFILIATED]->(GitHubOrganization)
+* GitHubUserSchema defines (GitHubUser)-[MEMBER_OF|ADMIN_OF|UNAFFILIATED]->(GitHubOrganization)
 (There may be others, these are just two examples.)
 """
 from dataclasses import dataclass
@@ -123,6 +123,22 @@ class GitHubTeamToOrganizationRel(CartographyRelSchema):
     properties: GitHubTeamToOrganizationRelProperties = GitHubTeamToOrganizationRelProperties()
 
 
+@dataclass(frozen=True)
+class GitHubTeamToChildTeamRelProperties(CartographyRelProperties):
+    lastupdated: PropertyRef = PropertyRef('lastupdated', set_in_kwargs=True)
+
+
+@dataclass(frozen=True)
+class GitHubTeamChildTeamRel(CartographyRelSchema):
+    target_node_label: str = 'GitHubTeam'
+    target_node_matcher: TargetNodeMatcher = make_target_node_matcher(
+        {'id': PropertyRef('MEMBER_OF_TEAM')},
+    )
+    direction: LinkDirection = LinkDirection.INWARD
+    rel_label: str = "MEMBER_OF_TEAM"
+    properties: GitHubTeamToChildTeamRelProperties = GitHubTeamToChildTeamRelProperties()
+
+
 @dataclass(frozen=True)
 class GitHubTeamSchema(CartographyNodeSchema):
     label: str = 'GitHubTeam'
@@ -136,6 +152,7 @@ class GitHubTeamSchema(CartographyNodeSchema):
             GitHubTeamWriteRepoRel(),
             GitHubTeamMaintainerUserRel(),
             GitHubTeamMemberUserRel(),
+            GitHubTeamChildTeamRel(),
         ],
     )
     sub_resource_relationship: GitHubTeamToOrganizationRel = GitHubTeamToOrganizationRel()
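Because `GitHubTeamChildTeamRel` is loaded from the parent team's row with `LinkDirection.INWARD`, the resulting edge points from child to parent: `(child:GitHubTeam)-[:MEMBER_OF_TEAM]->(parent:GitHubTeam)`. A hypothetical traversal that enumerates a team's nested sub-teams:

```python
import neo4j

def get_descendant_teams(session: neo4j.Session, team_url: str) -> list:
    # Follow MEMBER_OF_TEAM edges for any number of hops to collect
    # every team nested (directly or transitively) under team_url.
    query = """
    MATCH (child:GitHubTeam)-[:MEMBER_OF_TEAM*1..]->(:GitHubTeam {id: $team_url})
    RETURN DISTINCT child.url AS url
    """
    return [record["url"] for record in session.run(query, team_url=team_url)]
```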
@@ -20,13 +20,12 @@ To allow for this in the schema, this relationship is treated as any other node-
 RE: GitHubOrganizationUserSchema vs GitHubUnaffiliatedUserSchema
 
 As noted above, there are implicitly two types of users, those that are part of, or affiliated, to a target
-GitHubOrganization, and those thare are not part, or unaffiliated. Both are represented as GitHubUser nodes,
-but there are two schemas below to allow for some differences between them, e.g., unaffiliated lack these properties:
-* the 'role' property, because unaffiliated have no 'role' in the target org
+GitHubOrganization, and those that are not part, or unaffiliated. Both are represented as GitHubUser nodes,
+but there are two schemas below to allow for a difference between them: unaffiliated nodes lack this property:
 * the 'has_2fa_enabled' property, because the GitHub api does not return it, for these users
 The main importance of having two schemas is to allow the two sets of users to be loaded separately. If we are loading
 an unaffiliated user, but the user already exists in the graph (perhaps they are members of another GitHub orgs for
-example), then loading the unaffiliated user will not blank out the 'role' and 'has_2fa_enabled' properties.
+example), then loading the unaffiliated user will not blank out the 'has_2fa_enabled' property.
 """
 from dataclasses import dataclass
 
@@ -58,8 +57,6 @@ class BaseGitHubUserNodeProperties(CartographyNodeProperties):
 class GitHubOrganizationUserNodeProperties(BaseGitHubUserNodeProperties):
     # specified for affiliated users only. The GitHub api does not return this property for unaffiliated users.
     has_2fa_enabled: PropertyRef = PropertyRef('hasTwoFactorEnabled')
-    # specified for affiliated uers only. Unaffiliated users do not have a 'role' in the target organization.
-    role: PropertyRef = PropertyRef('role')
 
 
 @dataclass(frozen=True)
@@ -84,6 +81,17 @@ class GitHubUserMemberOfOrganizationRel(CartographyRelSchema):
     properties: GitHubUserToOrganizationRelProperties = GitHubUserToOrganizationRelProperties()
 
 
+@dataclass(frozen=True)
+class GitHubUserAdminOfOrganizationRel(CartographyRelSchema):
+    target_node_label: str = 'GitHubOrganization'
+    target_node_matcher: TargetNodeMatcher = make_target_node_matcher(
+        {'id': PropertyRef('ADMIN_OF')},
+    )
+    direction: LinkDirection = LinkDirection.OUTWARD
+    rel_label: str = "ADMIN_OF"
+    properties: GitHubUserToOrganizationRelProperties = GitHubUserToOrganizationRelProperties()
+
+
 @dataclass(frozen=True)
 class GitHubUserUnaffiliatedOrganizationRel(CartographyRelSchema):
     target_node_label: str = 'GitHubOrganization'
@@ -102,6 +110,7 @@ class GitHubOrganizationUserSchema(CartographyNodeSchema):
     other_relationships: OtherRelationships = OtherRelationships(
         [
             GitHubUserMemberOfOrganizationRel(),
+            GitHubUserAdminOfOrganizationRel(),
         ],
     )
     sub_resource_relationship = None
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: cartography
-Version: 0.96.2
+Version: 0.97.0
 Summary: Explore assets and their relationships across your technical infrastructure.
 Home-page: https://www.github.com/cartography-cncf/cartography
 Maintainer: Cartography Contributors
@@ -153,7 +153,7 @@ cartography/intel/aws/elasticache.py,sha256=fCI47aDFmIDyE26GiReKYb6XIZUwrzcvsXBQ
 cartography/intel/aws/elasticsearch.py,sha256=ZL7MkXF_bXRSoXuDSI1dwGckRLG2zDB8LuAD07vSLnE,8374
 cartography/intel/aws/emr.py,sha256=xhWBVZngxJRFjMEDxwq3G6SgytRGLq0v2a_CeDvByR0,3372
 cartography/intel/aws/iam.py,sha256=zRlF9cKcYm44iL63G6bd-_flNOFHVrjsEfW0jZHpUNg,32387
-cartography/intel/aws/identitycenter.py,sha256=zIWe_JpXPC-kkWu26aFjYtGsClNG_GaQ3bdCeiRkApc,9475
+cartography/intel/aws/identitycenter.py,sha256=Mgd7ZNj8W7IghVSXovgYyQDwRt_MjxrBPGyDKxlFsoQ,9495
 cartography/intel/aws/inspector.py,sha256=S22ZgRKEnmnBTJ-u0rodqRPB7_LkSIek47NeBxN4XJw,9336
 cartography/intel/aws/kms.py,sha256=bZUzMxAH_DsAcGTJBs08gg2tLKYu-QWjvMvV9C-6v50,11731
 cartography/intel/aws/lambda_function.py,sha256=KKTyn53xpaMI9WvIqxmsOASFwflHt-2_5ow-zUFc2wg,9890
@@ -208,8 +208,8 @@ cartography/intel/crowdstrike/__init__.py,sha256=dAtgI-0vZAQZ3cTFQhMEzzt7aqiNSNu
 cartography/intel/crowdstrike/endpoints.py,sha256=tdqokMDW3p4fK3dHKKb2T1DTogvOJBCpwyrxdQlbUhw,3815
 cartography/intel/crowdstrike/spotlight.py,sha256=yNhj44-RYF6ubck-hHMKhKiNU0fCfhQf4Oagopc31EM,4754
 cartography/intel/crowdstrike/util.py,sha256=gfJ6Ptr6YdbBS9Qj9a_-Jc-IJroADDRcXqjh5TW0qXE,277
-cartography/intel/cve/__init__.py,sha256=3PLAhQ36g-aq40IHvba789WANsA-TY9B-Oe9mpQweQ8,2516
-cartography/intel/cve/feed.py,sha256=1q9ZxbA4Hp2V84h7kadTImyCk7w0dmV7ROsZQ-kxKwE,10365
+cartography/intel/cve/__init__.py,sha256=u9mv5O_qkSLmdhLhLm1qbwmhoeLQ3A3fQTjNyLQpEyI,3656
+cartography/intel/cve/feed.py,sha256=HJL94jyVcRzIbpe4ooEXr6lnKfrmpukKOEDTs9djrfk,9832
 cartography/intel/digitalocean/__init__.py,sha256=SMYB7LGIQOj_EgGSGVjWZk7SJNbP43hQuOfgOu6xYm4,1526
 cartography/intel/digitalocean/compute.py,sha256=9XctwMjq9h5dExFgExvawoqyiEwSoocNgaMm3Fgl5GM,4911
 cartography/intel/digitalocean/management.py,sha256=YWRnBLLL_bAP1vefIAQgm_-QzefGH0sZKmyU_EokHfA,3764
@@ -230,8 +230,8 @@ cartography/intel/gcp/gke.py,sha256=qaTwsVaxkwNhW5_Mw4bedOk7fgJK8y0LwwcYlUABXDg,
 cartography/intel/gcp/storage.py,sha256=oO_ayEhkXlj2Gn7T5MU41ZXiqwRwe6Ud4wzqyRTsyf4,9075
 cartography/intel/github/__init__.py,sha256=y876JJGTDJZEOFCDiNCJfcLNxN24pVj4s2N0YmuuoaE,1914
 cartography/intel/github/repos.py,sha256=MmpxZASDJFQxDeSMxX3pZcpxCHFPos4_uYC_cX9KjOg,29865
-cartography/intel/github/teams.py,sha256=uQTiOfUBCk4qk0uw7jEPevAq-b2fvynRJ3t-62RZW2c,10659
-cartography/intel/github/users.py,sha256=kkxk0TqPp8HKQAd3RvJs-N_ySFIxHqLsvkvJf0WoGyc,8565
+cartography/intel/github/teams.py,sha256=AltQSmBHHmyzBtnRkez9Bo5yChEKBSt3wwzhGcfqmX4,14180
+cartography/intel/github/users.py,sha256=f_YIDwdNECRg7wRwY8qZ5mztm7m3SIgOz8BbfveRzy8,8734
 cartography/intel/github/util.py,sha256=K0cXOPuhnGvN-aqcSUBO3vTuKQLjufVal9kn2HwOpbo,8110
 cartography/intel/gsuite/__init__.py,sha256=AGIUskGlLCVGHbnQicNpNWi9AvmV7_7hUKTt-hsB2J8,4306
 cartography/intel/gsuite/api.py,sha256=J0dkNdfBVMrEv8vvStQu7YKVxXSyV45WueFhUS4aOG4,10310
@@ -335,9 +335,9 @@ cartography/models/duo/token.py,sha256=BS_AvF-TAGzCY9Owtqxr8g_s6716dnzFOO1Iwkckm
 cartography/models/duo/user.py,sha256=ih3DH_QveAve4cX9dmIwC5gVN6_RNnuLK3bfJ5I9u6g,6554
 cartography/models/duo/web_authn_credential.py,sha256=OcZnfG5zCMlphxSltRcAXQ12hHYJjxrBt6A9L28g7Vk,2920
 cartography/models/github/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cartography/models/github/orgs.py,sha256=f5kJ-51MDGW5k4sWMeTfyBDxcHdhFJZGkRUvGcjllBU,1097
-cartography/models/github/teams.py,sha256=mykeo89acrzyrPzVVC0fzO1piTuysR2CB48LwwrQc0g,5435
-cartography/models/github/users.py,sha256=XliVsBKWF7wW7Dwjwdoz9E2gk1cASyfQgAddKYr23VY,5739
+cartography/models/github/orgs.py,sha256=EcUmkeyoCJmkmzLsfKdUwwTE0N2IIwyaUrIK32dQybo,1106
+cartography/models/github/teams.py,sha256=qFyFAKKsiiHqFZkMM7Fd9My16dgXgylcFy3BbXHhzng,6069
+cartography/models/github/users.py,sha256=N0OWcIAEzaLCx8WIc8XivwFnr2oBOvHHCKmQ0XOVVrc,5969
 cartography/models/kandji/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 cartography/models/kandji/device.py,sha256=C3zPhLi1oPNysbSUr4H2u8b-Xy14sb3FE7YcjCwlntw,2214
 cartography/models/kandji/tenant.py,sha256=KhcbahNBemny3coQPiadIY8B-yDMg_ejYB2BR6vqBfw,674
@@ -353,9 +353,9 @@ cartography/models/snipeit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 cartography/models/snipeit/asset.py,sha256=FyRAaeXuZjMy0eUQcSDFcgEAF5lbLMlvqp1Tv9d3Lv4,3238
 cartography/models/snipeit/tenant.py,sha256=p4rFnpNNuF1W5ilGBbexDaETWTwavfb38RcQGoImkQI,679
 cartography/models/snipeit/user.py,sha256=MsB4MiCVNTH6JpESime7cOkB89autZOXQpL6Z0l7L6o,2113
-cartography-0.96.2.dist-info/LICENSE,sha256=kvLEBRYaQ1RvUni6y7Ti9uHeooqnjPoo6n_-0JO1ETc,11351
-cartography-0.96.2.dist-info/METADATA,sha256=JPfNQ2Z_bulMqksx5unRej-WGueUKhi1Q9wFWqaOHk8,1938
-cartography-0.96.2.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-cartography-0.96.2.dist-info/entry_points.txt,sha256=GVIAWD0o0_K077qMA_k1oZU4v-M0a8GLKGJR8tZ-qH8,112
-cartography-0.96.2.dist-info/top_level.txt,sha256=BHqsNJQiI6Q72DeypC1IINQJE59SLhU4nllbQjgJi9g,12
-cartography-0.96.2.dist-info/RECORD,,
+cartography-0.97.0.dist-info/LICENSE,sha256=kvLEBRYaQ1RvUni6y7Ti9uHeooqnjPoo6n_-0JO1ETc,11351
+cartography-0.97.0.dist-info/METADATA,sha256=KXebB76HbcUfcT1umMgUjh0c8lw_ah1wCW1SEtZ5rWU,1938
+cartography-0.97.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+cartography-0.97.0.dist-info/entry_points.txt,sha256=GVIAWD0o0_K077qMA_k1oZU4v-M0a8GLKGJR8tZ-qH8,112
+cartography-0.97.0.dist-info/top_level.txt,sha256=BHqsNJQiI6Q72DeypC1IINQJE59SLhU4nllbQjgJi9g,12
+cartography-0.97.0.dist-info/RECORD,,