cartography-0.105.0-py3-none-any.whl → cartography-0.106.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cartography might be problematic.
- cartography/_version.py +2 -2
- cartography/cli.py +78 -2
- cartography/client/core/tx.py +62 -0
- cartography/config.py +24 -0
- cartography/data/indexes.cypher +0 -34
- cartography/driftdetect/cli.py +3 -2
- cartography/graph/cleanupbuilder.py +47 -0
- cartography/graph/job.py +42 -0
- cartography/graph/querybuilder.py +136 -2
- cartography/graph/statement.py +1 -1
- cartography/intel/airbyte/__init__.py +105 -0
- cartography/intel/airbyte/connections.py +120 -0
- cartography/intel/airbyte/destinations.py +81 -0
- cartography/intel/airbyte/organizations.py +59 -0
- cartography/intel/airbyte/sources.py +78 -0
- cartography/intel/airbyte/tags.py +64 -0
- cartography/intel/airbyte/users.py +106 -0
- cartography/intel/airbyte/util.py +122 -0
- cartography/intel/airbyte/workspaces.py +63 -0
- cartography/intel/aws/codebuild.py +132 -0
- cartography/intel/aws/ecs.py +228 -380
- cartography/intel/aws/efs.py +261 -0
- cartography/intel/aws/identitycenter.py +14 -3
- cartography/intel/aws/inspector.py +96 -53
- cartography/intel/aws/rds.py +2 -1
- cartography/intel/aws/resources.py +4 -0
- cartography/intel/entra/__init__.py +11 -0
- cartography/intel/entra/applications.py +366 -0
- cartography/intel/entra/users.py +84 -42
- cartography/intel/kubernetes/__init__.py +30 -14
- cartography/intel/kubernetes/clusters.py +86 -0
- cartography/intel/kubernetes/namespaces.py +59 -57
- cartography/intel/kubernetes/pods.py +140 -77
- cartography/intel/kubernetes/secrets.py +95 -45
- cartography/intel/kubernetes/services.py +131 -67
- cartography/intel/kubernetes/util.py +125 -14
- cartography/intel/scaleway/__init__.py +127 -0
- cartography/intel/scaleway/iam/__init__.py +0 -0
- cartography/intel/scaleway/iam/apikeys.py +71 -0
- cartography/intel/scaleway/iam/applications.py +71 -0
- cartography/intel/scaleway/iam/groups.py +71 -0
- cartography/intel/scaleway/iam/users.py +71 -0
- cartography/intel/scaleway/instances/__init__.py +0 -0
- cartography/intel/scaleway/instances/flexibleips.py +86 -0
- cartography/intel/scaleway/instances/instances.py +92 -0
- cartography/intel/scaleway/projects.py +79 -0
- cartography/intel/scaleway/storage/__init__.py +0 -0
- cartography/intel/scaleway/storage/snapshots.py +86 -0
- cartography/intel/scaleway/storage/volumes.py +84 -0
- cartography/intel/scaleway/utils.py +37 -0
- cartography/models/airbyte/__init__.py +0 -0
- cartography/models/airbyte/connection.py +138 -0
- cartography/models/airbyte/destination.py +75 -0
- cartography/models/airbyte/organization.py +19 -0
- cartography/models/airbyte/source.py +75 -0
- cartography/models/airbyte/stream.py +74 -0
- cartography/models/airbyte/tag.py +69 -0
- cartography/models/airbyte/user.py +111 -0
- cartography/models/airbyte/workspace.py +46 -0
- cartography/models/aws/codebuild/__init__.py +0 -0
- cartography/models/aws/codebuild/project.py +49 -0
- cartography/models/aws/ecs/__init__.py +0 -0
- cartography/models/aws/ecs/clusters.py +64 -0
- cartography/models/aws/ecs/container_definitions.py +93 -0
- cartography/models/aws/ecs/container_instances.py +84 -0
- cartography/models/aws/ecs/containers.py +99 -0
- cartography/models/aws/ecs/services.py +117 -0
- cartography/models/aws/ecs/task_definitions.py +135 -0
- cartography/models/aws/ecs/tasks.py +110 -0
- cartography/models/aws/efs/__init__.py +0 -0
- cartography/models/aws/efs/access_point.py +77 -0
- cartography/models/aws/efs/file_system.py +60 -0
- cartography/models/aws/efs/mount_target.py +79 -0
- cartography/models/core/common.py +1 -0
- cartography/models/core/relationships.py +44 -0
- cartography/models/entra/app_role_assignment.py +115 -0
- cartography/models/entra/application.py +47 -0
- cartography/models/entra/user.py +17 -51
- cartography/models/kubernetes/__init__.py +0 -0
- cartography/models/kubernetes/clusters.py +26 -0
- cartography/models/kubernetes/containers.py +108 -0
- cartography/models/kubernetes/namespaces.py +51 -0
- cartography/models/kubernetes/pods.py +80 -0
- cartography/models/kubernetes/secrets.py +79 -0
- cartography/models/kubernetes/services.py +108 -0
- cartography/models/scaleway/__init__.py +0 -0
- cartography/models/scaleway/iam/__init__.py +0 -0
- cartography/models/scaleway/iam/apikey.py +96 -0
- cartography/models/scaleway/iam/application.py +52 -0
- cartography/models/scaleway/iam/group.py +95 -0
- cartography/models/scaleway/iam/user.py +60 -0
- cartography/models/scaleway/instance/__init__.py +0 -0
- cartography/models/scaleway/instance/flexibleip.py +52 -0
- cartography/models/scaleway/instance/instance.py +118 -0
- cartography/models/scaleway/organization.py +19 -0
- cartography/models/scaleway/project.py +48 -0
- cartography/models/scaleway/storage/__init__.py +0 -0
- cartography/models/scaleway/storage/snapshot.py +78 -0
- cartography/models/scaleway/storage/volume.py +51 -0
- cartography/sync.py +8 -4
- cartography/util.py +15 -10
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/METADATA +5 -2
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/RECORD +107 -35
- cartography/data/jobs/cleanup/kubernetes_import_cleanup.json +0 -70
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/WHEEL +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/entry_points.txt +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/licenses/LICENSE +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/top_level.txt +0 -0
cartography/intel/kubernetes/services.py

@@ -1,90 +1,154 @@
+import json
 import logging
-from typing import Dict
-from typing import List
+from typing import Any
 
-…
+import neo4j
+from kubernetes.client.models import V1LoadBalancerIngress
+from kubernetes.client.models import V1PortStatus
+from kubernetes.client.models import V1Service
 
+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
 from cartography.intel.kubernetes.util import get_epoch
+from cartography.intel.kubernetes.util import k8s_paginate
 from cartography.intel.kubernetes.util import K8sClient
+from cartography.models.kubernetes.services import KubernetesServiceSchema
 from cartography.util import timeit
 
 logger = logging.getLogger(__name__)
 
 
 @timeit
-def sync_services(
-    …
-    …
-    update_tag: int,
-    cluster: Dict,
-    pods: List[Dict],
-) -> None:
-    services = get_services(client, cluster, pods)
-    load_services(session, services, update_tag)
+def get_services(client: K8sClient) -> list[V1Service]:
+    items = k8s_paginate(client.core.list_service_for_all_namespaces)
+    return items
 
 
-… (old lines 26-29 truncated in this extract)
+def _format_service_selector(selector: dict[str, str]) -> str:
+    return json.dumps(selector)
+
+
+def _format_load_balancer_ingress(ingress: list[V1LoadBalancerIngress] | None) -> str:
+
+    def _format_ingress_ports(
+        ports: list[V1PortStatus] | None,
+    ) -> list[dict[str, Any]] | None:
+        if ports is None:
+            return None
+
+        ingress_ports = []
+        for port in ports:
+            ingress_ports.append(
+                {
+                    "error": port.error,
+                    "port": port.port,
+                    "protocol": port.protocol,
+                }
+            )
+        return ingress_ports
+
+    if ingress is None:
+        return json.dumps(None)
+
+    loadbalancer_ingress = []
+    for item in ingress:
+        loadbalancer_ingress.append(
+            {
+                "hostname": item.hostname,
+                "ip": item.ip,
+                "ip_mode": item.ip_mode,
+                "ports": _format_ingress_ports(item.ports),
+            }
+        )
+    return json.dumps(loadbalancer_ingress)
+
+
+def transform_services(
+    services: list[V1Service], all_pods: list[dict[str, Any]]
+) -> list[dict[str, Any]]:
+    services_list = []
+    for service in services:
         item = {
             "uid": service.metadata.uid,
             "name": service.metadata.name,
             "creation_timestamp": get_epoch(service.metadata.creation_timestamp),
             "deletion_timestamp": get_epoch(service.metadata.deletion_timestamp),
             "namespace": service.metadata.namespace,
-            "cluster_uid": cluster["uid"],
             "type": service.spec.type,
-            "selector": service.spec.selector,
+            "selector": _format_service_selector(service.spec.selector),
+            "cluster_ip": service.spec.cluster_ip,
             "load_balancer_ip": service.spec.load_balancer_ip,
         }
 
-        … (old lines 42-90: remainder of the previous implementation, truncated in this extract)
+        # TODO: instead of storing a json string, we should probably create seperate nodes for each ingress
+        if service.spec.type == "LoadBalancer":
+            if service.status.load_balancer:
+                item["load_balancer_ingress"] = _format_load_balancer_ingress(
+                    service.status.load_balancer.ingress
+                )
+
+        # check if pod labels match service selector and add pod_ids to item
+        pod_ids = []
+        for pod in all_pods:
+            if pod["namespace"] == service.metadata.namespace:
+                service_selector: dict[str, str] | None = service.spec.selector
+                pod_labels: dict[str, str] | None = json.loads(pod["labels"])
+
+                # check if pod labels match service selector
+                if pod_labels and service_selector:
+                    if all(
+                        service_selector[key] == pod_labels.get(key)
+                        for key in service_selector
+                    ):
+                        pod_ids.append(pod["uid"])
+
+        item["pod_ids"] = pod_ids
+
+        services_list.append(item)
+    return services_list
+
+
+def load_services(
+    session: neo4j.Session,
+    services: list[dict[str, Any]],
+    update_tag: int,
+    cluster_id: str,
+    cluster_name: str,
+) -> None:
+    logger.info(f"Loading {len(services)} KubernetesServices")
+    load(
+        session,
+        KubernetesServiceSchema(),
+        services,
+        lastupdated=update_tag,
+        CLUSTER_ID=cluster_id,
+        CLUSTER_NAME=cluster_name,
+    )
+
+
+def cleanup(session: neo4j.Session, common_job_parameters: dict[str, Any]) -> None:
+    logger.debug("Running cleanup job for KubernetesService")
+    cleanup_job = GraphJob.from_node_schema(
+        KubernetesServiceSchema(), common_job_parameters
+    )
+    cleanup_job.run(session)
+
+
+@timeit
+def sync_services(
+    session: neo4j.Session,
+    client: K8sClient,
+    all_pods: list[dict[str, Any]],
+    update_tag: int,
+    common_job_parameters: dict[str, Any],
+) -> None:
+    services = get_services(client)
+    transformed_services = transform_services(services, all_pods)
+    load_services(
+        session=session,
+        services=transformed_services,
+        update_tag=update_tag,
+        cluster_id=common_job_parameters["CLUSTER_ID"],
+        cluster_name=client.name,
+    )
+    cleanup(session, common_job_parameters)
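The pod-to-service matching added in transform_services above hinges on one check: every key/value pair in the service's selector must appear in the pod's labels. A minimal, self-contained sketch of that check, using hypothetical sample data rather than anything from this release:

import json

# Hypothetical inputs shaped like what transform_services receives: a service
# selector and a pod record whose labels were stored as a JSON string.
service_selector = {"app": "web"}
pod = {
    "uid": "pod-123",
    "namespace": "default",
    "labels": json.dumps({"app": "web", "tier": "frontend"}),
}

pod_labels = json.loads(pod["labels"])
# A pod matches when every selector key maps to the same value in its labels.
matches = all(service_selector[key] == pod_labels.get(key) for key in service_selector)
print(matches)  # True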
cartography/intel/kubernetes/util.py

@@ -1,11 +1,16 @@
+import logging
 from datetime import datetime
-from typing import …
-from typing import …
+from typing import Any
+from typing import Callable
 
 from kubernetes import config
 from kubernetes.client import ApiClient
 from kubernetes.client import CoreV1Api
 from kubernetes.client import NetworkingV1Api
+from kubernetes.client import VersionApi
+from kubernetes.client.exceptions import ApiException
+
+logger = logging.getLogger(__name__)
 
 
 class KubernetesContextNotFound(Exception):
@@ -13,39 +18,145 @@ class KubernetesContextNotFound(Exception):
 
 
 class K8CoreApiClient(CoreV1Api):
-    def __init__(…
+    def __init__(
+        self,
+        name: str,
+        config_file: str,
+        api_client: ApiClient | None = None,
+    ) -> None:
         self.name = name
         if not api_client:
-            api_client = config.new_client_from_config(…
+            api_client = config.new_client_from_config(
+                context=name, config_file=config_file
+            )
         super().__init__(api_client=api_client)
 
 
 class K8NetworkingApiClient(NetworkingV1Api):
-    def __init__(…
+    def __init__(
+        self,
+        name: str,
+        config_file: str,
+        api_client: ApiClient | None = None,
+    ) -> None:
+        self.name = name
+        if not api_client:
+            api_client = config.new_client_from_config(
+                context=name, config_file=config_file
+            )
+        super().__init__(api_client=api_client)
+
+
+class K8VersionApiClient(VersionApi):
+    def __init__(
+        self,
+        name: str,
+        config_file: str,
+        api_client: ApiClient | None = None,
+    ) -> None:
         self.name = name
         if not api_client:
-            api_client = config.new_client_from_config(…
+            api_client = config.new_client_from_config(
+                context=name, config_file=config_file
+            )
         super().__init__(api_client=api_client)
 
 
 class K8sClient:
-    def __init__(…
+    def __init__(
+        self,
+        name: str,
+        config_file: str,
+        external_id: str | None = None,
+    ) -> None:
         self.name = name
-        self.…
-        self.…
+        self.config_file = config_file
+        self.external_id = external_id
+        self.core = K8CoreApiClient(self.name, self.config_file)
+        self.networking = K8NetworkingApiClient(self.name, self.config_file)
+        self.version = K8VersionApiClient(self.name, self.config_file)
 
 
-def get_k8s_clients(kubeconfig: str) -> …
+def get_k8s_clients(kubeconfig: str) -> list[K8sClient]:
+    # returns a tuple of (all contexts, current context)
     contexts, _ = config.list_kube_config_contexts(kubeconfig)
     if not contexts:
         raise KubernetesContextNotFound("No context found in kubeconfig.")
-…
+
+    clients = []
     for context in contexts:
-        clients.append(…
+        clients.append(
+            K8sClient(
+                context["name"],
+                kubeconfig,
+                external_id=context["context"].get("cluster"),
+            ),
+        )
     return clients
 
 
-def get_epoch(date: datetime) -> …
+def get_epoch(date: datetime | None) -> int | None:
     if date:
-        return int(date.…
+        return int(date.timestamp())
     return None
+
+
+def k8s_paginate(
+    list_func: Callable,
+    **kwargs: Any,
+) -> list[dict[str, Any]]:
+    """
+    Handles pagination for a Kubernetes API call.
+
+    :param list_func: The list function to call (e.g. client.core.list_pod_for_all_namespaces)
+    :param kwargs: Keyword arguments to pass to the list function (e.g. limit=100)
+    :return: A list of all resources returned by the list function
+    """
+    all_resources = []
+    continue_token = None
+    limit = kwargs.pop("limit", 100)
+    function_name = list_func.__name__
+
+    logger.debug(f"Starting pagination for {function_name} with limit {limit}.")
+
+    while True:
+        try:
+            if continue_token:
+                response = list_func(limit=limit, _continue=continue_token, **kwargs)
+            else:
+                response = list_func(limit=limit, **kwargs)
+
+            # Check if items exists on the response
+            if not hasattr(response, "items"):
+                logger.warning(
+                    f"Response from {function_name} does not contain 'items' attribute."
+                )
+                break
+
+            items_count = len(response.items)
+            all_resources.extend(response.items)
+
+            logger.debug(f"Retrieved {items_count} {function_name} resources")
+
+            # Check if metadata exists on the response
+            if not hasattr(response, "metadata"):
+                logger.warning(
+                    f"Response from {function_name} does not contain 'metadata' attribute."
+                )
+                break
+
+            continue_token = response.metadata._continue
+            if not continue_token:
+                logger.debug(f"No more {function_name} resources to retrieve.")
+                break
+
+        except ApiException as e:
+            logger.error(
+                f"Kubernetes API error retrieving {function_name} resources. {e}: {e.status} - {e.reason}"
+            )
+            break
+
+    logger.debug(
+        f"Completed pagination for {function_name}: retrieved {len(all_resources)} resources"
    )
+    return all_resources
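A short usage sketch of the new k8s_paginate helper added above; the context name and kubeconfig path are placeholders, and this assumes a reachable cluster:

from cartography.intel.kubernetes.util import K8sClient, k8s_paginate

# Placeholder kube context name and kubeconfig path.
client = K8sClient("my-cluster", "/home/user/.kube/config")

# Page through every pod in the cluster, 50 items per API call; the helper
# follows the continue token until the API stops returning one.
pods = k8s_paginate(client.core.list_pod_for_all_namespaces, limit=50)
print(f"Fetched {len(pods)} pods")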
cartography/intel/scaleway/__init__.py

@@ -0,0 +1,127 @@
+import logging
+
+import neo4j
+import scaleway
+
+import cartography.intel.scaleway.iam.apikeys
+import cartography.intel.scaleway.iam.applications
+import cartography.intel.scaleway.iam.groups
+import cartography.intel.scaleway.iam.users
+import cartography.intel.scaleway.instances.flexibleips
+import cartography.intel.scaleway.instances.instances
+import cartography.intel.scaleway.projects
+import cartography.intel.scaleway.storage.snapshots
+import cartography.intel.scaleway.storage.volumes
+from cartography.config import Config
+from cartography.util import timeit
+
+logger = logging.getLogger(__name__)
+
+
+@timeit
+def start_scaleway_ingestion(neo4j_session: neo4j.Session, config: Config) -> None:
+    """
+    If this module is configured, perform ingestion of Scaleway data. Otherwise warn and exit
+    :param neo4j_session: Neo4J session for database interface
+    :param config: A cartography.config object
+    :return: None
+    """
+
+    if (
+        not config.scaleway_access_key
+        or not config.scaleway_secret_key
+        or not config.scaleway_org
+    ):
+        logger.info(
+            "Tailscale import is not configured - skipping this module. "
+            "See docs to configure.",
+        )
+        return
+
+    # Create client
+    client = scaleway.Client(
+        access_key=config.scaleway_access_key,
+        secret_key=config.scaleway_secret_key,
+    )
+
+    common_job_parameters = {
+        "UPDATE_TAG": config.update_tag,
+        "ORG_ID": config.scaleway_org,
+    }
+
+    # Organization level
+    projects = cartography.intel.scaleway.projects.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        update_tag=config.update_tag,
+    )
+    projects_id = [project["id"] for project in projects]
+    cartography.intel.scaleway.iam.users.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        update_tag=config.update_tag,
+    )
+    cartography.intel.scaleway.iam.applications.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        update_tag=config.update_tag,
+    )
+    cartography.intel.scaleway.iam.groups.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        update_tag=config.update_tag,
+    )
+    cartography.intel.scaleway.iam.apikeys.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        update_tag=config.update_tag,
+    )
+
+    # Storage
+    cartography.intel.scaleway.storage.volumes.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        projects_id=projects_id,
+        update_tag=config.update_tag,
+    )
+    cartography.intel.scaleway.storage.snapshots.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        projects_id=projects_id,
+        update_tag=config.update_tag,
+    )
+
+    # Instances
+    # DISABLED due to https://github.com/scaleway/scaleway-sdk-python/issues/1040
+    """
+    cartography.intel.scaleway.instances.flexibleips.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        projects_id=projects_id,
+        update_tag=config.update_tag,
+    )
+    """
+    cartography.intel.scaleway.instances.instances.sync(
+        neo4j_session,
+        client,
+        common_job_parameters,
+        org_id=config.scaleway_org,
+        projects_id=projects_id,
+        update_tag=config.update_tag,
+    )

cartography/intel/scaleway/iam/__init__.py: file without changes to display (+0 -0).
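A hedged sketch of invoking the new Scaleway ingestion entry point directly. It assumes cartography.config.Config accepts the scaleway_* keyword arguments that start_scaleway_ingestion reads above; the URI, credentials, and IDs below are placeholders:

import neo4j

from cartography.config import Config
from cartography.intel.scaleway import start_scaleway_ingestion

# Hypothetical wiring; all values are placeholders.
config = Config(
    neo4j_uri="bolt://localhost:7687",
    update_tag=1234567890,
    scaleway_access_key="SCWXXXXXXXXXXXXXXXXX",  # placeholder credential
    scaleway_secret_key="example-secret-key",    # placeholder credential
    scaleway_org="example-org-id",               # placeholder organization ID
)

driver = neo4j.GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
with driver.session() as session:
    start_scaleway_ingestion(session, config)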
cartography/intel/scaleway/iam/apikeys.py

@@ -0,0 +1,71 @@
+import logging
+from typing import Any
+
+import neo4j
+import scaleway
+from scaleway.iam.v1alpha1 import APIKey
+from scaleway.iam.v1alpha1 import IamV1Alpha1API
+
+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
+from cartography.intel.scaleway.utils import scaleway_obj_to_dict
+from cartography.models.scaleway.iam.apikey import ScalewayApiKeySchema
+from cartography.util import timeit
+
+logger = logging.getLogger(__name__)
+
+
+@timeit
+def sync(
+    neo4j_session: neo4j.Session,
+    client: scaleway.Client,
+    common_job_parameters: dict[str, Any],
+    org_id: str,
+    update_tag: int,
+) -> None:
+    apikeys = get(client, org_id)
+    formatted_apikeys = transform_apikeys(apikeys)
+    load_apikeys(neo4j_session, formatted_apikeys, org_id, update_tag)
+    cleanup(neo4j_session, common_job_parameters)
+
+
+@timeit
+def get(
+    client: scaleway.Client,
+    org_id: str,
+) -> list[APIKey]:
+    api = IamV1Alpha1API(client)
+    return api.list_api_keys_all(organization_id=org_id)
+
+
+def transform_apikeys(apikeys: list[APIKey]) -> list[dict[str, Any]]:
+    formatted_apikeys = []
+    for apikey in apikeys:
+        formatted_apikeys.append(scaleway_obj_to_dict(apikey))
+    return formatted_apikeys
+
+
+@timeit
+def load_apikeys(
+    neo4j_session: neo4j.Session,
+    data: list[dict[str, Any]],
+    org_id: str,
+    update_tag: int,
+) -> None:
+    logger.info("Loading %d Scaleway ApiKeys into Neo4j.", len(data))
+    load(
+        neo4j_session,
+        ScalewayApiKeySchema(),
+        data,
+        lastupdated=update_tag,
+        ORG_ID=org_id,
+    )
+
+
+@timeit
+def cleanup(
+    neo4j_session: neo4j.Session, common_job_parameters: dict[str, Any]
+) -> None:
+    GraphJob.from_node_schema(ScalewayApiKeySchema(), common_job_parameters).run(
+        neo4j_session
+    )
cartography/intel/scaleway/iam/applications.py

@@ -0,0 +1,71 @@
+import logging
+from typing import Any
+
+import neo4j
+import scaleway
+from scaleway.iam.v1alpha1 import Application
+from scaleway.iam.v1alpha1 import IamV1Alpha1API
+
+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
+from cartography.intel.scaleway.utils import scaleway_obj_to_dict
+from cartography.models.scaleway.iam.application import ScalewayApplicationSchema
+from cartography.util import timeit
+
+logger = logging.getLogger(__name__)
+
+
+@timeit
+def sync(
+    neo4j_session: neo4j.Session,
+    client: scaleway.Client,
+    common_job_parameters: dict[str, Any],
+    org_id: str,
+    update_tag: int,
+) -> None:
+    applications = get(client, org_id)
+    formatted_applications = transform_applications(applications)
+    load_applications(neo4j_session, formatted_applications, org_id, update_tag)
+    cleanup(neo4j_session, common_job_parameters)
+
+
+@timeit
+def get(
+    client: scaleway.Client,
+    org_id: str,
+) -> list[Application]:
+    api = IamV1Alpha1API(client)
+    return api.list_applications_all(organization_id=org_id)
+
+
+def transform_applications(applications: list[Application]) -> list[dict[str, Any]]:
+    formatted_applications = []
+    for application in applications:
+        formatted_applications.append(scaleway_obj_to_dict(application))
+    return formatted_applications
+
+
+@timeit
+def load_applications(
+    neo4j_session: neo4j.Session,
+    data: list[dict[str, Any]],
+    org_id: str,
+    update_tag: int,
+) -> None:
+    logger.info("Loading %d Scaleway Applications into Neo4j.", len(data))
+    load(
+        neo4j_session,
+        ScalewayApplicationSchema(),
+        data,
+        lastupdated=update_tag,
+        ORG_ID=org_id,
+    )
+
+
+@timeit
+def cleanup(
+    neo4j_session: neo4j.Session, common_job_parameters: dict[str, Any]
+) -> None:
+    GraphJob.from_node_schema(ScalewayApplicationSchema(), common_job_parameters).run(
+        neo4j_session
+    )
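The apikeys and applications modules above (and, per the file list, the other new Scaleway intel modules) share the same four-step shape: get objects from the Scaleway SDK, transform them to plain dicts, load them through the schema-driven loader, then run the schema's cleanup job. A hedged sketch of driving one of these syncs on its own, with placeholder credentials and IDs:

import neo4j
import scaleway

import cartography.intel.scaleway.iam.applications as scw_applications

# Placeholder credentials; scaleway.Client is constructed the same way the
# package's scaleway __init__ does above.
client = scaleway.Client(
    access_key="SCWXXXXXXXXXXXXXXXXX",
    secret_key="example-secret-key",
)

driver = neo4j.GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
common_job_parameters = {"UPDATE_TAG": 1234567890, "ORG_ID": "example-org-id"}

with driver.session() as session:
    scw_applications.sync(
        session,
        client,
        common_job_parameters,
        org_id="example-org-id",
        update_tag=1234567890,
    )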