cartography 0.105.0__py3-none-any.whl → 0.106.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cartography has been flagged as potentially problematic; expand the advisory on the registry page for details.
- cartography/_version.py +2 -2
- cartography/cli.py +78 -2
- cartography/client/core/tx.py +62 -0
- cartography/config.py +24 -0
- cartography/data/indexes.cypher +0 -34
- cartography/driftdetect/cli.py +3 -2
- cartography/graph/cleanupbuilder.py +47 -0
- cartography/graph/job.py +42 -0
- cartography/graph/querybuilder.py +136 -2
- cartography/graph/statement.py +1 -1
- cartography/intel/airbyte/__init__.py +105 -0
- cartography/intel/airbyte/connections.py +120 -0
- cartography/intel/airbyte/destinations.py +81 -0
- cartography/intel/airbyte/organizations.py +59 -0
- cartography/intel/airbyte/sources.py +78 -0
- cartography/intel/airbyte/tags.py +64 -0
- cartography/intel/airbyte/users.py +106 -0
- cartography/intel/airbyte/util.py +122 -0
- cartography/intel/airbyte/workspaces.py +63 -0
- cartography/intel/aws/codebuild.py +132 -0
- cartography/intel/aws/ecs.py +228 -380
- cartography/intel/aws/efs.py +261 -0
- cartography/intel/aws/identitycenter.py +14 -3
- cartography/intel/aws/inspector.py +96 -53
- cartography/intel/aws/rds.py +2 -1
- cartography/intel/aws/resources.py +4 -0
- cartography/intel/entra/__init__.py +11 -0
- cartography/intel/entra/applications.py +366 -0
- cartography/intel/entra/users.py +84 -42
- cartography/intel/kubernetes/__init__.py +30 -14
- cartography/intel/kubernetes/clusters.py +86 -0
- cartography/intel/kubernetes/namespaces.py +59 -57
- cartography/intel/kubernetes/pods.py +140 -77
- cartography/intel/kubernetes/secrets.py +95 -45
- cartography/intel/kubernetes/services.py +131 -67
- cartography/intel/kubernetes/util.py +125 -14
- cartography/intel/scaleway/__init__.py +127 -0
- cartography/intel/scaleway/iam/__init__.py +0 -0
- cartography/intel/scaleway/iam/apikeys.py +71 -0
- cartography/intel/scaleway/iam/applications.py +71 -0
- cartography/intel/scaleway/iam/groups.py +71 -0
- cartography/intel/scaleway/iam/users.py +71 -0
- cartography/intel/scaleway/instances/__init__.py +0 -0
- cartography/intel/scaleway/instances/flexibleips.py +86 -0
- cartography/intel/scaleway/instances/instances.py +92 -0
- cartography/intel/scaleway/projects.py +79 -0
- cartography/intel/scaleway/storage/__init__.py +0 -0
- cartography/intel/scaleway/storage/snapshots.py +86 -0
- cartography/intel/scaleway/storage/volumes.py +84 -0
- cartography/intel/scaleway/utils.py +37 -0
- cartography/models/airbyte/__init__.py +0 -0
- cartography/models/airbyte/connection.py +138 -0
- cartography/models/airbyte/destination.py +75 -0
- cartography/models/airbyte/organization.py +19 -0
- cartography/models/airbyte/source.py +75 -0
- cartography/models/airbyte/stream.py +74 -0
- cartography/models/airbyte/tag.py +69 -0
- cartography/models/airbyte/user.py +111 -0
- cartography/models/airbyte/workspace.py +46 -0
- cartography/models/aws/codebuild/__init__.py +0 -0
- cartography/models/aws/codebuild/project.py +49 -0
- cartography/models/aws/ecs/__init__.py +0 -0
- cartography/models/aws/ecs/clusters.py +64 -0
- cartography/models/aws/ecs/container_definitions.py +93 -0
- cartography/models/aws/ecs/container_instances.py +84 -0
- cartography/models/aws/ecs/containers.py +99 -0
- cartography/models/aws/ecs/services.py +117 -0
- cartography/models/aws/ecs/task_definitions.py +135 -0
- cartography/models/aws/ecs/tasks.py +110 -0
- cartography/models/aws/efs/__init__.py +0 -0
- cartography/models/aws/efs/access_point.py +77 -0
- cartography/models/aws/efs/file_system.py +60 -0
- cartography/models/aws/efs/mount_target.py +79 -0
- cartography/models/core/common.py +1 -0
- cartography/models/core/relationships.py +44 -0
- cartography/models/entra/app_role_assignment.py +115 -0
- cartography/models/entra/application.py +47 -0
- cartography/models/entra/user.py +17 -51
- cartography/models/kubernetes/__init__.py +0 -0
- cartography/models/kubernetes/clusters.py +26 -0
- cartography/models/kubernetes/containers.py +108 -0
- cartography/models/kubernetes/namespaces.py +51 -0
- cartography/models/kubernetes/pods.py +80 -0
- cartography/models/kubernetes/secrets.py +79 -0
- cartography/models/kubernetes/services.py +108 -0
- cartography/models/scaleway/__init__.py +0 -0
- cartography/models/scaleway/iam/__init__.py +0 -0
- cartography/models/scaleway/iam/apikey.py +96 -0
- cartography/models/scaleway/iam/application.py +52 -0
- cartography/models/scaleway/iam/group.py +95 -0
- cartography/models/scaleway/iam/user.py +60 -0
- cartography/models/scaleway/instance/__init__.py +0 -0
- cartography/models/scaleway/instance/flexibleip.py +52 -0
- cartography/models/scaleway/instance/instance.py +118 -0
- cartography/models/scaleway/organization.py +19 -0
- cartography/models/scaleway/project.py +48 -0
- cartography/models/scaleway/storage/__init__.py +0 -0
- cartography/models/scaleway/storage/snapshot.py +78 -0
- cartography/models/scaleway/storage/volume.py +51 -0
- cartography/sync.py +8 -4
- cartography/util.py +15 -10
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/METADATA +5 -2
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/RECORD +107 -35
- cartography/data/jobs/cleanup/kubernetes_import_cleanup.json +0 -70
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/WHEEL +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/entry_points.txt +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/licenses/LICENSE +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
import neo4j
|
|
5
|
+
from kubernetes.client.models import V1Namespace
|
|
6
|
+
from kubernetes.client.models import VersionInfo
|
|
7
|
+
|
|
8
|
+
from cartography.client.core.tx import load
|
|
9
|
+
from cartography.intel.kubernetes.util import get_epoch
|
|
10
|
+
from cartography.intel.kubernetes.util import K8sClient
|
|
11
|
+
from cartography.models.kubernetes.clusters import KubernetesClusterSchema
|
|
12
|
+
from cartography.util import timeit
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@timeit
def get_kubernetes_cluster_namespace(client: K8sClient) -> V1Namespace:
    """Return the cluster's built-in ``kube-system`` namespace object.

    Its metadata UID is used by transform_kubernetes_cluster() as a stable
    identifier for the cluster node.
    """
    kube_system = client.core.read_namespace("kube-system")
    return kube_system
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@timeit
def get_kubernetes_cluster_version(client: K8sClient) -> VersionInfo:
    """Query the Kubernetes API server for its version information."""
    version_info = client.version.get_code()
    return version_info
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def transform_kubernetes_cluster(
    client: K8sClient,
    namespace: V1Namespace,
    version: VersionInfo,
) -> list[dict[str, Any]]:
    """Shape cluster metadata into a one-element list of dicts for graph loading.

    :param client: K8s client wrapper; supplies the external id and cluster name.
    :param namespace: Namespace whose UID becomes the cluster node id
        (kube-system, per get_kubernetes_cluster_namespace).
    :param version: API server version info flattened onto the record.
    :return: Single-element list containing the cluster record.
    """
    meta = namespace.metadata
    record: dict[str, Any] = {
        "id": meta.uid,
        "creation_timestamp": get_epoch(meta.creation_timestamp),
        "external_id": client.external_id,
        "name": client.name,
    }
    # Version fields are appended separately to keep the identity vs. version
    # portions of the record visually distinct; insertion order matches the
    # original literal.
    record.update(
        git_version=version.git_version,
        version_major=version.major,
        version_minor=version.minor,
        go_version=version.go_version,
        compiler=version.compiler,
        platform=version.platform,
    )
    return [record]
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def load_kubernetes_cluster(
    neo4j_session: neo4j.Session,
    cluster_data: list[dict[str, Any]],
    update_tag: int,
) -> None:
    """Load the (single-element) cluster record list into the graph.

    :param neo4j_session: Neo4j session to write to.
    :param cluster_data: Records produced by transform_kubernetes_cluster().
    :param update_tag: Sync timestamp stamped on the node as ``lastupdated``.
    """
    # Fix: the original did cluster_data[0].get("name") inside an eager
    # str.format() call — that raises IndexError on an empty payload and
    # formats the message even when the log level suppresses it. Guard the
    # empty case and use lazy %-style logging args instead.
    name = cluster_data[0].get("name") if cluster_data else None
    logger.info("Loading '%s' Kubernetes cluster into graph", name)
    load(
        neo4j_session,
        KubernetesClusterSchema(),
        cluster_data,
        lastupdated=update_tag,
    )
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
# cleaning up the kubernetes cluster node is currently not supported
|
|
65
|
+
# def cleanup(
|
|
66
|
+
# neo4j_session: neo4j.Session, common_job_parameters: Dict[str, Any]
|
|
67
|
+
# ) -> None:
|
|
68
|
+
# logger.debug("Running cleanup job for KubernetesCluster")
|
|
69
|
+
# run_cleanup_job(
|
|
70
|
+
# "kubernetes_cluster_cleanup.json", neo4j_session, common_job_parameters
|
|
71
|
+
# )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
@timeit
def sync_kubernetes_cluster(
    neo4j_session: neo4j.Session,
    client: K8sClient,
    update_tag: int,
    common_job_parameters: dict[str, Any],
) -> dict[str, Any]:
    """Fetch, transform, and load cluster-level data; return the cluster record.

    Note: ``common_job_parameters`` is accepted for signature parity with the
    other sync functions but is not read here (cluster nodes have no cleanup).
    """
    ns = get_kubernetes_cluster_namespace(client)
    ver = get_kubernetes_cluster_version(client)
    records = transform_kubernetes_cluster(client, ns, ver)
    load_kubernetes_cluster(neo4j_session, records, update_tag)
    return records[0]
|
|
@@ -1,82 +1,84 @@
|
|
|
1
1
|
import logging
|
|
2
|
-
from typing import
|
|
3
|
-
from typing import List
|
|
4
|
-
from typing import Tuple
|
|
2
|
+
from typing import Any
|
|
5
3
|
|
|
6
|
-
|
|
4
|
+
import neo4j
|
|
5
|
+
from kubernetes.client.models import V1Namespace
|
|
7
6
|
|
|
7
|
+
from cartography.client.core.tx import load
|
|
8
|
+
from cartography.graph.job import GraphJob
|
|
8
9
|
from cartography.intel.kubernetes.util import get_epoch
|
|
10
|
+
from cartography.intel.kubernetes.util import k8s_paginate
|
|
9
11
|
from cartography.intel.kubernetes.util import K8sClient
|
|
10
|
-
from cartography.
|
|
11
|
-
from cartography.util import merge_module_sync_metadata
|
|
12
|
+
from cartography.models.kubernetes.namespaces import KubernetesNamespaceSchema
|
|
12
13
|
from cartography.util import timeit
|
|
13
14
|
|
|
14
15
|
logger = logging.getLogger(__name__)
|
|
15
|
-
stat_handler = get_stats_client(__name__)
|
|
16
16
|
|
|
17
17
|
|
|
18
18
|
@timeit
def get_namespaces(client: K8sClient) -> list[V1Namespace]:
    """Return every namespace in the cluster, following API pagination."""
    return k8s_paginate(client.core.list_namespace)
|
|
31
22
|
|
|
32
23
|
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
for namespace in client.core.list_namespace().items:
|
|
38
|
-
namespaces.append(
|
|
24
|
+
def transform_namespaces(namespaces: list[V1Namespace]) -> list[dict[str, Any]]:
    """Flatten V1Namespace objects into dicts for the namespace schema.

    Timestamps are converted to epoch seconds via get_epoch; status_phase is
    None when the namespace has no status object.
    """
    return [
        {
            "uid": ns.metadata.uid,
            "name": ns.metadata.name,
            "creation_timestamp": get_epoch(ns.metadata.creation_timestamp),
            "deletion_timestamp": get_epoch(ns.metadata.deletion_timestamp),
            "status_phase": ns.status.phase if ns.status else None,
        }
        for ns in namespaces
    ]
|
|
49
37
|
|
|
50
38
|
|
|
51
39
|
def load_namespaces(
    session: neo4j.Session,
    namespaces: list[dict[str, Any]],
    update_tag: int,
    cluster_name: str,
    cluster_id: str,
) -> None:
    """Write namespace records to the graph, tagged with the owning cluster."""
    logger.info(f"Loading {len(namespaces)} kubernetes namespaces.")
    # NOTE(review): the mixed-case kwarg names (cluster_name vs CLUSTER_ID)
    # mirror the schema's query parameters exactly — do not normalize.
    schema_kwargs = {
        "lastupdated": update_tag,
        "cluster_name": cluster_name,
        "CLUSTER_ID": cluster_id,
    }
    load(session, KubernetesNamespaceSchema(), namespaces, **schema_kwargs)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def cleanup(
    neo4j_session: neo4j.Session, common_job_parameters: dict[str, Any]
) -> None:
    """Delete stale KubernetesNamespace nodes via the schema-derived cleanup job."""
    logger.debug("Running cleanup job for KubernetesNamespace")
    namespace_job = GraphJob.from_node_schema(
        KubernetesNamespaceSchema(), common_job_parameters
    )
    namespace_job.run(neo4j_session)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
@timeit
def sync_namespaces(
    session: neo4j.Session,
    client: K8sClient,
    update_tag: int,
    common_job_parameters: dict[str, Any],
) -> None:
    """Get/transform/load all namespaces for the cluster, then remove stale ones."""
    raw_namespaces = get_namespaces(client)
    records = transform_namespaces(raw_namespaces)
    load_namespaces(
        session,
        records,
        update_tag,
        client.name,
        common_job_parameters["CLUSTER_ID"],
    )
    cleanup(session, common_job_parameters)
|
|
@@ -1,39 +1,35 @@
|
|
|
1
|
+
import json
|
|
1
2
|
import logging
|
|
2
|
-
from typing import
|
|
3
|
-
from typing import List
|
|
3
|
+
from typing import Any
|
|
4
4
|
|
|
5
|
-
|
|
5
|
+
import neo4j
|
|
6
|
+
from kubernetes.client.models import V1Container
|
|
7
|
+
from kubernetes.client.models import V1Pod
|
|
6
8
|
|
|
9
|
+
from cartography.client.core.tx import load
|
|
10
|
+
from cartography.graph.job import GraphJob
|
|
7
11
|
from cartography.intel.kubernetes.util import get_epoch
|
|
12
|
+
from cartography.intel.kubernetes.util import k8s_paginate
|
|
8
13
|
from cartography.intel.kubernetes.util import K8sClient
|
|
14
|
+
from cartography.models.kubernetes.containers import KubernetesContainerSchema
|
|
15
|
+
from cartography.models.kubernetes.pods import KubernetesPodSchema
|
|
9
16
|
from cartography.util import timeit
|
|
10
17
|
|
|
11
18
|
logger = logging.getLogger(__name__)
|
|
12
19
|
|
|
13
20
|
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
@timeit
|
|
27
|
-
def get_pods(client: K8sClient, cluster: Dict) -> List[Dict]:
|
|
28
|
-
pods = list()
|
|
29
|
-
for pod in client.core.list_pod_for_all_namespaces().items:
|
|
30
|
-
containers = {}
|
|
31
|
-
for container in pod.spec.containers:
|
|
32
|
-
containers[container.name] = {
|
|
33
|
-
"name": container.name,
|
|
34
|
-
"image": container.image,
|
|
35
|
-
"uid": f"{pod.metadata.uid}-{container.name}",
|
|
36
|
-
}
|
|
21
|
+
def _extract_pod_containers(pod: V1Pod) -> dict[str, Any]:
|
|
22
|
+
pod_containers: list[V1Container] = pod.spec.containers
|
|
23
|
+
containers = dict()
|
|
24
|
+
for container in pod_containers:
|
|
25
|
+
containers[container.name] = {
|
|
26
|
+
"uid": f"{pod.metadata.uid}-{container.name}",
|
|
27
|
+
"name": container.name,
|
|
28
|
+
"image": container.image,
|
|
29
|
+
"namespace": pod.metadata.namespace,
|
|
30
|
+
"pod_id": pod.metadata.uid,
|
|
31
|
+
"imagePullPolicy": container.image_pull_policy,
|
|
32
|
+
}
|
|
37
33
|
if pod.status and pod.status.container_statuses:
|
|
38
34
|
for status in pod.status.container_statuses:
|
|
39
35
|
if status.name in containers:
|
|
@@ -46,14 +42,31 @@ def get_pods(client: K8sClient, cluster: Dict) -> List[Dict]:
|
|
|
46
42
|
image_sha = status.image_id.split("@")[1]
|
|
47
43
|
except IndexError:
|
|
48
44
|
image_sha = None
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
45
|
+
|
|
46
|
+
containers[status.name]["status_image_id"] = status.image_id
|
|
47
|
+
containers[status.name]["status_image_sha"] = image_sha
|
|
48
|
+
containers[status.name]["status_ready"] = status.ready
|
|
49
|
+
containers[status.name]["status_started"] = status.started
|
|
50
|
+
containers[status.name]["status_state"] = _state
|
|
51
|
+
return containers
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@timeit
def get_pods(client: K8sClient) -> list[V1Pod]:
    """Return every pod across all namespaces, following API pagination."""
    return k8s_paginate(client.core.list_pod_for_all_namespaces)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _format_pod_labels(labels: dict[str, str]) -> str:
    """Serialize a pod's label mapping to a JSON string for storage on the node."""
    serialized = json.dumps(labels)
    return serialized
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def transform_pods(pods: list[V1Pod]) -> list[dict[str, Any]]:
|
|
65
|
+
transformed_pods = []
|
|
66
|
+
|
|
67
|
+
for pod in pods:
|
|
68
|
+
containers = _extract_pod_containers(pod)
|
|
69
|
+
transformed_pods.append(
|
|
57
70
|
{
|
|
58
71
|
"uid": pod.metadata.uid,
|
|
59
72
|
"name": pod.metadata.name,
|
|
@@ -62,49 +75,99 @@ def get_pods(client: K8sClient, cluster: Dict) -> List[Dict]:
|
|
|
62
75
|
"deletion_timestamp": get_epoch(pod.metadata.deletion_timestamp),
|
|
63
76
|
"namespace": pod.metadata.namespace,
|
|
64
77
|
"node": pod.spec.node_name,
|
|
65
|
-
"
|
|
66
|
-
"labels": pod.metadata.labels,
|
|
78
|
+
"labels": _format_pod_labels(pod.metadata.labels),
|
|
67
79
|
"containers": list(containers.values()),
|
|
68
80
|
},
|
|
69
81
|
)
|
|
70
|
-
return
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
82
|
+
return transformed_pods
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
@timeit
def load_pods(
    session: neo4j.Session,
    pods: list[dict[str, Any]],
    update_tag: int,
    cluster_id: str,
    cluster_name: str,
) -> None:
    """Write pod records to the graph, tagged with the owning cluster."""
    logger.info(f"Loading {len(pods)} kubernetes pods.")
    schema_kwargs = {
        "lastupdated": update_tag,
        "CLUSTER_ID": cluster_id,
        "CLUSTER_NAME": cluster_name,
    }
    load(session, KubernetesPodSchema(), pods, **schema_kwargs)
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def transform_containers(pods: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Collect the container dicts embedded in each transformed pod into one flat list.

    Pods without a "containers" key contribute nothing.
    """
    flattened: list[dict[str, Any]] = []
    for pod_record in pods:
        flattened += pod_record.get("containers", [])
    return flattened
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
@timeit
def load_containers(
    session: neo4j.Session,
    containers: list[dict[str, Any]],
    update_tag: int,
    cluster_id: str,
    cluster_name: str,
) -> None:
    """Write container records to the graph, tagged with the owning cluster."""
    logger.info(f"Loading {len(containers)} kubernetes containers.")
    schema_kwargs = {
        "lastupdated": update_tag,
        "CLUSTER_ID": cluster_id,
        "CLUSTER_NAME": cluster_name,
    }
    load(session, KubernetesContainerSchema(), containers, **schema_kwargs)
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
@timeit
def cleanup(session: neo4j.Session, common_job_parameters: dict[str, Any]) -> None:
    """Remove stale container and pod nodes left over from previous syncs.

    Containers are cleaned before pods, matching the original ordering.
    """
    logger.debug("Running cleanup job for KubernetesContainer")
    container_job = GraphJob.from_node_schema(
        KubernetesContainerSchema(), common_job_parameters
    )
    container_job.run(session)

    logger.debug("Running cleanup job for KubernetesPod")
    pod_job = GraphJob.from_node_schema(KubernetesPodSchema(), common_job_parameters)
    pod_job.run(session)
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
@timeit
def sync_pods(
    session: neo4j.Session,
    client: K8sClient,
    update_tag: int,
    common_job_parameters: dict[str, Any],
) -> list[dict[str, Any]]:
    """Sync pods and their embedded containers, then clean up stale nodes.

    :return: The transformed pod records (reused by callers).
    """
    cluster_id = common_job_parameters["CLUSTER_ID"]

    transformed_pods = transform_pods(get_pods(client))
    load_pods(
        session=session,
        pods=transformed_pods,
        update_tag=update_tag,
        cluster_id=cluster_id,
        cluster_name=client.name,
    )

    load_containers(
        session=session,
        containers=transform_containers(transformed_pods),
        update_tag=update_tag,
        cluster_id=cluster_id,
        cluster_name=client.name,
    )

    cleanup(session, common_job_parameters)
    return transformed_pods
|
|
@@ -1,60 +1,110 @@
|
|
|
1
|
+
import json
|
|
1
2
|
import logging
|
|
2
|
-
from typing import
|
|
3
|
-
from typing import List
|
|
3
|
+
from typing import Any
|
|
4
4
|
|
|
5
|
-
|
|
5
|
+
import neo4j
|
|
6
|
+
from kubernetes.client.models import V1OwnerReference
|
|
7
|
+
from kubernetes.client.models import V1Secret
|
|
6
8
|
|
|
9
|
+
from cartography.client.core.tx import load
|
|
10
|
+
from cartography.graph.job import GraphJob
|
|
7
11
|
from cartography.intel.kubernetes.util import get_epoch
|
|
12
|
+
from cartography.intel.kubernetes.util import k8s_paginate
|
|
8
13
|
from cartography.intel.kubernetes.util import K8sClient
|
|
14
|
+
from cartography.models.kubernetes.secrets import KubernetesSecretSchema
|
|
9
15
|
from cartography.util import timeit
|
|
10
16
|
|
|
11
17
|
logger = logging.getLogger(__name__)
|
|
12
18
|
|
|
13
19
|
|
|
14
20
|
@timeit
def get_secrets(client: K8sClient) -> list[V1Secret]:
    """Return every secret across all namespaces, following API pagination."""
    return k8s_paginate(client.core.list_secret_for_all_namespaces)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _get_owner_references(
    owner_references: list[V1OwnerReference] | None,
) -> str | None:
    """JSON-encode a secret's ownerReferences; None when there are none.

    An empty list is treated the same as None (no references).
    """
    if not owner_references:
        return None
    refs = [
        {
            "kind": ref.kind,
            "name": ref.name,
            "uid": ref.uid,
            "apiVersion": ref.api_version,
            "controller": ref.controller,
        }
        for ref in owner_references
    ]
    return json.dumps(refs)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def transform_secrets(secrets: list[V1Secret]) -> list[dict[str, Any]]:
    """Flatten V1Secret objects into dicts for the secret schema.

    Owner references are serialized to a JSON string (or None) via
    _get_owner_references; timestamps are converted with get_epoch.
    """
    records: list[dict[str, Any]] = []
    for secret in secrets:
        meta = secret.metadata
        records.append(
            {
                "uid": meta.uid,
                "name": meta.name,
                "creation_timestamp": get_epoch(meta.creation_timestamp),
                "deletion_timestamp": get_epoch(meta.deletion_timestamp),
                "owner_references": _get_owner_references(meta.owner_references),
                "namespace": meta.namespace,
                "type": secret.type,
            }
        )
    return records
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
@timeit
def load_secrets(
    session: neo4j.Session,
    secrets: list[dict[str, Any]],
    update_tag: int,
    cluster_id: str,
    cluster_name: str,
) -> None:
    """Write secret records to the graph, tagged with the owning cluster."""
    logger.info(f"Loading {len(secrets)} KubernetesSecrets")
    schema_kwargs = {
        "lastupdated": update_tag,
        "CLUSTER_ID": cluster_id,
        "CLUSTER_NAME": cluster_name,
    }
    load(session, KubernetesSecretSchema(), secrets, **schema_kwargs)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
@timeit
def cleanup(session: neo4j.Session, common_job_parameters: dict[str, Any]) -> None:
    """Remove stale KubernetesSecret nodes from previous sync runs."""
    logger.debug("Running cleanup for KubernetesSecrets")
    secret_job = GraphJob.from_node_schema(
        KubernetesSecretSchema(),
        common_job_parameters,
    )
    secret_job.run(session)
|
|
24
92
|
|
|
25
93
|
|
|
26
94
|
@timeit
def sync_secrets(
    session: neo4j.Session,
    client: K8sClient,
    update_tag: int,
    common_job_parameters: dict[str, Any],
) -> None:
    """Get/transform/load all secrets for the cluster, then remove stale ones."""
    records = transform_secrets(get_secrets(client))
    load_secrets(
        session=session,
        secrets=records,
        update_tag=update_tag,
        cluster_id=common_job_parameters["CLUSTER_ID"],
        cluster_name=client.name,
    )
    cleanup(session, common_job_parameters)
|