cartography 0.105.0__py3-none-any.whl → 0.106.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cartography might be problematic.
- cartography/_version.py +2 -2
- cartography/client/core/tx.py +62 -0
- cartography/data/indexes.cypher +0 -34
- cartography/graph/cleanupbuilder.py +47 -0
- cartography/graph/job.py +42 -0
- cartography/graph/querybuilder.py +136 -2
- cartography/graph/statement.py +1 -1
- cartography/intel/aws/ecs.py +228 -380
- cartography/intel/aws/efs.py +261 -0
- cartography/intel/aws/identitycenter.py +14 -3
- cartography/intel/aws/inspector.py +96 -53
- cartography/intel/aws/rds.py +2 -1
- cartography/intel/aws/resources.py +2 -0
- cartography/intel/entra/__init__.py +11 -0
- cartography/intel/entra/applications.py +366 -0
- cartography/intel/kubernetes/__init__.py +30 -14
- cartography/intel/kubernetes/clusters.py +86 -0
- cartography/intel/kubernetes/namespaces.py +59 -57
- cartography/intel/kubernetes/pods.py +140 -77
- cartography/intel/kubernetes/secrets.py +95 -45
- cartography/intel/kubernetes/services.py +131 -67
- cartography/intel/kubernetes/util.py +125 -14
- cartography/models/aws/ecs/__init__.py +0 -0
- cartography/models/aws/ecs/clusters.py +64 -0
- cartography/models/aws/ecs/container_definitions.py +93 -0
- cartography/models/aws/ecs/container_instances.py +84 -0
- cartography/models/aws/ecs/containers.py +80 -0
- cartography/models/aws/ecs/services.py +117 -0
- cartography/models/aws/ecs/task_definitions.py +97 -0
- cartography/models/aws/ecs/tasks.py +110 -0
- cartography/models/aws/efs/__init__.py +0 -0
- cartography/models/aws/efs/access_point.py +77 -0
- cartography/models/aws/efs/file_system.py +60 -0
- cartography/models/aws/efs/mount_target.py +79 -0
- cartography/models/core/common.py +1 -0
- cartography/models/core/relationships.py +44 -0
- cartography/models/entra/app_role_assignment.py +115 -0
- cartography/models/entra/application.py +47 -0
- cartography/models/kubernetes/__init__.py +0 -0
- cartography/models/kubernetes/clusters.py +26 -0
- cartography/models/kubernetes/containers.py +108 -0
- cartography/models/kubernetes/namespaces.py +51 -0
- cartography/models/kubernetes/pods.py +80 -0
- cartography/models/kubernetes/secrets.py +79 -0
- cartography/models/kubernetes/services.py +108 -0
- cartography/util.py +15 -10
- {cartography-0.105.0.dist-info → cartography-0.106.0rc2.dist-info}/METADATA +1 -1
- {cartography-0.105.0.dist-info → cartography-0.106.0rc2.dist-info}/RECORD +52 -29
- cartography/data/jobs/cleanup/kubernetes_import_cleanup.json +0 -70
- {cartography-0.105.0.dist-info → cartography-0.106.0rc2.dist-info}/WHEEL +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0rc2.dist-info}/entry_points.txt +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0rc2.dist-info}/licenses/LICENSE +0 -0
- {cartography-0.105.0.dist-info → cartography-0.106.0rc2.dist-info}/top_level.txt +0 -0
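
The dominant change in this release, visible in the three kubernetes intel modules below, is a migration off hand-written Cypher ingestion onto cartography's declarative data model: each node type gets a schema module under cartography/models/, and the generic load() plus GraphJob.from_node_schema() machinery handles ingestion and cleanup (replacing the deleted kubernetes_import_cleanup.json above). As a rough sketch of the shape of such a model file, assuming the documented CartographyNodeSchema API; the property list here is illustrative, not the released definition:

from dataclasses import dataclass

from cartography.models.core.common import PropertyRef
from cartography.models.core.nodes import CartographyNodeProperties
from cartography.models.core.nodes import CartographyNodeSchema


@dataclass(frozen=True)
class KubernetesSecretNodeProperties(CartographyNodeProperties):
    # Each PropertyRef names a key in the transformed dicts passed to load().
    # Field selection is illustrative; the released schema defines its own set.
    id: PropertyRef = PropertyRef("uid")
    name: PropertyRef = PropertyRef("name")
    namespace: PropertyRef = PropertyRef("namespace")
    type: PropertyRef = PropertyRef("type")
    # set_in_kwargs=True: supplied once as a load() kwarg, not per record.
    lastupdated: PropertyRef = PropertyRef("lastupdated", set_in_kwargs=True)


@dataclass(frozen=True)
class KubernetesSecretSchema(CartographyNodeSchema):
    label: str = "KubernetesSecret"
    properties: KubernetesSecretNodeProperties = KubernetesSecretNodeProperties()

A schema like this is what load(session, KubernetesSecretSchema(), secrets, ...) and the auto-generated cleanup job consume in the diffs that follow.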
--- cartography/intel/kubernetes/pods.py (0.105.0)
+++ cartography/intel/kubernetes/pods.py (0.106.0rc2)
@@ -1,39 +1,35 @@
+import json
 import logging
-from typing import Dict
-from typing import List
+from typing import Any

-from neo4j import Session
+import neo4j
+from kubernetes.client.models import V1Container
+from kubernetes.client.models import V1Pod

+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
 from cartography.intel.kubernetes.util import get_epoch
+from cartography.intel.kubernetes.util import k8s_paginate
 from cartography.intel.kubernetes.util import K8sClient
+from cartography.models.kubernetes.containers import KubernetesContainerSchema
+from cartography.models.kubernetes.pods import KubernetesPodSchema
 from cartography.util import timeit

 logger = logging.getLogger(__name__)


-[… old lines 14-25: 12 removed lines, not recoverable from this extract …]
-@timeit
-def get_pods(client: K8sClient, cluster: Dict) -> List[Dict]:
-    pods = list()
-    for pod in client.core.list_pod_for_all_namespaces().items:
-        containers = {}
-        for container in pod.spec.containers:
-            containers[container.name] = {
-                "name": container.name,
-                "image": container.image,
-                "uid": f"{pod.metadata.uid}-{container.name}",
-            }
+def _extract_pod_containers(pod: V1Pod) -> dict[str, Any]:
+    pod_containers: list[V1Container] = pod.spec.containers
+    containers = dict()
+    for container in pod_containers:
+        containers[container.name] = {
+            "uid": f"{pod.metadata.uid}-{container.name}",
+            "name": container.name,
+            "image": container.image,
+            "namespace": pod.metadata.namespace,
+            "pod_id": pod.metadata.uid,
+            "imagePullPolicy": container.image_pull_policy,
+        }
     if pod.status and pod.status.container_statuses:
         for status in pod.status.container_statuses:
             if status.name in containers:
@@ -46,14 +42,31 @@ def get_pods(client: K8sClient, cluster: Dict) -> List[Dict]:
                     image_sha = status.image_id.split("@")[1]
                 except IndexError:
                     image_sha = None
-[… old lines 49-56: 8 removed lines, not recoverable from this extract …]
+
+                containers[status.name]["status_image_id"] = status.image_id
+                containers[status.name]["status_image_sha"] = image_sha
+                containers[status.name]["status_ready"] = status.ready
+                containers[status.name]["status_started"] = status.started
+                containers[status.name]["status_state"] = _state
+    return containers
+
+
+@timeit
+def get_pods(client: K8sClient) -> list[V1Pod]:
+    items = k8s_paginate(client.core.list_pod_for_all_namespaces)
+    return items
+
+
+def _format_pod_labels(labels: dict[str, str]) -> str:
+    return json.dumps(labels)
+
+
+def transform_pods(pods: list[V1Pod]) -> list[dict[str, Any]]:
+    transformed_pods = []
+
+    for pod in pods:
+        containers = _extract_pod_containers(pod)
+        transformed_pods.append(
             {
                 "uid": pod.metadata.uid,
                 "name": pod.metadata.name,
@@ -62,49 +75,99 @@ def get_pods(client: K8sClient, cluster: Dict) -> List[Dict]:
                 "deletion_timestamp": get_epoch(pod.metadata.deletion_timestamp),
                 "namespace": pod.metadata.namespace,
                 "node": pod.spec.node_name,
-                "cluster_uid": cluster["uid"],
-                "labels": pod.metadata.labels,
+                "labels": _format_pod_labels(pod.metadata.labels),
                 "containers": list(containers.values()),
             },
         )
-    return pods
-[… old lines 71-110: 40 removed lines, not recoverable from this extract …]
+    return transformed_pods
+
+
+@timeit
+def load_pods(
+    session: neo4j.Session,
+    pods: list[dict[str, Any]],
+    update_tag: int,
+    cluster_id: str,
+    cluster_name: str,
+) -> None:
+    logger.info(f"Loading {len(pods)} kubernetes pods.")
+    load(
+        session,
+        KubernetesPodSchema(),
+        pods,
+        lastupdated=update_tag,
+        CLUSTER_ID=cluster_id,
+        CLUSTER_NAME=cluster_name,
+    )
+
+
+def transform_containers(pods: list[dict[str, Any]]) -> list[dict[str, Any]]:
+    containers = []
+    for pod in pods:
+        containers.extend(pod.get("containers", []))
+    return containers
+
+
+@timeit
+def load_containers(
+    session: neo4j.Session,
+    containers: list[dict[str, Any]],
+    update_tag: int,
+    cluster_id: str,
+    cluster_name: str,
+) -> None:
+    logger.info(f"Loading {len(containers)} kubernetes containers.")
+    load(
+        session,
+        KubernetesContainerSchema(),
+        containers,
+        lastupdated=update_tag,
+        CLUSTER_ID=cluster_id,
+        CLUSTER_NAME=cluster_name,
+    )
+
+
+@timeit
+def cleanup(session: neo4j.Session, common_job_parameters: dict[str, Any]) -> None:
+    logger.debug("Running cleanup job for KubernetesContainer")
+    cleanup_job = GraphJob.from_node_schema(
+        KubernetesContainerSchema(), common_job_parameters
+    )
+    cleanup_job.run(session)
+
+    logger.debug("Running cleanup job for KubernetesPod")
+    cleanup_job = GraphJob.from_node_schema(
+        KubernetesPodSchema(), common_job_parameters
+    )
+    cleanup_job.run(session)
+
+
+@timeit
+def sync_pods(
+    session: neo4j.Session,
+    client: K8sClient,
+    update_tag: int,
+    common_job_parameters: dict[str, Any],
+) -> list[dict[str, Any]]:
+    pods = get_pods(client)
+
+    transformed_pods = transform_pods(pods)
+    load_pods(
+        session=session,
+        pods=transformed_pods,
+        update_tag=update_tag,
+        cluster_id=common_job_parameters["CLUSTER_ID"],
+        cluster_name=client.name,
+    )
+
+    transformed_containers = transform_containers(transformed_pods)
+    load_containers(
+        session=session,
+        containers=transformed_containers,
+        update_tag=update_tag,
+        cluster_id=common_job_parameters["CLUSTER_ID"],
+        cluster_name=client.name,
+    )
+
+    cleanup(session, common_job_parameters)
+    return transformed_pods
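
All three modules now fetch through k8s_paginate, added to cartography/intel/kubernetes/util.py in this release (+125 lines, not shown in this extract). Its implementation is not visible here; a minimal continue-token paginator along these lines would fit the call sites above, offered only as an assumption about what the helper does:

from typing import Any
from typing import Callable


def k8s_paginate(list_func: Callable[..., Any], **kwargs: Any) -> list[Any]:
    # Hypothetical sketch: drain a kubernetes list_* endpoint by following
    # the server's continue token (V1ListMeta._continue) until exhausted.
    items: list[Any] = []
    continue_token: str | None = None
    while True:
        if continue_token:
            resp = list_func(_continue=continue_token, **kwargs)
        else:
            resp = list_func(**kwargs)
        items.extend(resp.items)
        continue_token = resp.metadata._continue  # None on the last page
        if not continue_token:
            return items

Passing the bound method itself, as in k8s_paginate(client.core.list_pod_for_all_namespaces), keeps pagination concerns out of each intel module.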
--- cartography/intel/kubernetes/secrets.py (0.105.0)
+++ cartography/intel/kubernetes/secrets.py (0.106.0rc2)
@@ -1,60 +1,110 @@
+import json
 import logging
-from typing import Dict
-from typing import List
+from typing import Any

-from neo4j import Session
+import neo4j
+from kubernetes.client.models import V1OwnerReference
+from kubernetes.client.models import V1Secret

+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
 from cartography.intel.kubernetes.util import get_epoch
+from cartography.intel.kubernetes.util import k8s_paginate
 from cartography.intel.kubernetes.util import K8sClient
+from cartography.models.kubernetes.secrets import KubernetesSecretSchema
 from cartography.util import timeit

 logger = logging.getLogger(__name__)


 @timeit
-def sync_secrets(
-    session: Session,
-    client: K8sClient,
+def get_secrets(client: K8sClient) -> list[V1Secret]:
+    items = k8s_paginate(client.core.list_secret_for_all_namespaces)
+    return items
+
+
+def _get_owner_references(
+    owner_references: list[V1OwnerReference] | None,
+) -> str | None:
+    if owner_references:
+        owner_references_list = []
+        for owner_reference in owner_references:
+            owner_references_list.append(
+                {
+                    "kind": owner_reference.kind,
+                    "name": owner_reference.name,
+                    "uid": owner_reference.uid,
+                    "apiVersion": owner_reference.api_version,
+                    "controller": owner_reference.controller,
+                }
+            )
+        return json.dumps(owner_references_list)
+    return None
+
+
+def transform_secrets(secrets: list[V1Secret]) -> list[dict[str, Any]]:
+    secrets_list = []
+    for secret in secrets:
+        secrets_list.append(
+            {
+                "uid": secret.metadata.uid,
+                "name": secret.metadata.name,
+                "creation_timestamp": get_epoch(secret.metadata.creation_timestamp),
+                "deletion_timestamp": get_epoch(secret.metadata.deletion_timestamp),
+                "owner_references": _get_owner_references(
+                    secret.metadata.owner_references
+                ),
+                "namespace": secret.metadata.namespace,
+                "type": secret.type,
+            }
+        )
+
+    return secrets_list
+
+
+@timeit
+def load_secrets(
+    session: neo4j.Session,
+    secrets: list[dict[str, Any]],
     update_tag: int,
-[… old lines 19-23: 5 removed lines, not recoverable from this extract …]
+    cluster_id: str,
+    cluster_name: str,
+) -> None:
+    logger.info(f"Loading {len(secrets)} KubernetesSecrets")
+    load(
+        session,
+        KubernetesSecretSchema(),
+        secrets,
+        lastupdated=update_tag,
+        CLUSTER_ID=cluster_id,
+        CLUSTER_NAME=cluster_name,
+    )
+
+
+@timeit
+def cleanup(session: neo4j.Session, common_job_parameters: dict[str, Any]) -> None:
+    logger.debug("Running cleanup for KubernetesSecrets")
+    cleanup_job = GraphJob.from_node_schema(
+        KubernetesSecretSchema(),
+        common_job_parameters,
+    )
+    cleanup_job.run(session)


 @timeit
-def …
-[… old lines 28-42: 15 removed lines, not recoverable from this extract …]
-def load_secrets(session: Session, data: List[Dict], update_tag: int) -> None:
-    ingestion_cypher_query = """
-    UNWIND $secrets as k8secret
-    MERGE (secret:KubernetesSecret {id: k8secret.uid})
-    ON CREATE SET secret.firstseen = timestamp()
-    SET secret.lastupdated = $update_tag,
-        secret.name = k8secret.name,
-        secret.created_at = k8secret.creation_timestamp,
-        secret.deleted_at = k8secret.deletion_timestamp,
-        secret.type = k8secret.type
-    WITH secret, k8secret.namespace as ns, k8secret.cluster_uid as cuid
-    MATCH (cluster:KubernetesCluster {id: cuid})-[:HAS_NAMESPACE]->(space:KubernetesNamespace {name: ns})
-    MERGE (space)-[rel1:HAS_SECRET]->(secret)
-    ON CREATE SET rel1.firstseen = timestamp()
-    SET rel1.lastupdated = $update_tag
-    """
-    logger.info(f"Loading {len(data)} kubernetes secrets.")
-    session.run(ingestion_cypher_query, secrets=data, update_tag=update_tag)
+def sync_secrets(
+    session: neo4j.Session,
+    client: K8sClient,
+    update_tag: int,
+    common_job_parameters: dict[str, Any],
+) -> None:
+    secrets = get_secrets(client)
+    transformed_secrets = transform_secrets(secrets)
+    load_secrets(
+        session=session,
+        secrets=transformed_secrets,
+        update_tag=update_tag,
+        cluster_id=common_job_parameters["CLUSTER_ID"],
+        cluster_name=client.name,
+    )
+    cleanup(session, common_job_parameters)
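
For reference, _get_owner_references flattens a secret's owner list into a JSON string stored on the KubernetesSecret node. A quick illustration with an invented owner (all field values below are hypothetical):

from kubernetes.client.models import V1OwnerReference

ref = V1OwnerReference(
    api_version="apps/v1",
    kind="StatefulSet",
    name="vault",  # hypothetical owner
    uid="8f2c0000-0000-0000-0000-000000000000",
    controller=True,
)
# _get_owner_references([ref]) would return the JSON string:
# '[{"kind": "StatefulSet", "name": "vault", "uid": "8f2c0000-0000-0000-0000-000000000000", "apiVersion": "apps/v1", "controller": true}]'

Storing the list as a JSON string keeps the node property scalar, the same trade-off the pods and services modules make for labels, selectors, and load-balancer ingress.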
--- cartography/intel/kubernetes/services.py (0.105.0)
+++ cartography/intel/kubernetes/services.py (0.106.0rc2)
@@ -1,90 +1,154 @@
+import json
 import logging
-from typing import Dict
-from typing import List
+from typing import Any

-from neo4j import Session
+import neo4j
+from kubernetes.client.models import V1LoadBalancerIngress
+from kubernetes.client.models import V1PortStatus
+from kubernetes.client.models import V1Service

+from cartography.client.core.tx import load
+from cartography.graph.job import GraphJob
 from cartography.intel.kubernetes.util import get_epoch
+from cartography.intel.kubernetes.util import k8s_paginate
 from cartography.intel.kubernetes.util import K8sClient
+from cartography.models.kubernetes.services import KubernetesServiceSchema
 from cartography.util import timeit

 logger = logging.getLogger(__name__)


 @timeit
-def sync_services(
-    session: Session,
-    client: K8sClient,
-    update_tag: int,
-    cluster: Dict,
-    pods: List[Dict],
-) -> None:
-    services = get_services(client, cluster, pods)
-    load_services(session, services, update_tag)
+def get_services(client: K8sClient) -> list[V1Service]:
+    items = k8s_paginate(client.core.list_service_for_all_namespaces)
+    return items


-[… old lines 26-29: 4 removed lines, not recoverable from this extract …]
+def _format_service_selector(selector: dict[str, str]) -> str:
+    return json.dumps(selector)
+
+
+def _format_load_balancer_ingress(ingress: list[V1LoadBalancerIngress] | None) -> str:
+
+    def _format_ingress_ports(
+        ports: list[V1PortStatus] | None,
+    ) -> list[dict[str, Any]] | None:
+        if ports is None:
+            return None
+
+        ingress_ports = []
+        for port in ports:
+            ingress_ports.append(
+                {
+                    "error": port.error,
+                    "port": port.port,
+                    "protocol": port.protocol,
+                }
+            )
+        return ingress_ports
+
+    if ingress is None:
+        return json.dumps(None)
+
+    loadbalancer_ingress = []
+    for item in ingress:
+        loadbalancer_ingress.append(
+            {
+                "hostname": item.hostname,
+                "ip": item.ip,
+                "ip_mode": item.ip_mode,
+                "ports": _format_ingress_ports(item.ports),
+            }
+        )
+    return json.dumps(loadbalancer_ingress)
+
+
+def transform_services(
+    services: list[V1Service], all_pods: list[dict[str, Any]]
+) -> list[dict[str, Any]]:
+    services_list = []
+    for service in services:
         item = {
             "uid": service.metadata.uid,
             "name": service.metadata.name,
             "creation_timestamp": get_epoch(service.metadata.creation_timestamp),
             "deletion_timestamp": get_epoch(service.metadata.deletion_timestamp),
             "namespace": service.metadata.namespace,
-            "cluster_uid": cluster["uid"],
             "type": service.spec.type,
-            "selector": service.spec.selector,
+            "selector": _format_service_selector(service.spec.selector),
+            "cluster_ip": service.spec.cluster_ip,
             "load_balancer_ip": service.spec.load_balancer_ip,
         }

-[… old lines 42-53: 12 removed lines, not recoverable from this extract …]
-        )
-[… old lines 55-88: 34 removed lines, not recoverable from this extract …]
-    logger.…
-[… old line 90: 1 removed line, not recoverable from this extract …]
+        # TODO: instead of storing a json string, we should probably create seperate nodes for each ingress
+        if service.spec.type == "LoadBalancer":
+            if service.status.load_balancer:
+                item["load_balancer_ingress"] = _format_load_balancer_ingress(
+                    service.status.load_balancer.ingress
+                )
+
+        # check if pod labels match service selector and add pod_ids to item
+        pod_ids = []
+        for pod in all_pods:
+            if pod["namespace"] == service.metadata.namespace:
+                service_selector: dict[str, str] | None = service.spec.selector
+                pod_labels: dict[str, str] | None = json.loads(pod["labels"])
+
+                # check if pod labels match service selector
+                if pod_labels and service_selector:
+                    if all(
+                        service_selector[key] == pod_labels.get(key)
+                        for key in service_selector
+                    ):
+                        pod_ids.append(pod["uid"])
+
+        item["pod_ids"] = pod_ids
+
+        services_list.append(item)
+    return services_list
+
+
+def load_services(
+    session: neo4j.Session,
+    services: list[dict[str, Any]],
+    update_tag: int,
+    cluster_id: str,
+    cluster_name: str,
+) -> None:
+    logger.info(f"Loading {len(services)} KubernetesServices")
+    load(
+        session,
+        KubernetesServiceSchema(),
+        services,
+        lastupdated=update_tag,
+        CLUSTER_ID=cluster_id,
+        CLUSTER_NAME=cluster_name,
+    )
+
+
+def cleanup(session: neo4j.Session, common_job_parameters: dict[str, Any]) -> None:
+    logger.debug("Running cleanup job for KubernetesService")
+    cleanup_job = GraphJob.from_node_schema(
+        KubernetesServiceSchema(), common_job_parameters
+    )
+    cleanup_job.run(session)
+
+
+@timeit
+def sync_services(
+    session: neo4j.Session,
+    client: K8sClient,
+    all_pods: list[dict[str, Any]],
+    update_tag: int,
+    common_job_parameters: dict[str, Any],
+) -> None:
+    services = get_services(client)
+    transformed_services = transform_services(services, all_pods)
+    load_services(
+        session=session,
+        services=transformed_services,
+        update_tag=update_tag,
+        cluster_id=common_job_parameters["CLUSTER_ID"],
+        cluster_name=client.name,
+    )
+    cleanup(session, common_job_parameters)
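
The pod-matching loop in transform_services implements standard Kubernetes label-selector subset semantics: a pod backs a service when every selector key/value pair appears in the pod's labels, and extra pod labels are ignored. A worked example of the same predicate:

def matches(selector: dict[str, str], labels: dict[str, str]) -> bool:
    # Identical predicate to the all(...) expression in transform_services.
    return all(selector[key] == labels.get(key) for key in selector)


selector = {"app": "web", "tier": "frontend"}
assert matches(selector, {"app": "web", "tier": "frontend", "pod-template-hash": "abc"})
assert not matches(selector, {"app": "web", "tier": "backend"})  # "tier" differs

Note the data dependency this creates across the modules above: sync_pods stores each pod's labels as a JSON string via _format_pod_labels, sync_services json.loads them back for matching, and that is why sync_pods returns its transformed pods and sync_services takes all_pods as input.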