kugl 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kugl-0.3.0/LICENSE +21 -0
- kugl-0.3.0/MANIFEST.in +1 -0
- kugl-0.3.0/PKG-INFO +28 -0
- kugl-0.3.0/README.md +137 -0
- kugl-0.3.0/kugl/__init__.py +0 -0
- kugl-0.3.0/kugl/api.py +28 -0
- kugl-0.3.0/kugl/builtins/__init__.py +0 -0
- kugl-0.3.0/kugl/builtins/helpers.py +156 -0
- kugl-0.3.0/kugl/builtins/kubernetes.py +220 -0
- kugl-0.3.0/kugl/builtins/kubernetes.yaml +27 -0
- kugl-0.3.0/kugl/impl/__init__.py +0 -0
- kugl-0.3.0/kugl/impl/config.py +207 -0
- kugl-0.3.0/kugl/impl/engine.py +246 -0
- kugl-0.3.0/kugl/impl/registry.py +117 -0
- kugl-0.3.0/kugl/impl/tables.py +182 -0
- kugl-0.3.0/kugl/main.py +116 -0
- kugl-0.3.0/kugl/util/__init__.py +23 -0
- kugl-0.3.0/kugl/util/age.py +101 -0
- kugl-0.3.0/kugl/util/clock.py +73 -0
- kugl-0.3.0/kugl/util/misc.py +125 -0
- kugl-0.3.0/kugl/util/size.py +63 -0
- kugl-0.3.0/kugl/util/sqlite.py +70 -0
- kugl-0.3.0/kugl.egg-info/PKG-INFO +28 -0
- kugl-0.3.0/kugl.egg-info/SOURCES.txt +40 -0
- kugl-0.3.0/kugl.egg-info/dependency_links.txt +1 -0
- kugl-0.3.0/kugl.egg-info/entry_points.txt +2 -0
- kugl-0.3.0/kugl.egg-info/requires.txt +6 -0
- kugl-0.3.0/kugl.egg-info/top_level.txt +2 -0
- kugl-0.3.0/setup.cfg +4 -0
- kugl-0.3.0/setup.py +35 -0
- kugl-0.3.0/tests/__init__.py +0 -0
- kugl-0.3.0/tests/conftest.py +30 -0
- kugl-0.3.0/tests/test_cache.py +45 -0
- kugl-0.3.0/tests/test_cli.py +69 -0
- kugl-0.3.0/tests/test_config.py +148 -0
- kugl-0.3.0/tests/test_extra.py +111 -0
- kugl-0.3.0/tests/test_jobs.py +53 -0
- kugl-0.3.0/tests/test_misc.py +65 -0
- kugl-0.3.0/tests/test_nodes.py +76 -0
- kugl-0.3.0/tests/test_pods.py +141 -0
- kugl-0.3.0/tests/test_utils.py +147 -0
- kugl-0.3.0/tests/testing.py +179 -0
kugl-0.3.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright 2023, 2024 by Jonathan Ross <jonross@alum.mit.edu>
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
kugl-0.3.0/MANIFEST.in
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
include kugl/builtins/kubernetes.yaml
|
kugl-0.3.0/PKG-INFO
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: kugl
|
|
3
|
+
Version: 0.3.0
|
|
4
|
+
Summary: Explore Kubernetes resources using SQLite
|
|
5
|
+
Home-page: https://github.com/jonross/kugl
|
|
6
|
+
Author: Jon Ross
|
|
7
|
+
Author-email: kugl.devel@gmail.com
|
|
8
|
+
Classifier: Development Status :: 3 - Alpha
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Operating System :: OS Independent
|
|
14
|
+
Requires-Python: >=3.9
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
License-File: LICENSE
|
|
17
|
+
Requires-Dist: arrow<=1.3.0,>=1.0.1
|
|
18
|
+
Requires-Dist: funcy<=1.18,>=1.13
|
|
19
|
+
Requires-Dist: jmespath<=1.0,>=0.9.5
|
|
20
|
+
Requires-Dist: pydantic<=2.9.2,>=2.0.2
|
|
21
|
+
Requires-Dist: pyyaml<=6.0.2,>=5.3
|
|
22
|
+
Requires-Dist: tabulate<=0.9.0,>=0.8.7
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
PyPI readme goes here.
|
|
26
|
+
|
|
27
|
+
For documentation please visit the [GitHub repository](https://github.com/jonross/kugl).
|
|
28
|
+
|
kugl-0.3.0/README.md
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
# Kugl
|
|
2
|
+
|
|
3
|
+
Explore Kubernetes resources using SQLite.
|
|
4
|
+
|
|
5
|
+
## Example
|
|
6
|
+
|
|
7
|
+
Find the top users of a GPU pool, based on instance type and a team-specific pod label.
|
|
8
|
+
|
|
9
|
+
With Kugl (and a little configuration)
|
|
10
|
+
|
|
11
|
+
```shell
|
|
12
|
+
kugl -a "select owner, sum(gpu_req), sum(cpu_req)
|
|
13
|
+
from pods join nodes on pods.node_name = nodes.name
|
|
14
|
+
where instance_type like 'g5.%large' and pods.phase in ('Running', 'Pending')
|
|
15
|
+
group by 1 order by 2 desc limit 10"
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
With `kubectl` and `jq`
|
|
19
|
+
|
|
20
|
+
```shell
|
|
21
|
+
kubectl get pods -o json --all-namespaces |
|
|
22
|
+
jq -r --argjson nodes "$(kubectl get nodes -o json | jq '[.items[]
|
|
23
|
+
| select((.metadata.labels["node.kubernetes.io/instance-type"] // "") | test("g5.*large"))
|
|
24
|
+
| .metadata.name]')" \
|
|
25
|
+
'[ .items[]
|
|
26
|
+
| select(.spec.nodeName as $node | $nodes | index($node))
|
|
27
|
+
| select(.status.phase == "Running" or .status.phase == "Pending")
|
|
28
|
+
| . as $pod | $pod.spec.containers[]
|
|
29
|
+
| select(.resources.requests["nvidia.com/gpu"] != null)
|
|
30
|
+
| {owner: $pod.metadata.labels["com.mycompany/job-owner"],
|
|
31
|
+
gpu: .resources.requests["nvidia.com/gpu"],
|
|
32
|
+
cpu: .resources.requests["cpu"]}
|
|
33
|
+
] | group_by(.owner)
|
|
34
|
+
| map({owner: .[0].owner,
|
|
35
|
+
gpu: map(.gpu | tonumber) | add,
|
|
36
|
+
cpu: map(.cpu | if test("m$") then (sub("m$"; "") | tonumber / 1000) else tonumber end) | add})
|
|
37
|
+
| sort_by(-.gpu) | .[:10] | .[]
|
|
38
|
+
| "\(.owner) \(.gpu) \(.cpu)"'
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
## Installing
|
|
42
|
+
|
|
43
|
+
Kugl requires Python 3.9 or later, and kubectl.
|
|
44
|
+
|
|
45
|
+
**This is an alpha release.** Please expect bugs and backward-incompatible changes.
|
|
46
|
+
|
|
47
|
+
If you don't mind Kugl cluttering your Python with its [dependencies](./reqs_public.txt):
|
|
48
|
+
|
|
49
|
+
```shell
|
|
50
|
+
pip install kugl
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
If you do mind, there's a Docker image; `mkdir ~/.kugl` and use this Bash alias. (Sorry, this is an x86 image,
|
|
54
|
+
I don't have multiarch working yet.)
|
|
55
|
+
|
|
56
|
+
```shell
|
|
57
|
+
kugl() {
|
|
58
|
+
docker run \
|
|
59
|
+
-v ~/.kube:/root/.kube \
|
|
60
|
+
-v ~/.kugl:/root/.kugl \
|
|
61
|
+
jonross/kugl:0.3.0 python3 -m kugl.main "$@"
|
|
62
|
+
}
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
If neither of those suits you, it's easy to set up from source. (This will build a virtualenv in the
|
|
66
|
+
directory where you clone it.)
|
|
67
|
+
|
|
68
|
+
```shell
|
|
69
|
+
git clone https://github.com/jonross/kugl.git
|
|
70
|
+
cd kugl
|
|
71
|
+
make deps
|
|
72
|
+
# put kugl's bin directory in your PATH
|
|
73
|
+
PATH=${PATH}:$(pwd)/bin
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
### Test it
|
|
77
|
+
|
|
78
|
+
Find the pods using the most memory:
|
|
79
|
+
|
|
80
|
+
```shell
|
|
81
|
+
kugl -a "select name, to_size(mem_req) from pods order by mem_req desc limit 15"
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
If this query is helpful, [save it](./docs-tmp/shortcuts.md), then you can run `kugl hi-mem`.
|
|
85
|
+
|
|
86
|
+
Please also see the [recommended configuration](./docs-tmp/recommended.md).
|
|
87
|
+
|
|
88
|
+
## How it works (important)
|
|
89
|
+
|
|
90
|
+
Kugl is just a thin wrapper on Kubectl and SQLite. It turns `SELECT ... FROM pods` into
|
|
91
|
+
`kubectl get pods -o json`, then maps fields from the response to columns
|
|
92
|
+
in SQLite. If you `JOIN` to other resource tables like `nodes` it calls `kubectl get`
|
|
93
|
+
for those too. If you need more columns or tables than are built in as of this release,
|
|
94
|
+
there's a config file for that.
|
|
95
|
+
|
|
96
|
+
Because Kugl always fetches all resources from a namespace (or everything, if
|
|
97
|
+
`-a/--all-namespaces` is used), it tries
|
|
98
|
+
to ease Kubernetes API Server load by **caching responses for
|
|
99
|
+
two minutes**. This is why it often prints "Data delayed up to ..." messages.
|
|
100
|
+
|
|
101
|
+
Depending on your cluster activity, the cache can be a help or a hindrance.
|
|
102
|
+
You can suppress the "delayed" messages with the `-r` / `--reckless` option, or
|
|
103
|
+
always update data using the `-u` / `--update` option. These behaviors, and
|
|
104
|
+
the cache expiration time, can be set in the config file as well.
|
|
105
|
+
|
|
106
|
+
In any case, please be mindful of stale data and server load.
|
|
107
|
+
|
|
108
|
+
## Learn more
|
|
109
|
+
|
|
110
|
+
* [Command-line syntax](./docs-tmp/syntax.md)
|
|
111
|
+
* [Recommended configuration](./docs-tmp/recommended.md)
|
|
112
|
+
* [Settings](./docs-tmp/settings.md)
|
|
113
|
+
* [Built-in tables and functions](./docs-tmp/builtins.md)
|
|
114
|
+
* [Configuring new columns and tables](./docs-tmp/extending.md)
|
|
115
|
+
* [Troubleshooting and feedback](./docs-tmp/trouble.md)
|
|
116
|
+
* [Beyond Kubernetes](./docs-tmp/beyond.md)
|
|
117
|
+
* [License](./LICENSE)
|
|
118
|
+
|
|
119
|
+
### Pronunciation
|
|
120
|
+
|
|
121
|
+
Like "cudgel", so, a blunt instrument for convincing data to be row-shaped.
|
|
122
|
+
|
|
123
|
+
Or "koo-jull", if you prefer something less combative.
|
|
124
|
+
|
|
125
|
+
"Kugel" is a casserole with varying degrees of cultural significance, and sounds too much like "Google".
|
|
126
|
+
|
|
127
|
+
### Rationale
|
|
128
|
+
|
|
129
|
+
Kugl won't replace everyday use of `kubectl`; it's more for ad-hoc queries and reports, where the
|
|
130
|
+
cognitive overhead of `kubectl | jq` is an obstacle. In that context, full SQL support and user-defined
|
|
131
|
+
tables are essential, and it is where Kugl hopes to go a step further than prior art.
|
|
132
|
+
|
|
133
|
+
Some other implementations of SQL-on-Kubernetes:
|
|
134
|
+
|
|
135
|
+
* [ksql](https://github.com/brendandburns/ksql) is built on Node.js and AlaSQL; last commit November 2016.
|
|
136
|
+
* [kubeql](https://github.com/saracen/kubeql) is a SQL-like query language for Kubernetes; last commit October 2017.
|
|
137
|
+
* [kube-query](https://github.com/aquasecurity/kube-query) is an [osquery](https://osquery.io/) extension; last commit July 2020.
|
|
File without changes
|
kugl-0.3.0/kugl/api.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Imports usable by user-defined tables in Python (once we have those.)
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from kugl.impl.registry import Registry
|
|
6
|
+
|
|
7
|
+
from kugl.util import (
|
|
8
|
+
fail,
|
|
9
|
+
parse_age,
|
|
10
|
+
parse_utc,
|
|
11
|
+
to_age,
|
|
12
|
+
to_utc,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def schema(name: str):
    """Class decorator factory: register the decorated class as the schema *name*.

    Usage: @schema("kubernetes") above a class definition.  Registration happens
    at decoration time via the global Registry singleton.
    """
    def _register(cls):
        Registry.get().add_schema(name, cls)
        return cls
    return _register
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def table(**kwargs):
    """Class decorator factory: register the decorated class as a table.

    Keyword arguments (e.g. schema=, name=, resource=) are forwarded unchanged
    to Registry.add_table at decoration time.
    """
    def _register(cls):
        Registry.get().add_table(cls, **kwargs)
        return cls
    return _register
|
|
28
|
+
|
|
File without changes
|
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Wrappers to make JSON returned by kubectl easier to work with.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from abc import abstractmethod
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
import funcy as fn
|
|
10
|
+
|
|
11
|
+
from kugl.util import parse_size, parse_cpu
|
|
12
|
+
|
|
13
|
+
# What container name is considered the "main" container, if present
|
|
14
|
+
MAIN_CONTAINERS = ["main", "notebook", "app"]
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class Limits:
    """
    A class to hold CPU, GPU and memory resources. This is called "Limits" although it's used for both requests
    and limits, so as not to confuse "resources" with Kubernetes resources in general.
    """
    cpu: Optional[float]
    gpu: Optional[float]
    mem: Optional[int]

    @staticmethod
    def _combine(a, b):
        # None means "unspecified": None + None stays None, otherwise None acts as 0.
        if a is None and b is None:
            return None
        return (a or 0) + (b or 0)

    def __add__(self, other):
        return Limits(
            self._combine(self.cpu, other.cpu),
            self._combine(self.gpu, other.gpu),
            self._combine(self.mem, other.mem),
        )

    def __radd__(self, other):
        """Needed to support sum() -- handles 0 as a starting value"""
        return self if other == 0 else self.__add__(other)

    def as_tuple(self):
        """Return (cpu, gpu, mem), in the order the table schemas expect."""
        return (self.cpu, self.gpu, self.mem)

    @classmethod
    def extract(cls, obj):
        """Extract a Limits object from a dictionary, or return an empty one if the dictionary is None.

        :param obj: A dictionary with keys "cpu", "nvidia.com/gpu" and "memory" """
        if obj is None:
            return Limits(None, None, None)
        return Limits(
            parse_cpu(obj.get("cpu")),
            parse_cpu(obj.get("nvidia.com/gpu")),
            parse_size(obj.get("memory")),
        )
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class ItemHelper:
    """Some common code for wrappers on JSON for pods, nodes et cetera."""

    def __init__(self, obj):
        self.obj = obj
        self.metadata = obj.get("metadata", {})
        self.labels = self.metadata.get("labels", {})

    def __getitem__(self, key):
        """Return a key from the object; no default, will error if not present"""
        return self.obj[key]

    @property
    def name(self):
        """The object's name from metadata, falling back to a top-level "name" key; None if neither exists."""
        return self.metadata.get("name") or self.obj.get("name")

    @property
    def namespace(self):
        """The object's namespace from metadata, or None if unavailable."""
        return self.metadata.get("namespace")

    def label(self, name):
        """Return one of the labels from the object, or None if it doesn't have that label."""
        return self.labels.get(name)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
class Containerized:
    """Mixin for item helpers whose JSON carries a container list (pods, jobs)."""

    # Declared as a property because resources() below reads self.containers
    # without calling it, and every subclass overrides it as a @property.
    @property
    @abstractmethod
    def containers(self):
        """Subclasses return the item's container list (possibly empty)."""
        raise NotImplementedError()

    def resources(self, tag):
        """Sum the given resource section across all containers into one Limits.

        :param tag: "requests" or "limits"
        :return: a Limits; all-None if there are no containers
        """
        # Start from an empty Limits so zero containers yields a Limits rather
        # than the int 0 (callers immediately invoke .as_tuple() on the result).
        return sum((Limits.extract(c.get("resources", {}).get(tag)) for c in self.containers),
                   Limits(None, None, None))
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
class PodHelper(ItemHelper, Containerized):

    @property
    def command(self):
        """The main container's command, joined into a single string ("" if unavailable)."""
        return " ".join((self.main or {}).get("command", []))

    @property
    def is_daemon(self):
        """True if any ownerReference marks this pod as belonging to a DaemonSet."""
        return any(ref.get("kind") == "DaemonSet" for ref in self.metadata.get("ownerReferences", []))

    @property
    def containers(self):
        """Return the containers in the pod, if any, else an empty list."""
        return self["spec"].get("containers", [])

    @property
    def main(self):
        """Return the main container in the pod, if any, defined as the first container with a name
        in MAIN_CONTAINERS. If there are none of those, return the first one.
        """
        if not self.containers:
            return None
        preferred = next((c for c in self.containers if c["name"] in MAIN_CONTAINERS), None)
        return preferred or self.containers[0]
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class JobHelper(ItemHelper, Containerized):

    @property
    def status(self):
        """Reduce the job's status block to one word: Complete, Failed, Suspended,
        Running, or Unknown.  A failed condition may report its reason instead of
        the literal "Failed"."""
        status = self.obj.get("status", {})
        if len(status) == 0:
            return "Unknown"
        # Condition semantics per
        # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1JobStatus.md
        # and https://kubernetes.io/docs/concepts/workloads/controllers/job/
        # NOTE(review): only conditions with status "True" count; the first match
        # in document order wins, so the per-type checks below are order-sensitive.
        for c in status.get("conditions", []):
            if c["status"] == "True":
                if c["type"] == "Failed":
                    # TODO use a separate column
                    return c.get("reason") or "Failed"
                if c["type"] == "Suspended":
                    return "Suspended"
                if c["type"] == "Complete":
                    return "Complete"
                if c["type"] == "FailureTarget":
                    # FailureTarget precedes Failed; report it the same way
                    return "Failed"
                if c["type"] == "SuccessCriteriaMet":
                    # SuccessCriteriaMet precedes Complete; report it the same way
                    return "Complete"
        # No terminal condition: active pods mean the job is still running.
        if status.get("active", 0) > 0:
            return "Running"
        return "Unknown"

    @property
    def containers(self):
        """Return the containers in the job, if any, else an empty list."""
        return self["spec"]["template"]["spec"].get("containers", [])
|
|
156
|
+
|
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Built-in table definitions for Kubernetes.
|
|
3
|
+
|
|
4
|
+
NOTE: This is not a good example of how to write user-defined tables.
|
|
5
|
+
FIXME: Remove references to non-API imports.
|
|
6
|
+
FIXME: Don't use ArgumentParser in the API.
|
|
7
|
+
"""
|
|
8
|
+
import json
|
|
9
|
+
from argparse import ArgumentParser
|
|
10
|
+
from threading import Thread
|
|
11
|
+
|
|
12
|
+
from .helpers import Limits, ItemHelper, PodHelper, JobHelper
|
|
13
|
+
from kugl.api import schema, table, fail
|
|
14
|
+
from kugl.util import parse_utc, run, WHITESPACE
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@schema("kubernetes")
class KubernetesData:  # FIXME: this should be a resource type, not a schema
    """Owns the namespace CLI options and fetches Kubernetes resources via kubectl."""

    def add_cli_options(self, ap: ArgumentParser):
        """Register -a/--all-namespaces and -n/--namespace on the main parser."""
        ap.add_argument("-a", "--all-namespaces", default=False, action="store_true")
        ap.add_argument("-n", "--namespace", type=str)

    def handle_cli_options(self, args):
        """Validate and apply the namespace options parsed from the command line."""
        if args.all_namespaces and args.namespace:
            fail("Cannot use both -a/--all-namespaces and -n/--namespace")
        self.set_namespace(args.all_namespaces, args.namespace)

    def set_namespace(self, all_namespaces: bool, namespace: str):
        """Record which namespace(s) subsequent kubectl invocations should target."""
        if all_namespaces:
            # FIXME: engine.py and testing.py still use this sentinel value
            self.ns = "__all"
            self.all_ns = True
        else:
            self.ns = namespace or "default"
            self.all_ns = False

    def get_objects(self, kind: str, namespaced: bool) -> dict:
        """Fetch resources from Kubernetes using kubectl.

        :param kind: Kubernetes resource type e.g. "pods"
        :param namespaced: whether this resource kind is namespace-scoped
        :return: JSON as output by "kubectl get {kind} -o json"
        """
        # BUG FIX: this previously tested "if self.ns", which is always truthy
        # ("__all" or a namespace name), so every namespaced fetch silently ran
        # with --all-namespaces.  Test the boolean flag instead.
        namespace_flag = ["--all-namespaces"] if self.all_ns else ["-n", self.ns]
        if kind == "pods":
            pod_statuses = {}
            # Kick off a thread to get pod statuses concurrently with the JSON fetch
            def _fetch():
                _, output, _ = run(["kubectl", "get", "pods", *namespace_flag])
                pod_statuses.update(self._pod_status_from_pod_list(output))
            status_thread = Thread(target=_fetch, daemon=True)
            status_thread.start()
        if namespaced:
            _, output, _ = run(["kubectl", "get", kind, *namespace_flag, "-o", "json"])
        else:
            _, output, _ = run(["kubectl", "get", kind, "-o", "json"])
        data = json.loads(output)
        if kind == "pods":
            # Attach the human-readable status to each pod; pods for which no
            # status line was seen are dropped from the result entirely.
            status_thread.join()
            def pod_with_updated_status(pod):
                metadata = pod["metadata"]
                status = pod_statuses.get(f"{metadata['namespace']}/{metadata['name']}")
                if status:
                    pod["kubectl_status"] = status
                    return pod
                return None
            data["items"] = list(filter(None, map(pod_with_updated_status, data["items"])))
        return data

    def _pod_status_from_pod_list(self, output) -> dict[str, str]:
        """
        Convert the tabular output of 'kubectl get pods' to JSON.
        :return: a dict mapping "namespace/name" to status
        """
        rows = [WHITESPACE.split(line.strip()) for line in output.strip().split("\n")]
        # Header-only (or empty) output means no pods were listed.
        if len(rows) < 2:
            return {}
        header, rows = rows[0], rows[1:]
        name_index = header.index("NAME")
        status_index = header.index("STATUS")
        # It would be nice if 'kubectl get pods' printed the UID, but it doesn't, so use
        # "namespace/name" as the key. (Can't use a tuple since this has to be JSON-dumped.)
        if self.all_ns:
            # With --all-namespaces kubectl adds a NAMESPACE column
            namespace_index = header.index("NAMESPACE")
            return {f"{row[namespace_index]}/{row[name_index]}": row[status_index] for row in rows}
        else:
            return {f"{self.ns}/{row[name_index]}": row[status_index] for row in rows}
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@table(schema="kubernetes", name="nodes", resource="nodes")
class NodesTable:
    """Maps "kubectl get nodes" JSON to the "nodes" SQLite table."""

    @property
    def schema(self):
        # Column DDL fragment used by the engine when creating the table.
        return """
            name TEXT,
            uid TEXT,
            cpu_alloc REAL,
            gpu_alloc REAL,
            mem_alloc INTEGER,
            cpu_cap REAL,
            gpu_cap REAL,
            mem_cap INTEGER
        """

    def make_rows(self, context) -> list[tuple[dict, tuple]]:
        """Yield one (source item, row tuple) pair per node in the response."""
        for item in context.data["items"]:
            node = ItemHelper(item)
            yield item, (
                node.name,
                node.metadata.get("uid"),
                # allocatable and capacity each expand to (cpu, gpu, mem)
                *Limits.extract(node["status"]["allocatable"]).as_tuple(),
                *Limits.extract(node["status"]["capacity"]).as_tuple(),
            )
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
@table(schema="kubernetes", name="pods", resource="pods")
class PodsTable:
    """Maps "kubectl get pods" JSON to the "pods" SQLite table."""

    @property
    def schema(self):
        # Column DDL fragment used by the engine when creating the table.
        return """
            name TEXT,
            uid TEXT,
            is_daemon INTEGER,
            namespace TEXT,
            node_name TEXT,
            creation_ts INTEGER,
            command TEXT,
            phase TEXT,
            status TEXT,
            cpu_req REAL,
            gpu_req REAL,
            mem_req INTEGER,
            cpu_lim REAL,
            gpu_lim REAL,
            mem_lim INTEGER
        """

    def make_rows(self, context) -> list[tuple[dict, tuple]]:
        """Yield one (source item, row tuple) pair per pod in the response."""
        for item in context.data["items"]:
            pod = PodHelper(item)
            yield item, (
                pod.name,
                pod.metadata.get("uid"),
                # SQLite has no BOOLEAN; store daemon-ness as 0/1
                1 if pod.is_daemon else 0,
                pod.namespace,
                pod["spec"].get("nodeName"),
                parse_utc(pod.metadata["creationTimestamp"]),
                pod.command,
                pod["status"]["phase"],
                # "kubectl_status" is injected at fetch time; pods without one
                # are filtered out before reaching this table, so no .get() here.
                pod["kubectl_status"],
                # requests and limits each expand to (cpu, gpu, mem)
                *pod.resources("requests").as_tuple(),
                *pod.resources("limits").as_tuple(),
            )
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
@table(schema="kubernetes", name="jobs", resource="jobs")
class JobsTable:
    """Maps "kubectl get jobs" JSON to the "jobs" SQLite table."""

    @property
    def schema(self):
        # Column DDL fragment used by the engine when creating the table.
        return """
            name TEXT,
            uid TEXT,
            namespace TEXT,
            status TEXT,
            cpu_req REAL,
            gpu_req REAL,
            mem_req INTEGER,
            cpu_lim REAL,
            gpu_lim REAL,
            mem_lim INTEGER
        """

    def make_rows(self, context) -> list[tuple[dict, tuple]]:
        """Yield one (source item, row tuple) pair per job in the response."""
        for item in context.data["items"]:
            job = JobHelper(item)
            yield item, (
                job.name,
                job.metadata.get("uid"),
                job.namespace,
                # One-word summary derived from status.conditions (see JobHelper.status)
                job.status,
                # requests and limits each expand to (cpu, gpu, mem)
                *job.resources("requests").as_tuple(),
                *job.resources("limits").as_tuple(),
            )
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
class LabelsTable:
    """Base class for all built-in label tables; subclasses need only define UID_FIELD."""

    @property
    def schema(self):
        # One row per label; UID_FIELD names the column that joins back to the
        # owning object's table (e.g. pod_uid -> pods.uid).
        return f"""
            {self.UID_FIELD} TEXT,
            key TEXT,
            value TEXT
        """

    def make_rows(self, context) -> list[tuple[dict, tuple]]:
        """Yield one (source item, row tuple) pair per label on each item."""
        for item in context.data["items"]:
            thing = ItemHelper(item)
            for key, value in thing.labels.items():
                yield item, (thing.metadata.get("uid"), key, value)
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
@table(schema="kubernetes", name="node_labels", resource="nodes")
class NodeLabelsTable(LabelsTable):
    # Rows join back to nodes via node_uid = nodes.uid
    UID_FIELD = "node_uid"
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
@table(schema="kubernetes", name="pod_labels", resource="pods")
class PodLabelsTable(LabelsTable):
    # Rows join back to pods via pod_uid = pods.uid
    UID_FIELD = "pod_uid"
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
@table(schema="kubernetes", name="job_labels", resource="jobs")
class JobLabelsTable(LabelsTable):
    # Rows join back to jobs via job_uid = jobs.uid
    UID_FIELD = "job_uid"
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
|
|
2
|
+
resources:
|
|
3
|
+
- name: pods
|
|
4
|
+
namespaced: true
|
|
5
|
+
- name: pod_statuses
|
|
6
|
+
namespaced: true
|
|
7
|
+
- name: jobs
|
|
8
|
+
namespaced: true
|
|
9
|
+
- name: nodes
|
|
10
|
+
namespaced: false
|
|
11
|
+
|
|
12
|
+
# node_taints builtin is defined here because it doesn't have any special column extraction
|
|
13
|
+
# logic, and because it serves as a good unit test.
|
|
14
|
+
|
|
15
|
+
create:
|
|
16
|
+
- table: node_taints
|
|
17
|
+
resource: nodes
|
|
18
|
+
row_source:
|
|
19
|
+
- items
|
|
20
|
+
- spec.taints
|
|
21
|
+
columns:
|
|
22
|
+
- name: node_uid
|
|
23
|
+
path: ^metadata.uid
|
|
24
|
+
- name: key
|
|
25
|
+
path: key
|
|
26
|
+
- name: effect
|
|
27
|
+
path: effect
|
|
File without changes
|