jumpstarter-kubernetes 0.7.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,166 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/build/
73
+ docs/build_all/
74
+
75
+ # PyBuilder
76
+ .pybuilder/
77
+ target/
78
+
79
+ # Jupyter Notebook
80
+ .ipynb_checkpoints
81
+
82
+ # IPython
83
+ profile_default/
84
+ ipython_config.py
85
+
86
+ # pyenv
87
+ # For a library or package, you might want to ignore these files since the code is
88
+ # intended to run in multiple environments; otherwise, check them in:
89
+ # .python-version
90
+
91
+ # pipenv
92
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
94
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
95
+ # install all needed dependencies.
96
+ #Pipfile.lock
97
+
98
+ # poetry
99
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
101
+ # commonly ignored for libraries.
102
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103
+ #poetry.lock
104
+
105
+ # pdm
106
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
107
+ #pdm.lock
108
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
109
+ # in version control.
110
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
111
+ .pdm.toml
112
+ .pdm-python
113
+ .pdm-build/
114
+
115
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116
+ __pypackages__/
117
+
118
+ # Celery stuff
119
+ celerybeat-schedule
120
+ celerybeat.pid
121
+
122
+ # SageMath parsed files
123
+ *.sage.py
124
+
125
+ # Environments
126
+ .env
127
+ .venv
128
+ env/
129
+ venv/
130
+ ENV/
131
+ env.bak/
132
+ venv.bak/
133
+
134
+ # Spyder project settings
135
+ .spyderproject
136
+ .spyproject
137
+
138
+ # Rope project settings
139
+ .ropeproject
140
+
141
+ # mkdocs documentation
142
+ /site
143
+
144
+ # mypy
145
+ .mypy_cache/
146
+ .dmypy.json
147
+ dmypy.json
148
+
149
+ # Pyre type checker
150
+ .pyre/
151
+
152
+ # pytype static type analyzer
153
+ .pytype/
154
+
155
+ # Cython debug symbols
156
+ cython_debug/
157
+
158
+ # PyCharm
159
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
162
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163
+ #.idea/
164
+
165
+ # Ruff cache
166
+ .ruff_cache/
@@ -0,0 +1,15 @@
1
+ Metadata-Version: 2.4
2
+ Name: jumpstarter-kubernetes
3
+ Version: 0.7.3
4
+ Project-URL: Homepage, https://jumpstarter.dev
5
+ Project-URL: source_archive, https://github.com/jumpstarter-dev/repo/archive/db953ec55e643f00dbb26a3da0a50dccf68dd74d.zip
6
+ Author-email: Kirk Brauer <kbrauer@hatci.com>
7
+ License-Expression: Apache-2.0
8
+ Requires-Python: >=3.11
9
+ Requires-Dist: jumpstarter
10
+ Requires-Dist: kubernetes-asyncio>=31.1.0
11
+ Requires-Dist: kubernetes>=31.0.0
12
+ Requires-Dist: pydantic>=2.8.2
13
+ Description-Content-Type: text/markdown
14
+
15
+ # Jumpstarter Kubernetes Library
@@ -0,0 +1 @@
1
+ # Jumpstarter Kubernetes Library
@@ -0,0 +1,58 @@
1
+ from .clients import ClientsV1Alpha1Api, V1Alpha1Client, V1Alpha1ClientList, V1Alpha1ClientStatus
2
+ from .cluster import (
3
+ create_kind_cluster,
4
+ create_minikube_cluster,
5
+ delete_kind_cluster,
6
+ delete_minikube_cluster,
7
+ kind_installed,
8
+ minikube_installed,
9
+ )
10
+ from .exporters import (
11
+ ExportersV1Alpha1Api,
12
+ V1Alpha1Exporter,
13
+ V1Alpha1ExporterDevice,
14
+ V1Alpha1ExporterList,
15
+ V1Alpha1ExporterStatus,
16
+ )
17
+ from .install import (
18
+ helm_installed,
19
+ install_helm_chart,
20
+ uninstall_helm_chart,
21
+ )
22
+ from .leases import (
23
+ LeasesV1Alpha1Api,
24
+ V1Alpha1Lease,
25
+ V1Alpha1LeaseList,
26
+ V1Alpha1LeaseSelector,
27
+ V1Alpha1LeaseSpec,
28
+ V1Alpha1LeaseStatus,
29
+ )
30
+ from .list import V1Alpha1List
31
+
32
# Explicit public API of the jumpstarter_kubernetes package; the names are
# re-exported from the clients, cluster, exporters, install, leases and
# list submodules imported above.
__all__ = [
    "ClientsV1Alpha1Api",
    "V1Alpha1Client",
    "V1Alpha1ClientList",
    "V1Alpha1ClientStatus",
    "ExportersV1Alpha1Api",
    "V1Alpha1Exporter",
    "V1Alpha1ExporterList",
    "V1Alpha1ExporterStatus",
    "V1Alpha1ExporterDevice",
    "LeasesV1Alpha1Api",
    "V1Alpha1Lease",
    "V1Alpha1LeaseStatus",
    "V1Alpha1LeaseList",
    "V1Alpha1LeaseSelector",
    "V1Alpha1LeaseSpec",
    "V1Alpha1List",
    "helm_installed",
    "install_helm_chart",
    "uninstall_helm_chart",
    "minikube_installed",
    "kind_installed",
    "create_minikube_cluster",
    "create_kind_cluster",
    "delete_minikube_cluster",
    "delete_kind_cluster",
]
@@ -0,0 +1,165 @@
1
+ import asyncio
2
+ import base64
3
+ import logging
4
+ from typing import Literal, Optional
5
+
6
+ from kubernetes_asyncio.client.models import V1ObjectMeta, V1ObjectReference
7
+ from pydantic import Field
8
+
9
+ from .json import JsonBaseModel
10
+ from .list import V1Alpha1List
11
+ from .serialize import SerializeV1ObjectMeta, SerializeV1ObjectReference
12
+ from .util import AbstractAsyncCustomObjectApi
13
+ from jumpstarter.config.client import ClientConfigV1Alpha1, ClientConfigV1Alpha1Drivers
14
+ from jumpstarter.config.common import ObjectMeta
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+ CREATE_CLIENT_DELAY = 1
19
+ CREATE_CLIENT_COUNT = 10
20
+
21
+
22
class V1Alpha1ClientStatus(JsonBaseModel):
    """Status reported by the controller for a Client custom resource."""

    # Reference to the Secret that holds the client's token; None until the
    # controller has issued credentials.
    credential: Optional[SerializeV1ObjectReference] = None
    # Endpoint the client should connect to (empty string when not yet set).
    endpoint: str
25
+
26
+
27
class V1Alpha1Client(JsonBaseModel):
    """A jumpstarter.dev/v1alpha1 Client custom resource."""

    api_version: Literal["jumpstarter.dev/v1alpha1"] = Field(alias="apiVersion", default="jumpstarter.dev/v1alpha1")
    kind: Literal["Client"] = Field(default="Client")
    metadata: SerializeV1ObjectMeta
    status: Optional[V1Alpha1ClientStatus]

    @staticmethod
    def from_dict(dict: dict):
        """Build a V1Alpha1Client from a raw dict returned by the Kubernetes API."""
        meta = dict["metadata"]
        status = None
        if "status" in dict:
            raw_status = dict["status"]
            credential = (
                V1ObjectReference(name=raw_status["credential"]["name"]) if "credential" in raw_status else None
            )
            status = V1Alpha1ClientStatus(credential=credential, endpoint=raw_status.get("endpoint", ""))
        return V1Alpha1Client(
            api_version=dict["apiVersion"],
            kind=dict["kind"],
            metadata=V1ObjectMeta(
                creation_timestamp=meta["creationTimestamp"],
                generation=meta["generation"],
                name=meta["name"],
                namespace=meta["namespace"],
                resource_version=meta["resourceVersion"],
                uid=meta["uid"],
            ),
            status=status,
        )

    @classmethod
    def rich_add_columns(cls, table):
        """Add the table columns used when rendering clients with rich."""
        table.add_column("NAME", no_wrap=True)
        table.add_column("ENDPOINT")
        # table.add_column("AGE")

    def rich_add_rows(self, table):
        """Append this client as one row of a rich table."""
        endpoint = "" if self.status is None else self.status.endpoint
        table.add_row(self.metadata.name, endpoint)

    def rich_add_names(self, names):
        """Append this client's fully-qualified resource name to *names*."""
        names.append(f"client.jumpstarter.dev/{self.metadata.name}")
70
+
71
+
72
class V1Alpha1ClientList(V1Alpha1List[V1Alpha1Client]):
    """A list of Client custom resources."""

    kind: Literal["ClientList"] = Field(default="ClientList")

    @staticmethod
    def from_dict(dict: dict):
        """Build a client list from a raw Kubernetes list response."""
        clients = [V1Alpha1Client.from_dict(item) for item in dict.get("items", [])]
        return V1Alpha1ClientList(items=clients)

    @classmethod
    def rich_add_columns(cls, table):
        """Delegate column setup to the item type."""
        V1Alpha1Client.rich_add_columns(table)

    def rich_add_rows(self, table):
        """Render every contained client as a table row."""
        for item in self.items:
            item.rich_add_rows(table)

    def rich_add_names(self, names):
        """Collect the resource name of every contained client."""
        for item in self.items:
            item.rich_add_names(names)
90
+
91
+
92
class ClientsV1Alpha1Api(AbstractAsyncCustomObjectApi):
    """Interact with the clients custom resource API"""

    async def create_client(
        self, name: str, labels: dict[str, str] | None = None, oidc_username: str | None = None
    ) -> V1Alpha1Client:
        """Create a client object in the cluster async.

        Args:
            name: Name of the Client custom resource to create.
            labels: Optional Kubernetes labels to attach to the resource.
            oidc_username: Optional OIDC username stored in the resource spec.

        Returns:
            The created client once the controller has issued its credentials.

        Raises:
            Exception: If the credentials do not appear within the retry window.
        """
        # Build metadata explicitly: the name must always be present, the labels
        # only when provided. The previous one-liner
        # `{"name": name} | {"labels": labels} if labels is not None else {}`
        # parsed as `({"name": name} | {"labels": labels}) if labels is not None else {}`
        # due to operator precedence, so the name was silently dropped whenever
        # labels was None.
        metadata: dict = {"name": name}
        if labels is not None:
            metadata["labels"] = labels
        # Create the namespaced client object
        await self.api.create_namespaced_custom_object(
            namespace=self.namespace,
            group="jumpstarter.dev",
            plural="clients",
            version="v1alpha1",
            body={
                "apiVersion": "jumpstarter.dev/v1alpha1",
                "kind": "Client",
                "metadata": metadata,
                "spec": {"username": oidc_username} if oidc_username is not None else {},
            },
        )
        # Wait for the credentials to become available
        # NOTE: Watch is not working here with the Python kubernetes library
        count = 0
        # Retry for a maximum of CREATE_CLIENT_COUNT * CREATE_CLIENT_DELAY seconds
        while count < CREATE_CLIENT_COUNT:
            # Try to get the updated client resource
            updated_client = await self.api.get_namespaced_custom_object(
                namespace=self.namespace, group="jumpstarter.dev", plural="clients", version="v1alpha1", name=name
            )
            # check if the client status is updated with the credentials
            if "credential" in updated_client.get("status", {}):
                return V1Alpha1Client.from_dict(updated_client)
            count += 1
            await asyncio.sleep(CREATE_CLIENT_DELAY)
        raise Exception("Timeout waiting for client credentials")

    async def list_clients(self) -> V1Alpha1List[V1Alpha1Client]:
        """List the client objects in the cluster async"""
        res = await self.api.list_namespaced_custom_object(
            namespace=self.namespace, group="jumpstarter.dev", plural="clients", version="v1alpha1"
        )
        return V1Alpha1ClientList.from_dict(res)

    async def get_client(self, name: str) -> V1Alpha1Client:
        """Get a single client object from the cluster async"""
        result = await self.api.get_namespaced_custom_object(
            namespace=self.namespace, group="jumpstarter.dev", plural="clients", version="v1alpha1", name=name
        )
        return V1Alpha1Client.from_dict(result)

    async def get_client_config(self, name: str, allow: list[str], unsafe=False) -> ClientConfigV1Alpha1:
        """Get a client config for a specified client name.

        Raises:
            Exception: If the client has no issued credential yet.
        """
        client = await self.get_client(name)
        # Fail with a clear message instead of an opaque AttributeError when the
        # controller has not yet populated status/credential.
        if client.status is None or client.status.credential is None:
            raise Exception(f"Client '{name}' has no credential issued yet")
        secret = await self.core_api.read_namespaced_secret(client.status.credential.name, self.namespace)
        endpoint = client.status.endpoint
        token = base64.b64decode(secret.data["token"]).decode("utf8")
        return ClientConfigV1Alpha1(
            alias=name,
            metadata=ObjectMeta(
                namespace=client.metadata.namespace,
                name=client.metadata.name,
            ),
            endpoint=endpoint,
            token=token,
            drivers=ClientConfigV1Alpha1Drivers(allow=allow, unsafe=unsafe),
        )

    async def delete_client(self, name: str):
        """Delete a client object"""
        await self.api.delete_namespaced_custom_object(
            namespace=self.namespace, group="jumpstarter.dev", plural="clients", version="v1alpha1", name=name
        )
@@ -0,0 +1,58 @@
1
+ from kubernetes_asyncio.client.models import V1ObjectMeta
2
+
3
+ from jumpstarter_kubernetes import V1Alpha1Client, V1Alpha1ClientStatus
4
+
5
# Fixture: a fully-populated Client resource exercised by the serialization
# tests below (credential deliberately None to cover the null branch).
TEST_CLIENT = V1Alpha1Client(
    api_version="jumpstarter.dev/v1alpha1",
    kind="Client",
    metadata=V1ObjectMeta(
        creation_timestamp="2021-10-01T00:00:00Z",
        generation=1,
        name="test-client",
        namespace="default",
        resource_version="1",
        uid="7a25eb81-6443-47ec-a62f-50165bffede8",
    ),
    status=V1Alpha1ClientStatus(credential=None, endpoint="https://test-client"),
)
18
+
19
+
20
def test_client_dump_json():
    # The expected string pins both key order (apiVersion first, per the
    # pydantic field aliases) and the camelCase serialization of metadata.
    assert (
        TEST_CLIENT.dump_json()
        == """{
    "apiVersion": "jumpstarter.dev/v1alpha1",
    "kind": "Client",
    "metadata": {
        "creationTimestamp": "2021-10-01T00:00:00Z",
        "generation": 1,
        "name": "test-client",
        "namespace": "default",
        "resourceVersion": "1",
        "uid": "7a25eb81-6443-47ec-a62f-50165bffede8"
    },
    "status": {
        "credential": null,
        "endpoint": "https://test-client"
    }
}"""
    )
40
+
41
+
42
def test_client_dump_yaml():
    # YAML output quotes the timestamp and resourceVersion (strings that could
    # otherwise be parsed as non-string scalars) and renders None as null.
    assert (
        TEST_CLIENT.dump_yaml()
        == """apiVersion: jumpstarter.dev/v1alpha1
kind: Client
metadata:
  creationTimestamp: '2021-10-01T00:00:00Z'
  generation: 1
  name: test-client
  namespace: default
  resourceVersion: '1'
  uid: 7a25eb81-6443-47ec-a62f-50165bffede8
status:
  credential: null
  endpoint: https://test-client
"""
    )
@@ -0,0 +1,201 @@
1
+ import asyncio
2
+ import shutil
3
+ from typing import Optional, Tuple
4
+
5
+
6
def minikube_installed(minikube: str) -> bool:
    """Return True when the given Minikube binary can be resolved via the PATH."""
    return bool(shutil.which(minikube))
9
+
10
+
11
def kind_installed(kind: str) -> bool:
    """Return True when the given Kind binary can be resolved via the PATH."""
    return bool(shutil.which(kind))
14
+
15
+
16
async def run_command(cmd: list[str]) -> Tuple[int, str, str]:
    """Execute *cmd* and return (exit code, stripped stdout, stripped stderr).

    Raises RuntimeError when the executable cannot be found.
    """
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
    except FileNotFoundError as e:
        raise RuntimeError(f"Command not found: {cmd[0]}") from e
    out, err = await proc.communicate()
    return proc.returncode, out.decode().strip(), err.decode().strip()
26
+
27
+
28
async def run_command_with_output(cmd: list[str]) -> int:
    """Run *cmd*, letting it inherit our stdout/stderr, and return its exit code.

    Raises RuntimeError when the executable cannot be found.
    """
    try:
        proc = await asyncio.create_subprocess_exec(*cmd)
    except FileNotFoundError as e:
        raise RuntimeError(f"Command not found: {cmd[0]}") from e
    return await proc.wait()
35
+
36
+
37
async def minikube_cluster_exists(minikube: str, cluster_name: str) -> bool:
    """Return True when a Minikube profile named *cluster_name* exists."""
    # A missing binary means there cannot be any cluster to find.
    if not minikube_installed(minikube):
        return False

    try:
        code, _, _ = await run_command([minikube, "status", "-p", cluster_name])
    except RuntimeError:
        return False
    return code == 0
47
+
48
+
49
async def kind_cluster_exists(kind: str, cluster_name: str) -> bool:
    """Return True when a Kind cluster named *cluster_name* exists."""
    # A missing binary means there cannot be any cluster to find.
    if not kind_installed(kind):
        return False

    try:
        code, _, _ = await run_command([kind, "get", "kubeconfig", "--name", cluster_name])
    except RuntimeError:
        return False
    return code == 0
59
+
60
+
61
async def delete_minikube_cluster(minikube: str, cluster_name: str) -> bool:
    """Delete a Minikube cluster; an already-absent cluster counts as success.

    Raises RuntimeError when the binary is missing or the deletion fails.
    """
    if not minikube_installed(minikube):
        raise RuntimeError(f"{minikube} is not installed or not found in PATH.")

    if not await minikube_cluster_exists(minikube, cluster_name):
        # Already deleted, consider it successful
        return True

    if await run_command_with_output([minikube, "delete", "-p", cluster_name]) != 0:
        raise RuntimeError(f"Failed to delete Minikube cluster '{cluster_name}'")
    return True
75
+
76
+
77
async def delete_kind_cluster(kind: str, cluster_name: str) -> bool:
    """Delete a Kind cluster; an already-absent cluster counts as success.

    Raises RuntimeError when the binary is missing or the deletion fails.
    """
    if not kind_installed(kind):
        raise RuntimeError(f"{kind} is not installed or not found in PATH.")

    if not await kind_cluster_exists(kind, cluster_name):
        # Already deleted, consider it successful
        return True

    if await run_command_with_output([kind, "delete", "cluster", "--name", cluster_name]) != 0:
        raise RuntimeError(f"Failed to delete Kind cluster '{cluster_name}'")
    return True
91
+
92
+
93
async def create_minikube_cluster(
    minikube: str, cluster_name: str, extra_args: Optional[list[str]] = None, force_recreate: bool = False
) -> bool:
    """Create a Minikube cluster.

    Args:
        minikube: Name or path of the minikube binary.
        cluster_name: Profile name for the new cluster.
        extra_args: Extra CLI arguments forwarded to ``minikube start``.
        force_recreate: Delete and recreate the cluster if it already exists.

    Returns:
        True on success.

    Raises:
        RuntimeError: If minikube is missing, the cluster already exists and
            force_recreate is False, or cluster creation fails.
    """
    # Copy the caller's list: we may append "--cpus=4" below, and mutating a
    # passed-in argument is a surprising side effect.
    extra_args = list(extra_args) if extra_args is not None else []

    if not minikube_installed(minikube):
        raise RuntimeError(f"{minikube} is not installed or not found in PATH.")

    # Check if cluster already exists
    cluster_exists = await minikube_cluster_exists(minikube, cluster_name)

    if cluster_exists:
        if not force_recreate:
            raise RuntimeError(f"Minikube cluster '{cluster_name}' already exists.")
        else:
            if not await delete_minikube_cluster(minikube, cluster_name):
                return False

    # Default the CPU count only when neither the CLI args nor the user's
    # minikube config already specify one.
    has_cpus_flag = any(a == "--cpus" or a.startswith("--cpus=") for a in extra_args)
    if not has_cpus_flag:
        try:
            rc, out, _ = await run_command([minikube, "config", "get", "cpus"])
            has_config_cpus = rc == 0 and out.strip().isdigit() and int(out.strip()) > 0
        except RuntimeError:
            # If we cannot query minikube (e.g., not installed in test env), default CPUs
            has_config_cpus = False
        if not has_config_cpus:
            extra_args.append("--cpus=4")

    command = [
        minikube,
        "start",
        "--profile",
        cluster_name,
        "--extra-config=apiserver.service-node-port-range=8000-9000",
    ]
    command.extend(extra_args)

    returncode = await run_command_with_output(command)

    if returncode == 0:
        return True
    else:
        raise RuntimeError(f"Failed to create Minikube cluster '{cluster_name}'")
139
+
140
+
141
async def create_kind_cluster(
    kind: str, cluster_name: str, extra_args: Optional[list[str]] = None, force_recreate: bool = False
) -> bool:
    """Create a Kind cluster.

    Args:
        kind: Name or path of the kind binary.
        cluster_name: Name for the new cluster.
        extra_args: Extra CLI arguments forwarded to ``kind create cluster``.
        force_recreate: Delete and recreate the cluster if it already exists.

    Returns:
        True on success.

    Raises:
        RuntimeError: If kind is missing, the cluster already exists and
            force_recreate is False, or cluster creation fails.
    """
    if extra_args is None:
        extra_args = []

    if not kind_installed(kind):
        raise RuntimeError(f"{kind} is not installed or not found in PATH.")

    # Check if cluster already exists
    cluster_exists = await kind_cluster_exists(kind, cluster_name)

    if cluster_exists:
        if not force_recreate:
            raise RuntimeError(f"Kind cluster '{cluster_name}' already exists.")
        else:
            if not await delete_kind_cluster(kind, cluster_name):
                return False

    # Cluster config fed to kind via stdin: widens the NodePort range and maps
    # a handful of container ports to host ports for local development.
    cluster_config = """kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
kubeadmConfigPatches:
- |
  kind: ClusterConfiguration
  apiServer:
    extraArgs:
      "service-node-port-range": "3000-32767"
- |
  kind: InitConfiguration
  nodeRegistration:
    kubeletExtraArgs:
      node-labels: "ingress-ready=true"
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 80
    hostPort: 5080
    protocol: TCP
  - containerPort: 30010
    hostPort: 8082
    protocol: TCP
  - containerPort: 30011
    hostPort: 8083
    protocol: TCP
  - containerPort: 443
    hostPort: 5443
    protocol: TCP
"""

    command = [kind, "create", "cluster", "--name", cluster_name, "--config=/dev/stdin"]
    command.extend(extra_args)

    # Keep the "missing binary" failure mode consistent with run_command():
    # previously a raw FileNotFoundError could escape here while every other
    # helper in this module converts it to RuntimeError.
    try:
        kind_process = await asyncio.create_subprocess_exec(*command, stdin=asyncio.subprocess.PIPE)
    except FileNotFoundError as e:
        raise RuntimeError(f"Command not found: {command[0]}") from e

    await kind_process.communicate(input=cluster_config.encode())

    if kind_process.returncode == 0:
        return True
    else:
        raise RuntimeError(f"Failed to create Kind cluster '{cluster_name}'")