agentstack-cli 0.6.0rc1__py3-none-manylinux_2_34_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,277 @@
1
+ # Copyright 2025 © BeeAI a Series of LF Projects, LLC
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ import importlib.resources
5
+ import os
6
+ import shutil
7
+ import sys
8
+ import tempfile
9
+ import typing
10
+ import uuid
11
+ from subprocess import CompletedProcess
12
+
13
+ import anyio
14
+ import psutil
15
+ import pydantic
16
+ import yaml
17
+
18
+ from agentstack_cli.commands.platform.base_driver import BaseDriver
19
+ from agentstack_cli.configuration import Configuration
20
+ from agentstack_cli.console import console
21
+ from agentstack_cli.utils import run_command
22
+
23
+
24
class LimaDriver(BaseDriver):
    """Platform driver that manages the Agent Stack VM through Lima."""

    # Absolute path to the `limactl` binary (bundled copy preferred).
    limactl_exe: str

    def __init__(self, vm_name: str = "agentstack"):
        """Locate the `limactl` executable, preferring the copy bundled with the package.

        Falls back to a `limactl` found on PATH. Exits with an error when neither
        is available — previously `str(shutil.which("limactl"))` silently turned
        a missing binary into the bogus path string "None".
        """
        super().__init__(vm_name)
        bundled_limactl_exe = importlib.resources.files("agentstack_cli") / "data" / "limactl"
        if bundled_limactl_exe.is_file():
            self.limactl_exe = str(bundled_limactl_exe)
        else:
            external_limactl_exe = shutil.which("limactl")
            if external_limactl_exe is None:
                # BUGFIX: fail fast with a clear message instead of carrying the
                # literal string "None" around as an executable path.
                console.error("Could not find 'limactl'. Please install Lima or reinstall the Agent Stack CLI.")
                sys.exit(1)
            self.limactl_exe = external_limactl_exe
            console.warning(f"Using external Lima from {self.limactl_exe}")
35
+
36
+ @typing.override
37
+ async def run_in_vm(
38
+ self,
39
+ command: list[str],
40
+ message: str,
41
+ env: dict[str, str] | None = None,
42
+ input: bytes | None = None,
43
+ ) -> CompletedProcess[bytes]:
44
+ return await run_command(
45
+ [self.limactl_exe, "shell", f"--tty={sys.stdin.isatty()}", self.vm_name, "--", "sudo", *command],
46
+ message,
47
+ env={"LIMA_HOME": str(Configuration().lima_home)} | (env or {}),
48
+ cwd="/",
49
+ input=input,
50
+ )
51
+
52
+ @typing.override
53
+ async def status(self) -> typing.Literal["running"] | str | None:
54
+ try:
55
+ result = await run_command(
56
+ [self.limactl_exe, "--tty=false", "list", "--format=json"],
57
+ "Looking for existing Agent Stack platform in Lima",
58
+ env={"LIMA_HOME": str(Configuration().lima_home)},
59
+ cwd="/",
60
+ )
61
+
62
+ for line in result.stdout.decode().split("\n"):
63
+ if not line:
64
+ continue
65
+ status = pydantic.TypeAdapter(typing.TypedDict("Status", {"name": str, "status": str})).validate_json(
66
+ line
67
+ )
68
+ if status["name"] == self.vm_name:
69
+ return status["status"].lower()
70
+ return None
71
+ except Exception:
72
+ return None
73
+
74
+ @typing.override
75
+ async def create_vm(self):
76
+ Configuration().home.mkdir(exist_ok=True)
77
+ current_status = await self.status()
78
+
79
+ if not current_status:
80
+ await run_command(
81
+ [self.limactl_exe, "--tty=false", "delete", "--force", self.vm_name],
82
+ "Cleaning up remains of previous instance",
83
+ env={"LIMA_HOME": str(Configuration().lima_home)},
84
+ check=False,
85
+ cwd="/",
86
+ )
87
+
88
+ await run_command(
89
+ [self.limactl_exe, "--tty=false", "delete", "--force", "beeai-platform"],
90
+ "Cleaning up remains of legacy instance",
91
+ env={"LIMA_HOME": str(Configuration().lima_home)},
92
+ check=False,
93
+ cwd="/",
94
+ )
95
+
96
+ total_memory_gib = typing.cast(int, psutil.virtual_memory().total / (1024**3))
97
+
98
+ if total_memory_gib < 4:
99
+ console.error("Not enough memory. Agent Stack platform requires at least 4 GB of RAM.")
100
+ sys.exit(1)
101
+
102
+ if total_memory_gib < 8:
103
+ console.warning("Less than 8 GB of RAM detected. Performance may be degraded.")
104
+
105
+ vm_memory_gib = round(min(8, max(3, total_memory_gib / 2)))
106
+
107
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete_on_close=False) as template_file:
108
+ template_file.write(
109
+ yaml.dump(
110
+ {
111
+ "images": [
112
+ {
113
+ "location": "https://cloud-images.ubuntu.com/releases/noble/release/ubuntu-24.04-server-cloudimg-amd64.img",
114
+ "arch": "x86_64",
115
+ },
116
+ {
117
+ "location": "https://cloud-images.ubuntu.com/releases/noble/release/ubuntu-24.04-server-cloudimg-arm64.img",
118
+ "arch": "aarch64",
119
+ },
120
+ ],
121
+ "portForwards": [
122
+ {
123
+ "guestIP": "127.0.0.1",
124
+ "guestPortRange": [1024, 65535],
125
+ "hostPortRange": [1024, 65535],
126
+ "hostIP": "127.0.0.1",
127
+ },
128
+ {"guestIP": "0.0.0.0", "proto": "any", "ignore": True},
129
+ ],
130
+ "mounts": [
131
+ {"location": "/tmp/agentstack", "mountPoint": "/tmp/agentstack", "writable": True}
132
+ ],
133
+ "containerd": {"system": False, "user": False},
134
+ "hostResolver": {"hosts": {"host.docker.internal": "host.lima.internal"}},
135
+ "memory": f"{vm_memory_gib}GiB",
136
+ }
137
+ )
138
+ )
139
+ template_file.flush()
140
+ template_file.close()
141
+ await run_command(
142
+ [
143
+ self.limactl_exe,
144
+ "--tty=false",
145
+ "start",
146
+ str(template_file.name),
147
+ f"--name={self.vm_name}",
148
+ ],
149
+ "Creating a Lima VM",
150
+ env={"LIMA_HOME": str(Configuration().lima_home)},
151
+ cwd="/",
152
+ )
153
+ elif current_status != "running":
154
+ await run_command(
155
+ [self.limactl_exe, "--tty=false", "start", self.vm_name],
156
+ "Starting up",
157
+ env={"LIMA_HOME": str(Configuration().lima_home)},
158
+ cwd="/",
159
+ )
160
+ else:
161
+ console.info("Updating an existing instance.")
162
+
163
+ @typing.override
164
+ async def stop(self):
165
+ await run_command(
166
+ [self.limactl_exe, "--tty=false", "stop", "--force", self.vm_name],
167
+ "Stopping Agent Stack VM",
168
+ env={"LIMA_HOME": str(Configuration().lima_home)},
169
+ cwd="/",
170
+ )
171
+
172
+ @typing.override
173
+ async def delete(self):
174
+ await run_command(
175
+ [self.limactl_exe, "--tty=false", "delete", "--force", self.vm_name],
176
+ "Deleting Agent Stack platform",
177
+ env={"LIMA_HOME": str(Configuration().lima_home)},
178
+ check=False,
179
+ cwd="/",
180
+ )
181
+
182
+ @typing.override
183
+ async def import_images(self, *tags: str):
184
+ image_dir = anyio.Path("/tmp/agentstack")
185
+ await image_dir.mkdir(exist_ok=True, parents=True)
186
+ image_file = str(uuid.uuid4())
187
+ image_path = image_dir / image_file
188
+
189
+ try:
190
+ await run_command(
191
+ ["docker", "image", "save", "-o", str(image_path), *tags],
192
+ f"Exporting image{'' if len(tags) == 1 else 's'} {', '.join(tags)} from Docker",
193
+ )
194
+ await self.run_in_vm(
195
+ ["/bin/sh", "-c", f"k3s ctr images import /tmp/agentstack/{image_file}"],
196
+ f"Importing image{'' if len(tags) == 1 else 's'} {', '.join(tags)} into Agent Stack platform",
197
+ )
198
+ finally:
199
+ await image_path.unlink(missing_ok=True)
200
+
201
+ @typing.override
202
+ async def import_image_to_internal_registry(self, tag: str) -> None:
203
+ # 1. Check if registry is running
204
+ try:
205
+ await self.run_in_vm(
206
+ ["k3s", "kubectl", "get", "svc", "agentstack-registry-svc"],
207
+ "Checking internal registry availability",
208
+ )
209
+ except Exception as e:
210
+ console.warning(f"Internal registry service not found. Push might fail: {e}")
211
+
212
+ # 2. Export image from Docker to shared temp dir
213
+ image_dir = anyio.Path("/tmp/agentstack")
214
+ await image_dir.mkdir(exist_ok=True, parents=True)
215
+ image_file = f"{uuid.uuid4()}.tar"
216
+ image_path = image_dir / image_file
217
+
218
+ try:
219
+ await run_command(
220
+ ["docker", "image", "save", "-o", str(image_path), tag],
221
+ f"Exporting image {tag} from Docker",
222
+ )
223
+
224
+ # 3 & 4. Run Crane Job
225
+ crane_image = "ghcr.io/i-am-bee/alpine/crane:0.20.6"
226
+ for image in self.loaded_images:
227
+ if "alpine/crane" in image:
228
+ crane_image = image
229
+ break
230
+
231
+ job_name = f"push-{uuid.uuid4().hex[:6]}"
232
+ job_def = {
233
+ "apiVersion": "batch/v1",
234
+ "kind": "Job",
235
+ "metadata": {"name": job_name, "namespace": "default"},
236
+ "spec": {
237
+ "backoffLimit": 0,
238
+ "ttlSecondsAfterFinished": 60,
239
+ "template": {
240
+ "spec": {
241
+ "restartPolicy": "Never",
242
+ "containers": [
243
+ {
244
+ "name": "crane",
245
+ "image": crane_image,
246
+ "command": ["crane", "push", f"/workspace/{image_file}", tag, "--insecure"],
247
+ "volumeMounts": [{"name": "workspace", "mountPath": "/workspace"}],
248
+ }
249
+ ],
250
+ "volumes": [{"name": "workspace", "hostPath": {"path": "/tmp/agentstack"}}],
251
+ }
252
+ },
253
+ },
254
+ }
255
+
256
+ await self.run_in_vm(
257
+ ["k3s", "kubectl", "apply", "-f", "-"], "Starting push job", input=yaml.dump(job_def).encode()
258
+ )
259
+ await self.run_in_vm(
260
+ ["k3s", "kubectl", "wait", "--for=condition=complete", f"job/{job_name}", "--timeout=300s"],
261
+ "Waiting for push to complete",
262
+ )
263
+ await self.run_in_vm(["k3s", "kubectl", "delete", "job", job_name], "Cleaning up push job")
264
+ finally:
265
+ await image_path.unlink(missing_ok=True)
266
+
267
+ @typing.override
268
+ async def exec(self, command: list[str]):
269
+ await anyio.run_process(
270
+ [self.limactl_exe, "shell", f"--tty={sys.stdin.isatty()}", self.vm_name, "--", *command],
271
+ input=None if sys.stdin.isatty() else sys.stdin.read().encode(),
272
+ check=False,
273
+ stdout=None,
274
+ stderr=None,
275
+ env={**os.environ, "LIMA_HOME": str(Configuration().lima_home)},
276
+ cwd="/",
277
+ )
@@ -0,0 +1,229 @@
1
+ # Copyright 2025 © BeeAI a Series of LF Projects, LLC
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ import configparser
5
+ import os
6
+ import pathlib
7
+ import platform
8
+ import sys
9
+ import textwrap
10
+ import typing
11
+
12
+ import anyio
13
+ import pydantic
14
+ import yaml
15
+
16
+ from agentstack_cli.commands.platform.base_driver import BaseDriver
17
+ from agentstack_cli.configuration import Configuration
18
+ from agentstack_cli.console import console
19
+ from agentstack_cli.utils import run_command
20
+
21
+
22
class WSLDriver(BaseDriver):
    """Platform driver that manages the Agent Stack VM as a WSL2 distribution."""

    @typing.override
    async def run_in_vm(
        self,
        command: list[str],
        message: str,
        env: dict[str, str] | None = None,
        input: bytes | None = None,
        check: bool = True,
    ):
        """Run *command* as root inside the WSL distribution."""
        wsl_env = dict(env or {})
        # Force UTF-8 output from wsl.exe and propagate the flag through WSLENV.
        wsl_env["WSL_UTF8"] = "1"
        wsl_env["WSLENV"] = os.getenv("WSLENV", "") + ":WSL_UTF8"
        return await run_command(
            ["wsl.exe", "--user", "root", "--distribution", self.vm_name, "--", *command],
            message,
            env=wsl_env,
            input=input,
            check=check,
        )
39
+
40
+ @typing.override
41
+ async def status(self) -> typing.Literal["running"] | str | None:
42
+ try:
43
+ for status, cmd in [("running", ["--running"]), ("stopped", [])]:
44
+ result = await run_command(
45
+ ["wsl.exe", "--list", "--quiet", *cmd],
46
+ f"Looking for {status} Agent Stack platform in WSL",
47
+ env={"WSL_UTF8": "1", "WSLENV": os.getenv("WSLENV", "") + ":WSL_UTF8"},
48
+ )
49
+ if self.vm_name in result.stdout.decode().splitlines():
50
+ return status
51
+ return None
52
+ except Exception:
53
+ return None
54
+
55
+ @typing.override
56
+ async def create_vm(self):
57
+ if (await run_command(["wsl.exe", "--status"], "Checking for WSL2", check=False)).returncode != 0:
58
+ console.error(
59
+ "WSL is not installed. Please follow the Agent Stack installation instructions: https://agentstack.beeai.dev/introduction/quickstart#windows"
60
+ )
61
+ console.hint(
62
+ "Run [green]wsl.exe --install[/green] as administrator. If you just did this, restart your PC and run the same command again. Full installation may require up to two restarts. WSL is properly set up once you reach a working Linux terminal. You can verify this by running [green]wsl.exe[/green] without arguments."
63
+ )
64
+ sys.exit(1)
65
+
66
+ config_file = (
67
+ pathlib.Path.home()
68
+ if platform.system() == "Windows"
69
+ else pathlib.Path(
70
+ (
71
+ await run_command(
72
+ ["/bin/sh", "-c", '''wslpath "$(cmd.exe /c 'echo %USERPROFILE%')"'''], "Detecting home path"
73
+ )
74
+ )
75
+ .stdout.decode()
76
+ .strip()
77
+ )
78
+ ) / ".wslconfig"
79
+ config_file.touch()
80
+ with config_file.open("r+") as f:
81
+ config = configparser.ConfigParser()
82
+ f.seek(0)
83
+ config.read_file(f)
84
+
85
+ if not config.has_section("wsl2"):
86
+ config.add_section("wsl2")
87
+
88
+ wsl2_networking_mode = config.get("wsl2", "networkingMode", fallback=None)
89
+ if wsl2_networking_mode and wsl2_networking_mode != "nat":
90
+ config.set("wsl2", "networkingMode", "nat")
91
+ f.seek(0)
92
+ f.truncate(0)
93
+ config.write(f)
94
+
95
+ if platform.system() == "Linux":
96
+ console.warning(
97
+ "WSL networking mode updated. Please close WSL, run [green]wsl --shutdown[/green] from PowerShell, re-open WSL and run [green]agentstack platform start[/green] again."
98
+ )
99
+ sys.exit(1)
100
+ await run_command(["wsl.exe", "--shutdown"], "Updating WSL2 networking")
101
+
102
+ Configuration().home.mkdir(exist_ok=True)
103
+ if not await self.status():
104
+ await run_command(
105
+ ["wsl.exe", "--unregister", self.vm_name], "Cleaning up remains of previous instance", check=False
106
+ )
107
+ await run_command(
108
+ ["wsl.exe", "--unregister", "beeai-platform"], "Cleaning up remains of legacy instance", check=False
109
+ )
110
+ await run_command(
111
+ ["wsl.exe", "--install", "--name", self.vm_name, "--no-launch", "--web-download"],
112
+ "Creating a WSL distribution",
113
+ )
114
+
115
+ await self.run_in_vm(
116
+ [
117
+ "sh",
118
+ "-c",
119
+ "echo '[network]\ngenerateResolvConf = false\n[boot]\nsystemd=true\n' >/etc/wsl.conf && rm /etc/resolv.conf && echo 'nameserver 1.1.1.1\n' >/etc/resolv.conf && chattr +i /etc/resolv.conf",
120
+ ],
121
+ "Setting up DNS configuration",
122
+ check=False,
123
+ )
124
+
125
+ await run_command(["wsl.exe", "--terminate", self.vm_name], "Restarting Agent Stack VM")
126
+ await self.run_in_vm(["dbus-launch", "true"], "Ensuring persistence of Agent Stack VM")
127
+
128
+ @typing.override
129
+ async def deploy(
130
+ self,
131
+ set_values_list: list[str],
132
+ values_file: pathlib.Path | None = None,
133
+ import_images: list[str] | None = None,
134
+ pull_on_host: bool = False,
135
+ skip_pull: bool = False,
136
+ skip_restart_deployments: bool = False,
137
+ ) -> None:
138
+ if pull_on_host:
139
+ raise NotImplementedError("Pulling on host is not supported on this platform.")
140
+
141
+ host_ip = (
142
+ (
143
+ await self.run_in_vm(
144
+ ["bash", "-c", "ip route show | grep -i default | cut -d' ' -f3"],
145
+ "Detecting host IP address",
146
+ )
147
+ )
148
+ .stdout.decode()
149
+ .strip()
150
+ )
151
+ await self.run_in_vm(
152
+ ["k3s", "kubectl", "apply", "-f", "-"],
153
+ "Setting up internal networking",
154
+ input=yaml.dump(
155
+ {
156
+ "apiVersion": "v1",
157
+ "kind": "ConfigMap",
158
+ "metadata": {"name": "coredns-custom", "namespace": "kube-system"},
159
+ "data": {
160
+ "default.server": f"host.docker.internal {{\n hosts {{\n {host_ip} host.docker.internal\n fallthrough\n }}\n}}"
161
+ },
162
+ }
163
+ ).encode(),
164
+ )
165
+ await super().deploy(set_values_list=set_values_list, values_file=values_file, import_images=import_images)
166
+ await self.run_in_vm(
167
+ ["sh", "-c", "cat >/etc/systemd/system/kubectl-port-forward@.service"],
168
+ "Installing systemd unit for port-forwarding",
169
+ input=textwrap.dedent("""\
170
+ [Unit]
171
+ Description=Kubectl Port Forward for service %%i
172
+ After=network.target
173
+
174
+ [Service]
175
+ Type=simple
176
+ ExecStart=/bin/bash -c 'IFS=":" read svc port <<< "%i"; exec /usr/local/bin/kubectl port-forward --address=127.0.0.1 svc/$svc $port:$port'
177
+ Restart=on-failure
178
+ User=root
179
+
180
+ [Install]
181
+ WantedBy=multi-user.target
182
+ """).encode(),
183
+ )
184
+ await self.run_in_vm(["systemctl", "daemon-reexec"], "Reloading systemd")
185
+ services_json = (
186
+ await self.run_in_vm(
187
+ ["k3s", "kubectl", "get", "svc", "--field-selector=spec.type=LoadBalancer", "--output=json"],
188
+ "Detecting ports to forward",
189
+ )
190
+ ).stdout
191
+ ServicePort = typing.TypedDict("ServicePort", {"port": int, "name": str})
192
+ ServiceSpec = typing.TypedDict("ServiceSpec", {"ports": list[ServicePort]})
193
+ ServiceMetadata = typing.TypedDict("ServiceMetadata", {"name": str, "namespace": str})
194
+ Service = typing.TypedDict("Service", {"metadata": ServiceMetadata, "spec": ServiceSpec})
195
+ Services = typing.TypedDict("Services", {"items": list[Service]})
196
+ for service in pydantic.TypeAdapter(Services).validate_json(services_json)["items"]:
197
+ name = service["metadata"]["name"]
198
+ for port_item in service["spec"]["ports"]:
199
+ port = port_item["port"]
200
+ await self.run_in_vm(
201
+ ["systemctl", "enable", "--now", f"kubectl-port-forward@{name}:{port}.service"],
202
+ f"Starting port-forward for {name}:{port}",
203
+ )
204
+
205
+ @typing.override
206
+ async def stop(self):
207
+ await run_command(["wsl.exe", "--terminate", self.vm_name], "Stopping Agent Stack VM")
208
+
209
+ @typing.override
210
+ async def delete(self):
211
+ await run_command(["wsl.exe", "--unregister", self.vm_name], "Deleting Agent Stack platform", check=False)
212
+
213
+ @typing.override
214
+ async def import_images(self, *tags: str) -> None:
215
+ raise NotImplementedError("Importing images is not supported on this platform.")
216
+
217
+ @typing.override
218
+ async def import_image_to_internal_registry(self, tag: str) -> None:
219
+ raise NotImplementedError("Importing images to internal registry is not supported on this platform.")
220
+
221
+ @typing.override
222
+ async def exec(self, command: list[str]):
223
+ await anyio.run_process(
224
+ ["wsl.exe", "--user", "root", "--distribution", self.vm_name, "--", *command],
225
+ input=None if sys.stdin.isatty() else sys.stdin.read().encode(),
226
+ check=False,
227
+ stdout=None,
228
+ stderr=None,
229
+ )