agentdocker-lite 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
# Publish the package to PyPI whenever a GitHub release is published.
# Uses PyPI "trusted publishing" (OIDC), so no API-token secret is needed.
name: Publish to PyPI

on:
  release:
    types: [published]

jobs:
  publish:
    runs-on: ubuntu-latest
    # GitHub deployment environment gating this job (configure on the repo;
    # trusted publishing on PyPI should reference the same environment name).
    environment: pypi
    permissions:
      contents: read
      # id-token: write is required for OIDC-based trusted publishing.
      id-token: write
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - run: pip install build
      # Builds sdist + wheel into dist/.
      - run: python -m build
      # Uploads everything under dist/ via trusted publishing.
      - uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1 @@
1
+ __pycache__/
@@ -0,0 +1,6 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentdocker-lite
3
+ Version: 0.0.1
4
+ Summary: Lightweight Linux namespace sandbox with persistent shell and instant reset
5
+ License-Expression: MIT
6
+ Requires-Python: >=3.10
@@ -0,0 +1,115 @@
1
+ # agentdocker-lite
2
+
3
+ Lightweight Linux namespace sandbox with persistent shell and instant filesystem reset.
4
+
5
+ **20x faster lifecycle** than Docker. Designed for high-frequency workloads like RL training where environments are created, reset, and destroyed thousands of times.
6
+
7
+ ## Key features
8
+
9
+ - **Persistent shell**: ~42ms per command (vs ~330ms with fork/exec/chroot per command)
10
+ - **Instant reset**: overlayfs ~27ms, btrfs ~28ms -- clear filesystem without recreating the sandbox
11
+ - **Fast lifecycle**: ~4ms create, ~6ms delete (overlayfs)
12
+ - **Signal-pipe protocol**: uses a separate fd for command completion signaling -- no sentinel collision with command output
13
+ - **CoW filesystem backends**: overlayfs (default) or btrfs snapshots
14
+ - **cgroup v2**: optional CPU, memory, PID limits
15
+ - **Auto rootfs**: pass a Docker image name, rootfs is auto-prepared and cached
16
+
17
+ ## Requirements
18
+
19
+ - Linux with kernel supporting overlayfs (or btrfs)
20
+ - Root or `CAP_SYS_ADMIN` (for mount/cgroup)
21
+ - `util-linux` (`unshare`)
22
+ - Docker (only for auto-preparing rootfs from image names)
23
+ - Python >= 3.10
24
+
25
+ ## Install
26
+
27
+ ```bash
28
+ cd agentdocker-lite
29
+ pip install -e .
30
+ ```
31
+
32
+ ## Quick start
33
+
34
+ ```python
35
+ from agentdocker_lite import Sandbox, SandboxConfig
36
+
37
+ config = SandboxConfig(
38
+ image="ubuntu:22.04", # Docker image or path to rootfs dir
39
+ working_dir="/workspace",
40
+ )
41
+
42
+ sb = Sandbox(config, name="worker-0")
43
+
44
+ # Run commands (~42ms each)
45
+ output, ec = sb.run("echo hello world")
46
+
47
+ # Direct file I/O (bypasses shell)
48
+ sb.write_file("/workspace/test.txt", "content")
49
+ content = sb.read_file("/workspace/test.txt")
50
+
51
+ # Reset filesystem to initial state (~27ms)
52
+ sb.reset()
53
+
54
+ # Cleanup
55
+ sb.delete()
56
+ ```
57
+
58
+ ## Configuration
59
+
60
+ ```python
61
+ SandboxConfig(
62
+ image="ubuntu:22.04", # Docker image or rootfs path
63
+ working_dir="/workspace", # Initial cwd inside sandbox
64
+ environment={"FOO": "bar"}, # Extra env vars
65
+ volumes=["/host/path:/container/path:ro"], # Bind mounts
66
+ fs_backend="overlayfs", # "overlayfs" or "btrfs"
67
+ env_base_dir="/tmp/agentdocker_lite",
68
+ rootfs_cache_dir="/tmp/agentdocker_lite_rootfs_cache",
69
+ cpu_max="50000 100000", # cgroup cpu.max
70
+ memory_max="536870912", # cgroup memory.max (bytes)
71
+ pids_max="256", # cgroup pids.max
72
+ )
73
+ ```
74
+
75
+ ## API
76
+
77
+ | Method | Description |
78
+ |--------|-------------|
79
+ | `sb.run(cmd, timeout=None)` | Run command, returns `(output, exit_code)` |
80
+ | `sb.reset()` | Reset filesystem to initial state |
81
+ | `sb.delete()` | Full cleanup (unmount, remove cgroup, delete files) |
82
+ | `sb.copy_to(local, container)` | Copy file into sandbox |
83
+ | `sb.copy_from(container, local)` | Copy file out of sandbox |
84
+ | `sb.read_file(path)` | Read file content |
85
+ | `sb.write_file(path, content)` | Write file content |
86
+ | `sb.rootfs` | Host path to sandbox rootfs |
87
+
88
+ ## Examples
89
+
90
+ ```bash
91
+ # Basic usage
92
+ sudo python examples/basic_usage.py
93
+
94
+ # 32-worker concurrent benchmark
95
+ sudo python examples/concurrent_sandboxes.py
96
+ ```
97
+
98
+ ## Architecture
99
+
100
+ ```
101
+ Host kernel (shared)
102
+ |
103
+ +-- Sandbox "worker-0"
104
+ | +-- PID namespace (unshare --pid)
105
+ | +-- Mount namespace (unshare --mount)
106
+ | +-- chroot into overlayfs rootfs
107
+ | | +-- lowerdir: shared base image (read-only)
108
+ | | +-- upperdir: per-sandbox changes (cleared on reset)
109
+ | +-- Persistent bash process (stdin/stdout pipes + signal fd)
110
+ | +-- cgroup v2 limits
111
+ |
112
+ +-- Sandbox "worker-1"
113
+ | +-- (same structure, independent namespaces)
114
+ ...
115
+ ```
@@ -0,0 +1,49 @@
1
+ #!/usr/bin/env python3
2
+ """Basic usage example for agentdocker-lite.
3
+
4
+ Must be run as root (requires mount/cgroup operations).
5
+ Requires Docker to auto-prepare rootfs from image names.
6
+ """
7
+
8
+ from agentdocker_lite import Sandbox, SandboxConfig
9
+
10
+
11
def main():
    # Build the sandbox config. ``image`` accepts either a Docker image name
    # (auto-exported to a rootfs on first use) or a path to an existing
    # rootfs directory.
    config = SandboxConfig(
        image="ubuntu:22.04",
        working_dir="/workspace",
        cpu_max="50000 100000",  # 50% of one core
        memory_max="536870912",  # 512 MB
        pids_max="256",
    )

    sb = Sandbox(config, name="demo")

    # Commands go through the persistent shell (~42ms per command).
    for cmd in ("echo hello from sandbox", "cat /etc/os-release | head -2"):
        output, ec = sb.run(cmd)
        print(f"[exit={ec}] {output.strip()}")

    # Direct file I/O helpers bypass the shell entirely (even faster).
    sb.write_file("/workspace/test.txt", "hello world\n")
    content = sb.read_file("/workspace/test.txt")
    print(f"File content: {content.strip()}")

    # Roll the filesystem back to its initial state (~27ms).
    sb.reset()

    # The file written above no longer exists after the reset.
    output, ec = sb.run("cat /workspace/test.txt 2>&1")
    print(f"After reset [exit={ec}]: {output.strip()}")

    # Full teardown: unmount, remove cgroup, delete files.
    sb.delete()
    print("Done.")


if __name__ == "__main__":
    main()
@@ -0,0 +1,65 @@
1
+ #!/usr/bin/env python3
2
+ """Concurrent sandbox example: create N sandboxes, run commands, reset, destroy."""
3
+
4
+ import time
5
+ from concurrent.futures import ThreadPoolExecutor
6
+
7
+ from agentdocker_lite import Sandbox, SandboxConfig
8
+
9
+
10
def worker(worker_id: int) -> dict:
    """Run one full sandbox lifecycle and return per-phase timings in ms."""
    config = SandboxConfig(
        image="ubuntu:22.04",
        working_dir="/workspace",
        pids_max="128",
    )

    timings: dict = {"worker": worker_id}

    start = time.monotonic()
    sb = Sandbox(config, name=f"worker-{worker_id}")
    timings["create_ms"] = (time.monotonic() - start) * 1000

    start = time.monotonic()
    output, ec = sb.run("echo hello && uname -r")
    timings["run_ms"] = (time.monotonic() - start) * 1000

    start = time.monotonic()
    sb.reset()
    timings["reset_ms"] = (time.monotonic() - start) * 1000

    start = time.monotonic()
    sb.delete()
    timings["delete_ms"] = (time.monotonic() - start) * 1000

    return timings
40
+
41
+
42
def main():
    """Drive the full sandbox lifecycle in 32 threads and report timings."""
    n_workers = 32
    print(f"Launching {n_workers} sandboxes concurrently...")

    start = time.monotonic()
    with ThreadPoolExecutor(max_workers=n_workers) as executor:
        results = list(executor.map(worker, range(n_workers)))
    wall_ms = (time.monotonic() - start) * 1000

    # Per-worker timing table.
    print(f"\n{'worker':>8} {'create':>10} {'run':>10} {'reset':>10} {'delete':>10}")
    for r in results:
        print(
            f"{r['worker']:>8} {r['create_ms']:>9.1f}ms {r['run_ms']:>9.1f}ms "
            f"{r['reset_ms']:>9.1f}ms {r['delete_ms']:>9.1f}ms"
        )

    # Aggregate averages for the two phases that matter most for RL loops.
    avg_create = sum(r["create_ms"] for r in results) / len(results)
    avg_reset = sum(r["reset_ms"] for r in results) / len(results)
    print(f"\nAvg create: {avg_create:.1f}ms Avg reset: {avg_reset:.1f}ms")
    print(f"Wall clock: {wall_ms:.0f}ms for {n_workers} workers")


if __name__ == "__main__":
    main()
@@ -0,0 +1,13 @@
1
# Build configuration (PEP 517/518): hatchling is the build backend.
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "agentdocker-lite"
version = "0.0.1"
description = "Lightweight Linux namespace sandbox with persistent shell and instant reset"
requires-python = ">=3.10"
license = "MIT"

# src-layout: the importable package lives under src/agentdocker_lite.
[tool.hatch.build.targets.wheel]
packages = ["src/agentdocker_lite"]
@@ -0,0 +1,6 @@
1
"""agentdocker-lite: Lightweight Linux namespace sandbox for high-frequency workloads."""

from agentdocker_lite.sandbox import Sandbox, SandboxConfig

# Public API: only the sandbox entry points are re-exported.
__all__ = ["Sandbox", "SandboxConfig"]
# NOTE(review): duplicated from pyproject.toml's `version` — keep in sync.
__version__ = "0.0.1"
@@ -0,0 +1,242 @@
1
+ """Utilities for preparing base rootfs directories from Docker images."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import logging
6
+ import subprocess
7
+ from pathlib import Path
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
def prepare_rootfs_from_docker(
    image_name: str,
    output_dir: str | Path,
    pull: bool = True,
) -> Path:
    """Export a Docker image as a rootfs directory.

    Equivalent to::

        docker pull <image_name>
        docker export $(docker create <image_name>) | tar -C <output_dir> -xf -

    The temporary container is always removed; the image itself is also
    removed afterwards (``docker rmi -f``) to reclaim disk space, so a
    subsequent call with ``pull=False`` will not find the image locally.

    Args:
        image_name: Docker image (e.g. ``"ubuntu:22.04"``).
        output_dir: Target directory for the extracted rootfs.
        pull: Pull the image first (set ``False`` if already local).

    Returns:
        Path to *output_dir*.

    Raises:
        RuntimeError: If any Docker/tar command fails.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    if pull:
        logger.info("Pulling image: %s", image_name)
        result = subprocess.run(
            ["docker", "pull", image_name],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            raise RuntimeError(f"docker pull failed: {result.stderr.strip()}")

    logger.info("Creating temporary container from %s", image_name)
    create = subprocess.run(
        ["docker", "create", image_name],
        capture_output=True,
        text=True,
    )
    if create.returncode != 0:
        raise RuntimeError(f"docker create failed: {create.stderr.strip()}")
    container_id = create.stdout.strip()

    try:
        logger.info("Exporting %s -> %s", image_name, output_dir)
        # Stream `docker export` straight into tar so the archive is never
        # materialized on disk.
        export_proc = subprocess.Popen(
            ["docker", "export", container_id],
            stdout=subprocess.PIPE,
        )
        tar_proc = subprocess.Popen(
            ["tar", "-C", str(output_dir), "-xf", "-"],
            stdin=export_proc.stdout,
        )
        # Close our copy of the pipe so tar sees EOF when export exits.
        if export_proc.stdout is not None:
            export_proc.stdout.close()
        tar_proc.communicate()
        export_rc = export_proc.wait()

        # BUG FIX: previously only tar's exit status was checked, so a failed
        # `docker export` (dead daemon, missing container) could silently
        # yield an incomplete rootfs.
        if export_rc != 0:
            raise RuntimeError(
                f"docker export failed for {image_name} (exit {export_rc})"
            )
        if tar_proc.returncode != 0:
            raise RuntimeError(
                f"tar extraction failed for {image_name} (exit {tar_proc.returncode})"
            )
    finally:
        # Best-effort cleanup of the temporary container.
        subprocess.run(["docker", "rm", "-f", container_id], capture_output=True)

    # Best-effort removal of the image itself (see docstring note).
    subprocess.run(["docker", "rmi", "-f", image_name], capture_output=True)

    logger.info("Rootfs ready: %s", output_dir)
    return output_dir
83
+
84
+
85
def prepare_rootfs_without_docker(
    image_ref: str,
    output_dir: str | Path,
) -> Path:
    """Export an OCI image as a rootfs without requiring a Docker daemon.

    Uses ``skopeo`` + ``umoci`` which can run without root and without
    a running Docker daemon.

    Args:
        image_ref: OCI image reference (e.g. ``"docker://ubuntu:22.04"``).
        output_dir: Target directory for the extracted rootfs.

    Returns:
        Path to the rootfs bundle directory.

    Raises:
        RuntimeError: If skopeo/umoci commands fail.
        FileNotFoundError: If skopeo or umoci is not installed.
    """
    import shutil
    import tempfile

    # Fail fast (on the first missing tool, in order) before any network work.
    missing = [t for t in ("skopeo", "umoci") if shutil.which(t) is None]
    if missing:
        tool = missing[0]
        raise FileNotFoundError(
            f"{tool} not found. Install it: apt-get install {tool}"
        )

    output_dir = Path(output_dir)

    with tempfile.TemporaryDirectory() as tmp:
        oci_dir = Path(tmp) / "oci_image"

        # Normalize bare references ("ubuntu:22.04") to skopeo's docker:// form.
        if not image_ref.startswith("docker://"):
            image_ref = f"docker://{image_ref}"

        logger.info("Copying %s via skopeo", image_ref)
        copy_proc = subprocess.run(
            ["skopeo", "copy", image_ref, f"oci:{oci_dir}:latest"],
            capture_output=True,
            text=True,
        )
        if copy_proc.returncode != 0:
            raise RuntimeError(f"skopeo copy failed: {copy_proc.stderr.strip()}")

        logger.info("Unpacking OCI image via umoci -> %s", output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
        unpack_proc = subprocess.run(
            ["umoci", "unpack", "--image", f"{oci_dir}:latest", str(output_dir)],
            capture_output=True,
            text=True,
        )
        if unpack_proc.returncode != 0:
            raise RuntimeError(f"umoci unpack failed: {unpack_proc.stderr.strip()}")

    # If the unpacked bundle contains a rootfs/ subdirectory, return that;
    # otherwise fall back to the bundle directory itself.
    rootfs_path = output_dir / "rootfs"
    if rootfs_path.is_dir():
        logger.info("Rootfs ready: %s", rootfs_path)
        return rootfs_path

    logger.info("Rootfs ready: %s", output_dir)
    return output_dir
148
+
149
+
150
def prepare_btrfs_rootfs_from_docker(
    image_name: str,
    subvolume_path: str | Path,
    pull: bool = True,
) -> Path:
    """Export a Docker image into a btrfs subvolume for snapshot-based sandboxes.

    The target path must be on a btrfs-formatted filesystem. Any existing
    subvolume (or plain directory) at *subvolume_path* is removed first.
    The temporary container is always removed; the image itself is also
    removed afterwards (``docker rmi -f``) to reclaim disk space.

    Args:
        image_name: Docker image (e.g. ``"ubuntu:22.04"``).
        subvolume_path: Target btrfs subvolume for the extracted rootfs.
        pull: Pull the image first (set ``False`` if already local).

    Returns:
        Path to *subvolume_path*.

    Raises:
        RuntimeError: If any btrfs/Docker/tar command fails.
        FileNotFoundError: If btrfs-progs is not installed.
    """
    import shutil as _shutil

    if _shutil.which("btrfs") is None:
        raise FileNotFoundError(
            "btrfs-progs not found. Install: apt-get install btrfs-progs"
        )

    subvolume_path = Path(subvolume_path)

    if subvolume_path.exists():
        # A btrfs subvolume cannot be removed with rmtree -- probe with
        # `subvolume show` to pick the right cleanup path.
        check = subprocess.run(
            ["btrfs", "subvolume", "show", str(subvolume_path)],
            capture_output=True,
            text=True,
        )
        if check.returncode == 0:
            logger.info("Deleting existing btrfs subvolume: %s", subvolume_path)
            subprocess.run(
                ["btrfs", "subvolume", "delete", str(subvolume_path)],
                capture_output=True,
            )
        else:
            _shutil.rmtree(subvolume_path)

    subvolume_path.parent.mkdir(parents=True, exist_ok=True)
    result = subprocess.run(
        ["btrfs", "subvolume", "create", str(subvolume_path)],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        raise RuntimeError(
            f"btrfs subvolume create failed: {result.stderr.strip()}. "
            f"Ensure {subvolume_path.parent} is on a btrfs filesystem."
        )
    logger.info("Created btrfs subvolume: %s", subvolume_path)

    if pull:
        logger.info("Pulling image: %s", image_name)
        result = subprocess.run(
            ["docker", "pull", image_name],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            raise RuntimeError(f"docker pull failed: {result.stderr.strip()}")

    logger.info("Creating temporary container from %s", image_name)
    create = subprocess.run(
        ["docker", "create", image_name],
        capture_output=True,
        text=True,
    )
    if create.returncode != 0:
        raise RuntimeError(f"docker create failed: {create.stderr.strip()}")
    container_id = create.stdout.strip()

    try:
        logger.info(
            "Exporting %s -> %s (btrfs subvolume)", image_name, subvolume_path
        )
        # Stream `docker export` straight into tar so the archive is never
        # materialized on disk.
        export_proc = subprocess.Popen(
            ["docker", "export", container_id],
            stdout=subprocess.PIPE,
        )
        tar_proc = subprocess.Popen(
            ["tar", "-C", str(subvolume_path), "-xf", "-"],
            stdin=export_proc.stdout,
        )
        # Close our copy of the pipe so tar sees EOF when export exits.
        if export_proc.stdout is not None:
            export_proc.stdout.close()
        tar_proc.communicate()
        export_rc = export_proc.wait()

        # BUG FIX: the export side's exit status was previously ignored, so a
        # failed `docker export` could silently yield an incomplete rootfs.
        if export_rc != 0:
            raise RuntimeError(
                f"docker export failed for {image_name} (exit {export_rc})"
            )
        if tar_proc.returncode != 0:
            raise RuntimeError(
                f"tar extraction failed for {image_name} (exit {tar_proc.returncode})"
            )
    finally:
        # Best-effort cleanup of the temporary container.
        subprocess.run(["docker", "rm", "-f", container_id], capture_output=True)

    # Best-effort removal of the image itself (see docstring note).
    subprocess.run(["docker", "rmi", "-f", image_name], capture_output=True)

    logger.info("btrfs rootfs ready: %s", subvolume_path)
    return subvolume_path