phx-paddock 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paddock/__init__.py +1 -0
- paddock/__main__.py +94 -0
- paddock/agents/__init__.py +46 -0
- paddock/agents/claude.py +16 -0
- paddock/agents/shell.py +14 -0
- paddock/cli.py +148 -0
- paddock/config/__init__.py +0 -0
- paddock/config/filters.py +71 -0
- paddock/config/loader.py +353 -0
- paddock/config/schema.py +61 -0
- paddock/docker/__init__.py +0 -0
- paddock/docker/build.py +92 -0
- paddock/docker/builder.py +65 -0
- phx_paddock-0.1.0.dist-info/METADATA +262 -0
- phx_paddock-0.1.0.dist-info/RECORD +18 -0
- phx_paddock-0.1.0.dist-info/WHEEL +4 -0
- phx_paddock-0.1.0.dist-info/entry_points.txt +6 -0
- phx_paddock-0.1.0.dist-info/licenses/LICENCE.txt +21 -0
paddock/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Package version; keep in sync with the pyproject / wheel metadata.
__version__ = "0.1.0"
|
paddock/__main__.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import os
|
|
3
|
+
import subprocess
|
|
4
|
+
import sys
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from paddock.agents import BaseAgent, agent_registry
|
|
8
|
+
from paddock.cli import parse_args
|
|
9
|
+
from paddock.config.loader import ConfigLoader
|
|
10
|
+
from paddock.docker.build import ImageBuilder
|
|
11
|
+
from paddock.docker.builder import DockerCommandBuilder
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger("paddock")
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _setup_logging(quiet: bool) -> None:
    """Configure process-wide logging: silence everything when quiet, else INFO."""
    if not quiet:
        logging.basicConfig(level=logging.INFO, format="%(message)s")
    else:
        logging.disable(logging.CRITICAL)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _log_network_peers(network: str) -> None:
    """Log names of other containers running on the same network."""
    proc = subprocess.run(
        ["docker", "ps", "--filter", f"network={network}", "--format={{.Names}}"],
        capture_output=True,
        text=True,
    )
    if proc.returncode != 0:
        return
    listing = proc.stdout.strip()
    if not listing:
        return
    for container_name in listing.splitlines():
        logger.info(" - %s", container_name)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def run(argv: list[str] | None = None) -> None:
    """Resolve config, optionally build the image, and run the container.

    Args:
        argv:

            CLI arguments. Defaults to ``sys.argv[1:]``.

    Exits with status 1 on config errors, 0 on ``--dry-run``, and otherwise
    with the exit status of the ``docker`` invocation.
    """
    parsed = parse_args(argv if argv is not None else sys.argv[1:])
    _setup_logging(parsed.quiet)

    workdir = Path(parsed.workdir) if parsed.workdir else Path.cwd()

    loader = ConfigLoader()
    runner = loader.resolve(parsed, workdir, environ=dict(os.environ))

    if not runner.is_valid():
        for key, errors in runner.errors.items():
            for error in errors:
                print(f"Config error [{key}]: {error}", file=sys.stderr)
        sys.exit(1)

    config = runner.cleaned_data

    # The registry keys agents by string, so map the boolean "no agent"
    # sentinel back onto its registry key.
    agent_key = "false" if config["agent"] is False else str(config["agent"])
    agent: BaseAgent = agent_registry.get(agent_key)

    logger.info("Using image: %s", config["image"])
    logger.info("Agent: %s", config["agent"])
    for host, container in config["volumes"].items():
        logger.info("Mounting %s -> %s", host, container)
    if config.get("network"):
        logger.info("Network: %s", config["network"])
        logger.info("Other containers on this network:")
        _log_network_peers(config["network"])

    if config.get("build"):
        builder = ImageBuilder()
        # Config-level build args override the agent's defaults.
        build_args = {**agent.get_build_args(), **config["build"].get("args", {})}
        built = builder.maybe_build(
            build_config=config["build"],
            image=config["image"],
            build_args=build_args,
        )
        logger.info("Image build: %s", "triggered" if built else "skipped (up to date)")

    docker_argv = DockerCommandBuilder(
        config=config,
        agent=agent,
        workdir=workdir,
    ).build(command=parsed.command)

    if not parsed.quiet:
        print(" ".join(docker_argv))

    if parsed.dry_run:
        sys.exit(0)

    # Propagate the container's exit status instead of discarding it (the
    # original ignored subprocess.run's return code), so shells and scripts
    # can detect failures of the containerized command.
    sys.exit(subprocess.run(docker_argv).returncode)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def main() -> None:
    """Console-script entry point (see the wheel's entry_points); delegates to run()."""
    run()


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from typing import ClassVar
|
|
3
|
+
|
|
4
|
+
from class_registry.entry_points import EntryPointClassRegistry
|
|
5
|
+
|
|
6
|
+
# Registry of agent classes, populated from the 'paddock.agents' entry-point
# group (declared by this package and any third-party plugins).
agent_registry: EntryPointClassRegistry = EntryPointClassRegistry("paddock.agents")
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class BaseAgent(ABC):
    """Abstract interface for a coding agent that runs inside the container.

    Concrete agents are discovered through the ``paddock.agents`` entry-point
    registry; AGENT_KEY is the key they are registered under.
    """

    # Registry key for the concrete agent (e.g. 'claude', or 'false' for none).
    AGENT_KEY: ClassVar[str]

    @abstractmethod
    def get_command(self) -> list[str]:
        """
        Default command to run in the container.

        Example: ['claude'] for ClaudeAgent, ['/bin/bash'] for ShellAgent.
        """

    @abstractmethod
    def get_volumes(self) -> dict[str, str]:
        """
        Host-path-keyed volume mounts specific to this agent.

        Values are '/container/path' or '/container/path:mode'.
        Example: {'/home/user/.claude': '/root/.claude:rw'}
        """

    def get_scratch_volumes(self, image: str) -> dict[str, str]:
        """
        Named Docker volumes (not host paths) to create and mount.

        Keys are volume names, values are container paths. Override when the agent
        needs persistent storage that must not be shared with the host.
        Example: {'paddock_ubuntu_22_04_claude': '/scratch'}
        """
        return {}

    def get_build_args(self) -> dict[str, str]:
        """
        Docker build args to pass when building the paddock base image.

        Used when the built-in Dockerfile is referenced in the build config.
        Example: {'AGENT': 'claude'}
        """
        return {}
|
paddock/agents/claude.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
|
|
3
|
+
from paddock.agents import BaseAgent
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class ClaudeAgent(BaseAgent):
    """Agent that launches the Claude Code CLI inside the container."""

    AGENT_KEY = "claude"

    def get_command(self) -> list[str]:
        """Run the ``claude`` CLI as the container's default command."""
        return ["claude"]

    def get_volumes(self) -> dict[str, str]:
        """Mount the user's ~/.claude state directory read-write into the container."""
        claude_state_dir = Path.home() / ".claude"
        return {str(claude_state_dir): "/root/.claude:rw"}

    def get_build_args(self) -> dict[str, str]:
        """Select the claude variant of the built-in Dockerfile."""
        return {"AGENT": "claude"}
|
paddock/agents/shell.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from paddock.agents import BaseAgent
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class ShellAgent(BaseAgent):
    """Fallback agent: a plain interactive shell with no agent tooling mounted."""

    AGENT_KEY = "false"

    def get_command(self) -> list[str]:
        """Start an interactive bash shell."""
        return ["/bin/bash"]

    def get_volumes(self) -> dict[str, str]:
        """No agent-specific host mounts are needed."""
        return {}

    def get_build_args(self) -> dict[str, str]:
        """Build the image without installing any agent."""
        return {"AGENT": "none"}
|
paddock/cli.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
|
|
4
|
+
# Paddock flags that take no value.
_KNOWN_BOOL_FLAGS = frozenset({"--dry-run", "--quiet"})
# Paddock flags that consume a value ('--flag=value' or '--flag value').
_KNOWN_VALUE_FLAGS = frozenset(
    {
        "--agent",
        "--build-context",
        "--build-dockerfile",
        "--build-policy",
        "--config-file",
        "--image",
        "--network",
        "--volume",
        "--workdir",
    }
)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class ParsedArgs:
    """Structured result of parsing the paddock command line.

    A value of None means the flag was not supplied, allowing lower-priority
    config sources (files, env vars) to provide it instead.
    """

    agent: str | bool | None
    # Collected from --build-args-<key>=<value> flags; keys have '-' -> '_'.
    build_args: dict[str, str]
    build_context: str | None
    build_dockerfile: str | None
    build_policy: str | None
    # Everything after the first positional token or '--': the container command.
    command: list[str]
    config_file: str | None
    dry_run: bool
    image: str | None
    network: str | None
    quiet: bool
    # host path -> '/container/path[:mode]' from repeated --volume flags.
    volumes: dict[str, str]
    workdir: str | None
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _split_argv(argv: list[str]) -> tuple[list[str], list[str]]:
    """
    Split argv into (paddock_flags, container_command).

    Scans left-to-right collecting known paddock flags. Stops at:
    - '--' (explicit split, consumed): rest becomes the command
    - A non-flag token (positional): this token and everything after become the command

    Unknown flags (starting with '--' but not in the known set) remain in
    paddock_flags so argparse can report them as errors.
    """
    collected: list[str] = []
    idx = 0
    total = len(argv)

    while idx < total:
        token = argv[idx]
        idx += 1

        if token == "--":
            # Explicit separator: consumed; everything after it is the command.
            return collected, argv[idx:]

        if token in _KNOWN_BOOL_FLAGS:
            collected.append(token)
            continue

        flag_name = token.split("=", 1)[0]
        if flag_name in _KNOWN_VALUE_FLAGS or flag_name.startswith("--build-args-"):
            collected.append(token)
            # '--flag value' form: the following token is the flag's value.
            if "=" not in token and idx < total:
                collected.append(argv[idx])
                idx += 1
            continue

        if token.startswith("-"):
            # Unknown flag — keep it so argparse exits with an error.
            collected.append(token)
            continue

        # Positional — this token and everything after is the container command.
        return collected, argv[idx - 1 :]

    return collected, []
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _parse_volume(value: str) -> tuple[str, str]:
    """
    Parse '--volume=/host:/container[:mode]' into (host_path, container_spec).

    Intentionally does not use the Volume filter — the CLI format separates
    host and container paths with ':', whereas TOML stores them as separate fields.
    """
    host_path, _sep, container_spec = value.partition(":")
    return (host_path, container_spec)
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def parse_args(argv: list[str]) -> ParsedArgs:
    """
    Parse paddock CLI arguments.

    Stops consuming paddock flags at the first positional arg or '--'. Unknown
    flags before either stop-point are errors. '--' is consumed; everything
    after it becomes the container command, preserving any subsequent '--'.
    """
    paddock_argv, command = _split_argv(argv)

    # Pull out --build-args-<key>=<value> entries before argparse sees them.
    build_args: dict[str, str] = {}
    remaining: list[str] = []
    for token in paddock_argv:
        if token.startswith("--build-args-") and "=" in token:
            raw_key, _, raw_value = token[len("--build-args-") :].partition("=")
            build_args[raw_key.replace("-", "_")] = raw_value
        else:
            remaining.append(token)

    parser = argparse.ArgumentParser(prog="paddock", add_help=True)
    parser.add_argument("--agent")
    parser.add_argument("--build-context")
    parser.add_argument("--build-dockerfile")
    parser.add_argument("--build-policy")
    parser.add_argument("--config-file")
    parser.add_argument("--dry-run", action="store_true", default=False)
    parser.add_argument("--image")
    parser.add_argument("--network")
    parser.add_argument("--quiet", action="store_true", default=False)
    parser.add_argument("--volume", action="append", default=[])
    parser.add_argument("--workdir")

    namespace = parser.parse_args(remaining)

    # Later --volume flags for the same host path overwrite earlier ones.
    volumes: dict[str, str] = dict(_parse_volume(spec) for spec in namespace.volume)

    return ParsedArgs(
        agent=namespace.agent,
        build_args=build_args,
        build_context=namespace.build_context,
        build_dockerfile=namespace.build_dockerfile,
        build_policy=namespace.build_policy,
        command=command,
        config_file=namespace.config_file,
        dry_run=namespace.dry_run,
        image=namespace.image,
        network=namespace.network,
        quiet=namespace.quiet,
        volumes=volumes,
        workdir=namespace.workdir,
    )
|
|
File without changes
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import filters as f
|
|
2
|
+
from filters.base import BaseFilter
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class Agent(BaseFilter):
    """Validates a coding agent value.

    Accepts a non-empty string agent name, or ``False`` to disable the agent.
    Maps the string ``'false'`` to boolean ``False``. Rejects boolean ``True``.
    """

    CODE_INVALID = "invalid"

    templates = {
        CODE_INVALID: "Expected a non-empty agent name string, or False.",
    }

    def _apply(self, value):
        # True is never a meaningful agent selection.
        if value is True:
            return self._invalid_value(value, self.CODE_INVALID)

        # Boolean False and the literal string 'false' both mean "no agent".
        if value is False or (isinstance(value, str) and value == "false"):
            return False

        # Anything else must be a non-empty string agent name.
        filtered = self._filter(value, f.Unicode | f.NotEmpty)
        return None if self._has_errors else filtered
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class Volume(BaseFilter):
    """Validates a Docker volume container path.

    Accepts paths of the form ``/container/path``, ``/container/path:ro``,
    or ``/container/path:rw``. Values with more than one colon-separated
    segment are invalid.
    """

    CODE_INVALID = "invalid"

    templates = {
        CODE_INVALID: "Expected a container path, optionally suffixed with :ro or :rw.",
    }

    def _apply(self, value):
        value = self._filter(value, f.Unicode)
        if self._has_errors:
            return None

        path, sep, mode = value.partition(":")
        # Reject anything with a second ':' or an unrecognized mode suffix.
        if sep and (":" in mode or mode not in ("ro", "rw")):
            return self._invalid_value(value, self.CODE_INVALID)

        # Bare paths (no mode suffix) are normalised to read-only.
        return value if sep else value + ":ro"
|
paddock/config/loader.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
1
|
+
import tomllib
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Any, TypedDict
|
|
4
|
+
|
|
5
|
+
import filters as f
|
|
6
|
+
|
|
7
|
+
from paddock.config.schema import _config_schema
|
|
8
|
+
|
|
9
|
+
_USER_CONFIG_PATH = Path.home() / ".config" / "paddock" / "config.toml"
|
|
10
|
+
_PROJECT_CONFIG_NAME = Path(".paddock") / "config.toml"
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ConfigEntry(TypedDict):
|
|
14
|
+
"""A single config value annotated with its origin."""
|
|
15
|
+
|
|
16
|
+
value: Any
|
|
17
|
+
source: str
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
# SourcedConfig: same shape as the config schema, but leaf values are ConfigEntry.
|
|
21
|
+
SourcedConfig = dict[str, Any]
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class ConfigLoader:
|
|
25
|
+
"""Loads, merges, and validates paddock config from all sources.
|
|
26
|
+
|
|
27
|
+
Config resolution order (later sources overwrite earlier):
|
|
28
|
+
|
|
29
|
+
1. User-level (``~/.config/paddock/config.toml``)
|
|
30
|
+
2. Project-level (``<workdir>/.paddock/config.toml``)
|
|
31
|
+
3. Extra config file via ``PADDOCK_CONFIG_FILE`` env var
|
|
32
|
+
4. Extra config file via ``--config-file`` CLI arg
|
|
33
|
+
5. Env var overrides (``PADDOCK_*``)
|
|
34
|
+
6. CLI arg overrides
|
|
35
|
+
"""
|
|
36
|
+
|
|
37
|
+
def load_user_config(self, path: Path = _USER_CONFIG_PATH) -> SourcedConfig:
|
|
38
|
+
"""Load the user-level config file.
|
|
39
|
+
|
|
40
|
+
Args:
|
|
41
|
+
path:
|
|
42
|
+
|
|
43
|
+
Path to the user config file. Defaults to
|
|
44
|
+
``~/.config/paddock/config.toml``.
|
|
45
|
+
|
|
46
|
+
Returns:
|
|
47
|
+
A ``SourcedConfig`` mapping, or ``{}`` if the file does not exist.
|
|
48
|
+
"""
|
|
49
|
+
return self._load_toml_sourced(path)
|
|
50
|
+
|
|
51
|
+
def load_project_config(self, workdir: Path) -> SourcedConfig:
|
|
52
|
+
"""Load the project-level config from ``<workdir>/.paddock/config.toml``.
|
|
53
|
+
|
|
54
|
+
Args:
|
|
55
|
+
workdir:
|
|
56
|
+
|
|
57
|
+
The project working directory.
|
|
58
|
+
|
|
59
|
+
Returns:
|
|
60
|
+
A ``SourcedConfig`` mapping, or ``{}`` if the file does not exist.
|
|
61
|
+
"""
|
|
62
|
+
return self._load_toml_sourced(workdir / _PROJECT_CONFIG_NAME)
|
|
63
|
+
|
|
64
|
+
def load_extra_config(self, path: Path) -> SourcedConfig:
|
|
65
|
+
"""Load an arbitrary config file (for ``PADDOCK_CONFIG_FILE`` or ``--config-file``).
|
|
66
|
+
|
|
67
|
+
Args:
|
|
68
|
+
path:
|
|
69
|
+
|
|
70
|
+
Path to the extra config file.
|
|
71
|
+
|
|
72
|
+
Returns:
|
|
73
|
+
A ``SourcedConfig`` mapping, or ``{}`` if the file does not exist.
|
|
74
|
+
"""
|
|
75
|
+
return self._load_toml_sourced(path)
|
|
76
|
+
|
|
77
|
+
def config_from_env(self, environ: dict[str, str]) -> SourcedConfig:
|
|
78
|
+
"""Extract config from ``PADDOCK_*`` environment variables.
|
|
79
|
+
|
|
80
|
+
Strips the ``PADDOCK_`` prefix, lowercases, and splits on ``_`` to map
|
|
81
|
+
to the config structure. For example::
|
|
82
|
+
|
|
83
|
+
PADDOCK_IMAGE=foo → {'image': {'value': 'foo', ...}}
|
|
84
|
+
PADDOCK_BUILD_DOCKERFILE=x → {'build': {'dockerfile': {'value': 'x', ...}}}
|
|
85
|
+
|
|
86
|
+
Args:
|
|
87
|
+
environ:
|
|
88
|
+
|
|
89
|
+
Environment variable mapping to inspect.
|
|
90
|
+
|
|
91
|
+
Returns:
|
|
92
|
+
A ``SourcedConfig`` containing only the ``PADDOCK_``-prefixed entries.
|
|
93
|
+
"""
|
|
94
|
+
config: SourcedConfig = {}
|
|
95
|
+
prefix = "PADDOCK_"
|
|
96
|
+
for key, value in environ.items():
|
|
97
|
+
if not key.startswith(prefix):
|
|
98
|
+
continue
|
|
99
|
+
parts = key[len(prefix) :].lower().split("_")
|
|
100
|
+
self._deep_set_sourced(config, parts, value, source=f"env:{key}")
|
|
101
|
+
return config
|
|
102
|
+
|
|
103
|
+
def config_from_cli(self, parsed: Any) -> SourcedConfig:
|
|
104
|
+
"""Extract config from a parsed CLI args object (omitting ``None`` values).
|
|
105
|
+
|
|
106
|
+
Args:
|
|
107
|
+
parsed:
|
|
108
|
+
|
|
109
|
+
An object with attributes matching the paddock CLI argument names.
|
|
110
|
+
|
|
111
|
+
Returns:
|
|
112
|
+
A ``SourcedConfig`` containing only the non-``None`` CLI values.
|
|
113
|
+
"""
|
|
114
|
+
config: SourcedConfig = {}
|
|
115
|
+
build: SourcedConfig = {}
|
|
116
|
+
source = "cli"
|
|
117
|
+
|
|
118
|
+
if parsed.image is not None:
|
|
119
|
+
config["image"] = {"value": parsed.image, "source": source}
|
|
120
|
+
if parsed.agent is not None:
|
|
121
|
+
config["agent"] = {"value": parsed.agent, "source": source}
|
|
122
|
+
if parsed.network is not None:
|
|
123
|
+
config["network"] = {"value": parsed.network, "source": source}
|
|
124
|
+
if parsed.build_dockerfile is not None:
|
|
125
|
+
build["dockerfile"] = {"value": parsed.build_dockerfile, "source": source}
|
|
126
|
+
if parsed.build_context is not None:
|
|
127
|
+
build["context"] = {"value": parsed.build_context, "source": source}
|
|
128
|
+
if parsed.build_policy is not None:
|
|
129
|
+
build["policy"] = {"value": parsed.build_policy, "source": source}
|
|
130
|
+
if parsed.build_args:
|
|
131
|
+
build["args"] = {
|
|
132
|
+
k: {"value": v, "source": source} for k, v in parsed.build_args.items()
|
|
133
|
+
}
|
|
134
|
+
if build:
|
|
135
|
+
config["build"] = build
|
|
136
|
+
if parsed.volumes:
|
|
137
|
+
config["volumes"] = {
|
|
138
|
+
k: {"value": v, "source": source} for k, v in parsed.volumes.items()
|
|
139
|
+
}
|
|
140
|
+
return config
|
|
141
|
+
|
|
142
|
+
def resolve(
|
|
143
|
+
self,
|
|
144
|
+
parsed: Any,
|
|
145
|
+
workdir: Path,
|
|
146
|
+
environ: dict[str, str] | None = None,
|
|
147
|
+
) -> f.FilterRunner:
|
|
148
|
+
"""Load config from all sources, merge, apply defaults, and validate.
|
|
149
|
+
|
|
150
|
+
Args:
|
|
151
|
+
parsed:
|
|
152
|
+
|
|
153
|
+
Parsed CLI arguments object.
|
|
154
|
+
|
|
155
|
+
workdir:
|
|
156
|
+
|
|
157
|
+
The project working directory.
|
|
158
|
+
|
|
159
|
+
environ:
|
|
160
|
+
|
|
161
|
+
Environment variable mapping. Defaults to ``os.environ``.
|
|
162
|
+
|
|
163
|
+
Returns:
|
|
164
|
+
A ``FilterRunner`` for the caller to check ``is_valid()``.
|
|
165
|
+
"""
|
|
166
|
+
import os
|
|
167
|
+
|
|
168
|
+
env = environ if environ is not None else dict(os.environ)
|
|
169
|
+
|
|
170
|
+
sources = [
|
|
171
|
+
self.load_user_config(),
|
|
172
|
+
self.load_project_config(workdir),
|
|
173
|
+
]
|
|
174
|
+
|
|
175
|
+
if paddock_config_file := env.get("PADDOCK_CONFIG_FILE"):
|
|
176
|
+
sources.append(self.load_extra_config(Path(paddock_config_file)))
|
|
177
|
+
|
|
178
|
+
if parsed.config_file is not None:
|
|
179
|
+
sources.append(self.load_extra_config(Path(parsed.config_file)))
|
|
180
|
+
|
|
181
|
+
sources.append(self.config_from_env(env))
|
|
182
|
+
sources.append(self.config_from_cli(parsed))
|
|
183
|
+
|
|
184
|
+
merged_sourced = self._merge_sourced(sources)
|
|
185
|
+
plain = self._extract_values(merged_sourced)
|
|
186
|
+
plain = self._apply_defaults(plain)
|
|
187
|
+
|
|
188
|
+
return f.FilterRunner(_config_schema, plain)
|
|
189
|
+
|
|
190
|
+
def _load_toml_sourced(self, path: Path) -> SourcedConfig:
|
|
191
|
+
"""Load a TOML file and wrap each leaf value with its source path.
|
|
192
|
+
|
|
193
|
+
Args:
|
|
194
|
+
path:
|
|
195
|
+
|
|
196
|
+
Path to the TOML file.
|
|
197
|
+
|
|
198
|
+
Returns:
|
|
199
|
+
A ``SourcedConfig``, or ``{}`` if the file does not exist.
|
|
200
|
+
"""
|
|
201
|
+
if not path.exists():
|
|
202
|
+
return {}
|
|
203
|
+
with path.open("rb") as fh:
|
|
204
|
+
raw = tomllib.load(fh)
|
|
205
|
+
return self._annotate_source(raw, str(path))
|
|
206
|
+
|
|
207
|
+
def _annotate_source(self, data: dict, source: str) -> SourcedConfig:
|
|
208
|
+
"""Recursively wrap leaf values with source info.
|
|
209
|
+
|
|
210
|
+
Args:
|
|
211
|
+
data:
|
|
212
|
+
|
|
213
|
+
Raw config dict to annotate.
|
|
214
|
+
|
|
215
|
+
source:
|
|
216
|
+
|
|
217
|
+
Source label (typically a file path string).
|
|
218
|
+
|
|
219
|
+
Returns:
|
|
220
|
+
A ``SourcedConfig`` with the same structure as ``data``.
|
|
221
|
+
"""
|
|
222
|
+
result: SourcedConfig = {}
|
|
223
|
+
for key, value in data.items():
|
|
224
|
+
if isinstance(value, dict):
|
|
225
|
+
result[key] = self._annotate_source(value, source)
|
|
226
|
+
else:
|
|
227
|
+
result[key] = {"value": value, "source": source}
|
|
228
|
+
return result
|
|
229
|
+
|
|
230
|
+
def _deep_set_sourced(
|
|
231
|
+
self,
|
|
232
|
+
config: SourcedConfig,
|
|
233
|
+
parts: list[str],
|
|
234
|
+
value: str,
|
|
235
|
+
source: str,
|
|
236
|
+
) -> None:
|
|
237
|
+
"""Deep-set a value in a ``SourcedConfig`` using a key-path list.
|
|
238
|
+
|
|
239
|
+
Args:
|
|
240
|
+
config:
|
|
241
|
+
|
|
242
|
+
The target ``SourcedConfig`` to mutate.
|
|
243
|
+
|
|
244
|
+
parts:
|
|
245
|
+
|
|
246
|
+
Ordered key segments forming the path to the target leaf.
|
|
247
|
+
|
|
248
|
+
value:
|
|
249
|
+
|
|
250
|
+
The raw string value to store.
|
|
251
|
+
|
|
252
|
+
source:
|
|
253
|
+
|
|
254
|
+
Source label for the ``ConfigEntry``.
|
|
255
|
+
"""
|
|
256
|
+
node = config
|
|
257
|
+
for part in parts[:-1]:
|
|
258
|
+
if part not in node or not isinstance(node[part], dict):
|
|
259
|
+
node[part] = {}
|
|
260
|
+
node = node[part]
|
|
261
|
+
node[parts[-1]] = {"value": value, "source": source}
|
|
262
|
+
|
|
263
|
+
def _merge_sourced(self, sources: list[SourcedConfig]) -> SourcedConfig:
|
|
264
|
+
"""Deep-merge a list of ``SourcedConfig`` dicts; later sources overwrite earlier.
|
|
265
|
+
|
|
266
|
+
Args:
|
|
267
|
+
sources:
|
|
268
|
+
|
|
269
|
+
Ordered list of ``SourcedConfig`` mappings.
|
|
270
|
+
|
|
271
|
+
Returns:
|
|
272
|
+
A single merged ``SourcedConfig``.
|
|
273
|
+
"""
|
|
274
|
+
result: SourcedConfig = {}
|
|
275
|
+
for source in sources:
|
|
276
|
+
result = self._deep_merge(result, source)
|
|
277
|
+
return result
|
|
278
|
+
|
|
279
|
+
def _deep_merge(self, base: dict, override: dict) -> dict:
|
|
280
|
+
"""Recursively merge ``override`` into ``base``.
|
|
281
|
+
|
|
282
|
+
``ConfigEntry`` dicts (those with both ``value`` and ``source`` keys) are
|
|
283
|
+
treated as leaves and replaced wholesale rather than merged.
|
|
284
|
+
|
|
285
|
+
Args:
|
|
286
|
+
base:
|
|
287
|
+
|
|
288
|
+
The base dict to merge into.
|
|
289
|
+
|
|
290
|
+
override:
|
|
291
|
+
|
|
292
|
+
The dict whose values take precedence.
|
|
293
|
+
|
|
294
|
+
Returns:
|
|
295
|
+
A new merged dict.
|
|
296
|
+
"""
|
|
297
|
+
result = dict(base)
|
|
298
|
+
for key, value in override.items():
|
|
299
|
+
if (
|
|
300
|
+
key in result
|
|
301
|
+
and isinstance(result[key], dict)
|
|
302
|
+
and isinstance(value, dict)
|
|
303
|
+
and not ("value" in value and "source" in value)
|
|
304
|
+
):
|
|
305
|
+
result[key] = self._deep_merge(result[key], value)
|
|
306
|
+
else:
|
|
307
|
+
result[key] = value
|
|
308
|
+
return result
|
|
309
|
+
|
|
310
|
+
def _extract_values(self, sourced: SourcedConfig) -> dict:
|
|
311
|
+
"""Strip source annotations, returning a plain config dict.
|
|
312
|
+
|
|
313
|
+
Args:
|
|
314
|
+
sourced:
|
|
315
|
+
|
|
316
|
+
A ``SourcedConfig`` to strip.
|
|
317
|
+
|
|
318
|
+
Returns:
|
|
319
|
+
A plain dict with the same structure but without ``ConfigEntry`` wrappers.
|
|
320
|
+
"""
|
|
321
|
+
result: dict = {}
|
|
322
|
+
for key, value in sourced.items():
|
|
323
|
+
if isinstance(value, dict):
|
|
324
|
+
if "value" in value and "source" in value:
|
|
325
|
+
result[key] = value["value"]
|
|
326
|
+
else:
|
|
327
|
+
result[key] = self._extract_values(value)
|
|
328
|
+
else:
|
|
329
|
+
result[key] = value
|
|
330
|
+
return result
|
|
331
|
+
|
|
332
|
+
def _apply_defaults(self, config: dict) -> dict:
|
|
333
|
+
"""Apply default values for omitted config keys.
|
|
334
|
+
|
|
335
|
+
Mutates and returns ``config``.
|
|
336
|
+
|
|
337
|
+
Args:
|
|
338
|
+
config:
|
|
339
|
+
|
|
340
|
+
The plain config dict to fill.
|
|
341
|
+
|
|
342
|
+
Returns:
|
|
343
|
+
The same dict with defaults applied.
|
|
344
|
+
"""
|
|
345
|
+
config.setdefault("agent", "claude")
|
|
346
|
+
config.setdefault("build", None)
|
|
347
|
+
config.setdefault("network", None)
|
|
348
|
+
config.setdefault("volumes", {})
|
|
349
|
+
if isinstance(config.get("build"), dict):
|
|
350
|
+
config["build"].setdefault("args", {})
|
|
351
|
+
config["build"].setdefault("context", None)
|
|
352
|
+
config["build"].setdefault("policy", "if-missing")
|
|
353
|
+
return config
|
paddock/config/schema.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
import sys

import filters as f

from paddock.config.filters import Agent, Volume

# Accepted values for build.policy (mirrors paddock.docker.build.BuildPolicy).
BUILD_POLICIES = ("always", "daily", "if-missing", "weekly")

# Schema for the build sub-dict.
_build_schema = f.FilterMapper(
    {
        "args": f.Optional(None) | f.FilterRepeater(f.Unicode),
        "context": f.Optional(None),
        "dockerfile": f.Required | f.Unicode | f.NotEmpty,
        "policy": f.Optional(None) | f.Choice(BUILD_POLICIES),
    },
    allow_extra_keys=False,
)

# Top-level config schema — exported for use by the loader.
_config_schema = f.FilterMapper(
    {
        "agent": f.Required | Agent,
        "build": f.Optional(None) | _build_schema,
        "image": f.Required | f.Unicode | f.NotEmpty,
        "network": f.Optional(None),
        # NOTE(review): f.Optional(dict) passes the dict *type* as the default
        # — confirm against phx-filters' Optional semantics whether this
        # should be f.Optional({}) / a factory instead.
        "volumes": f.Optional(dict) | f.FilterRepeater(Volume),
    },
    allow_extra_keys=False,
)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class ConfigSchema:
    """Validates a merged paddock config dict.

    Prints errors to stderr and calls ``sys.exit(1)`` on failure.
    """

    def validate(self, config: dict) -> dict:
        """Validates the config dict and returns the cleaned result.

        Args:
            config:

                The raw config mapping to validate.

        Returns:
            The cleaned and normalised config dict.
        """
        runner = f.FilterRunner(_config_schema, config)

        if runner.is_valid():
            return runner.cleaned_data

        for key, messages in runner.errors.items():
            for msg in messages:
                print(
                    f"Config error [{key}]: {msg['message']}",
                    file=sys.stderr,
                )
        sys.exit(1)
|
|
File without changes
|
paddock/docker/build.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
import subprocess
|
|
2
|
+
from datetime import datetime, timedelta, timezone
|
|
3
|
+
from enum import StrEnum
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
import filters as f
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class BuildPolicy(StrEnum):
|
|
10
|
+
ALWAYS = "always"
|
|
11
|
+
DAILY = "daily"
|
|
12
|
+
IF_MISSING = "if-missing"
|
|
13
|
+
WEEKLY = "weekly"
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class ImageBuilder:
|
|
17
|
+
@staticmethod
|
|
18
|
+
def should_build(policy: BuildPolicy, image_created_at: datetime | None) -> bool:
|
|
19
|
+
"""Determine whether to build the image given the policy and current image age."""
|
|
20
|
+
match policy:
|
|
21
|
+
case BuildPolicy.ALWAYS:
|
|
22
|
+
return True
|
|
23
|
+
case BuildPolicy.IF_MISSING:
|
|
24
|
+
return image_created_at is None
|
|
25
|
+
case BuildPolicy.DAILY:
|
|
26
|
+
if image_created_at is None:
|
|
27
|
+
return True
|
|
28
|
+
return (datetime.now(timezone.utc) - image_created_at) > timedelta(
|
|
29
|
+
hours=24
|
|
30
|
+
)
|
|
31
|
+
case BuildPolicy.WEEKLY:
|
|
32
|
+
if image_created_at is None:
|
|
33
|
+
return True
|
|
34
|
+
return (datetime.now(timezone.utc) - image_created_at) > timedelta(
|
|
35
|
+
days=7
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
def get_image_created_at(self, image: str) -> datetime | None:
|
|
39
|
+
"""Return the creation timestamp of a local Docker image, or None if absent."""
|
|
40
|
+
result = subprocess.run(
|
|
41
|
+
["docker", "image", "inspect", "--format={{.Created}}", image],
|
|
42
|
+
capture_output=True,
|
|
43
|
+
text=True,
|
|
44
|
+
)
|
|
45
|
+
if result.returncode != 0:
|
|
46
|
+
return None
|
|
47
|
+
created_str = result.stdout.strip()
|
|
48
|
+
runner = f.FilterRunner(f.Datetime(), created_str)
|
|
49
|
+
if not runner.is_valid():
|
|
50
|
+
return None
|
|
51
|
+
return runner.cleaned_data
|
|
52
|
+
|
|
53
|
+
def run_build(
|
|
54
|
+
self,
|
|
55
|
+
*,
|
|
56
|
+
image: str,
|
|
57
|
+
dockerfile: str,
|
|
58
|
+
context: str,
|
|
59
|
+
build_args: dict[str, str],
|
|
60
|
+
) -> None:
|
|
61
|
+
"""Run docker build, streaming output to stdout."""
|
|
62
|
+
argv = ["docker", "build", "-t", image, "-f", dockerfile]
|
|
63
|
+
for key, value in build_args.items():
|
|
64
|
+
argv += ["--build-arg", f"{key}={value}"]
|
|
65
|
+
argv.append(context)
|
|
66
|
+
subprocess.run(argv, check=True)
|
|
67
|
+
|
|
68
|
+
def maybe_build(
|
|
69
|
+
self,
|
|
70
|
+
*,
|
|
71
|
+
build_config: dict,
|
|
72
|
+
image: str,
|
|
73
|
+
build_args: dict[str, str],
|
|
74
|
+
) -> bool:
|
|
75
|
+
"""
|
|
76
|
+
Build the image if the build policy requires it.
|
|
77
|
+
|
|
78
|
+
Returns True if a build was triggered, False if skipped.
|
|
79
|
+
"""
|
|
80
|
+
policy = BuildPolicy(build_config.get("policy", "if-missing"))
|
|
81
|
+
dockerfile = build_config["dockerfile"]
|
|
82
|
+
context = build_config.get("context") or str(Path(dockerfile).parent)
|
|
83
|
+
image_created_at = self.get_image_created_at(image)
|
|
84
|
+
if self.should_build(policy, image_created_at):
|
|
85
|
+
self.run_build(
|
|
86
|
+
image=image,
|
|
87
|
+
dockerfile=dockerfile,
|
|
88
|
+
context=context,
|
|
89
|
+
build_args=build_args,
|
|
90
|
+
)
|
|
91
|
+
return True
|
|
92
|
+
return False
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import re
|
|
2
|
+
import subprocess
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
from paddock.agents import BaseAgent
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def sanitise_volume_name(image: str, agent_key: str) -> str:
    """Generate a Docker volume name from image + agent key.

    Characters outside ``[a-z0-9]`` in the (lowercased) image name are
    replaced with underscores so the result is a valid volume name.
    """
    safe_image = re.sub(r"[^0-9a-z]", "_", image.lower())
    return "_".join(["paddock", safe_image, agent_key])
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DockerCommandBuilder:
    """Assembles a ``docker run`` invocation for an agent inside a workspace."""

    def __init__(self, *, config: dict, agent: BaseAgent, workdir: Path) -> None:
        self._config = config
        self._agent = agent
        self._workdir = workdir

    def build(self, *, command: list[str]) -> list[str]:
        """Assemble the full 'docker run' argv list."""
        argv = [
            "docker",
            "run",
            "--rm",
            "-it",
            "--name",
            self._resolve_container_name(),
            f"--workdir={self._workdir}",
        ]
        # Workspace mount first, then agent, config, and scratch volumes —
        # in that order, matching the layered-config precedence.
        argv += self._volume_flag(str(self._workdir), f"{self._workdir}:rw")
        mount_sources = (
            self._agent.get_volumes(),
            self._config.get("volumes", {}),
            self._agent.get_scratch_volumes(self._config["image"]),
        )
        for mounts in mount_sources:
            for source, destination in mounts.items():
                argv += self._volume_flag(source, destination)
        network = self._config.get("network")
        if network:
            argv += ["--network", network]
        argv.append(self._config["image"])
        # An explicit command overrides the agent's default command.
        argv += command or self._agent.get_command()
        return argv

    def _resolve_container_name(self) -> str:
        """Derive container name from workdir; append numeric suffix if taken."""
        agent_key = self._agent.AGENT_KEY
        base = f"paddock-{self._workdir.name.lower()}-{agent_key}"
        candidate = base
        counter = 0
        while not self._container_name_available(candidate):
            counter += 1
            candidate = f"{base}-{counter}"
        return candidate

    def _container_name_available(self, name: str) -> bool:
        """Return True if no running or stopped container has this name."""
        listing = subprocess.run(
            ["docker", "ps", "-a", "--filter", f"name=^{name}$", "--format={{.Names}}"],
            capture_output=True,
            text=True,
        )
        return name not in listing.stdout.splitlines()

    @staticmethod
    def _volume_flag(host_or_name: str, container_spec: str) -> list[str]:
        # Docker's -v takes "src:dest[:mode]"; container_spec already carries
        # the destination (and optional mode).
        return ["-v", f"{host_or_name}:{container_spec}"]
|
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: phx-paddock
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Launch coding agents in isolated Docker containers.
|
|
5
|
+
Project-URL: Repository, https://github.com/phx/paddock
|
|
6
|
+
Author-email: Phoenix Zerin <phx@phx.nz>
|
|
7
|
+
License-Expression: MIT
|
|
8
|
+
License-File: LICENCE.txt
|
|
9
|
+
Keywords: agents,coding,docker,isolation
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: Programming Language :: Python :: 3
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
16
|
+
Classifier: Topic :: Software Development :: Build Tools
|
|
17
|
+
Requires-Python: <4,>=3.12
|
|
18
|
+
Requires-Dist: phx-class-registry<6.0.0,>=5.2.1
|
|
19
|
+
Requires-Dist: phx-filters<4.0.0,>=3.5.1
|
|
20
|
+
Description-Content-Type: text/x-rst
|
|
21
|
+
|
|
22
|
+
paddock
|
|
23
|
+
=======
|
|
24
|
+
|
|
25
|
+
Launch coding agents (or a plain shell) in isolated Docker containers,
|
|
26
|
+
with the current working directory mounted as the workspace.
|
|
27
|
+
|
|
28
|
+
.. image:: https://img.shields.io/pypi/v/phx-paddock.svg
|
|
29
|
+
:target: https://pypi.org/project/phx-paddock/
|
|
30
|
+
:alt: PyPI version
|
|
31
|
+
|
|
32
|
+
.. image:: https://img.shields.io/pypi/pyversions/phx-paddock.svg
|
|
33
|
+
:alt: Python versions
|
|
34
|
+
|
|
35
|
+
.. image:: https://img.shields.io/badge/licence-MIT-blue.svg
|
|
36
|
+
:alt: MIT Licence
|
|
37
|
+
|
|
38
|
+
Overview
|
|
39
|
+
--------
|
|
40
|
+
|
|
41
|
+
``paddock`` assembles and executes a ``docker run`` command from a layered
|
|
42
|
+
configuration system. Config is resolved in priority order:
|
|
43
|
+
|
|
44
|
+
1. User-level TOML (``~/.config/paddock/config.toml``)
|
|
45
|
+
2. Project-level TOML (``<workdir>/.paddock/config.toml``)
|
|
46
|
+
3. Extra TOML file via ``PADDOCK_CONFIG_FILE`` env var
|
|
47
|
+
4. Extra TOML file via ``--config-file`` CLI flag
|
|
48
|
+
5. ``PADDOCK_*`` environment variables
|
|
49
|
+
6. CLI flags
|
|
50
|
+
|
|
51
|
+
Later sources overwrite earlier ones; ``volumes`` entries are additive.
|
|
52
|
+
|
|
53
|
+
Requirements
|
|
54
|
+
------------
|
|
55
|
+
|
|
56
|
+
- Python 3.12+
|
|
57
|
+
- Docker (CLI must be available on ``PATH``)
|
|
58
|
+
|
|
59
|
+
Installation
|
|
60
|
+
------------
|
|
61
|
+
|
|
62
|
+
.. code-block:: bash
|
|
63
|
+
|
|
64
|
+
pip install phx-paddock
|
|
65
|
+
|
|
66
|
+
Or with `uv <https://github.com/astral-sh/uv>`_:
|
|
67
|
+
|
|
68
|
+
.. code-block:: bash
|
|
69
|
+
|
|
70
|
+
uv tool install phx-paddock
|
|
71
|
+
|
|
72
|
+
Quick Start
|
|
73
|
+
-----------
|
|
74
|
+
|
|
75
|
+
Drop into a plain bash shell inside the current directory:
|
|
76
|
+
|
|
77
|
+
.. code-block:: bash
|
|
78
|
+
|
|
79
|
+
paddock --image=ubuntu:24.04 --agent=false
|
|
80
|
+
|
|
81
|
+
Run Claude Code in an isolated container:
|
|
82
|
+
|
|
83
|
+
.. code-block:: bash
|
|
84
|
+
|
|
85
|
+
paddock --image=my-claude-image --agent=claude
|
|
86
|
+
|
|
87
|
+
Print the assembled ``docker run`` command without executing it:
|
|
88
|
+
|
|
89
|
+
.. code-block:: bash
|
|
90
|
+
|
|
91
|
+
paddock --image=ubuntu:24.04 --agent=false --dry-run
|
|
92
|
+
|
|
93
|
+
Configuration
|
|
94
|
+
-------------
|
|
95
|
+
|
|
96
|
+
TOML files
|
|
97
|
+
~~~~~~~~~~
|
|
98
|
+
|
|
99
|
+
Place a ``config.toml`` at ``~/.config/paddock/`` (user-level) or
|
|
100
|
+
``<project>/.paddock/`` (project-level). Both files are optional.
|
|
101
|
+
|
|
102
|
+
.. code-block:: toml
|
|
103
|
+
|
|
104
|
+
agent = "claude"
|
|
105
|
+
image = "my-claude-image:latest"
|
|
106
|
+
network = "my-docker-network"
|
|
107
|
+
|
|
108
|
+
[volumes]
|
|
109
|
+
"/host/path" = "/container/path:ro"
|
|
110
|
+
|
|
111
|
+
[build]
|
|
112
|
+
dockerfile = "images/Dockerfile"
|
|
113
|
+
context = "."
|
|
114
|
+
policy = "daily"
|
|
115
|
+
|
|
116
|
+
[build.args]
|
|
117
|
+
AGENT = "claude"
|
|
118
|
+
PYTHON_VERSION = "3.13"
|
|
119
|
+
|
|
120
|
+
Config fields
|
|
121
|
+
~~~~~~~~~~~~~
|
|
122
|
+
|
|
123
|
+
+--------------------+----------------------------+--------------------------------------------------+
|
|
124
|
+
| Field | Type | Description |
|
|
125
|
+
+====================+============================+==================================================+
|
|
126
|
+
| ``agent`` | ``string`` or ``false`` | Agent key (``"claude"``) or ``false`` for shell |
|
|
127
|
+
+--------------------+----------------------------+--------------------------------------------------+
|
|
128
|
+
| ``image`` | ``string`` | Docker image to run (required) |
|
|
129
|
+
+--------------------+----------------------------+--------------------------------------------------+
|
|
130
|
+
| ``network`` | ``string`` (optional) | Docker network to attach the container to |
|
|
131
|
+
+--------------------+----------------------------+--------------------------------------------------+
|
|
132
|
+
| ``volumes`` | ``{host: container}`` map | Extra bind-mounts; container path may end |
|
|
133
|
+
| | | in ``:ro`` or ``:rw`` (bare path defaults to |
|
|
134
|
+
| | | ``:ro``) |
|
|
135
|
+
+--------------------+----------------------------+--------------------------------------------------+
|
|
136
|
+
| ``build`` | sub-table (optional) | Image auto-build settings (see below) |
|
|
137
|
+
+--------------------+----------------------------+--------------------------------------------------+
|
|
138
|
+
|
|
139
|
+
Build sub-table
|
|
140
|
+
~~~~~~~~~~~~~~~
|
|
141
|
+
|
|
142
|
+
+----------------+---------------------------------------------+-------------------------------------------+
|
|
143
|
+
| Field | Type | Description |
|
|
144
|
+
+================+=============================================+===========================================+
|
|
145
|
+
| ``dockerfile`` | ``string`` | Path to the Dockerfile (required if build |
|
|
146
|
+
| | | table is present) |
|
|
147
|
+
+----------------+---------------------------------------------+-------------------------------------------+
|
|
148
|
+
| ``context`` | ``string`` (optional) | Docker build context path |
|
|
149
|
+
+----------------+---------------------------------------------+-------------------------------------------+
|
|
150
|
+
| ``policy`` | ``"always"`` / ``"daily"`` / | When to rebuild the image |
|
|
151
|
+
| | ``"if-missing"`` / ``"weekly"`` | |
|
|
152
|
+
+----------------+---------------------------------------------+-------------------------------------------+
|
|
153
|
+
| ``args`` | ``{name: value}`` map (optional) | Build-time ``--build-arg`` values |
|
|
154
|
+
+----------------+---------------------------------------------+-------------------------------------------+
|
|
155
|
+
|
|
156
|
+
Environment variables
|
|
157
|
+
~~~~~~~~~~~~~~~~~~~~~
|
|
158
|
+
|
|
159
|
+
Any config field can be set via an environment variable by uppercasing its
|
|
160
|
+
name and prefixing with ``PADDOCK_``. Nested keys are joined with ``_``:
|
|
161
|
+
|
|
162
|
+
.. code-block:: bash
|
|
163
|
+
|
|
164
|
+
PADDOCK_IMAGE=ubuntu:24.04
|
|
165
|
+
PADDOCK_AGENT=claude
|
|
166
|
+
PADDOCK_BUILD_DOCKERFILE=images/Dockerfile
|
|
167
|
+
PADDOCK_BUILD_POLICY=daily
|
|
168
|
+
PADDOCK_CONFIG_FILE=/path/to/extra.toml # loads an additional TOML file
|
|
169
|
+
|
|
170
|
+
CLI flags
|
|
171
|
+
~~~~~~~~~
|
|
172
|
+
|
|
173
|
+
.. code-block:: text
|
|
174
|
+
|
|
175
|
+
paddock [FLAGS] [--] [COMMAND...]
|
|
176
|
+
|
|
177
|
+
--agent AGENT Agent key (e.g. "claude") or "false" for a shell
|
|
178
|
+
--build-args-KEY=VALUE Build-time ARG (repeatable)
|
|
179
|
+
--build-context PATH Docker build context
|
|
180
|
+
--build-dockerfile PATH Path to Dockerfile
|
|
181
|
+
--build-policy POLICY Build policy (always|daily|if-missing|weekly)
|
|
182
|
+
--config-file PATH Load an additional TOML config file
|
|
183
|
+
--dry-run Print the docker command and exit without running it
|
|
184
|
+
--image IMAGE Docker image
|
|
185
|
+
--network NETWORK Docker network
|
|
186
|
+
--quiet Suppress all logging and the docker command printout
|
|
187
|
+
--volume HOST:CONTAINER[:MODE] Extra bind-mount (repeatable)
|
|
188
|
+
--workdir PATH Host path to use as the workspace (default: CWD)
|
|
189
|
+
|
|
190
|
+
Everything after the first positional argument (or after ``--``) is passed
|
|
191
|
+
as the container command:
|
|
192
|
+
|
|
193
|
+
.. code-block:: bash
|
|
194
|
+
|
|
195
|
+
paddock --image=ubuntu:24.04 --agent=false -- bash -c "echo hello"
|
|
196
|
+
|
|
197
|
+
Agents
|
|
198
|
+
------
|
|
199
|
+
|
|
200
|
+
``claude``
|
|
201
|
+
~~~~~~~~~~
|
|
202
|
+
|
|
203
|
+
Runs ``claude`` inside the container. Mounts ``~/.claude`` from the host
|
|
204
|
+
to ``/root/.claude:rw`` so authentication and configuration persist between
|
|
205
|
+
sessions.
|
|
206
|
+
|
|
207
|
+
``false`` (shell)
|
|
208
|
+
~~~~~~~~~~~~~~~~~
|
|
209
|
+
|
|
210
|
+
Runs ``/bin/bash``. Useful for exploring the container environment or
|
|
211
|
+
running ad-hoc commands without a coding agent.
|
|
212
|
+
|
|
213
|
+
Adding agents
|
|
214
|
+
~~~~~~~~~~~~~
|
|
215
|
+
|
|
216
|
+
Additional agents can be registered via the ``paddock.agents`` entry-point
|
|
217
|
+
group in any installed package:
|
|
218
|
+
|
|
219
|
+
.. code-block:: toml
|
|
220
|
+
|
|
221
|
+
[project.entry-points."paddock.agents"]
|
|
222
|
+
my-agent = "mypackage.agents:MyAgent"
|
|
223
|
+
|
|
224
|
+
Each agent must subclass ``paddock.agents.BaseAgent`` and implement
|
|
225
|
+
``get_command()`` and ``get_volumes()``.
|
|
226
|
+
|
|
227
|
+
Docker Image
|
|
228
|
+
------------
|
|
229
|
+
|
|
230
|
+
A ready-to-use ``Dockerfile`` is included in ``images/``. It installs
|
|
231
|
+
Python (via the deadsnakes PPA), Node.js, and the selected coding agent.
|
|
232
|
+
|
|
233
|
+
Build arguments:
|
|
234
|
+
|
|
235
|
+
+--------------------+-------------------+----------------------------------------------+
|
|
236
|
+
| ARG | Default | Description |
|
|
237
|
+
+====================+===================+==============================================+
|
|
238
|
+
| ``UBUNTU_VERSION`` | ``24.04`` | Ubuntu base image tag |
|
|
239
|
+
+--------------------+-------------------+----------------------------------------------+
|
|
240
|
+
| ``AGENT`` | ``none`` | ``claude`` or ``none`` |
|
|
241
|
+
+--------------------+-------------------+----------------------------------------------+
|
|
242
|
+
| ``NODE_VERSION`` | ``22`` | Node.js major version |
|
|
243
|
+
+--------------------+-------------------+----------------------------------------------+
|
|
244
|
+
| ``PYTHON_VERSION`` | ``3.13`` | Python version (installed from deadsnakes) |
|
|
245
|
+
+--------------------+-------------------+----------------------------------------------+
|
|
246
|
+
|
|
247
|
+
Build the image manually:
|
|
248
|
+
|
|
249
|
+
.. code-block:: bash
|
|
250
|
+
|
|
251
|
+
docker build \
|
|
252
|
+
--build-arg AGENT=claude \
|
|
253
|
+
-t my-claude-image \
|
|
254
|
+
-f images/Dockerfile .
|
|
255
|
+
|
|
256
|
+
Or set a ``[build]`` table in your config and let paddock build it
|
|
257
|
+
automatically according to your chosen policy.
|
|
258
|
+
|
|
259
|
+
Licence
|
|
260
|
+
-------
|
|
261
|
+
|
|
262
|
+
MIT — see ``LICENCE.txt``.
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
paddock/__init__.py,sha256=kUR5RAFc7HCeiqdlX36dZOHkUI5wI6V_43RpEcD8b-0,22
|
|
2
|
+
paddock/__main__.py,sha256=xtxndLPFSAF1l9secx6PFPntslF8T4YBKXURjclBplE,2814
|
|
3
|
+
paddock/cli.py,sha256=fMYhYOnxVtPYdBk24-SEYDfqjJd6hVW3bCAW8GME4D8,4577
|
|
4
|
+
paddock/agents/__init__.py,sha256=RHWFXAucLHS8V1bWir3zb1X9pF8-M4WhtQ1p3BO-ZjU,1424
|
|
5
|
+
paddock/agents/claude.py,sha256=osocoPSF_JnmSi7Wn0z2bqt5WZ1qWBtrMhgKQ-a7kvk,383
|
|
6
|
+
paddock/agents/shell.py,sha256=tAJrxb4e6ooGexCY2ST-8sdGWKtb70iNzII2LB49oyU,308
|
|
7
|
+
paddock/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
8
|
+
paddock/config/filters.py,sha256=dJk_FQdWIOX440woE-xc0CN4zOx14Lg_4QHRvGzPrac,2059
|
|
9
|
+
paddock/config/loader.py,sha256=ckq3lZBAY_3QeAQ4826ehymEzWpapcpBY2DmqPLwCW0,10943
|
|
10
|
+
paddock/config/schema.py,sha256=ASdJ3qyphiEg7zNA6XAfa0ebJYgCWqYWcAFpgx_92yg,1689
|
|
11
|
+
paddock/docker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
12
|
+
paddock/docker/build.py,sha256=CGNGlmmNT91fVuzgrg4LVSnA-WXMH4Z5Z9gRd6aZWaM,3022
|
|
13
|
+
paddock/docker/builder.py,sha256=NcvpQ-82tPnC91fQ63xuCwxfXtkyc1vXulVXg4Ky3BM,2619
|
|
14
|
+
phx_paddock-0.1.0.dist-info/METADATA,sha256=PY_jD-i0age86rnO_T77VDfAzjZrBu_NihonoMXoOZU,9678
|
|
15
|
+
phx_paddock-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
|
|
16
|
+
phx_paddock-0.1.0.dist-info/entry_points.txt,sha256=4j-ateD6UnVFJLzat-Nqx1rGGxJ4yv2KnpaELs1Kp04,151
|
|
17
|
+
phx_paddock-0.1.0.dist-info/licenses/LICENCE.txt,sha256=OFw0PwrlgLmgsHneANTra9eoHh3t8GF74MaI5Nm6jaU,1070
|
|
18
|
+
phx_paddock-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Phoenix Zerin
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|