devvy 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,182 @@
1
+ """Repo container detection — inspects the cloned workspace to determine
2
+ how to build and run the repo's own container for validation.
3
+
4
+ This module is pure filesystem inspection — no Docker calls, fully testable
5
+ in isolation.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import json as _json
11
+ from pathlib import Path
12
+ from typing import Literal
13
+
14
+
15
# Services whose names suggest infrastructure rather than the app itself.
# We skip these when auto-detecting the app service from docker-compose.
# Names are compared lowercase (see _pick_compose_service's name.lower()),
# so only lowercase entries belong here.
_INFRA_SERVICE_NAMES = frozenset(
    {
        "db",
        "database",
        "redis",
        "postgres",
        "postgresql",
        "mysql",
        "mongo",
        "mongodb",
        "mssql",
        "sqlserver",
        "cache",
        "rabbitmq",
        "kafka",
        "worker",
        "nginx",
        "proxy",
        "elasticsearch",
        "elastic",
    }
)
39
+
40
+ from dataclasses import dataclass
41
+
42
+
43
@dataclass
class RepoContainerConfig:
    """How to build and run the repo's own container for validation.

    Exactly one "kind" of fields is populated:
      - ``kind == "compose"``:    ``compose_file`` + ``service`` are set
      - ``kind == "dockerfile"``: ``dockerfile`` + ``context`` are set
    """

    kind: Literal["compose", "dockerfile"]
    # compose only
    compose_file: str | None  # absolute host path
    service: str | None  # docker-compose service name
    # dockerfile only
    dockerfile: str | None  # path relative to workspace root
    context: str | None  # absolute host path used as build context
    # shared
    image_tag: str  # e.g. "devvy-repo-<ticket_id[:8]>"

    def to_json(self) -> str:
        """Serialize to a JSON string (inverse of :meth:`from_json`)."""
        from dataclasses import asdict

        return _json.dumps(asdict(self))

    @classmethod
    def from_json(cls, raw: str) -> "RepoContainerConfig":
        """Rebuild a config from :meth:`to_json` output.

        A classmethod (the idiomatic alternate-constructor form) rather than a
        staticmethod, so potential subclasses deserialize to themselves; the
        existing ``RepoContainerConfig.from_json(raw)`` call shape is unchanged.
        """
        return cls(**_json.loads(raw))
65
+
66
+
67
def detect_repo_container(ws: Path, ticket_id: str) -> RepoContainerConfig:
    """
    Inspect the cloned workspace and return a RepoContainerConfig describing
    how to build and run the repo's own container for validation.

    Detection order:
      1. docker-compose.yml / docker-compose.yaml / compose.yml / compose.yaml
         — pick the first non-infra service that has a ``build:`` key.
      2. .devcontainer/devcontainer.json — VS Code devcontainer spec.
      3. Dockerfile at the repo root (case-insensitive).

    Raises RuntimeError if none of the above are found, with a helpful message.
    """
    image_tag = f"devvy-repo-{ticket_id[:8]}"

    # 1. docker-compose — legacy names first (preserves prior precedence),
    #    then the Compose-v2 canonical names compose.yml / compose.yaml.
    compose_names = (
        "docker-compose.yml",
        "docker-compose.yaml",
        "compose.yml",
        "compose.yaml",
    )
    for compose_name in compose_names:
        compose_path = ws / compose_name
        if not compose_path.is_file():
            continue
        service = _pick_compose_service(compose_path)
        if service:
            return RepoContainerConfig(
                kind="compose",
                compose_file=str(compose_path),
                service=service,
                dockerfile=None,
                context=None,
                image_tag=image_tag,
            )

    # 2. .devcontainer/devcontainer.json
    devcontainer = ws / ".devcontainer" / "devcontainer.json"
    if devcontainer.is_file():
        # A broken devcontainer (unreadable file, invalid JSON, or a JSON
        # document that is not an object) must not abort detection — we fall
        # through to the root-Dockerfile check instead.
        try:
            cfg = _json.loads(devcontainer.read_text())
            build_cfg = cfg.get("build", {})
            dockerfile = build_cfg.get("dockerfile") or cfg.get("dockerFile")
            if dockerfile:
                # NOTE(review): the devcontainer spec resolves build.context
                # relative to the .devcontainer directory, not the repo root —
                # confirm this matches how the context is consumed downstream.
                context = str(ws / build_cfg.get("context", "."))
                return RepoContainerConfig(
                    kind="dockerfile",
                    compose_file=None,
                    service=None,
                    dockerfile=dockerfile,
                    context=context,
                    image_tag=image_tag,
                )
        except (OSError, ValueError, AttributeError, TypeError):
            pass

    # 3. Root Dockerfile (case-insensitive — some repos use lowercase 'dockerfile')
    for dockerfile_name in ("Dockerfile", "dockerfile"):
        if (ws / dockerfile_name).is_file():
            return RepoContainerConfig(
                kind="dockerfile",
                compose_file=None,
                service=None,
                dockerfile=dockerfile_name,
                context=str(ws),
                image_tag=image_tag,
            )

    raise RuntimeError(
        "devvy could not find a container config in this repo.\n"
        "Add one of the following so devvy knows how to run validation:\n"
        " • docker-compose.yml (with a build: service)\n"
        " • .devcontainer/devcontainer.json\n"
        " • Dockerfile (at the repo root)"
    )
136
+
137
+
138
def _pick_compose_service(compose_path: Path) -> str | None:
    """
    Parse a docker-compose file and return the name of the best candidate
    app service (has a ``build:`` key, not an infra service by name).

    Preferred names (``app``, ``api``, ``service``) are chosen first if present.
    Otherwise the first non-infra service with a ``build:`` key wins.

    Returns None if no suitable service is found (including when pyyaml is
    unavailable or the file cannot be read/parsed).
    """
    # Well-known app service names: checked before falling back to positional order.
    _PREFERRED_SERVICE_NAMES = ("app", "api", "service")

    try:
        # Avoid adding a pyyaml hard-dependency: use a minimal regex-free
        # approach — just try importing yaml and fall back to None on failure.
        import yaml  # type: ignore[import-untyped]

        data = yaml.safe_load(compose_path.read_text())
    except Exception:
        return None

    # A valid compose file has a mapping at the top level; anything else
    # (list, scalar, empty document) yields no candidates instead of crashing.
    services: dict = data.get("services", {}) if isinstance(data, dict) else {}

    # Collect all buildable, non-infra services preserving declaration order.
    candidates: list[str] = []
    for name, svc in services.items():
        # YAML allows non-string mapping keys (e.g. a bare `80:`); skip them
        # rather than crashing on `.lower()` below.
        if not isinstance(name, str) or not isinstance(svc, dict):
            continue
        if "build" not in svc:
            continue
        if name.lower() in _INFRA_SERVICE_NAMES:
            continue
        candidates.append(name)

    if not candidates:
        return None

    # Prefer well-known app service names over positional order.
    for preferred in _PREFERRED_SERVICE_NAMES:
        for name in candidates:
            if name.lower() == preferred:
                return name

    return candidates[0]
@@ -0,0 +1,203 @@
1
+ """Validation pipeline — builds the repo's own container image, runs the
2
+ three standard validation scripts (format, code-checks, run-tests), then
3
+ tears the container down.
4
+
5
+ Validation is intentionally separate from workspace lifecycle so it can be
6
+ called multiple times (self-fix retry loop) without re-cloning.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import asyncio
12
+ from dataclasses import dataclass
13
+ from pathlib import Path
14
+
15
+ from cli.local_runner.docker_primitives import CONTAINER_WORKSPACE, _run
16
+ from cli.local_runner.repo_detection import RepoContainerConfig
17
+
18
+
19
# Conventional locations for validation scripts, relative to /workspace.
# run_validation executes them in this order; format.sh failures are
# non-fatal, the other two fail the validation run on non-zero exit.
_FORMAT_SCRIPT = "src/scripts/format.sh"
_CODE_CHECKS_SCRIPT = "src/scripts/code-checks.sh"
_RUN_TESTS_SCRIPT = "src/scripts/run-tests.sh"
23
+
24
+
25
@dataclass
class ValidationResult:
    """Outcome of one ``run_validation`` call."""

    # True only when every fatal script (code-checks.sh, run-tests.sh) that
    # exists in the repo exited 0. format.sh never affects this flag.
    passed: bool
    # Combined output of all executed scripts, one "--- <script> ---" section
    # per script, newline-joined.
    output: str
29
+
30
+
31
async def _file_exists_in_container(container_id: str, path: str) -> bool:
    """Return True if *path* exists (as a regular file) inside the container.

    Implemented via ``docker exec … test -f``; a non-zero exit surfaces as a
    RuntimeError from ``_run`` and is mapped to False.
    """
    probe = ["docker", "exec", container_id, "test", "-f", path]
    try:
        await _run(probe, check=True)
    except RuntimeError:
        return False
    return True
38
+
39
+
40
async def _exec_script(
    container_id: str,
    script_path: str,
    env: dict[str, str] | None = None,
    workdir: str | None = None,
) -> tuple[bool, str]:
    """
    Execute a shell script inside the container via ``docker exec``.

    Non-zero exits never raise: the failure text is captured and returned.
    Returns ``(passed, output)``.
    """
    docker_cmd: list[str] = ["docker", "exec"]
    if workdir:
        docker_cmd.extend(["--workdir", workdir])
    for key, value in (env or {}).items():
        docker_cmd.extend(["-e", f"{key}={value}"])
    docker_cmd.extend([container_id, "bash", script_path])

    try:
        output = await _run(docker_cmd, check=True)
    except RuntimeError as exc:
        return False, str(exc)
    return True, output
64
+
65
+
66
async def run_validation(repo_cfg: RepoContainerConfig) -> ValidationResult:
    """
    Build the repo's own container image, spin up a temporary instance with the
    workspace bind-mounted, run the repo's validation scripts in order, then
    tear the container down.

    Script execution order (each looked up at /workspace/src/scripts/):
      1. format.sh — auto-format (non-fatal: applies changes, always runs)
      2. code-checks.sh — lint + types (fatal on non-zero exit)
      3. run-tests.sh — test suite (fatal on non-zero exit)

    Raises RuntimeError if no validation scripts are found at all.
    Returns a ValidationResult — never raises on script failure itself.
    """
    # Host directory bind-mounted into the validation container: the build
    # context for dockerfile configs, or the compose file's parent directory.
    ws = (
        Path(repo_cfg.context or "")
        if repo_cfg.kind == "dockerfile"
        else Path(repo_cfg.compose_file or "").parent
    )
    # image_tag is "devvy-repo-<ticket8>", so the last '-' segment is the
    # ticket prefix — giving "devvy-validate-<ticket8>".
    container_name = f"devvy-validate-{repo_cfg.image_tag.split('-')[-1]}"
    combined: list[str] = []  # per-script output sections, joined at the end
    all_passed = True

    # ------------------------------------------------------------------
    # 1. Build the repo's image
    # ------------------------------------------------------------------
    if repo_cfg.kind == "compose":
        if not repo_cfg.compose_file or not repo_cfg.service:
            raise RuntimeError("RepoContainerConfig missing compose_file or service")
        compose_file: str = repo_cfg.compose_file
        service: str = repo_cfg.service
        await _run(["docker", "compose", "-f", compose_file, "build", service])
        # Docker Compose names built images as <project>-<service>:latest where
        # project is the lowercase directory name of the compose file's parent.
        # `docker compose images -q` only lists images for *running* containers
        # so it returns nothing here — derive the name directly instead.
        # NOTE(review): Compose also normalizes project names (strips chars
        # outside [a-z0-9_-]) — confirm workspace dir names can't contain those.
        project = Path(compose_file).parent.name.lower()
        image_name = f"{project}-{service}:latest"
    else:
        if not repo_cfg.dockerfile or not repo_cfg.context:
            raise RuntimeError("RepoContainerConfig missing dockerfile or context")
        dockerfile: str = repo_cfg.dockerfile
        context: str = repo_cfg.context
        # NOTE(review): `docker build -f` resolves *dockerfile* relative to the
        # CLI's current working directory, not the build context — confirm the
        # relative path is reachable from wherever devvy is invoked.
        await _run(
            ["docker", "build", "-f", dockerfile, "-t", repo_cfg.image_tag, context]
        )
        image_name = repo_cfg.image_tag

    # ------------------------------------------------------------------
    # 2. Spin up a temporary validation container
    # ------------------------------------------------------------------
    # Remove any stale container with the same name first
    await _run(["docker", "rm", "--force", container_name], check=False)

    # `docker run --detach` prints the new container ID; `sleep infinity`
    # keeps the container alive so we can `docker exec` scripts into it.
    validate_container_id = (
        await _run(
            [
                "docker",
                "run",
                "--detach",
                "--name",
                container_name,
                "-v",
                f"{ws}:{CONTAINER_WORKSPACE}",
                image_name,
                "sleep",
                "infinity",
            ]
        )
    ).strip()

    try:
        # ------------------------------------------------------------------
        # 3. Discover which scripts exist
        # ------------------------------------------------------------------
        format_path = f"{CONTAINER_WORKSPACE}/{_FORMAT_SCRIPT}"
        checks_path = f"{CONTAINER_WORKSPACE}/{_CODE_CHECKS_SCRIPT}"
        tests_path = f"{CONTAINER_WORKSPACE}/{_RUN_TESTS_SCRIPT}"

        # The three existence probes are independent — run them concurrently.
        format_exists, checks_exists, tests_exists = await asyncio.gather(
            _file_exists_in_container(validate_container_id, format_path),
            _file_exists_in_container(validate_container_id, checks_path),
            _file_exists_in_container(validate_container_id, tests_path),
        )

        if not (format_exists or checks_exists or tests_exists):
            raise RuntimeError(
                "No validation scripts found in this repo.\n"
                f"Expected at least one of:\n"
                f" • {_FORMAT_SCRIPT}\n"
                f" • {_CODE_CHECKS_SCRIPT}\n"
                f" • {_RUN_TESTS_SCRIPT}"
            )

        # ------------------------------------------------------------------
        # 4a. format.sh — non-fatal, always run first if present
        #     Changes it writes land in /workspace (shared bind-mount) so
        #     the devvy-worker container and subsequent git commits pick them up.
        # ------------------------------------------------------------------
        if format_exists:
            _, out = await _exec_script(validate_container_id, format_path)
            combined.append(f"--- format.sh ---\n{out}")
            # Non-fatal: log the result but don't fail validation

        # ------------------------------------------------------------------
        # 4b. code-checks.sh — fatal
        # ------------------------------------------------------------------
        if checks_exists:
            passed, out = await _exec_script(validate_container_id, checks_path)
            combined.append(f"--- code-checks.sh ---\n{out}")
            if not passed:
                all_passed = False

        # ------------------------------------------------------------------
        # 4c. run-tests.sh — fatal
        # ------------------------------------------------------------------
        if tests_exists:
            passed, out = await _exec_script(validate_container_id, tests_path)
            combined.append(f"--- run-tests.sh ---\n{out}")
            if not passed:
                all_passed = False

    finally:
        # ------------------------------------------------------------------
        # 5. Always tear down the validation container and its image.
        #    The repo image is a ~1-2 GB throwaway rebuilt every validation
        #    run (including retries), so we remove it immediately to avoid
        #    filling disk. Dangling build-cache layers are pruned too.
        #    All teardown commands use check=False: cleanup failures must not
        #    mask the real validation outcome (or a RuntimeError from above).
        # ------------------------------------------------------------------
        await _run(["docker", "stop", validate_container_id], check=False)
        await _run(["docker", "rm", "--force", validate_container_id], check=False)
        await _run(["docker", "rmi", "--force", image_name], check=False)
        await _run(
            ["docker", "builder", "prune", "--filter", "dangling=true", "--force"],
            check=False,
        )

    return ValidationResult(passed=all_passed, output="\n".join(combined))
@@ -0,0 +1,295 @@
1
+ """Workspace lifecycle and agent operations.
2
+
3
+ Covers the full lifecycle of a devvy workspace:
4
+ - prepare_workspace — clone, start container, detect repo config
5
+ - recover_container — re-attach or re-spawn for a resume
6
+ - run_opencode — invoke opencode inside the worker container
7
+ - run_git — git passthrough into the container
8
+ - commit_if_changed — stage + commit if there are changes
9
+ - push_branch — push agent branch to origin
10
+ - read_pr_template — find ADO PR template in the workspace
11
+ - cleanup_workspace — stop container, delete workspace dir
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json as _json
17
+ import shutil
18
+ import sys
19
+ from dataclasses import dataclass
20
+ from pathlib import Path
21
+
22
+ from cli.local_runner.credentials import _write_git_credentials
23
+ from cli.local_runner.docker_primitives import (
24
+ CONTAINER_WORKSPACE,
25
+ WORKSPACE_BASE,
26
+ _check_tool,
27
+ _check_worker_image,
28
+ _run,
29
+ _start_worker_container,
30
+ )
31
+ from cli.local_runner.repo_detection import RepoContainerConfig, detect_repo_container
32
+
33
+
34
@dataclass
class WorkspaceSetup:
    """Return value of ``prepare_workspace``, grouping all workspace artefacts."""

    # Agent branch created inside the container, "agent/<ticket_id[:8]>".
    branch_name: str
    # Host path of the cloned workspace directory.
    workspace: Path
    # ID of the running devvy worker container.
    container_id: str
    # Default branch detected from origin/HEAD (falls back to "main").
    default_branch: str
    # How to build/run the repo's own container for validation.
    repo_cfg: RepoContainerConfig
43
+
44
+
45
def workspace_path(ticket_id: str) -> Path:
    """Return the host directory holding the clone for *ticket_id*."""
    return WORKSPACE_BASE.joinpath(ticket_id)
47
+
48
+
49
async def prepare_workspace(
    ticket_id: str,
    repo_url: str,
    ado_pat: str,
    env_file: str | None = None,
) -> WorkspaceSetup:
    """
    Clone the repo on the host, start an ephemeral worker container, and
    detect the repo's own container config for validation.

    If ``env_file`` is set and points to a readable file, it is copied into the
    cloned workspace as ``.env`` before the container image is built. This is
    needed for repos whose docker-compose references a ``.env`` that is gitignored.

    Returns a WorkspaceSetup with branch_name, workspace path, container_id,
    default_branch, and repo_cfg.

    Raises RuntimeError (via the helpers) if docker/git are missing, the clone
    fails, or no container config can be detected in the repo.
    """
    _check_tool("docker")
    _check_tool("git")
    await _check_worker_image()

    ws = workspace_path(ticket_id)
    ws.parent.mkdir(parents=True, exist_ok=True)

    # A stale workspace from a previous run would make the clone fail.
    if ws.exists():
        shutil.rmtree(ws)

    # Write a temporary git credentials file outside the workspace so the PAT
    # is never embedded in .git/config (where the validation container could read it).
    creds_path = _write_git_credentials(ticket_id, repo_url, ado_pat)
    try:
        await _run(
            [
                "git",
                "-c",
                f"credential.helper=store --file {creds_path}",
                "clone",
                repo_url,
                str(ws),
            ]
        )
    finally:
        # Always delete the PAT file, even if the clone fails.
        creds_path.unlink(missing_ok=True)

    # Copy the .env file into the workspace if one was configured.
    # docker-compose files typically reference .env for port/password vars, but
    # .env is gitignored so it won't be present in the clone.
    if env_file:
        src_env = Path(env_file).expanduser()
        if src_env.is_file():
            shutil.copy2(src_env, ws / ".env")
        else:
            # Non-fatal — warn but continue; the build may still succeed if
            # all referenced vars have defaults or are not strictly required.
            print(
                f"Warning: env_file '{env_file}' not found — skipping .env copy into workspace",
                file=sys.stderr,
            )

    # Detect the default branch from the cloned repo (e.g. "main" or "master")
    try:
        raw = await _run(["git", "symbolic-ref", "refs/remotes/origin/HEAD"], cwd=ws)
        # Output is e.g. "refs/remotes/origin/main\n". Strip the ref prefix
        # rather than split("/")[-1] so default branches containing slashes
        # (e.g. "release/1.0") resolve to their full name instead of the
        # last path segment.
        default_branch = raw.strip().removeprefix("refs/remotes/origin/") or "main"
    except RuntimeError:
        default_branch = "main"

    # Detect the repo's own container config for validation
    repo_cfg = detect_repo_container(ws, ticket_id)

    # Spawn the worker container with the workspace and opencode auth mounted,
    # and install git credentials inside it.
    container_id = await _start_worker_container(ticket_id, ws, repo_url, ado_pat)

    # Create and check out the agent branch inside the container
    branch_name = f"agent/{ticket_id[:8]}"
    await run_git(container_id, ["checkout", "-b", branch_name])

    return WorkspaceSetup(
        branch_name=branch_name,
        workspace=ws,
        container_id=container_id,
        default_branch=default_branch,
        repo_cfg=repo_cfg,
    )
134
+
135
+
136
async def run_opencode(
    container_id: str,
    prompt: str,
    model: str,
    session_id: str | None = None,
) -> tuple[str, str]:
    """
    Execute `opencode run --format json --model <model> [--session <id> --continue]
    <prompt>` inside the worker container and parse its JSON-lines output.

    Returns (text_output, session_id).
    """
    cmd = [
        "docker",
        "exec",
        container_id,
        "opencode",
        "run",
        "--format",
        "json",
        "--model",
        model,
    ]
    if session_id:
        cmd.extend(["--session", session_id, "--continue"])
    cmd.append(prompt)

    raw = await _run(cmd, check=False)

    found_session: str = session_id or ""
    chunks: list[str] = []

    for raw_line in raw.splitlines():
        stripped = raw_line.strip()
        if not stripped:
            continue
        try:
            event = _json.loads(stripped)
        except _json.JSONDecodeError:
            # Non-JSON noise (progress lines, warnings) is skipped.
            continue

        # First event carrying a sessionID wins, unless the caller passed one.
        if not found_session:
            found_session = event.get("sessionID", "")

        if event.get("type") == "text":
            chunks.append(event.get("part", {}).get("text", ""))

    return "".join(chunks), found_session
185
+
186
+
187
async def run_git(container_id: str, args: list[str]) -> str:
    """Run a git command inside the worker container at /workspace."""
    git_cmd = ["docker", "exec", container_id, "git", "-C", CONTAINER_WORKSPACE, *args]
    return await _run(git_cmd)
192
+
193
+
194
async def commit_if_changed(container_id: str, message: str) -> bool:
    """
    Stage all changes and commit if there is anything to commit.

    Returns True if a commit was made, False if the working tree was clean.
    """
    await run_git(container_id, ["add", "-A"])
    porcelain = await run_git(container_id, ["status", "--porcelain"])
    if porcelain.strip():
        await run_git(container_id, ["commit", "-m", message])
        return True
    return False
206
+
207
+
208
async def push_branch(container_id: str, branch: str) -> None:
    """Push the agent branch to origin from inside the container."""
    push_args = ["push", "--set-upstream", "origin", branch]
    await run_git(container_id, push_args)
211
+
212
+
213
def read_pr_template(ticket_id: str) -> str | None:
    """
    Look for a PR template in the cloned workspace on the host.

    Checks ADO-standard locations in order:
      1. .azuredevops/pull_request_template.md
      2. docs/pull_request_template.md
      3. pull_request_template.md

    Returns the template text, or None if no template is found.
    """
    ws = workspace_path(ticket_id)
    search_order = (
        ws / ".azuredevops" / "pull_request_template.md",
        ws / "docs" / "pull_request_template.md",
        ws / "pull_request_template.md",
    )
    template_file = next((p for p in search_order if p.is_file()), None)
    if template_file is None:
        return None
    return template_file.read_text(encoding="utf-8")
234
+
235
+
236
async def recover_container(
    ticket_id: str,
    container_id: str | None,
    repo_url: str,
    ado_pat: str,
) -> tuple[str, bool]:
    """Re-attach to a running container or spawn a fresh one for an existing workspace.

    Three cases:
      1. *container_id* is set and the container is still running → return it unchanged.
      2. The workspace dir exists on disk → ``docker run`` a new container with the
         same bind-mount. The agent branch is already checked out; no re-clone or
         re-checkout needed. Git credentials are restored inside the fresh container.
      3. Neither → raise RuntimeError (workspace lost, must start a fresh ticket).

    Returns ``(container_id, fresh)`` where *fresh* is True only when a new
    container was spawned (so the caller knows to clear ``opencode_session_id``).
    """
    _check_tool("docker")
    await _check_worker_image()

    # Case 1: existing container still alive?
    if container_id:
        try:
            state = await _run(
                ["docker", "inspect", "--format", "{{.State.Running}}", container_id],
                check=True,
            )
        except RuntimeError:
            state = ""  # container gone — fall through to case 2
        if state.strip() == "true":
            return container_id, False

    # Case 2: workspace on disk — spin up a fresh container
    ws = workspace_path(ticket_id)
    if not ws.exists():
        raise RuntimeError(
            f"Workspace for ticket {ticket_id[:8]} no longer exists on disk. "
            "The ticket cannot be resumed — start a fresh run instead."
        )

    # Remove any dead container with the same name so docker run doesn't conflict.
    await _run(
        ["docker", "rm", "--force", f"devvy-worker-{ticket_id[:8]}"], check=False
    )

    fresh_container_id = await _start_worker_container(ticket_id, ws, repo_url, ado_pat)
    return fresh_container_id, True
284
+
285
+
286
async def cleanup_workspace(ticket_id: str, container_id: str | None) -> None:
    """Stop and remove the worker container, then delete the workspace directory."""
    if container_id:
        # Graceful stop first, then force-remove; both are best-effort.
        for docker_args in (["stop", container_id], ["rm", "--force", container_id]):
            await _run(["docker", *docker_args], check=False)

    ws = workspace_path(ticket_id)
    if ws.exists():
        shutil.rmtree(ws, ignore_errors=True)
cli/main.py ADDED
@@ -0,0 +1,22 @@
1
"""coding-agent CLI — entry point."""

import typer

from cli.commands import init, logs, ps, resume, run, status

app = typer.Typer(
    name="devvy",
    help="Autonomous coding agent CLI",
    no_args_is_help=True,
)

# `init` is a sub-app; the rest are plain commands, registered in a loop to
# keep the name→callback table in one place.
app.add_typer(init.app, name="init")
for _name, _callback in (
    ("run", run.run),
    ("resume", resume.resume),
    ("status", status.status),
    ("logs", logs.logs),
    ("ps", ps.ps),
):
    app.command(_name)(_callback)


if __name__ == "__main__":
    app()