etch-loop 0.3.2__tar.gz → 0.4.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24) hide show
  1. {etch_loop-0.3.2 → etch_loop-0.4.2}/PKG-INFO +1 -1
  2. {etch_loop-0.3.2 → etch_loop-0.4.2}/pyproject.toml +1 -1
  3. etch_loop-0.4.2/src/etch/__init__.py +1 -0
  4. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/analyze.py +73 -0
  5. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/cli.py +1 -0
  6. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/loop.py +72 -3
  7. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/prompt.py +29 -0
  8. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/report.py +8 -0
  9. etch_loop-0.4.2/src/etch/templates/RUN.md +20 -0
  10. etch_loop-0.3.2/src/etch/__init__.py +0 -1
  11. {etch_loop-0.3.2 → etch_loop-0.4.2}/.github/workflows/workflow.yml +0 -0
  12. {etch_loop-0.3.2 → etch_loop-0.4.2}/README.md +0 -0
  13. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/agent.py +0 -0
  14. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/display.py +0 -0
  15. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/git.py +0 -0
  16. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/signals.py +0 -0
  17. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/templates/BREAK.md +0 -0
  18. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/templates/ETCH.md +0 -0
  19. {etch_loop-0.3.2 → etch_loop-0.4.2}/src/etch/templates/SCAN.md +0 -0
  20. {etch_loop-0.3.2 → etch_loop-0.4.2}/tests/__init__.py +0 -0
  21. {etch_loop-0.3.2 → etch_loop-0.4.2}/tests/test_git.py +0 -0
  22. {etch_loop-0.3.2 → etch_loop-0.4.2}/tests/test_loop.py +0 -0
  23. {etch_loop-0.3.2 → etch_loop-0.4.2}/tests/test_prompt.py +0 -0
  24. {etch_loop-0.3.2 → etch_loop-0.4.2}/tests/test_signals.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: etch-loop
3
- Version: 0.3.2
3
+ Version: 0.4.2
4
4
  Summary: Run Claude Code in a fix-break loop until your codebase is clean
5
5
  License: MIT
6
6
  Requires-Python: >=3.11
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "etch-loop"
3
- version = "0.3.2"
3
+ version = "0.4.2"
4
4
  requires-python = ">=3.11"
5
5
  description = "Run Claude Code in a fix-break loop until your codebase is clean"
6
6
  readme = "README.md"
@@ -0,0 +1 @@
1
+ __version__ = "0.4.2"
@@ -2,6 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
+ import json
5
6
  import subprocess
6
7
  from collections import Counter
7
8
  from pathlib import Path
@@ -259,6 +260,78 @@ Be adversarial — think like someone actively trying to make this code fail.
259
260
  """
260
261
 
261
262
 
263
def build_run_md(info: dict) -> str:
    """Generate a tailored RUN.md based on detected build system."""
    # Fall back to the current working directory when the analyzer
    # did not record a project root.
    project_root = info.get("root", Path.cwd())
    detected = _detect_run_commands(project_root)

    # Render detected commands as a markdown bullet list; if nothing was
    # detected, leave a placeholder instructing the agent to figure it out.
    cmd_list = (
        "\n".join(f"- `{command}`" for command in detected)
        if detected
        else "- (detect and run the appropriate build/test command for this project)"
    )

    return f"""# RUN — build and test validation

You are a build validator. The fixer has made changes. Your job is to run the project's build and test suite to confirm everything still works.

## Commands to run

{cmd_list}

## Rules

1. Run each command and observe the output
2. If ALL commands pass:
   - Write `ETCH_SUMMARY: <e.g. "all 47 tests passed">`
   - Write `ETCH_ALL_CLEAR`
3. If ANY command fails:
   - Write `ETCH_SUMMARY: <what failed, e.g. "3 tests failed in test_auth.py — TypeError on line 42">`
   - Include the relevant error output so the fixer can diagnose it
   - Write `ETCH_ISSUES_FOUND`

Do not fix anything — only run and report.
"""
294
+
295
+
296
def _detect_run_commands(root: Path) -> list[str]:
    """Detect build/test commands from project files."""
    found: list[str] = []

    # Python: either packaging file implies pytest is the sensible default.
    if (root / "pyproject.toml").exists() or (root / "setup.py").exists():
        found.append("python -m pytest")

    # Node: inspect package.json scripts; if it is unreadable or malformed,
    # fall back to a plain `npm test` rather than skipping the project.
    package_json = root / "package.json"
    if package_json.exists():
        try:
            manifest = json.loads(package_json.read_text(encoding="utf-8"))
        except (OSError, json.JSONDecodeError):
            found.append("npm test")
        else:
            scripts = manifest.get("scripts", {})
            if "build" in scripts:
                found.append("npm run build")
            if "test" in scripts:
                found.append("npm test")

    # Ecosystems with a single marker file and a canonical test command.
    for marker, command in (
        ("Cargo.toml", "cargo test"),
        ("go.mod", "go test ./..."),
        ("Gemfile", "bundle exec rspec"),
        ("mix.exs", "mix test"),
        ("pom.xml", "mvn test -q"),
    ):
        if (root / marker).exists():
            found.append(command)

    # Last resort: a Makefile, but only when nothing else matched.
    if not found and (root / "Makefile").exists():
        found.append("make test")

    return found
333
+
334
+
262
335
  def _format_scope(info: dict) -> str:
263
336
  lines = []
264
337
  if info["source_dirs"]:
@@ -40,6 +40,7 @@ def init() -> None:
40
40
  (etch_dir / "SCAN.md", analyze.build_scan_md(info, agent_scope), "etch-loop/SCAN.md"),
41
41
  (etch_dir / "ETCH.md", analyze.build_etch_md(info, agent_scope), "etch-loop/ETCH.md"),
42
42
  (etch_dir / "BREAK.md", analyze.build_break_md(info, agent_scope), "etch-loop/BREAK.md"),
43
+ (etch_dir / "RUN.md", analyze.build_run_md(info), "etch-loop/RUN.md"),
43
44
  ]:
44
45
  if dest.exists():
45
46
  disp.add_line(display.SYM_NEUTRAL, display.DIM, f"{label} already exists, skipping")
@@ -49,6 +49,9 @@ def run(
49
49
  display.print_error(str(exc))
50
50
  return
51
51
 
52
+ # Runner is optional — None means the phase is skipped
53
+ run_text = prompt.load_run(prompt_path)
54
+
52
55
  if focus:
53
56
  scan_text += f"\n\n## User focus\n\nConcentrate on: {focus}\n"
54
57
  break_text += f"\n\n## User focus\n\nConcentrate your adversarial review on: {focus}\n"
@@ -63,9 +66,59 @@ def run(
63
66
  }
64
67
  last_breaker_signal: str | None = None
65
68
  last_breaker_output: str | None = None
69
+ last_runner_output: str | None = None
66
70
  iteration_log: list[dict] = []
67
71
 
68
72
  with display.EtchDisplay(target=str(prompt_path.parent)) as disp:
73
+
74
+ # ── Runner helper — called at every clean exit point ──────────────────
75
+ def try_runner(iter_entry: dict) -> str:
76
+ """Run the runner phase if configured.
77
+
78
+ Returns:
79
+ "skip" — no RUN.md, proceed with clean exit
80
+ "clear" — runner passed, proceed with clean exit
81
+ "issues" — runner failed, continue the loop
82
+ "error" — agent error, break the loop
83
+ """
84
+ nonlocal last_runner_output
85
+ if not run_text:
86
+ return "skip"
87
+
88
+ disp.start_phase("runner")
89
+ runner_start = time.monotonic()
90
+ try:
91
+ runner_output = agent.run(run_text, verbose=verbose)
92
+ except AgentError as exc:
93
+ disp.finish_phase("runner", status="error", detail=str(exc),
94
+ duration=time.monotonic() - runner_start, success=False)
95
+ return "error"
96
+
97
+ runner_duration = time.monotonic() - runner_start
98
+ runner_signal = signals.parse(runner_output)
99
+ runner_detail = (
100
+ signals.extract_summary(runner_output)
101
+ or signals.extract_finding(runner_output)
102
+ )
103
+
104
+ if runner_signal == "clear":
105
+ disp.finish_phase("runner", status="all clear",
106
+ detail=runner_detail or "build passed",
107
+ duration=runner_duration, success=True)
108
+ iter_entry["runner"] = {"status": "all clear", "detail": runner_detail}
109
+ last_runner_output = None
110
+ return "clear"
111
+ else:
112
+ disp.record_issue()
113
+ stats["issues"] += 1
114
+ disp.finish_phase("runner", status="build failed",
115
+ detail=runner_detail or "build failed",
116
+ duration=runner_duration, success=False)
117
+ iter_entry["runner"] = {"status": "build failed", "detail": runner_detail}
118
+ last_runner_output = runner_output
119
+ return "issues"
120
+
121
+ # ── Main loop ─────────────────────────────────────────────────────────
69
122
  for iteration in range(1, max_iterations + 1):
70
123
  stats["iterations"] = iteration
71
124
  disp.start_iteration(iteration)
@@ -116,6 +169,12 @@ def run(
116
169
  f"{last_breaker_output.strip()}\n\n"
117
170
  f"Also address these if not already covered above.\n"
118
171
  )
172
+ if last_runner_output:
173
+ fixer_prompt += (
174
+ f"\n\n## Build/test failures from previous iteration\n\n"
175
+ f"{last_runner_output.strip()}\n\n"
176
+ f"Fix the underlying code issues causing these failures.\n"
177
+ )
119
178
 
120
179
  # ── Fixer phase ───────────────────────────────────────────────────
121
180
  disp.start_phase("fixer")
@@ -205,9 +264,19 @@ def run(
205
264
  detail=breaker_detail or "no issues found",
206
265
  duration=breaker_duration, success=True)
207
266
  iter_entry["breaker"] = {"status": "all clear", "detail": breaker_detail}
208
- stats["reason"] = "clear"
209
- iteration_log.append(iter_entry)
210
- break
267
+ runner_result = try_runner(iter_entry)
268
+ if runner_result == "error":
269
+ stats["reason"] = "agent_error"
270
+ iteration_log.append(iter_entry)
271
+ break
272
+ elif runner_result == "issues":
273
+ stats["reason"] = "issues"
274
+ iteration_log.append(iter_entry)
275
+ continue
276
+ else: # "clear" or "skip"
277
+ stats["reason"] = "clear"
278
+ iteration_log.append(iter_entry)
279
+ break
211
280
  else:
212
281
  disp.record_issue()
213
282
  stats["issues"] += 1
@@ -75,6 +75,35 @@ def load_break(path: str | Path | None = None) -> str:
75
75
  raise PromptError(f"BREAK.md not found. Searched: {searched}")
76
76
 
77
77
 
78
def load_run(path: str | Path | None = None) -> str | None:
    """Load RUN.md if it exists. Returns None if not found — runner phase is optional.

    Args:
        path: Optional path. If this is ETCH.md, looks for RUN.md alongside it.

    Returns:
        File contents as a string, or None if RUN.md is not present.
    """
    search_order: list[Path] = []

    if path is not None:
        given = Path(path)
        # An explicit RUN.md path is used directly; any other file
        # (e.g. ETCH.md) means "look for RUN.md in the same directory".
        if given.name.upper() == "RUN.MD":
            search_order.append(given)
        else:
            search_order.append(given.parent / "RUN.md")

    # Always fall back to the current working directory.
    search_order.append(Path.cwd() / "RUN.md")

    for location in search_order:
        if not location.is_file():
            continue
        text = location.read_text(encoding="utf-8")
        # Treat an empty/whitespace-only RUN.md the same as a missing one.
        if text.strip():
            return text

    return None  # Optional phase — no error if absent
105
+
106
+
78
107
  def load_scan(path: str | Path | None = None) -> str:
79
108
  """Load and return the content of SCAN.md.
80
109
 
@@ -67,6 +67,14 @@ def write(
67
67
  if detail:
68
68
  lines.append(f"\n> {detail}\n")
69
69
 
70
+ runner = entry.get("runner")
71
+ if runner:
72
+ status = runner.get("status", "")
73
+ detail = runner.get("detail", "")
74
+ lines.append(f"**runner** — {status}")
75
+ if detail:
76
+ lines.append(f"\n> {detail}\n")
77
+
70
78
  path.write_text("\n".join(lines), encoding="utf-8")
71
79
  return path
72
80
 
@@ -0,0 +1,20 @@
1
+ # RUN — build and test validation
2
+
3
+ You are a build validator. The fixer has made changes. Your job is to run the project's build and test suite to confirm everything still works.
4
+
5
+ ## Commands to run
6
+
7
+ [configured by etch init]
8
+
9
+ ## Rules
10
+
11
+ 1. Run each command and observe the output
12
+ 2. If ALL commands pass:
13
+ - Write `ETCH_SUMMARY: <e.g. "all 47 tests passed">`
14
+ - Write `ETCH_ALL_CLEAR`
15
+ 3. If ANY command fails:
16
+ - Write `ETCH_SUMMARY: <what failed, e.g. "3 tests failed in test_auth.py — TypeError on line 42">`
17
+ - Include the relevant error output so the fixer can diagnose it
18
+ - Write `ETCH_ISSUES_FOUND`
19
+
20
+ Do not fix anything — only run and report.
@@ -1 +0,0 @@
1
- __version__ = "0.3.2"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes