@misterhuydo/sentinel 1.6.1 → 1.6.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  {
2
- "message": "Auto-checkpoint at 2026-04-24T10:58:52.087Z",
3
- "checkpoint_at": "2026-04-24T10:58:52.089Z",
2
+ "message": "Auto-checkpoint at 2026-04-24T11:12:56.361Z",
3
+ "checkpoint_at": "2026-04-24T11:12:56.362Z",
4
4
  "active_files": [
5
5
  "J:\\Projects\\Sentinel\\cli\\bin\\sentinel.js",
6
6
  "J:\\Projects\\Sentinel\\cli\\lib\\test.js",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@misterhuydo/sentinel",
3
- "version": "1.6.1",
3
+ "version": "1.6.2",
4
4
  "description": "Sentinel — Autonomous DevOps Agent installer and manager",
5
5
  "bin": {
6
6
  "sentinel": "./bin/sentinel.js"
@@ -1 +1 @@
1
- __version__ = "1.6.1"
1
+ __version__ = "1.6.2"
@@ -177,6 +177,14 @@ def _build_prompt(
177
177
  "2. Use your available tools to explore the codebase and identify the root cause.",
178
178
  " You can read across ALL listed repos — use that visibility to follow type",
179
179
  " definitions, callers, or shared library code that may be involved.",
180
+ "",
181
+ "CRITICAL — fresh reads only",
182
+ " Before you write ANY diff line, use the Read tool to view the CURRENT content",
183
+ " of every file you intend to modify. Do NOT rely on prior memory of the file",
184
+ " from earlier turns in this conversation: the working tree may have been",
185
+ " updated by a previous Sentinel fix, a human commit, or a `git pull` that ran",
186
+ " moments ago. A patch generated from stale memory will fail dry-run.",
187
+ "",
180
188
  f"3. {marker_instruction}",
181
189
  "4. Consider all possible fix approaches. For each, weigh:",
182
190
  " - Confidence: is this definitely the root cause?",
@@ -492,11 +500,28 @@ def generate_fix(
492
500
  except Exception as _e:
493
501
  logger.debug("fix_engine: git log check failed: %s", _e)
494
502
 
495
- # Pull saved session id (per project) so Claude continues an existing session.
503
+ # Pre-pull every project repo so Claude reads up-to-date content. This
504
+ # closes the window where a previous sentinel commit (or human commit)
505
+ # has landed on remote but the local working tree hasn't been refreshed.
506
+ if all_repos:
507
+ from .git_manager import pull_all_repos
508
+ pull_results = pull_all_repos(all_repos)
509
+ n_failed = sum(1 for ok in pull_results.values() if not ok)
510
+ if n_failed:
511
+ logger.warning(
512
+ "fix_engine: pre-fix pull failed for %d/%d repo(s): %s",
513
+ n_failed, len(pull_results),
514
+ [n for n, ok in pull_results.items() if not ok][:5],
515
+ )
516
+
517
+ # Pull saved session id — keyed per (project, target_repo) so a prior fix
518
+ # targeting repo A doesn't contaminate Claude's memory for a fix targeting
519
+ # repo B (their files differ; resumed memory leads to stale-context patches).
520
+ session_key = f"{getattr(cfg, 'project_name', '') or '_default'}/{repo.repo_name}"
496
521
  session_id = ""
497
- if store is not None and getattr(cfg, "project_name", ""):
522
+ if store is not None:
498
523
  try:
499
- saved = store.get_claude_session(cfg.project_name)
524
+ saved = store.get_claude_session(session_key)
500
525
  if saved:
501
526
  session_id = saved.get("session_id", "") or ""
502
527
  except Exception as _se:
@@ -506,8 +531,8 @@ def generate_fix(
506
531
  claude_logs_dir = Path(cfg.workspace_dir).parent / "logs" / "claude"
507
532
  claude_log_path = claude_logs_dir / f"{event.fingerprint[:8]}-{ts}.log"
508
533
  logger.info(
509
- "Invoking Claude Code for %s (fp=%s) — log: %s — resume=%s",
510
- event.source, event.fingerprint, claude_log_path,
534
+ "Invoking Claude Code for %s (fp=%s, route=%s) — log: %s — resume=%s",
535
+ event.source, event.fingerprint, repo.repo_name, claude_log_path,
511
536
  session_id[:8] if session_id else "(new)",
512
537
  )
513
538
 
@@ -586,15 +611,16 @@ def generate_fix(
586
611
 
587
612
  # Persist the session id (and cost delta) regardless of fix outcome — even
588
613
  # NEEDS_HUMAN / SKIP turns count toward the conversation history.
589
- if store is not None and getattr(cfg, "project_name", "") and parsed["session_id"]:
614
+ # Same composite key as the read above so per-route memory stays separated.
615
+ if store is not None and parsed["session_id"]:
590
616
  try:
591
617
  store.set_claude_session(
592
- cfg.project_name, parsed["session_id"],
618
+ session_key, parsed["session_id"],
593
619
  cost_delta=parsed["total_cost_usd"],
594
620
  )
595
621
  logger.info(
596
- "fix_engine: saved claude session %s for project %s (turn cost $%.4f)",
597
- parsed["session_id"][:8], cfg.project_name, parsed["total_cost_usd"],
622
+ "fix_engine: saved claude session %s for %s (turn cost $%.4f)",
623
+ parsed["session_id"][:8], session_key, parsed["total_cost_usd"],
598
624
  )
599
625
  except Exception as _se:
600
626
  logger.warning("fix_engine: set_claude_session failed: %s", _se)
@@ -192,6 +192,38 @@ def maven_compile_check(local_path: str, timeout: int = 300) -> tuple[bool, str]
192
192
  return r.returncode == 0, output
193
193
 
194
194
 
195
def pull_all_repos(repos: list[RepoConfig]) -> dict[str, bool]:
    """Refresh every listed repo: drop local edits, then ``git pull --rebase``.

    Runs just before Claude is invoked so the working trees it reads are
    current. Nothing in here raises — a per-repo failure is logged as a
    warning and recorded in the returned mapping, leaving the caller free to
    continue (the fix engine proceeds anyway; a stale-on-disk patch is caught
    later by the target-repo dry-run in apply_and_commit_multi).

    Returns:
        Mapping of repo name -> True when the pull completed cleanly.
    """
    outcome: dict[str, bool] = {}
    for repo_cfg in repos:
        name = repo_cfg.repo_name
        if not repo_cfg.local_path:
            # No checkout on disk for this repo — nothing to refresh.
            outcome[name] = False
            continue
        git_env = _git_env(repo_cfg)
        try:
            # Discard uncommitted edits first so the rebase pull is clean.
            _git(["checkout", "."], cwd=repo_cfg.local_path, env=git_env)
            pulled = _git(
                ["pull", "--rebase", "origin", repo_cfg.branch],
                cwd=repo_cfg.local_path, env=git_env,
            )
            succeeded = pulled.returncode == 0
            outcome[name] = succeeded
            if not succeeded:
                logger.warning(
                    "pull_all_repos: %s pull failed: %s",
                    name, pulled.stderr.strip()[:200],
                )
        except Exception as e:
            # Best-effort by design: record the failure, never propagate.
            logger.warning("pull_all_repos: %s exception: %s", name, e)
            outcome[name] = False
    return outcome
225
+
226
+
195
227
  def _check_protected_paths(patch_path: Path) -> bool:
196
228
  text = patch_path.read_text(encoding="utf-8", errors="replace")
197
229
  for line in text.splitlines():
@@ -0,0 +1,94 @@
1
+ """
2
+ test_pull_all_repos.py — Tests for the pre-fix git pull helper.
3
+
4
+ pull_all_repos() runs `git checkout . && git pull --rebase` on every project repo
5
+ so Claude reads up-to-date content. Per-repo failures are non-fatal — they get
6
+ logged as warnings and recorded in the result dict, not raised.
7
+ """
8
+ from pathlib import Path
9
+ from unittest.mock import patch
10
+ from types import SimpleNamespace
11
+
12
+ from sentinel import git_manager
13
+ from sentinel.config_loader import RepoConfig
14
+
15
+
16
+ def _ok():
17
+ return SimpleNamespace(returncode=0, stdout="", stderr="")
18
+
19
+
20
+ def _fail(msg="pull rejected"):
21
+ return SimpleNamespace(returncode=1, stdout="", stderr=msg)
22
+
23
+
24
def _mk_repo(tmp_path: Path, name: str) -> RepoConfig:
    """Create a real directory under *tmp_path* and wrap it in a RepoConfig."""
    repo_dir = tmp_path.joinpath("repos", name)
    repo_dir.mkdir(parents=True, exist_ok=True)
    return RepoConfig(repo_name=name, local_path=str(repo_dir), branch="main")
28
+
29
+
30
def test_empty_repo_list_returns_empty(tmp_path):
    """An empty repo list yields an empty result mapping — no git activity."""
    result = git_manager.pull_all_repos([])
    assert result == {}
32
+
33
+
34
def test_single_successful_repo(tmp_path):
    """A repo whose git commands all succeed is reported as True."""
    target = _mk_repo(tmp_path, "r1")
    with patch.object(git_manager, "_git", return_value=_ok()):
        outcome = git_manager.pull_all_repos([target])
    assert outcome == {"r1": True}
39
+
40
+
41
def test_single_failing_repo_recorded_not_raised(tmp_path):
    """A failed pull is recorded as False in the result dict, never raised."""
    target = _mk_repo(tmp_path, "r1")
    with patch.object(git_manager, "_git", return_value=_fail()):
        outcome = git_manager.pull_all_repos([target])
    assert outcome == {"r1": False}
46
+
47
+
48
def test_mixed_results_per_repo(tmp_path):
    """Success/failure is tracked independently for each repo."""
    good_one = _mk_repo(tmp_path, "ok-repo")
    broken = _mk_repo(tmp_path, "bad-repo")
    good_two = _mk_repo(tmp_path, "ok-repo-2")

    # pull_all_repos issues two _git calls per repo (checkout, then pull);
    # only the pull that runs inside 'bad-repo' is made to fail.
    seen = []

    def fake_git(args, cwd, env=None, timeout=git_manager.GIT_TIMEOUT):
        seen.append((args[0], cwd))
        if args[0] == "pull" and "bad-repo" in cwd:
            return _fail("conflict")
        return _ok()

    with patch.object(git_manager, "_git", side_effect=fake_git):
        outcome = git_manager.pull_all_repos([good_one, broken, good_two])

    assert outcome == {"ok-repo": True, "bad-repo": False, "ok-repo-2": True}
65
+
66
+
67
def test_repo_with_empty_local_path_is_skipped(tmp_path):
    """A repo with no local checkout is marked False without invoking git."""
    ghost = RepoConfig(repo_name="ghost", local_path="", branch="main")
    with patch.object(git_manager, "_git") as fake_git:
        outcome = git_manager.pull_all_repos([ghost])
    assert outcome == {"ghost": False}
    fake_git.assert_not_called()
73
+
74
+
75
def test_subprocess_exception_caught_and_recorded(tmp_path):
    """Even an unexpected exception from git is swallowed and recorded."""
    target = _mk_repo(tmp_path, "r1")
    boom = RuntimeError("git binary missing")
    with patch.object(git_manager, "_git", side_effect=boom):
        outcome = git_manager.pull_all_repos([target])
    assert outcome == {"r1": False}
81
+
82
+
83
def test_calls_checkout_then_pull_per_repo(tmp_path):
    """Order matters: checkout (discard local edits) must precede pull."""
    target = _mk_repo(tmp_path, "r1")
    commands = []

    def fake_git(args, cwd, env=None, timeout=git_manager.GIT_TIMEOUT):
        commands.append(args[0])
        return _ok()

    with patch.object(git_manager, "_git", side_effect=fake_git):
        git_manager.pull_all_repos([target])

    assert commands[0] == "checkout"
    assert "pull" in commands
    assert commands.index("checkout") < commands.index("pull")