@rm0nroe/coach-claw 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +311 -0
- package/coach/README.md +99 -0
- package/coach/bin/aggregate_facets.py +274 -0
- package/coach/bin/analyze.py +678 -0
- package/coach/bin/bank.py +247 -0
- package/coach/bin/banner_themes.py +645 -0
- package/coach/bin/coach_paths.py +33 -0
- package/coach/bin/coexistence_check.py +129 -0
- package/coach/bin/configure.py +245 -0
- package/coach/bin/cron_check.py +81 -0
- package/coach/bin/default_statusline.py +135 -0
- package/coach/bin/doctor.py +663 -0
- package/coach/bin/insights-llm.sh +264 -0
- package/coach/bin/insights.sh +163 -0
- package/coach/bin/insights_window.py +111 -0
- package/coach/bin/marker_io.py +154 -0
- package/coach/bin/merge.py +671 -0
- package/coach/bin/redact.py +86 -0
- package/coach/bin/render_env.py +148 -0
- package/coach/bin/reward_hints.py +87 -0
- package/coach/bin/run-insights.sh +20 -0
- package/coach/bin/run_with_lock.py +85 -0
- package/coach/bin/scoring.py +260 -0
- package/coach/bin/skill_inventory.py +215 -0
- package/coach/bin/stats.py +459 -0
- package/coach/bin/status.py +293 -0
- package/coach/bin/statusline_self_patch.py +205 -0
- package/coach/bin/statusline_variants.py +146 -0
- package/coach/bin/statusline_wrap.py +244 -0
- package/coach/bin/statusline_wrap_action.py +460 -0
- package/coach/bin/switch_to_plugin.py +256 -0
- package/coach/bin/themes.py +256 -0
- package/coach/bin/user_config.py +176 -0
- package/coach/bin/xp_accounting.py +98 -0
- package/coach/changelog.md +4 -0
- package/coach/default-statusline-command.sh +19 -0
- package/coach/default-statusline-wrap-command.sh +15 -0
- package/coach/profile.yaml +37 -0
- package/coach/tests/conftest.py +13 -0
- package/coach/tests/test_aggregate_facets.py +379 -0
- package/coach/tests/test_analyze_aggregate.py +153 -0
- package/coach/tests/test_analyze_redaction.py +105 -0
- package/coach/tests/test_analyze_strengths.py +165 -0
- package/coach/tests/test_bank_atomic_write.py +61 -0
- package/coach/tests/test_bank_concurrency.py +126 -0
- package/coach/tests/test_banner_themes.py +981 -0
- package/coach/tests/test_celebrate_dedup.py +409 -0
- package/coach/tests/test_coach_paths.py +50 -0
- package/coach/tests/test_coexistence_check.py +128 -0
- package/coach/tests/test_configure.py +258 -0
- package/coach/tests/test_cron_check.py +118 -0
- package/coach/tests/test_cron_nudge_hook.py +134 -0
- package/coach/tests/test_detection_parity.py +105 -0
- package/coach/tests/test_doctor.py +595 -0
- package/coach/tests/test_hook_bespoke_dispatch.py +288 -0
- package/coach/tests/test_hook_module_resolution.py +116 -0
- package/coach/tests/test_hook_relevance.py +996 -0
- package/coach/tests/test_hook_render_env.py +364 -0
- package/coach/tests/test_hook_session_id_guard.py +160 -0
- package/coach/tests/test_insights_llm.py +759 -0
- package/coach/tests/test_insights_llm_venv_path.py +109 -0
- package/coach/tests/test_insights_window.py +237 -0
- package/coach/tests/test_install.py +1150 -0
- package/coach/tests/test_install_pyyaml_fallback.py +142 -0
- package/coach/tests/test_marker_consumption.py +167 -0
- package/coach/tests/test_marker_writer_locking.py +305 -0
- package/coach/tests/test_merge.py +413 -0
- package/coach/tests/test_no_broken_mktemp.py +90 -0
- package/coach/tests/test_render_env.py +137 -0
- package/coach/tests/test_render_env_glyphs.py +119 -0
- package/coach/tests/test_reward_hints.py +59 -0
- package/coach/tests/test_scoring.py +147 -0
- package/coach/tests/test_session_start_weekly_trigger.py +92 -0
- package/coach/tests/test_skill_inventory.py +368 -0
- package/coach/tests/test_stats_hybrid.py +142 -0
- package/coach/tests/test_status_accounting.py +41 -0
- package/coach/tests/test_statusline_failsafe.py +70 -0
- package/coach/tests/test_statusline_self_patch.py +261 -0
- package/coach/tests/test_statusline_variants.py +110 -0
- package/coach/tests/test_statusline_wrap.py +196 -0
- package/coach/tests/test_statusline_wrap_action.py +408 -0
- package/coach/tests/test_switch_to_plugin.py +360 -0
- package/coach/tests/test_themes.py +104 -0
- package/coach/tests/test_user_config.py +160 -0
- package/coach/tests/test_wrap_announce_hook.py +130 -0
- package/coach/tests/test_xp_accounting.py +55 -0
- package/hooks/coach-session-start.py +536 -0
- package/hooks/coach-user-prompt.py +2288 -0
- package/install-launchd.sh +102 -0
- package/install.sh +597 -0
- package/launchd/com.local.claude-coach.plist.template +34 -0
- package/launchd/run-insights.sh +20 -0
- package/npm/coach-claw.js +259 -0
- package/package.json +52 -0
- package/requirements.txt +11 -0
- package/settings-snippet.json +31 -0
- package/skills/coach/SKILL.md +107 -0
- package/skills/coach-insights/SKILL.md +78 -0
- package/skills/config/SKILL.md +149 -0
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
"""Regression test for the PyYAML preflight fallback chain in install.sh.
|
|
2
|
+
|
|
3
|
+
A previous round of the installer recommended `brew install pyyaml` as
|
|
4
|
+
the Homebrew-blessed recovery path. The formula does not exist in
|
|
5
|
+
Homebrew core (`brew info pyyaml` → `Error: No available formula`). The
|
|
6
|
+
strategy and the error message that included it were dead code — the
|
|
7
|
+
recovery instructions pointed users at a command that fails.
|
|
8
|
+
|
|
9
|
+
These tests pin the current shape:
|
|
10
|
+
|
|
11
|
+
• install.sh does not mention `brew install pyyaml` anywhere in source
|
|
12
|
+
• when both legitimate pip strategies fail, the recovery message
|
|
13
|
+
surfaces only real options (`pip --break-system-packages` and venv)
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
from __future__ import annotations
|
|
17
|
+
|
|
18
|
+
import os
|
|
19
|
+
import shutil
|
|
20
|
+
import subprocess
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
import pytest
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _git_env() -> dict:
|
|
27
|
+
return {
|
|
28
|
+
"GIT_AUTHOR_NAME": "Coach Tests",
|
|
29
|
+
"GIT_AUTHOR_EMAIL": "coach-tests@example.invalid",
|
|
30
|
+
"GIT_COMMITTER_NAME": "Coach Tests",
|
|
31
|
+
"GIT_COMMITTER_EMAIL": "coach-tests@example.invalid",
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def test_install_sh_does_not_reference_dead_brew_pyyaml() -> None:
    """install.sh source must never point users at `brew install pyyaml`;
    no such formula exists in Homebrew core."""
    repo = Path(__file__).resolve().parents[2]
    installer = repo / "install.sh"
    if not installer.exists():
        pytest.skip("install.sh is only present in the shareable repo checkout")

    assert "brew install pyyaml" not in installer.read_text(), (
        "install.sh references `brew install pyyaml`, but no such "
        "formula exists in Homebrew core. Use `pip install --user "
        "--break-system-packages pyyaml` or a venv instead."
    )
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def test_install_pyyaml_fallback_message_excludes_dead_brew_path(
    tmp_path: Path,
) -> None:
    """Run install.sh with python3 PATH-shimmed to simulate missing
    PyYAML and refusing pip. The installer must fail with exit != 0
    and an error message that:

    • shows BOTH legitimate strategies were tried (pip --user, then
      pip --user --break-system-packages),
    • offers --break-system-packages and venv as manual recovery,
    • does NOT mention `brew install pyyaml` or claim Homebrew Python
      gets a special blessed strategy.
    """
    repo = Path(__file__).resolve().parents[2]
    if not (repo / "install.sh").exists():
        pytest.skip("install.sh is only present in the shareable repo checkout")

    host_python = shutil.which("python3")
    assert host_python, "real python3 must be on PATH for the shim to delegate"

    # The shim fails `python3 -c "... import yaml ..."` and `python3 -m pip`
    # with exit 1, and hands every other invocation to the real interpreter
    # (version checks via heredoc, etc.).
    fake_bin = tmp_path / "bin"
    fake_bin.mkdir()
    shim_path = fake_bin / "python3"
    shim_path.write_text(
        """#!/bin/bash
case "$1" in
  -c)
    case "$2" in
      *"import yaml"*) exit 1 ;;
    esac
    ;;
  -m)
    case "$2" in
      pip) exit 1 ;;
    esac
    ;;
esac
exec "$COACH_REAL_PYTHON3" "$@"
"""
    )
    shim_path.chmod(0o755)

    env = os.environ.copy()
    env.update(
        {
            "CLAUDE_DIR": str(tmp_path / "claude_dir"),
            "PATH": f"{fake_bin}{os.pathsep}{env['PATH']}",
            "COACH_REAL_PYTHON3": host_python,
            **_git_env(),
        }
    )

    result = subprocess.run(
        ["bash", str(repo / "install.sh")],
        cwd=repo,
        env=env,
        text=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        timeout=30,
    )
    out = result.stdout + result.stderr

    assert result.returncode != 0, (
        f"installer succeeded with pip stubbed to fail; "
        f"preflight should have aborted:\n{out}"
    )

    # Both legitimate strategies must appear in the transcript before the
    # final error fires.
    assert "pip install --user pyyaml" in out, (
        f"strategy 1 (pip --user) was not attempted:\n{out}"
    )
    assert "pip install --user --break-system-packages pyyaml" in out, (
        f"strategy 2 (pip --break-system-packages) was not attempted:\n{out}"
    )

    # The dead Homebrew path must be gone from both the strategy list and
    # the recovery instructions.
    assert "brew install pyyaml" not in out, (
        f"installer output references the dead `brew install pyyaml` "
        f"recovery path:\n{out}"
    )
    assert "Homebrew Python detected" not in out, (
        f"installer announces a Homebrew strategy that no longer exists:\n{out}"
    )

    # Actionable manual alternatives must survive in the recovery message.
    assert "--break-system-packages" in out
    assert "venv" in out
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
"""coach-user-prompt.py: per-session marker consumption.
|
|
2
|
+
|
|
3
|
+
Regression guard for BACKLOG P2 — pending marker files (`.pending_levelup`
|
|
4
|
+
etc.) used to be read-and-deleted by whichever UserPromptSubmit hook fired
|
|
5
|
+
first, so a target Claude Code session could lose its celebration banner
|
|
6
|
+
to an unrelated concurrent session. The fix tracks `consumed_by` inside
|
|
7
|
+
the marker JSON + a 24h TTL, so each session sees the marker once and
|
|
8
|
+
abandoned markers don't accumulate.
|
|
9
|
+
"""
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import importlib.util
|
|
13
|
+
import json
|
|
14
|
+
from datetime import datetime, timedelta, timezone
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
import pytest
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@pytest.fixture(scope="module")
def cup():
    """Load coach-user-prompt.py as a module."""
    checkout = Path(__file__).resolve().parents[2] / "hooks" / "coach-user-prompt.py"
    # Fall back to the installed copy when not running from a repo checkout.
    hook = checkout if checkout.exists() else Path.home() / ".claude" / "hooks" / "coach-user-prompt.py"
    if not hook.exists():
        pytest.skip(f"hook not installed at {hook}")
    spec = importlib.util.spec_from_file_location("cup_marker_test", str(hook))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _write_marker(path: Path, payload: dict) -> None:
|
|
34
|
+
path.parent.mkdir(parents=True, exist_ok=True)
|
|
35
|
+
path.write_text(json.dumps(payload))
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def test_two_sessions_each_see_marker_once(cup, tmp_path):
    """Two distinct sessions both render the celebration; the marker stays
    on disk with each consumer recorded."""
    marker = tmp_path / ".pending_graduation"
    now = datetime.now(timezone.utc)
    _write_marker(
        marker,
        {
            "graduations": [{"id": "x", "name": "X"}],
            "created_at": now.isoformat(),
            "consumed_by": [],
        },
    )

    for session in ("session-A", "session-B"):
        seen = cup._read_and_consume(marker, session, now)
        assert seen is not None
        assert seen["graduations"] == [{"id": "x", "name": "X"}]

    # Marker not deleted; both sessions are recorded as consumers.
    persisted = json.loads(marker.read_text())
    assert sorted(persisted["consumed_by"]) == ["session-A", "session-B"]
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def test_same_session_polling_twice_renders_once(cup, tmp_path):
    """A session that polls twice only renders the banner on the first poll."""
    marker = tmp_path / ".pending_levelup"
    now = datetime.now(timezone.utc)
    _write_marker(
        marker,
        {
            "from": "Drafter", "from_idx": 0,
            "to": "Builder", "to_idx": 1,
            "xp_at_levelup": 50,
            "created_at": now.isoformat(),
            "consumed_by": [],
        },
    )

    first = cup._read_and_consume(marker, "session-A", now)
    assert first is not None
    assert first["to"] == "Builder"

    assert cup._read_and_consume(marker, "session-A", now) is None, (
        "same session should not re-render the same marker"
    )
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def test_marker_older_than_ttl_is_cleaned_up(cup, tmp_path):
    """Markers past MARKER_TTL_HOURS are unlinked instead of rendered."""
    marker = tmp_path / ".pending_regression"
    stale = datetime.now(timezone.utc) - timedelta(hours=cup.MARKER_TTL_HOURS + 1)
    _write_marker(
        marker,
        {
            "regressions": [{"id": "y", "name": "Y"}],
            "created_at": stale.isoformat(),
            "consumed_by": [],
        },
    )

    result = cup._read_and_consume(marker, "session-A", datetime.now(timezone.utc))
    assert result is None
    assert not marker.exists(), "expired marker should be unlinked on read"
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def test_legacy_marker_without_created_at_is_treated_as_fresh(cup, tmp_path):
    """v0.1 markers lack created_at / consumed_by. The reader must accept
    them on first encounter and stamp both fields."""
    marker = tmp_path / ".pending_streak_rewards"
    _write_marker(marker, {"rewards": [{"id": "z", "name": "Z", "streak": 2}]})

    result = cup._read_and_consume(marker, "session-A", datetime.now(timezone.utc))
    assert result is not None
    assert result["rewards"] == [{"id": "z", "name": "Z", "streak": 2}]

    # The first read back-fills the bookkeeping fields.
    stamped = json.loads(marker.read_text())
    assert "created_at" in stamped
    assert stamped["consumed_by"] == ["session-A"]
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def test_corrupt_marker_json_is_swallowed(cup, tmp_path):
    """Unparseable marker JSON is dropped silently, not raised into the hook."""
    marker = tmp_path / ".pending_graduation"
    marker.parent.mkdir(parents=True, exist_ok=True)
    marker.write_text("{not json")

    result = cup._read_and_consume(marker, "session-A", datetime.now(timezone.utc))
    assert result is None
    # Removing the corrupt file prevents an endless poll/fail loop.
    assert not marker.exists()
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
def test_consumed_by_overflow_drops_oldest(cup, tmp_path):
    """consumed_by is a bounded FIFO: at MARKER_CONSUMED_BY_CAP the oldest
    entry is evicted to admit the newest session."""
    marker = tmp_path / ".pending_levelup"
    now = datetime.now(timezone.utc)
    cap = cup.MARKER_CONSUMED_BY_CAP
    _write_marker(
        marker,
        {
            "from": "X", "from_idx": 0, "to": "Y", "to_idx": 1, "xp_at_levelup": 1,
            "created_at": now.isoformat(),
            "consumed_by": [f"old-session-{i}" for i in range(cap)],
        },
    )

    assert cup._read_and_consume(marker, "new-session", now) is not None

    consumers = json.loads(marker.read_text())["consumed_by"]
    assert len(consumers) == cap
    assert "new-session" in consumers
    assert consumers[0] != "old-session-0"  # oldest dropped
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def test_missing_session_key_uses_fallback(cup, tmp_path):
    """When both transcript_path and session_id are absent the hook passes
    None; the reader substitutes 'unknown' so dedup still works instead of
    the marker re-firing on every prompt."""
    marker = tmp_path / ".pending_graduation"
    now = datetime.now(timezone.utc)
    _write_marker(
        marker,
        {
            "graduations": [{"id": "g", "name": "G"}],
            "created_at": now.isoformat(),
            "consumed_by": [],
        },
    )

    assert cup._read_and_consume(marker, None, now) is not None
    assert cup._read_and_consume(marker, None, now) is None, (
        "successive None-keyed polls should still dedupe"
    )

    assert json.loads(marker.read_text())["consumed_by"] == ["unknown"]
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def test_missing_marker_returns_none(cup, tmp_path):
    """No marker file on disk → no celebration and no error."""
    absent = tmp_path / ".pending_graduation"  # never created
    assert cup._read_and_consume(absent, "session-A", datetime.now(timezone.utc)) is None
|
|
@@ -0,0 +1,305 @@
|
|
|
1
|
+
"""Marker writer locking — guards against the v0.2.0 race where readers
|
|
2
|
+
acquired a sidecar flock but writers in `merge.py` / `stats.py` did not.
|
|
3
|
+
|
|
4
|
+
Race the test reproduces:
|
|
5
|
+
1. Marker exists with [old], consumed_by=[].
|
|
6
|
+
2. Reader (no lock yet) reads → snapshot of [old].
|
|
7
|
+
3. /coach-insights writer commits new → marker becomes [old, new].
|
|
8
|
+
4. Reader replaces with stale snapshot → marker drops back to [old].
|
|
9
|
+
5. The "new" event is silently lost.
|
|
10
|
+
|
|
11
|
+
Fix under test: writers acquire the same `<marker>.lock` flock the reader
|
|
12
|
+
takes in `_read_and_consume()`, so the read-modify-write windows are
|
|
13
|
+
serialized and a stale-snapshot reader can't clobber a writer's commit.
|
|
14
|
+
"""
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
import fcntl
|
|
18
|
+
import importlib.util
|
|
19
|
+
import json
|
|
20
|
+
import threading
|
|
21
|
+
import time
|
|
22
|
+
from datetime import datetime, timezone
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
|
|
25
|
+
import pytest
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@pytest.fixture
def merge_mod(tmp_path, monkeypatch):
    """Load coach/bin/merge.py with marker paths redirected to tmp_path."""
    source = Path(__file__).resolve().parents[1] / "bin" / "merge.py"
    spec = importlib.util.spec_from_file_location(
        f"merge_locking_{tmp_path.name}", str(source)
    )
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # Redirect every marker constant into the test sandbox.
    for attr, filename in (
        ("GRADUATION_MARKER", ".pending_graduation"),
        ("REGRESSION_MARKER", ".pending_regression"),
        ("STREAK_REWARD_MARKER", ".pending_streak_rewards"),
    ):
        monkeypatch.setattr(module, attr, tmp_path / filename)
    return module
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
@pytest.fixture
def stats_mod(tmp_path, monkeypatch):
    """Load coach/bin/stats.py with marker paths redirected to tmp_path."""
    source = Path(__file__).resolve().parents[1] / "bin" / "stats.py"
    spec = importlib.util.spec_from_file_location(
        f"stats_locking_{tmp_path.name}", str(source)
    )
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    monkeypatch.setattr(module, "LEVELUP_MARKER", tmp_path / ".pending_levelup")
    monkeypatch.setattr(module, "LEVEL_STATE", tmp_path / ".level_state.json")
    return module
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
@pytest.fixture
def cup_mod(tmp_path, monkeypatch):
    """Load hooks/coach-user-prompt.py with tip state redirected to tmp_path."""
    source = Path(__file__).resolve().parents[2] / "hooks" / "coach-user-prompt.py"
    spec = importlib.util.spec_from_file_location(
        f"cup_locking_{tmp_path.name}", str(source)
    )
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    monkeypatch.setattr(module, "TIP_STATE", tmp_path / ".tip_state.json")
    return module
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def _hold_lock(lock_path: Path):
|
|
71
|
+
"""Open and exclusively lock a sidecar lockfile. Returns the file
|
|
72
|
+
handle so the caller can release it explicitly."""
|
|
73
|
+
lock_path.parent.mkdir(parents=True, exist_ok=True)
|
|
74
|
+
fh = open(lock_path, "w")
|
|
75
|
+
fcntl.flock(fh.fileno(), fcntl.LOCK_EX)
|
|
76
|
+
return fh
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def test_graduation_writer_blocks_when_reader_lock_held(merge_mod, tmp_path):
    """The graduation marker writer must wait on the same sidecar flock
    the reader uses. An unlocked writer would finish instantly, letting a
    stale-snapshot reader clobber its write."""
    marker = tmp_path / ".pending_graduation"
    holder = _hold_lock(tmp_path / ".pending_graduation.lock")
    finished = threading.Event()

    def run_writer():
        merge_mod._append_graduation_marker(
            [{"id": "new", "name": "New"}],
            datetime.now(timezone.utc),
        )
        finished.set()

    worker = threading.Thread(target=run_writer, daemon=True)
    worker.start()

    # While we hold the lock the writer must stay parked.
    assert not finished.wait(timeout=0.3), (
        "writer should be blocked on the marker sidecar lock"
    )

    fcntl.flock(holder.fileno(), fcntl.LOCK_UN)
    holder.close()

    assert finished.wait(timeout=2.0), (
        "writer should complete promptly after lock is released"
    )
    worker.join(timeout=1.0)

    written = json.loads(marker.read_text())
    assert written["graduations"] == [{"id": "new", "name": "New"}]
    assert written["consumed_by"] == []  # fresh write resets
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def test_regression_writer_takes_lock(merge_mod, tmp_path):
    """Same sidecar-lock discipline for the regression marker writer."""
    marker = tmp_path / ".pending_regression"
    holder = _hold_lock(tmp_path / ".pending_regression.lock")
    finished = threading.Event()

    def run_writer():
        merge_mod._append_regression_marker(
            [{"id": "r1", "name": "R1"}],
            datetime.now(timezone.utc),
        )
        finished.set()

    worker = threading.Thread(target=run_writer, daemon=True)
    worker.start()
    assert not finished.wait(timeout=0.3)

    fcntl.flock(holder.fileno(), fcntl.LOCK_UN)
    holder.close()
    assert finished.wait(timeout=2.0)
    worker.join(timeout=1.0)

    assert json.loads(marker.read_text())["regressions"] == [{"id": "r1", "name": "R1"}]
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def test_streak_reward_writer_takes_lock(merge_mod, tmp_path):
    """Same sidecar-lock discipline for the streak-reward marker writer."""
    marker = tmp_path / ".pending_streak_rewards"
    holder = _hold_lock(tmp_path / ".pending_streak_rewards.lock")
    finished = threading.Event()

    def run_writer():
        merge_mod._append_streak_reward_marker(
            [{"id": "s1", "name": "S1", "streak": 2}],
            datetime.now(timezone.utc),
        )
        finished.set()

    worker = threading.Thread(target=run_writer, daemon=True)
    worker.start()
    assert not finished.wait(timeout=0.3)

    fcntl.flock(holder.fileno(), fcntl.LOCK_UN)
    holder.close()
    assert finished.wait(timeout=2.0)
    worker.join(timeout=1.0)

    assert json.loads(marker.read_text())["rewards"] == [
        {"id": "s1", "name": "S1", "streak": 2}
    ]
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def test_levelup_writer_takes_lock(stats_mod, tmp_path):
    """stats.py's level-up check must lock around its marker write so a
    stale-snapshot reader can't drop a fresh celebration."""
    marker = tmp_path / ".pending_levelup"
    holder = _hold_lock(tmp_path / ".pending_levelup.lock")
    finished = threading.Event()

    def run_writer():
        # First-ever level-up path: last_state is None, current_idx > 0.
        stats_mod._check_levelup(
            current_idx=2,
            current_name=stats_mod.LEVELS[2][1],
            xp=120,
        )
        finished.set()

    worker = threading.Thread(target=run_writer, daemon=True)
    worker.start()
    assert not finished.wait(timeout=0.3), (
        "level-up writer should block on marker sidecar lock"
    )

    fcntl.flock(holder.fileno(), fcntl.LOCK_UN)
    holder.close()

    assert finished.wait(timeout=2.0)
    worker.join(timeout=1.0)

    written = json.loads(marker.read_text())
    assert written["to_idx"] == 2
    assert written["xp_at_levelup"] == 120
    assert written["consumed_by"] == []
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def test_level_state_writer_takes_lock(stats_mod, tmp_path):
    """The high-water .level_state.json write must serialize as well;
    otherwise concurrent statusline renders could both observe a stale or
    missing state file and duplicate the same level-up marker."""
    holder = _hold_lock(tmp_path / ".level_state.json.lock")
    finished = threading.Event()

    def run_writer():
        stats_mod._check_levelup(
            current_idx=0,
            current_name=stats_mod.LEVELS[0][1],
            xp=0,
        )
        finished.set()

    worker = threading.Thread(target=run_writer, daemon=True)
    worker.start()
    assert not finished.wait(timeout=0.3), (
        "level-state writer should block on the level-state sidecar lock"
    )

    fcntl.flock(holder.fileno(), fcntl.LOCK_UN)
    holder.close()

    assert finished.wait(timeout=2.0)
    worker.join(timeout=1.0)
    assert json.loads((tmp_path / ".level_state.json").read_text()) == {"level_idx": 0}
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
def test_tip_state_writer_takes_lock(cup_mod, tmp_path):
    """The user-prompt hook's tip-state writer honours its sidecar lock."""
    holder = _hold_lock(tmp_path / ".tip_state.json.lock")
    finished = threading.Event()

    def run_writer():
        cup_mod._save_tip_state({"last_global_fire": "2026-05-06T00:00:00+00:00"})
        finished.set()

    worker = threading.Thread(target=run_writer, daemon=True)
    worker.start()
    assert not finished.wait(timeout=0.3), (
        "tip-state writer should block on the tip-state sidecar lock"
    )

    fcntl.flock(holder.fileno(), fcntl.LOCK_UN)
    holder.close()

    assert finished.wait(timeout=2.0)
    worker.join(timeout=1.0)

    saved = json.loads((tmp_path / ".tip_state.json").read_text())
    assert saved["last_global_fire"] == "2026-05-06T00:00:00+00:00"
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
def test_existing_entries_preserved_across_writes(merge_mod, tmp_path):
    """Sequential writer calls merge their entry lists rather than
    overwriting; consumed_by and created_at reset on each write so new
    entries still reach previously-consumed sessions."""
    marker = tmp_path / ".pending_graduation"
    now = datetime.now(timezone.utc)

    merge_mod._append_graduation_marker([{"id": "old", "name": "Old"}], now)
    before = json.loads(marker.read_text())
    assert [g["id"] for g in before["graduations"]] == ["old"]

    # Simulate a session having already consumed the marker.
    before["consumed_by"] = ["session-A"]
    marker.write_text(json.dumps(before))

    merge_mod._append_graduation_marker([{"id": "new", "name": "New"}], now)
    after = json.loads(marker.read_text())
    assert [g["id"] for g in after["graduations"]] == ["old", "new"]
    assert after["consumed_by"] == [], (
        "writer must reset consumed_by so already-consumed sessions still see "
        "the new entry on their next poll"
    )
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
def test_concurrent_writers_dont_lose_entries(merge_mod, tmp_path):
    """Stress test: 20 parallel _append_graduation_marker calls must all
    land. Unlocked writers would drop entries in the read-modify-write
    window; locked writers must not."""
    now = datetime.now(timezone.utc)

    def append(ident: str) -> None:
        merge_mod._append_graduation_marker([{"id": ident, "name": ident}], now)

    workers = [
        threading.Thread(target=append, args=(f"g{i}",), daemon=True)
        for i in range(20)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join(timeout=5.0)
        assert not worker.is_alive()

    written = json.loads((tmp_path / ".pending_graduation").read_text())
    ids = sorted(g["id"] for g in written["graduations"])
    expected = sorted(f"g{i}" for i in range(20))
    assert ids == expected, f"expected all 20 entries, got {ids}"
|