@rm0nroe/coach-claw 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +311 -0
- package/coach/README.md +99 -0
- package/coach/bin/aggregate_facets.py +274 -0
- package/coach/bin/analyze.py +678 -0
- package/coach/bin/bank.py +247 -0
- package/coach/bin/banner_themes.py +645 -0
- package/coach/bin/coach_paths.py +33 -0
- package/coach/bin/coexistence_check.py +129 -0
- package/coach/bin/configure.py +245 -0
- package/coach/bin/cron_check.py +81 -0
- package/coach/bin/default_statusline.py +135 -0
- package/coach/bin/doctor.py +663 -0
- package/coach/bin/insights-llm.sh +264 -0
- package/coach/bin/insights.sh +163 -0
- package/coach/bin/insights_window.py +111 -0
- package/coach/bin/marker_io.py +154 -0
- package/coach/bin/merge.py +671 -0
- package/coach/bin/redact.py +86 -0
- package/coach/bin/render_env.py +148 -0
- package/coach/bin/reward_hints.py +87 -0
- package/coach/bin/run-insights.sh +20 -0
- package/coach/bin/run_with_lock.py +85 -0
- package/coach/bin/scoring.py +260 -0
- package/coach/bin/skill_inventory.py +215 -0
- package/coach/bin/stats.py +459 -0
- package/coach/bin/status.py +293 -0
- package/coach/bin/statusline_self_patch.py +205 -0
- package/coach/bin/statusline_variants.py +146 -0
- package/coach/bin/statusline_wrap.py +244 -0
- package/coach/bin/statusline_wrap_action.py +460 -0
- package/coach/bin/switch_to_plugin.py +256 -0
- package/coach/bin/themes.py +256 -0
- package/coach/bin/user_config.py +176 -0
- package/coach/bin/xp_accounting.py +98 -0
- package/coach/changelog.md +4 -0
- package/coach/default-statusline-command.sh +19 -0
- package/coach/default-statusline-wrap-command.sh +15 -0
- package/coach/profile.yaml +37 -0
- package/coach/tests/conftest.py +13 -0
- package/coach/tests/test_aggregate_facets.py +379 -0
- package/coach/tests/test_analyze_aggregate.py +153 -0
- package/coach/tests/test_analyze_redaction.py +105 -0
- package/coach/tests/test_analyze_strengths.py +165 -0
- package/coach/tests/test_bank_atomic_write.py +61 -0
- package/coach/tests/test_bank_concurrency.py +126 -0
- package/coach/tests/test_banner_themes.py +981 -0
- package/coach/tests/test_celebrate_dedup.py +409 -0
- package/coach/tests/test_coach_paths.py +50 -0
- package/coach/tests/test_coexistence_check.py +128 -0
- package/coach/tests/test_configure.py +258 -0
- package/coach/tests/test_cron_check.py +118 -0
- package/coach/tests/test_cron_nudge_hook.py +134 -0
- package/coach/tests/test_detection_parity.py +105 -0
- package/coach/tests/test_doctor.py +595 -0
- package/coach/tests/test_hook_bespoke_dispatch.py +288 -0
- package/coach/tests/test_hook_module_resolution.py +116 -0
- package/coach/tests/test_hook_relevance.py +996 -0
- package/coach/tests/test_hook_render_env.py +364 -0
- package/coach/tests/test_hook_session_id_guard.py +160 -0
- package/coach/tests/test_insights_llm.py +759 -0
- package/coach/tests/test_insights_llm_venv_path.py +109 -0
- package/coach/tests/test_insights_window.py +237 -0
- package/coach/tests/test_install.py +1150 -0
- package/coach/tests/test_install_pyyaml_fallback.py +142 -0
- package/coach/tests/test_marker_consumption.py +167 -0
- package/coach/tests/test_marker_writer_locking.py +305 -0
- package/coach/tests/test_merge.py +413 -0
- package/coach/tests/test_no_broken_mktemp.py +90 -0
- package/coach/tests/test_render_env.py +137 -0
- package/coach/tests/test_render_env_glyphs.py +119 -0
- package/coach/tests/test_reward_hints.py +59 -0
- package/coach/tests/test_scoring.py +147 -0
- package/coach/tests/test_session_start_weekly_trigger.py +92 -0
- package/coach/tests/test_skill_inventory.py +368 -0
- package/coach/tests/test_stats_hybrid.py +142 -0
- package/coach/tests/test_status_accounting.py +41 -0
- package/coach/tests/test_statusline_failsafe.py +70 -0
- package/coach/tests/test_statusline_self_patch.py +261 -0
- package/coach/tests/test_statusline_variants.py +110 -0
- package/coach/tests/test_statusline_wrap.py +196 -0
- package/coach/tests/test_statusline_wrap_action.py +408 -0
- package/coach/tests/test_switch_to_plugin.py +360 -0
- package/coach/tests/test_themes.py +104 -0
- package/coach/tests/test_user_config.py +160 -0
- package/coach/tests/test_wrap_announce_hook.py +130 -0
- package/coach/tests/test_xp_accounting.py +55 -0
- package/hooks/coach-session-start.py +536 -0
- package/hooks/coach-user-prompt.py +2288 -0
- package/install-launchd.sh +102 -0
- package/install.sh +597 -0
- package/launchd/com.local.claude-coach.plist.template +34 -0
- package/launchd/run-insights.sh +20 -0
- package/npm/coach-claw.js +259 -0
- package/package.json +52 -0
- package/requirements.txt +11 -0
- package/settings-snippet.json +31 -0
- package/skills/coach/SKILL.md +107 -0
- package/skills/coach-insights/SKILL.md +78 -0
- package/skills/config/SKILL.md +149 -0
|
@@ -0,0 +1,258 @@
|
|
|
1
|
+
"""configure.py — set / preview / wizard CLI for coach-claw config."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
import sys
|
|
6
|
+
|
|
7
|
+
import pytest
|
|
8
|
+
|
|
9
|
+
import configure
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@pytest.fixture
def isolated_config(tmp_path, monkeypatch):
    """Point COACH_CONFIG_DIR at a throwaway directory (same isolation
    pattern as test_user_config.py) and return the config-file path
    that configure/user_config will read and write."""
    monkeypatch.setenv("COACH_CONFIG_DIR", str(tmp_path))
    config_file = tmp_path / ".user_config.json"
    return config_file
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _make_input_sequence(*answers):
    """Build a fake `input()` that returns `answers` in order. Raises if
    the wizard asks for more answers than provided — catches infinite-
    loop bugs in the validation re-prompt path."""
    queue = list(answers)

    def fake_input(prompt: str = "") -> str:
        # Exhausted answers means the wizard re-prompted more times than
        # the test planned for — fail loudly with the offending prompt.
        if not queue:
            raise AssertionError(
                "wizard asked for more input than the test provided "
                f"(last prompt: {prompt!r})"
            )
        return queue.pop(0)

    return fake_input
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# --- set ------------------------------------------------------------------
|
|
39
|
+
|
|
40
|
+
def test_set_writes_all_three_keys(isolated_config, capsys):
    """Happy path: one invocation writes theme, variant, and both ELO bounds."""
    argv = ["set", "--theme", "ocean",
            "--statusline", "pips",
            "--elo", "1200", "2600"]
    assert configure.main(argv) == 0
    saved = json.loads(isolated_config.read_text())
    assert saved["theme"] == "ocean"
    assert saved["statusline_variant"] == "pips"
    assert saved["elo_min"] == 1200
    assert saved["elo_max"] == 2600
    assert "saved" in capsys.readouterr().out


def test_set_partial_preserves_other_keys(isolated_config):
    """Only the keys the user passes should change. Unspecified keys
    must keep whatever was already in the config."""
    import user_config

    # Seed the file with clearly non-default values.
    user_config.save({
        "theme": "skyrim",
        "statusline_variant": "forge",
        "elo_min": 800,
        "elo_max": 3000,
    })

    assert configure.main(["set", "--theme", "ocean"]) == 0

    saved = json.loads(isolated_config.read_text())
    assert saved["theme"] == "ocean"               # changed
    assert saved["statusline_variant"] == "forge"  # preserved
    assert saved["elo_min"] == 800                 # preserved
    assert saved["elo_max"] == 3000                # preserved


def test_set_rejects_unknown_theme(isolated_config, capsys):
    """Unknown theme name is an error, reported on stderr."""
    assert configure.main(["set", "--theme", "atlantis"]) == 1
    assert "theme" in capsys.readouterr().err.lower()
    # Existing config (none in this case) must not have been corrupted.
    assert not isolated_config.exists()


def test_set_rejects_unknown_variant(isolated_config, capsys):
    """Unknown statusline variant is an error, reported on stderr."""
    assert configure.main(["set", "--statusline", "rainbow"]) == 1
    message = capsys.readouterr().err.lower()
    assert "statusline_variant" in message or "statusline" in message


def test_set_rejects_invalid_elo_range(isolated_config, capsys):
    """min > max is rejected with an ELO-related error."""
    assert configure.main(["set", "--elo", "2000", "1000"]) == 1
    assert "elo" in capsys.readouterr().err.lower()


def test_set_rejects_negative_elo(isolated_config):
    """Negative ELO bounds fail at argparse parse time (before func runs)."""
    with pytest.raises(SystemExit):
        configure.main(["set", "--elo", "-100", "2800"])


def test_set_with_no_flags_complains_and_exits_nonzero(isolated_config, capsys):
    """Bare `set` has nothing to write and must say so."""
    assert configure.main(["set"]) == 1
    message = capsys.readouterr().err.lower()
    assert "nothing to do" in message or "preview" in message
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
# --- preview --------------------------------------------------------------
|
|
113
|
+
|
|
114
|
+
def test_preview_lists_every_variant(isolated_config, capsys):
    """Preview output enumerates every known statusline variant."""
    assert configure.main(["preview"]) == 0
    listing = capsys.readouterr().out
    # Every variant key must appear (bracket removed in v0.1.4).
    for name in ("crystal", "pips", "slash", "forge"):
        assert name in listing
    # Regression guard: bracket was removed and must not reappear in
    # the preview enumeration.
    assert "bracket" not in listing


def test_preview_lists_every_theme(isolated_config, capsys):
    """Preview output enumerates every known theme."""
    assert configure.main(["preview"]) == 0
    listing = capsys.readouterr().out
    for name in ("craft", "ocean", "skyrim", "marvel", "hacker", "lotr"):
        assert name in listing


def test_preview_marks_current_variant_and_theme(isolated_config, capsys):
    """Active selections carry a visible marker in the preview."""
    import user_config
    user_config.save({"theme": "ocean", "statusline_variant": "pips"})

    assert configure.main(["preview"]) == 0
    listing = capsys.readouterr().out
    # The "← current" marker should appear next to the active selections.
    # Two markers total — one variant, one theme.
    assert listing.count("← current") == 2


def test_preview_pads_theme_names_consistently(isolated_config, capsys):
    """Theme-row format is `f' {name:>13} → ...'`.

    Pinned because the slash-command skill used to have its own copy
    of this format string at width `:>7`, while configure.py used
    `:>13` — outputs drifted apart and a teammate caught the false
    'byte-equivalent' claim. After v1.0.5 the slash command delegates
    to configure.py, so configure.py is the single point of truth for
    the format width. Future intentional format changes require
    updating this test deliberately.
    """
    assert configure.main(["preview"]) == 0
    listing = capsys.readouterr().out

    # finalfantasy is the longest theme name (12 chars). Padded to width
    # 13, it gets exactly 1 leading space inside the field — combined
    # with the 2-char literal indent, that's 3 spaces before the name.
    assert "   finalfantasy →" in listing, (
        "expected theme name 'finalfantasy' right-padded to width 13; "
        "format string at coach/bin/configure.py:68 may have changed."
    )

    # dc is the shortest theme name (2 chars). Padded to width 13 it
    # gets 11 leading spaces inside the field — plus the 2-char indent
    # is 13 spaces before the name.
    assert "             dc →" in listing, (
        "expected theme name 'dc' right-padded to width 13; "
        "format string at coach/bin/configure.py:68 may have changed."
    )
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
# --- wizard ---------------------------------------------------------------
|
|
179
|
+
|
|
180
|
+
def test_wizard_skips_on_non_tty(isolated_config, capsys, monkeypatch):
    """When stdin is not a TTY, wizard prints a pointer to `set` and
    exits 0 without reading any input."""
    monkeypatch.setattr(sys.stdin, "isatty", lambda: False)
    assert configure.main(["wizard"]) == 0
    out = capsys.readouterr().out
    assert "interactive terminal" in out.lower()
    assert "config set" in out
    # And nothing was written.
    assert not isolated_config.exists()


def test_wizard_pick_by_number_writes_config(isolated_config, capsys, monkeypatch):
    """Numeric pick for the variant prompt, name pick for the theme prompt."""
    monkeypatch.setattr(sys.stdin, "isatty", lambda: True)
    # Only VARIANTS is consulted here (the previously imported
    # `themes.list_themes` was unused and has been dropped).
    from statusline_variants import VARIANTS
    second_variant = list(VARIANTS.keys())[1]

    # 1) first prompt: variant. Pick "2" — second variant in VARIANTS dict
    # 2) second prompt: theme. Pick "ocean" by name.
    monkeypatch.setattr("builtins.input", _make_input_sequence("2", "ocean"))
    assert configure.main(["wizard"]) == 0

    payload = json.loads(isolated_config.read_text())
    assert payload["statusline_variant"] == second_variant
    assert payload["theme"] == "ocean"


def test_wizard_enter_keeps_default(isolated_config, capsys, monkeypatch):
    """Empty input on each prompt keeps the current value; the wizard
    detects 'no changes' and skips the save call."""
    import user_config
    user_config.save({"theme": "ocean", "statusline_variant": "pips"})
    before = isolated_config.read_text()

    monkeypatch.setattr(sys.stdin, "isatty", lambda: True)
    monkeypatch.setattr("builtins.input", _make_input_sequence("", ""))
    assert configure.main(["wizard"]) == 0
    assert "no changes" in capsys.readouterr().out.lower()

    # File on disk byte-identical
    assert isolated_config.read_text() == before


def test_wizard_keyboard_interrupt_does_not_save(isolated_config, capsys, monkeypatch):
    """Ctrl-C at any prompt exits cleanly (rc 0) without touching disk."""
    monkeypatch.setattr(sys.stdin, "isatty", lambda: True)

    def cancel(_prompt=""):
        raise KeyboardInterrupt

    monkeypatch.setattr("builtins.input", cancel)
    assert configure.main(["wizard"]) == 0
    assert "cancelled" in capsys.readouterr().out.lower()
    assert not isolated_config.exists()


def test_wizard_invalid_input_reprompts(isolated_config, monkeypatch):
    """First answer is bogus; second is valid. Wizard must re-prompt
    rather than crash."""
    monkeypatch.setattr(sys.stdin, "isatty", lambda: True)
    # Variant prompt: "rainbow" (invalid) → re-prompt → "1" (valid)
    # Theme prompt: "ocean" (valid)
    monkeypatch.setattr(
        "builtins.input",
        _make_input_sequence("rainbow", "1", "ocean"),
    )
    assert configure.main(["wizard"]) == 0

    payload = json.loads(isolated_config.read_text())
    # First variant is the default; "1" picks it
    from statusline_variants import VARIANTS
    assert payload["statusline_variant"] == list(VARIANTS.keys())[0]
    assert payload["theme"] == "ocean"
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
"""cron_check.is_cron_registered — best-effort detection of whether a
|
|
2
|
+
Coach insights cron/launchd plist is loaded.
|
|
3
|
+
|
|
4
|
+
Used by the plugin's UserPromptSubmit nudge block to decide whether to
|
|
5
|
+
suggest `npx @rm0nroe/coach-claw launchd`. Detection failures default
|
|
6
|
+
to True (assume registered) so users never get false-positive nudges.
|
|
7
|
+
"""
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import subprocess
|
|
11
|
+
from unittest.mock import patch, MagicMock
|
|
12
|
+
|
|
13
|
+
import cron_check
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# ---------------------------------------------------------------------------
|
|
17
|
+
# macOS path (launchctl)
|
|
18
|
+
# ---------------------------------------------------------------------------
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _mock_run(returncode: int, stdout: bytes = b"", stderr: bytes = b""):
    """Fake `subprocess.run` result exposing only the attributes the
    cron detector reads (returncode, stdout, stderr)."""
    result = MagicMock()
    result.returncode = returncode
    result.stdout = stdout
    result.stderr = stderr
    return result
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def test_macos_returns_true_when_launchctl_finds_plist(monkeypatch):
    """launchctl exit 0 → plist loaded → registered."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Darwin")
    fake = _mock_run(0)
    with patch.object(cron_check.subprocess, "run", return_value=fake):
        assert cron_check.is_cron_registered() is True


def test_macos_returns_false_when_launchctl_missing_plist(monkeypatch):
    """Nonzero launchctl exit → plist not loaded → not registered."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Darwin")
    fake = _mock_run(113)
    with patch.object(cron_check.subprocess, "run", return_value=fake):
        assert cron_check.is_cron_registered() is False


def test_macos_returns_true_on_subprocess_error(monkeypatch):
    """Fail-safe: launchctl missing or timing out → assume registered."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Darwin")
    boom = subprocess.TimeoutExpired(cmd="launchctl", timeout=5)
    with patch.object(cron_check.subprocess, "run", side_effect=boom):
        assert cron_check.is_cron_registered() is True


def test_macos_returns_true_when_launchctl_not_found(monkeypatch):
    """No launchctl binary at all → fail-safe True."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Darwin")
    with patch.object(cron_check.subprocess, "run", side_effect=FileNotFoundError):
        assert cron_check.is_cron_registered() is True
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
# ---------------------------------------------------------------------------
|
|
54
|
+
# Linux path (crontab)
|
|
55
|
+
# ---------------------------------------------------------------------------
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def test_linux_returns_false_when_crontab_empty(monkeypatch):
    """`crontab -l` returns nonzero when user has no crontab. That's
    the strongest signal Coach is not scheduled."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Linux")
    with patch.object(cron_check.subprocess, "run", return_value=_mock_run(1)):
        assert cron_check.is_cron_registered() is False


def test_linux_returns_true_when_coach_marker_in_crontab(monkeypatch):
    """A crontab line mentioning the insights script counts as registered."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Linux")
    listing = b"0 4 * * * /home/u/.claude/coach/bin/insights.sh 1d\n"
    with patch.object(cron_check.subprocess, "run",
                      return_value=_mock_run(0, stdout=listing)):
        assert cron_check.is_cron_registered() is True


def test_linux_returns_false_when_crontab_has_only_other_jobs(monkeypatch):
    """Unrelated cron jobs must not count as a Coach registration."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Linux")
    listing = b"0 5 * * * /usr/bin/something-else\n"
    with patch.object(cron_check.subprocess, "run",
                      return_value=_mock_run(0, stdout=listing)):
        assert cron_check.is_cron_registered() is False


def test_linux_recognizes_claude_coach_label(monkeypatch):
    """Alternate marker in case a future helper uses the `claude-coach`
    label instead of the script path."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Linux")
    listing = b"0 4 * * * /opt/claude-coach/run.sh\n"
    with patch.object(cron_check.subprocess, "run",
                      return_value=_mock_run(0, stdout=listing)):
        assert cron_check.is_cron_registered() is True


def test_linux_returns_true_on_subprocess_error(monkeypatch):
    """Fail-safe mirrors the macOS path: errors → assume registered."""
    monkeypatch.setattr(cron_check.platform, "system", lambda: "Linux")
    boom = subprocess.TimeoutExpired(cmd="crontab", timeout=5)
    with patch.object(cron_check.subprocess, "run", side_effect=boom):
        assert cron_check.is_cron_registered() is True


# ---------------------------------------------------------------------------
# Other platforms
# ---------------------------------------------------------------------------


def test_other_platforms_return_true(monkeypatch):
    """Windows / unknown systems don't use the cron path. Returning
    True suppresses the nudge."""
    for system in ("Windows", "FreeBSD"):
        monkeypatch.setattr(cron_check.platform, "system", lambda s=system: s)
        assert cron_check.is_cron_registered() is True
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
"""coach-user-prompt.py: cron-nudge banner gating.
|
|
2
|
+
|
|
3
|
+
The plugin distribution emits a one-time `<coach-cron-nudge>` block
|
|
4
|
+
when (a) we're running under the plugin (CLAUDE_PLUGIN_ROOT set), and
|
|
5
|
+
(b) no Coach cron/launchd plist is registered. Guarded by the
|
|
6
|
+
`.cron-nudged` marker so it fires exactly once.
|
|
7
|
+
"""
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import importlib.util
|
|
11
|
+
import json
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
|
|
14
|
+
import pytest
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@pytest.fixture(scope="module")
def cup():
    """Load coach-user-prompt.py as a module via importlib."""
    # Prefer the repo checkout; fall back to the installed hook.
    candidate = Path(__file__).resolve().parents[2] / "hooks" / "coach-user-prompt.py"
    if not candidate.exists():
        candidate = Path.home() / ".claude" / "hooks" / "coach-user-prompt.py"
    if not candidate.exists():
        pytest.skip(f"hook not installed at {candidate}")
    spec = importlib.util.spec_from_file_location("cup_cron_under_test", str(candidate))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@pytest.fixture
def coach_dir(tmp_path, monkeypatch):
    """Redirect COACH_DIR so the test doesn't touch the real
    `~/.claude/coach/.cron-nudged` marker."""
    sandbox = tmp_path / "coach"
    sandbox.mkdir()
    monkeypatch.setenv("COACH_CONFIG_DIR", str(sandbox))
    return sandbox
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def test_no_nudge_when_not_in_plugin_context(cup, coach_dir, monkeypatch):
    """CLI distribution: CLAUDE_PLUGIN_ROOT unset → no nudge ever."""
    monkeypatch.delenv("CLAUDE_PLUGIN_ROOT", raising=False)
    monkeypatch.setattr(cup, "COACH_DIR", coach_dir)
    # Even with cron absent (mocked False), we still skip — gate is
    # CLAUDE_PLUGIN_ROOT, not cron presence.
    assert cup._maybe_cron_nudge_block(env="terminal") is None
    assert not (coach_dir / ".cron-nudged").exists()


def test_no_nudge_when_marker_present(cup, coach_dir, monkeypatch):
    """Marker present → already nudged → no re-nudge."""
    monkeypatch.setenv("CLAUDE_PLUGIN_ROOT", "/some/plugin/root")
    monkeypatch.setattr(cup, "COACH_DIR", coach_dir)
    marker = coach_dir / ".cron-nudged"
    marker.write_text(json.dumps({"nudged_at": "2026-05-08T00:00:00Z"}))

    # Even if cron is absent we should NOT re-nudge.
    import cron_check
    monkeypatch.setattr(cron_check, "is_cron_registered", lambda: False)

    assert cup._maybe_cron_nudge_block(env="terminal") is None


def test_no_nudge_when_cron_already_registered(cup, coach_dir, monkeypatch):
    """Cron present → nothing to suggest, and no marker is written."""
    monkeypatch.setenv("CLAUDE_PLUGIN_ROOT", "/some/plugin/root")
    monkeypatch.setattr(cup, "COACH_DIR", coach_dir)
    import cron_check
    monkeypatch.setattr(cron_check, "is_cron_registered", lambda: True)

    assert cup._maybe_cron_nudge_block(env="terminal") is None
    # Marker NOT written (we didn't nudge, nothing to remember).
    assert not (coach_dir / ".cron-nudged").exists()


def test_nudge_fires_and_writes_marker(cup, coach_dir, monkeypatch):
    """Plugin context + cron absent + no prior marker → emit + write."""
    monkeypatch.setenv("CLAUDE_PLUGIN_ROOT", "/some/plugin/root")
    monkeypatch.setattr(cup, "COACH_DIR", coach_dir)
    import cron_check
    monkeypatch.setattr(cron_check, "is_cron_registered", lambda: False)

    banner = cup._maybe_cron_nudge_block(env="terminal")
    assert banner is not None
    # Banner mentions the recommended remediation
    assert "npx @rm0nroe/coach-claw launchd" in banner
    assert "crontab" in banner
    # Fires once: marker written
    marker = coach_dir / ".cron-nudged"
    assert marker.exists()
    assert "nudged_at" in json.loads(marker.read_text())


def test_nudge_is_idempotent_after_first_emit(cup, coach_dir, monkeypatch):
    """First call emits + writes marker; second call sees marker and
    returns None."""
    monkeypatch.setenv("CLAUDE_PLUGIN_ROOT", "/some/plugin/root")
    monkeypatch.setattr(cup, "COACH_DIR", coach_dir)
    import cron_check
    monkeypatch.setattr(cron_check, "is_cron_registered", lambda: False)

    first = cup._maybe_cron_nudge_block(env="terminal")
    second = cup._maybe_cron_nudge_block(env="terminal")
    assert first is not None
    assert second is None


def test_nudge_renders_ide_shape(cup, coach_dir, monkeypatch):
    """IDE entrypoint gets the HR-framed shape (consistent with other
    Coach blocks); terminal gets blockquote shape."""
    monkeypatch.setenv("CLAUDE_PLUGIN_ROOT", "/some/plugin/root")
    monkeypatch.setattr(cup, "COACH_DIR", coach_dir)
    import cron_check
    monkeypatch.setattr(cron_check, "is_cron_registered", lambda: False)

    rendered_ide = cup._cron_nudge_block(env="ide")
    rendered_term = cup._cron_nudge_block(env="terminal")
    assert rendered_ide.startswith("---")
    assert rendered_term.startswith(">")


def test_failsafe_swallows_module_import_error(cup, coach_dir, monkeypatch):
    """If cron_check is somehow unavailable, return None — never raise
    out of a hook."""
    monkeypatch.setenv("CLAUDE_PLUGIN_ROOT", "/some/plugin/root")
    monkeypatch.setattr(cup, "COACH_DIR", coach_dir)

    import sys
    # A None entry in sys.modules makes `import cron_check` raise.
    monkeypatch.setitem(sys.modules, "cron_check", None)

    assert cup._maybe_cron_nudge_block(env="terminal") is None
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import importlib.util
|
|
4
|
+
import json
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
import analyze
|
|
8
|
+
import scoring
|
|
9
|
+
import stats
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _load_hook_module():
    """Import hooks/coach-user-prompt.py by path (repo checkout first,
    then the installed copy under ~/.claude)."""
    candidate = Path(__file__).resolve().parents[2] / "hooks" / "coach-user-prompt.py"
    if not candidate.exists():
        candidate = Path.home() / ".claude" / "hooks" / "coach-user-prompt.py"
    spec = importlib.util.spec_from_file_location("coach_user_prompt_parity", str(candidate))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _tool_use(name: str, input_: dict) -> dict:
    """Minimal tool_use content item as it appears in a transcript."""
    item = {"type": "tool_use"}
    item["name"] = name
    item["input"] = input_
    return item
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _write_transcript(path: Path, tool_uses: list[dict]) -> None:
    """Write one assistant JSONL record per tool_use, with per-second
    timestamps increasing in list order."""
    records = [
        json.dumps({
            "type": "assistant",
            "timestamp": f"2026-01-01T00:00:{i:02d}+00:00",
            "message": {
                "role": "assistant",
                "content": [item],
            },
        })
        for i, item in enumerate(tool_uses)
    ]
    # Trailing newline keeps the file a well-formed JSONL document.
    path.write_text("\n".join(records) + "\n")
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def test_hook_completion_matcher_tracks_shared_scoring_actions(tmp_path: Path) -> None:
    """The hook's private matcher must agree with the shared scoring
    module on every representative (tool_use, action) pair."""
    hook = _load_hook_module()
    # (tool_use, action, skill_id, expected-match)
    cases = [
        (_tool_use("Bash", {"command": "pytest --collect-only tests/"}), "test_run", None, False),
        (_tool_use("Bash", {"command": "mocha"}), "test_run", None, True),
        (_tool_use("Bash", {"command": "yarn test"}), "test_run", None, True),
        (_tool_use("Bash", {"command": "printf 'pytest in text only'"}), "test_run", None, False),
        (_tool_use("Bash", {"command": "printf 'git commit in text only'"}), "commit", None, False),
        (_tool_use("Skill", {"skill": "/update-docs"}), "skill_invoke", "update-docs", True),
        (_tool_use("Edit", {"file_path": "README.md"}), "doc_write", None, True),
    ]

    for use, action, skill, want in cases:
        assert scoring.matches_action(use, action, skill_id=skill) is want
        assert hook._tool_use_matches_action(use, action, skill_id=skill) is want


def test_stats_session_xp_delegates_to_shared_scoring(tmp_path: Path) -> None:
    """stats' session-XP total must be exactly the shared scorer's total,
    and the breakdown must classify each event correctly."""
    transcript = tmp_path / "session.jsonl"
    _write_transcript(transcript, [
        _tool_use("Bash", {"command": "pytest --collect-only tests/"}),
        _tool_use("Bash", {"command": "mocha"}),
        _tool_use("Bash", {"command": "yarn test"}),
        _tool_use("Bash", {"command": "printf 'pytest and git commit are text'"}),
        _tool_use("Bash", {"command": "git commit -m ok"}),
        _tool_use("Skill", {"skill": "/update-docs"}),
        _tool_use("Edit", {"file_path": "README.md"}),
    ])
    profile = {
        "entries": [{
            "id": "docs-drift",
            "reward_hint": {"action": "doc_write", "xp": 1, "description": "doc update"},
        }]
    }

    expected_total = scoring.score_transcript(transcript, profile)
    assert stats._session_xp_from_transcript(transcript, profile) == expected_total

    breakdown = scoring.score_transcript_with_breakdown(transcript, profile)
    assert breakdown["tests"] == 2
    assert breakdown["commits"] == 1
    assert breakdown["skills_list"] == ["update-docs"]
    assert breakdown["dynamic_actions"]["doc_write"]["count"] == 1


def test_analyze_test_run_detection_tracks_shared_scoring(tmp_path: Path) -> None:
    """analyze's test-run counter must track scoring.matches_action on
    the same command list."""
    commands = [
        "pytest --collect-only tests/",
        "mocha",
        "pnpm test",
        "yarn test",
        "printf 'pytest and git commit are just words'",
    ]
    transcript = tmp_path / "session.jsonl"
    _write_transcript(
        transcript,
        [_tool_use("Bash", {"command": cmd}) for cmd in commands],
    )

    signature = analyze.analyze_session(transcript)
    want = sum(
        scoring.matches_action(_tool_use("Bash", {"command": cmd}), "test_run")
        for cmd in commands
    )
    assert signature["test_run_count"] == want
    assert signature["has_any_test_run"] is True