@rm0nroe/coach-claw 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +311 -0
  3. package/coach/README.md +99 -0
  4. package/coach/bin/aggregate_facets.py +274 -0
  5. package/coach/bin/analyze.py +678 -0
  6. package/coach/bin/bank.py +247 -0
  7. package/coach/bin/banner_themes.py +645 -0
  8. package/coach/bin/coach_paths.py +33 -0
  9. package/coach/bin/coexistence_check.py +129 -0
  10. package/coach/bin/configure.py +245 -0
  11. package/coach/bin/cron_check.py +81 -0
  12. package/coach/bin/default_statusline.py +135 -0
  13. package/coach/bin/doctor.py +663 -0
  14. package/coach/bin/insights-llm.sh +264 -0
  15. package/coach/bin/insights.sh +163 -0
  16. package/coach/bin/insights_window.py +111 -0
  17. package/coach/bin/marker_io.py +154 -0
  18. package/coach/bin/merge.py +671 -0
  19. package/coach/bin/redact.py +86 -0
  20. package/coach/bin/render_env.py +148 -0
  21. package/coach/bin/reward_hints.py +87 -0
  22. package/coach/bin/run-insights.sh +20 -0
  23. package/coach/bin/run_with_lock.py +85 -0
  24. package/coach/bin/scoring.py +260 -0
  25. package/coach/bin/skill_inventory.py +215 -0
  26. package/coach/bin/stats.py +459 -0
  27. package/coach/bin/status.py +293 -0
  28. package/coach/bin/statusline_self_patch.py +205 -0
  29. package/coach/bin/statusline_variants.py +146 -0
  30. package/coach/bin/statusline_wrap.py +244 -0
  31. package/coach/bin/statusline_wrap_action.py +460 -0
  32. package/coach/bin/switch_to_plugin.py +256 -0
  33. package/coach/bin/themes.py +256 -0
  34. package/coach/bin/user_config.py +176 -0
  35. package/coach/bin/xp_accounting.py +98 -0
  36. package/coach/changelog.md +4 -0
  37. package/coach/default-statusline-command.sh +19 -0
  38. package/coach/default-statusline-wrap-command.sh +15 -0
  39. package/coach/profile.yaml +37 -0
  40. package/coach/tests/conftest.py +13 -0
  41. package/coach/tests/test_aggregate_facets.py +379 -0
  42. package/coach/tests/test_analyze_aggregate.py +153 -0
  43. package/coach/tests/test_analyze_redaction.py +105 -0
  44. package/coach/tests/test_analyze_strengths.py +165 -0
  45. package/coach/tests/test_bank_atomic_write.py +61 -0
  46. package/coach/tests/test_bank_concurrency.py +126 -0
  47. package/coach/tests/test_banner_themes.py +981 -0
  48. package/coach/tests/test_celebrate_dedup.py +409 -0
  49. package/coach/tests/test_coach_paths.py +50 -0
  50. package/coach/tests/test_coexistence_check.py +128 -0
  51. package/coach/tests/test_configure.py +258 -0
  52. package/coach/tests/test_cron_check.py +118 -0
  53. package/coach/tests/test_cron_nudge_hook.py +134 -0
  54. package/coach/tests/test_detection_parity.py +105 -0
  55. package/coach/tests/test_doctor.py +595 -0
  56. package/coach/tests/test_hook_bespoke_dispatch.py +288 -0
  57. package/coach/tests/test_hook_module_resolution.py +116 -0
  58. package/coach/tests/test_hook_relevance.py +996 -0
  59. package/coach/tests/test_hook_render_env.py +364 -0
  60. package/coach/tests/test_hook_session_id_guard.py +160 -0
  61. package/coach/tests/test_insights_llm.py +759 -0
  62. package/coach/tests/test_insights_llm_venv_path.py +109 -0
  63. package/coach/tests/test_insights_window.py +237 -0
  64. package/coach/tests/test_install.py +1150 -0
  65. package/coach/tests/test_install_pyyaml_fallback.py +142 -0
  66. package/coach/tests/test_marker_consumption.py +167 -0
  67. package/coach/tests/test_marker_writer_locking.py +305 -0
  68. package/coach/tests/test_merge.py +413 -0
  69. package/coach/tests/test_no_broken_mktemp.py +90 -0
  70. package/coach/tests/test_render_env.py +137 -0
  71. package/coach/tests/test_render_env_glyphs.py +119 -0
  72. package/coach/tests/test_reward_hints.py +59 -0
  73. package/coach/tests/test_scoring.py +147 -0
  74. package/coach/tests/test_session_start_weekly_trigger.py +92 -0
  75. package/coach/tests/test_skill_inventory.py +368 -0
  76. package/coach/tests/test_stats_hybrid.py +142 -0
  77. package/coach/tests/test_status_accounting.py +41 -0
  78. package/coach/tests/test_statusline_failsafe.py +70 -0
  79. package/coach/tests/test_statusline_self_patch.py +261 -0
  80. package/coach/tests/test_statusline_variants.py +110 -0
  81. package/coach/tests/test_statusline_wrap.py +196 -0
  82. package/coach/tests/test_statusline_wrap_action.py +408 -0
  83. package/coach/tests/test_switch_to_plugin.py +360 -0
  84. package/coach/tests/test_themes.py +104 -0
  85. package/coach/tests/test_user_config.py +160 -0
  86. package/coach/tests/test_wrap_announce_hook.py +130 -0
  87. package/coach/tests/test_xp_accounting.py +55 -0
  88. package/hooks/coach-session-start.py +536 -0
  89. package/hooks/coach-user-prompt.py +2288 -0
  90. package/install-launchd.sh +102 -0
  91. package/install.sh +597 -0
  92. package/launchd/com.local.claude-coach.plist.template +34 -0
  93. package/launchd/run-insights.sh +20 -0
  94. package/npm/coach-claw.js +259 -0
  95. package/package.json +52 -0
  96. package/requirements.txt +11 -0
  97. package/settings-snippet.json +31 -0
  98. package/skills/coach/SKILL.md +107 -0
  99. package/skills/coach-insights/SKILL.md +78 -0
  100. package/skills/config/SKILL.md +149 -0
@@ -0,0 +1,264 @@
1
#!/bin/bash
# Coach Claw — LLM-triggered weekly insights pass.
#
# Pipeline:
#   1. Generate a unique RUN_ID with the `insights-weekly-` prefix so
#      downstream consumers (changelog, recent_runs) can distinguish from
#      the daily deterministic path's `insights-<ts>` runs.
#   2. Invoke `claude -p "/insights"` with COACH_DISABLE=1 to refresh
#      Anthropic-side facets/*.json sidecars. The CLI's stdout is
#      DISCARDED — we run it for the side effect on disk only.
#   3. Aggregate the refreshed facets via aggregate_facets.py (pure
#      Python, deterministic) → detections JSON.
#   4. Hand to merge.py. Same merge logic as the daily path. No schema
#      change. Run-id prefix is the only discriminator.
#   5. Auto-commit + touch .last_weekly_insights to throttle the
#      SessionStart trigger to ≥7 days.
#
# Flags:
#   --dry-run   aggregate + print detections JSON; do not call merge.py
#               or touch the throttle marker.
#   --force     run even if .last_weekly_insights mtime is < 7 days ago.
#               Without this flag the script exits 0 silently when the
#               throttle is fresh.
#
# Exit codes:
#   0   success (full run completed, or skipped on stale-marker throttle,
#       or skipped on --dry-run after printing detections)
#   2   python3 not on PATH (or unknown CLI arg)
#   4   merge.py failed
#   5   aggregate_facets.py failed (nonzero exit OR unparseable JSON);
#       wrapper bails BEFORE merge + marker touch so the next session
#       can retry from scratch
#   6   LLM refresh step failed (claude missing / nonzero exit / timeout);
#       wrapper bails BEFORE merge + marker touch. Same reasoning as
#       exit 5 — without a successful refresh, aggregating stale or
#       empty facets and merging the result would advance absence-based
#       streaks on phantom evidence.
#   7   no current-window evidence (n_sessions == 0 in window); wrapper
#       bails BEFORE merge + marker touch. Aggregator emits exit 3
#       (EXIT_NO_EVIDENCE) for this case; wrapper translates to 7. An
#       empty detections list with zero sessions in window is "no
#       evidence," which must NOT advance absence-based streaks. (Empty
#       detections WITH n_sessions > 0 IS valid — a clean week — and
#       merges normally.)
#   10  concurrent run already in progress (lock held by another
#       insights-llm.sh on the same .weekly_insights.lock); emitted by
#       run_with_lock.py during the at-startup re-exec. The losing
#       wrapper exits cleanly without invoking aggregator/merge.
#
# Cross-platform notes:
# - Resolves python3 from PATH (no /usr/bin/python3 hardcode).
# - mktemp templates use trailing Xs (BSD-safe, see CLAUDE.md gotcha).
# - Date math is delegated to Python — no BSD-vs-GNU `date` flag drift.

set -uo pipefail

# Plugin-context PATH wedge: when invoked from inside a Claude Code
# plugin install (CLAUDE_PLUGIN_DATA env var set + venv exists),
# prepend the plugin's venv bin/ to PATH so subsequent `python3`
# resolutions in this script (and child python processes) pick up
# the venv's interpreter — and therefore PyYAML. CLI users never
# have CLAUDE_PLUGIN_DATA set; this is a no-op for them. See
# artifacts/e2e-validation-2026-05-09.md for context.
if [[ -n "${CLAUDE_PLUGIN_DATA:-}" && -x "$CLAUDE_PLUGIN_DATA/venv/bin/python3" ]]; then
  export PATH="$CLAUDE_PLUGIN_DATA/venv/bin:$PATH"
fi

# Resolve env-derived paths + python BEFORE arg parsing so the
# concurrent-run guard can re-exec us with the original argv.
COACH_DIR="${COACH_DIR_OVERRIDE:-$HOME/.claude/coach}"
THROTTLE_MARKER="$COACH_DIR/.last_weekly_insights"
FACETS_DIR="${COACH_FACETS_DIR:-$HOME/.claude/usage-data/facets}"

# Resolve sibling scripts via the shipping bash file's own location, not via
# COACH_DIR. Lets the test suite point COACH_DIR_OVERRIDE at a throwaway
# tmpdir while still loading aggregate_facets.py/merge.py from the installed
# (or source-tree) bin/ next to this script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

PY="$(command -v python3 || true)"
if [[ -z "$PY" ]]; then
  echo "python3 not found in PATH" >&2
  exit 2
fi

# --- Concurrent-run guard --------------------------------------------------
# Two SessionStart hooks firing within the slow `claude -p "/insights"`
# window will both see `.last_weekly_insights` as stale and try to spawn
# this wrapper. Re-exec ourselves through `run_with_lock.py` so the
# second invocation either sees the post-refresh fresh marker (under
# the lock, after the first finishes) and skips on throttle, or hits
# the lock itself and skips on contention. Either way, exactly one
# wrapper does the LLM call + merge.
#
# Must precede arg parsing — otherwise `"$@"` is empty after the parse
# loop and the re-execed copy can't see --dry-run / --force.
#
# COACH_LLM_LOCK_HELD=1 is the re-entry sentinel — set by
# run_with_lock.py on the wrapped process so we don't loop.
if [[ -z "${COACH_LLM_LOCK_HELD:-}" ]]; then
  mkdir -p "$COACH_DIR" 2>/dev/null
  exec "$PY" "$SCRIPT_DIR/run_with_lock.py" \
    "$COACH_DIR/.weekly_insights.lock" \
    bash "${BASH_SOURCE[0]}" "$@"
fi

# --- Below this line we hold the weekly-insights lock ---------------------

DRY_RUN=0
FORCE=0
TIMEOUT_SECS="${COACH_INSIGHTS_LLM_TIMEOUT:-300}"
THROTTLE_DAYS="${COACH_INSIGHTS_LLM_THROTTLE_DAYS:-7}"

while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run) DRY_RUN=1; shift ;;
    --force) FORCE=1; shift ;;
    *) echo "unknown arg: $1" >&2; exit 2 ;;
  esac
done

# RUN_ID generation is inside the lock so two retried invocations of
# the same SessionStart wave can't stamp identical timestamps if the
# system clock resolves at one-second granularity.
RUN_ID="insights-weekly-$(date -u +%Y%m%dT%H%M%SZ)"

# --- Throttle check (skipped on --force / --dry-run) -----------------------
if [[ "$FORCE" -eq 0 && "$DRY_RUN" -eq 0 ]]; then
  if [[ -f "$THROTTLE_MARKER" ]]; then
    # Age computed in Python: avoids BSD-vs-GNU `stat`/`date` flag drift.
    # Prints -1 on any stat failure so the [[ -ge 0 ]] guard below treats
    # an unreadable marker as "not fresh" and proceeds with the run.
    AGE_SECS="$("$PY" - "$THROTTLE_MARKER" <<'PY'
import os, sys, time
try:
    mtime = os.path.getmtime(sys.argv[1])
    print(int(time.time() - mtime))
except Exception:
    print(-1)
PY
)"
    THROTTLE_SECS=$(( THROTTLE_DAYS * 86400 ))
    if [[ "$AGE_SECS" -ge 0 && "$AGE_SECS" -lt "$THROTTLE_SECS" ]]; then
      echo "skipped (throttle: last run ${AGE_SECS}s ago, threshold ${THROTTLE_SECS}s)"
      exit 0
    fi
  fi
fi

echo "run_id=$RUN_ID dry_run=$DRY_RUN force=$FORCE timeout=${TIMEOUT_SECS}s"

# --- Step 1: refresh facets via /insights subprocess -----------------------
# We discard stdout/stderr — only the side effect on facets/*.json matters.
# COACH_DISABLE=1 prevents the nested Claude session from loading Coach's
# own hooks (which would contaminate the analysis).
#
# COACH_INSIGHTS_LLM_SKIP_REFRESH=1 lets tests bypass the subprocess and
# operate on a pre-seeded fixture facets dir.
if [[ -z "${COACH_INSIGHTS_LLM_SKIP_REFRESH:-}" ]]; then
  CLAUDE_BIN="$(command -v claude || true)"
  if [[ -z "$CLAUDE_BIN" ]]; then
    # Fail-hard: aggregating stale-or-empty facets and merging the result
    # would touch .last_weekly_insights and advance absence-based streaks
    # on phantom evidence. Mirror the aggregator-fail-hard treatment
    # below — bail before merge so the next session retries cleanly.
    echo "claude CLI not on PATH — bailing before merge (LLM refresh failed)" >&2
    exit 6
  fi
  # POSIX-portable timeout: spawn claude in background, kill if it
  # outlives TIMEOUT_SECS. macOS has no `timeout` builtin and `gtimeout`
  # may not be installed.
  (
    COACH_DISABLE=1 "$CLAUDE_BIN" -p "/insights" > /dev/null 2>&1
  ) &
  CLAUDE_PID=$!
  SECS=0
  TIMED_OUT=0
  while kill -0 "$CLAUDE_PID" 2>/dev/null; do
    sleep 2
    SECS=$((SECS + 2))
    if [[ "$SECS" -ge "$TIMEOUT_SECS" ]]; then
      # TERM first for a graceful shutdown, then KILL after a 2s grace.
      kill -TERM "$CLAUDE_PID" 2>/dev/null
      sleep 2
      kill -KILL "$CLAUDE_PID" 2>/dev/null
      TIMED_OUT=1
      break
    fi
  done
  wait "$CLAUDE_PID" 2>/dev/null
  CLAUDE_RC=$?
  if [[ "$TIMED_OUT" -eq 1 ]]; then
    echo "claude -p /insights timed out after ${TIMEOUT_SECS}s — bailing before merge (LLM refresh failed)" >&2
    exit 6
  fi
  if [[ "$CLAUDE_RC" -ne 0 ]]; then
    echo "claude -p /insights exited rc=$CLAUDE_RC — bailing before merge (LLM refresh failed)" >&2
    exit 6
  fi
fi

# --- Step 2: aggregate facets → detections JSON ----------------------------
# NOTE: trailing-Xs mktemp template is required for BSD compatibility.
DET="$(mktemp /tmp/coach-weekly-detections-XXXXXX)"
trap 'rm -f "$DET"' EXIT

COACH_FACETS_DIR="$FACETS_DIR" "$PY" "$SCRIPT_DIR/aggregate_facets.py" \
  --window-days "$THROTTLE_DAYS" \
  > "$DET"
AGG_RC=$?
if [[ "$AGG_RC" -eq 3 ]]; then
  # Aggregator's EXIT_NO_EVIDENCE: n_sessions == 0 in the requested
  # window. Treat the same as any other "bail before merge" case —
  # don't merge, don't touch the throttle marker. The next session
  # retries from a fresh facets read. Distinct wrapper exit 7 so ops
  # can see "no evidence" vs "aggregator crashed" in logs.
  echo "no current-window evidence (n_sessions=0) — bailing before merge" >&2
  exit 7
fi
if [[ "$AGG_RC" -ne 0 ]]; then
  # Aggregator failure → bail BEFORE merge and BEFORE touching the
  # throttle marker. A nonzero aggregator may have written a partial
  # or empty $DET; merging that as `[]` would commit a clean-evidence
  # run that didn't actually happen, prematurely advancing debounce/
  # graduation streaks and consuming the weekly cadence. Re-running
  # the wrapper next session will retry from a fresh facets read.
  echo "aggregate_facets.py failed (rc=$AGG_RC) — bailing before merge" >&2
  exit 5
fi
N_DETS=$("$PY" - "$DET" <<'PY'
import json, sys
try:
    print(len(json.load(open(sys.argv[1]))))
except Exception:
    print(-1)
PY
)
if [[ "$N_DETS" -lt 0 ]]; then
  # Aggregator exited 0 but produced unparseable JSON → still bail.
  # Same reasoning as the rc check above; an empty/garbled $DET
  # cannot be merged safely.
  echo "aggregate_facets.py produced unparseable output — bailing before merge" >&2
  exit 5
fi
echo "detections=$N_DETS"

# --- Step 3: dry-run short-circuit -----------------------------------------
if [[ "$DRY_RUN" -eq 1 ]]; then
  cat "$DET"
  echo "(dry-run; merge skipped, throttle marker unchanged)"
  exit 0
fi

# --- Step 4: merge ---------------------------------------------------------
"$PY" "$SCRIPT_DIR/merge.py" \
  --profile "$COACH_DIR/profile.yaml" \
  --changelog "$COACH_DIR/changelog.md" \
  --lock "$COACH_DIR/.lock" \
  --detections "$DET" \
  --run-id "$RUN_ID" || {
  echo "merge.py failed" >&2
  exit 4
}

# --- Step 5: commit + throttle marker --------------------------------------
# Commit is best-effort (|| true): a missing/failed git repo must not
# block the throttle-marker touch, or the trigger would loop every session.
( cd "$COACH_DIR" && git add -A && git commit -q -m "$RUN_ID" ) || true
touch "$THROTTLE_MARKER"

echo "done"
@@ -0,0 +1,163 @@
1
#!/bin/bash
# Headless Coach insights pass — the deterministic cron path.
#
# Does NOT go through the claude CLI — runs the analyzer directly for speed
# and reliability (no LLM cold-start, no -p slash-command routing). The
# interactive `/coach-insights` skill is the on-demand counterpart (it
# delegates to Claude Code's built-in `/insights` for LLM-native analysis);
# this script is what launchd/cron invokes daily.
#
# The script's filename stays `insights.sh` for launchd-plist stability —
# the plist registered in v0.1+ has this path baked in, so renaming would
# force every existing user to re-run `install-launchd.sh` on upgrade.
#
# Usage: insights.sh [WINDOW]   (default 1d; also accepts 7d, 30d, etc.)
#
# Cross-platform notes:
# - Resolves `python3` from PATH (no /usr/bin/python3 hardcode) so it works
#   on Homebrew / pyenv / system Python.
# - Computes the since-time via Python so we don't depend on BSD `date -v`
#   (macOS) vs GNU `date -d` (Linux) flag availability.

set -uo pipefail

# Plugin-context PATH wedge: when invoked under a plugin install
# (CLAUDE_PLUGIN_DATA env var set + venv exists), prepend the plugin's
# venv bin/ to PATH so `python3` resolves to the venv interpreter that
# has PyYAML. CLI users never have CLAUDE_PLUGIN_DATA set; no-op for
# them. Currently the daily cron is CLI-only (the plugin nudges users
# to install it via `npx coach-claw launchd`), so this is defensive —
# but cheap, and unblocks plugin-only cron once we ship that path.
if [[ -n "${CLAUDE_PLUGIN_DATA:-}" && -x "$CLAUDE_PLUGIN_DATA/venv/bin/python3" ]]; then
  export PATH="$CLAUDE_PLUGIN_DATA/venv/bin:$PATH"
fi

WINDOW="${1:-1d}"
COACH_DIR="$HOME/.claude/coach"
RUN_ID="insights-$(date -u +%Y%m%dT%H%M%SZ)"

PY="$(command -v python3 || true)"
if [[ -z "$PY" ]]; then
  echo "python3 not found in PATH" >&2
  exit 2
fi

# Filter transcripts by mtime in Python so we don't depend on BSD
# `find -newermt`, which interprets bare timestamps in the host's
# *local* timezone — wrong on every non-UTC host. insights_window.py
# does the cutoff math against an explicit UTC-aware datetime and
# compares to POSIX st_mtime, so the result is TZ-independent.
TRANSCRIPTS=()
# NOTE: $? after the assignment is the command substitution's exit status,
# so a bad WINDOW spec (insights_window.py exits 2) propagates here.
TRANSCRIPT_OUTPUT="$("$PY" "$COACH_DIR/bin/insights_window.py" "$HOME/.claude/projects" "$WINDOW")"
WINDOW_STATUS=$?
if [[ "$WINDOW_STATUS" -ne 0 ]]; then
  exit "$WINDOW_STATUS"
fi
while IFS= read -r line; do
  # Herestring on empty output yields a single empty line; drop it.
  [[ -n "$line" ]] && TRANSCRIPTS+=("$line")
done <<< "$TRANSCRIPT_OUTPUT"

N="${#TRANSCRIPTS[@]}"
echo "run_id=$RUN_ID window=$WINDOW transcripts=$N"

# Analyzer run (or empty detections if no transcripts)
DET="$(mktemp /tmp/coach-detections-XXXXXX)"
USED_JSON="$(mktemp /tmp/coach-skills-used-XXXXXX)"
HINTS_JSON="$(mktemp /tmp/coach-skill-hints-XXXXXX)"
DELTA_JSON="$(mktemp /tmp/coach-skills-by-project-delta-XXXXXX)"
EFFECTIVE_JSON="$(mktemp /tmp/coach-skills-by-project-effective-XXXXXX)"
trap 'rm -f "$DET" "$USED_JSON" "$HINTS_JSON" "$DELTA_JSON" "$EFFECTIVE_JSON"' EXIT

if [[ "$N" -eq 0 ]]; then
  echo "[]" > "$DET"
  echo "{}" > "$USED_JSON"
  echo "{}" > "$DELTA_JSON"
  echo "no transcripts in window"
else
  # Delegate to the analyzer — it handles redaction internally.
  ANALYSIS="$("$PY" "$COACH_DIR/bin/analyze.py" "${TRANSCRIPTS[@]}")" || {
    echo "analyzer failed" >&2
    exit 3
  }
  # Split the analyzer's single JSON blob into the per-consumer tempfiles.
  "$PY" - "$ANALYSIS" "$DET" "$USED_JSON" "$DELTA_JSON" <<'PY'
import json, sys
analysis_json, det_path, used_path, delta_path = sys.argv[1:5]
d = json.loads(analysis_json)
detections = d.get("detections", []) or []
summary = d.get("summary") or {}
with open(det_path, "w") as f:
    json.dump(detections, f, indent=2)
with open(used_path, "w") as f:
    json.dump(summary.get("skills_used", {}) or {}, f)
with open(delta_path, "w") as f:
    json.dump(summary.get("skills_by_project", {}) or {}, f)
print(f"detections={len(detections)} summary={summary}")
PY
fi

# Build the EFFECTIVE skills_by_project tempfile = profile's existing
# rolling accumulator + this run's delta. skill_inventory.py reads this
# to infer per-skill project scope; merge.py separately receives just
# the delta and accumulates it into the profile after.
"$PY" - "$COACH_DIR/profile.yaml" "$DELTA_JSON" "$EFFECTIVE_JSON" <<'PY'
import json, sys
from pathlib import Path
profile_path, delta_path, effective_path = (Path(p) for p in sys.argv[1:4])
existing: dict = {}
try:
    import yaml
    data = yaml.safe_load(profile_path.read_text()) if profile_path.exists() else {}
    if isinstance(data, dict):
        raw = data.get("skills_by_project") or {}
        if isinstance(raw, dict):
            for proj, skills in raw.items():
                if isinstance(skills, dict):
                    existing[str(proj)] = {str(k): int(v) for k, v in skills.items()
                                           if isinstance(v, (int, float))}
except Exception:
    existing = {}
try:
    delta = json.loads(delta_path.read_text() or "{}")
    if not isinstance(delta, dict):
        delta = {}
except Exception:
    delta = {}
effective: dict = {p: dict(s) for p, s in existing.items()}
for proj, skills in delta.items():
    if not isinstance(skills, dict):
        continue
    bucket = effective.setdefault(str(proj), {})
    for sid, count in skills.items():
        if isinstance(count, (int, float)):
            bucket[str(sid)] = bucket.get(str(sid), 0) + int(count)
effective_path.write_text(json.dumps(effective))
PY

# Build skill_hints snapshot (installed ∖ used-in-window)
# Best-effort: a failed inventory pass degrades to "no hints", never aborts.
"$PY" "$COACH_DIR/bin/skill_inventory.py" \
  --used-json "$USED_JSON" \
  --skills-by-project "$EFFECTIVE_JSON" \
  > "$HINTS_JSON" || {
  echo "[]" > "$HINTS_JSON"
}
N_HINTS=$("$PY" - "$HINTS_JSON" <<'PY'
import json, sys
print(len(json.load(open(sys.argv[1]))))
PY
)
echo "skill_hints=$N_HINTS"

# Apply merge (safeguards + atomic write + changelog)
"$PY" "$COACH_DIR/bin/merge.py" \
  --profile "$COACH_DIR/profile.yaml" \
  --changelog "$COACH_DIR/changelog.md" \
  --lock "$COACH_DIR/.lock" \
  --detections "$DET" \
  --skill-hints "$HINTS_JSON" \
  --skills-by-project-delta "$DELTA_JSON" \
  --run-id "$RUN_ID"

# Git auto-commit (no-op if nothing changed)
( cd "$COACH_DIR" && git add -A && git commit -q -m "insights $RUN_ID" ) || true

echo "done"
@@ -0,0 +1,111 @@
1
+ #!/usr/bin/env python3
2
+ """Compute the set of recent JSONL transcripts for the /coach-insights window.
3
+
4
+ Replaces an earlier BSD `find -newermt "$SINCE_TS"` invocation that
5
+ read its timestamp argument in the host's *local* timezone. The Python
6
+ heredoc that produced the timestamp emitted UTC, so on any non-UTC
7
+ host the window was off by `tz_offset` hours every cron run — under-
8
+ counting or double-counting transcripts depending on the sign.
9
+
10
+ This module does the cutoff math entirely in Python with a UTC-aware
11
+ `datetime.now(timezone.utc)` and compares against `path.stat().st_mtime`
12
+ (POSIX seconds-since-epoch — also TZ-independent). The result is
13
+ correct regardless of the host's `TZ` env var.
14
+
15
+ Usage from the shell:
16
+
17
+ python3 insights_window.py <projects_dir> <window>
18
+
19
+ …where `<window>` is `1d` / `7d` / `2h` / `30m` etc. Prints one
20
+ absolute path per line, sorted, and excludes `/subagents/` transcripts
21
+ (those are agent-tool spawns, not main sessions).
22
+ """
23
+ from __future__ import annotations
24
+
25
+ import re
26
+ import sys
27
+ from datetime import datetime, timedelta, timezone
28
+ from pathlib import Path
29
+
30
+ _WINDOW_RE = re.compile(r"(\d+)([dhm])")
31
+
32
+
33
def parse_window(spec: str) -> timedelta:
    """Parse a window spec like ``1d`` / ``7d`` / ``2h`` / ``60m``.

    Raises ``ValueError`` on any other shape so the caller can surface
    a clear error instead of silently picking a default.
    """
    matched = re.fullmatch(r"(\d+)([dhm])", spec.strip())
    if matched is None:
        raise ValueError(f"bad window: {spec!r} (expected 1d/7d/2h/30m)")
    count = int(matched.group(1))
    unit = matched.group(2)
    if unit == "d":
        return timedelta(days=count)
    if unit == "h":
        return timedelta(hours=count)
    return timedelta(minutes=count)
48
+
49
+
50
def cutoff_epoch(window: str, now: datetime | None = None) -> float:
    """Return the POSIX-epoch cutoff for the window.

    `now` may be supplied for testing; if given, it must be tz-aware
    so the cutoff is computed against an absolute moment in time.
    Defaults to ``datetime.now(timezone.utc)``.
    """
    reference = datetime.now(timezone.utc) if now is None else now
    if reference.tzinfo is None:
        # Only reachable for a caller-supplied naive `now` — the default
        # above is always aware.
        raise ValueError("now must be tz-aware")
    return (reference - parse_window(window)).timestamp()
62
+
63
+
64
def recent_transcripts(
    projects_dir: Path,
    window: str,
    now: datetime | None = None,
) -> list[Path]:
    """Return main-session JSONL transcripts modified inside the window.

    Args:
        projects_dir: root scanned recursively for ``*.jsonl`` files.
        window: spec such as ``1d`` / ``7d`` / ``2h`` (see ``parse_window``).
        now: optional tz-aware reference time (testing hook).

    The ``subagents`` component filter matches the same exclusion used
    by ``bank.py:_recent_main_transcripts`` — the two callers share
    the same definition of "main session."
    """
    cutoff = cutoff_epoch(window, now)
    if not projects_dir.exists():
        return []
    out: list[Path] = []
    for p in projects_dir.rglob("*.jsonl"):
        # Test path components instead of the rendered string: the old
        # check (`"/subagents/" in str(p)`) never matched on Windows,
        # where the separator renders as "\". Equivalent on POSIX since
        # every yielded path ends in a "*.jsonl" filename, so only
        # directory components can equal "subagents".
        if "subagents" in p.parts:
            continue
        try:
            # st_mtime is POSIX seconds-since-epoch — TZ-independent,
            # matching cutoff_epoch's output.
            if p.stat().st_mtime >= cutoff:
                out.append(p)
        except OSError:
            # Permission denied / vanished mid-walk / etc — skip silently.
            continue
    return sorted(out)
89
+
90
+
91
def main() -> int:
    """CLI entry: print matching transcript paths, one per line.

    Returns 0 on success, 2 on usage error or a bad window spec.
    """
    if len(sys.argv) != 3:
        print(
            "usage: insights_window.py <projects_dir> <window>",
            file=sys.stderr,
        )
        return 2
    root, spec = sys.argv[1], sys.argv[2]
    try:
        matches = recent_transcripts(Path(root), spec)
    except ValueError as exc:
        # Bad window spec — surface parse_window's message verbatim.
        print(str(exc), file=sys.stderr)
        return 2
    for match in matches:
        print(match)
    return 0
108
+
109
+
110
+ if __name__ == "__main__":
111
+ sys.exit(main())
@@ -0,0 +1,154 @@
1
+ """Atomic, locked I/O for celebration marker files.
2
+
3
+ Single source of truth for marker writes. Used by `merge.py` (graduation,
4
+ regression, streak markers) and `stats.py` (levelup marker). The
5
+ UserPromptSubmit hook's `_read_and_consume()` takes the SAME sidecar
6
+ flock (`<path>.lock`), so reader/writer interleaves are serialized:
7
+
8
+ • A reader holding the lock won't see a half-written marker.
9
+ • A writer holding the lock can't be clobbered by a reader's
10
+ atomic-replace landing after a stale read.
11
+
12
+ Why this module exists: the v0.2.0 release introduced `consumed_by`
13
+ tracking on the read side but left writers unlocked, so a stale-snapshot
14
+ reader could overwrite a freshly-appended writer payload and silently
15
+ drop an event. This module closes that gap.
16
+
17
+ Helpers never raise — celebration markers are UX, not correctness, and a
18
+ write failure must never break `/coach-insights` or the statusline.
19
+ """
20
+ from __future__ import annotations
21
+
22
+ import fcntl
23
+ import json
24
+ import os
25
+ import tempfile
26
+ from datetime import datetime
27
+ from pathlib import Path
28
+
29
+
30
def _lock_path_for(path: Path) -> Path:
    """Sidecar lock file for *path*: same directory, ``.lock`` appended to the name."""
    return path.with_name(path.name + ".lock")
32
+
33
+
34
def _atomic_write_under_lock(path: Path, payload: dict) -> None:
    """tempfile + os.replace inside the caller's flock context.

    Caller MUST already hold the sidecar lock. This helper is intentionally
    private — public surface goes through `atomic_marker_rmw_append` or
    `atomic_marker_replace` so the lock-acquisition is not optional.
    """
    # Stage in the destination directory so os.replace stays on one
    # filesystem and is therefore atomic.
    fd, staging = tempfile.mkstemp(
        prefix="." + path.name + ".",
        suffix=".tmp",
        dir=str(path.parent),
    )
    try:
        with os.fdopen(fd, "w") as handle:
            handle.write(json.dumps(payload))
            handle.flush()
            # Force bytes to disk before the rename makes them visible.
            os.fsync(handle.fileno())
        os.replace(staging, path)
    except Exception:
        # Best-effort cleanup of the orphaned temp file, then re-raise so
        # the public wrappers' catch-all can decide what to do.
        try:
            os.unlink(staging)
        except Exception:
            pass
        raise
58
+
59
+
60
def atomic_marker_rmw_append(
    path: Path,
    items_key: str,
    new_items: list[dict],
    now: datetime,
) -> None:
    """Read-modify-write append for celebration markers.

    Acquires the sidecar flock, reads any existing items under that key,
    appends `new_items`, and atomic-replaces the file. Resets `consumed_by`
    to [] and `created_at` to `now` on every successful write — this is
    deliberate: if `/coach-insights` runs twice in <MARKER_TTL_HOURS, the second
    batch is genuinely new news and a session that already consumed the
    prior version still needs to see the additions. The cost is a one-time
    re-render of prior entries, which is the right trade vs missing the
    new ones entirely.

    `oldest_entry_at` is preserved across appends (never reset) so the
    catch-up framing line in `<coach-celebrate>` correctly fires when an
    older queued entry is carried into a fresh append. `created_at`
    continues to track the latest write (drives the 24h TTL on the read
    side); `oldest_entry_at` tracks the first unconsumed write. On the
    first append against a legacy marker (pre-v0.4.2, no
    `oldest_entry_at`), the existing `created_at` is promoted into
    `oldest_entry_at` so carried-over entries still drive catch-up.

    Never raises — marker writes are UX, not correctness (module contract).

    Args:
        path: marker file path (e.g. ~/.claude/coach/.pending_graduation)
        items_key: top-level key in the JSON payload (e.g. "graduations")
        new_items: list of dicts to append
        now: timestamp for the new `created_at`
    """
    if not new_items:
        return
    try:
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(_lock_path_for(path), "w") as lock_fh:
            try:
                fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX)
            except Exception:
                # flock unsupported on this fs — proceed best-effort.
                # Worst case is the legacy race; we still atomic-replace.
                pass
            existing: list[dict] = []
            oldest_entry_at: str | None = None
            if path.exists():
                try:
                    data = json.loads(path.read_text())
                    if isinstance(data, dict):
                        prior = data.get(items_key)
                        if isinstance(prior, list):
                            # Keep only well-formed (dict) entries.
                            existing = [x for x in prior if isinstance(x, dict)]
                        prior_oldest = data.get("oldest_entry_at")
                        if isinstance(prior_oldest, str):
                            oldest_entry_at = prior_oldest
                        elif isinstance(data.get("created_at"), str):
                            # Legacy marker (pre-v0.4.2): promote existing
                            # created_at so carried-over entries still
                            # drive catch-up framing.
                            oldest_entry_at = data["created_at"]
                    except Exception:
                        existing = []
            if oldest_entry_at is None:
                # Fresh marker (or unreadable prior file) — anchor at now.
                oldest_entry_at = now.isoformat()
            payload = {
                items_key: existing + new_items,
                "created_at": now.isoformat(),
                "oldest_entry_at": oldest_entry_at,
                "consumed_by": [],
            }
            _atomic_write_under_lock(path, payload)
    except Exception:
        # Swallow everything: per module contract, a failed marker write
        # must never break the caller (merge.py / stats.py).
        pass
134
+
135
+
136
def atomic_marker_replace(path: Path, payload: dict) -> None:
    """Atomic full-replace for single-event markers (e.g. levelup).

    Levelup is a high-water-mark event — each new level-up replaces any
    prior payload rather than merging. The lock is still required so
    reader/writer interleave doesn't drop a fresh level-up announcement.
    Caller is responsible for setting `created_at` and `consumed_by` on
    the payload.
    """
    try:
        path.parent.mkdir(parents=True, exist_ok=True)
        lock_fh = open(_lock_path_for(path), "w")
        with lock_fh:
            try:
                fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX)
            except Exception:
                # flock unsupported on this fs — best-effort replace.
                pass
            _atomic_write_under_lock(path, payload)
    except Exception:
        # Never raise: marker writes are UX, not correctness.
        pass