@rm0nroe/coach-claw 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +311 -0
  3. package/coach/README.md +99 -0
  4. package/coach/bin/aggregate_facets.py +274 -0
  5. package/coach/bin/analyze.py +678 -0
  6. package/coach/bin/bank.py +247 -0
  7. package/coach/bin/banner_themes.py +645 -0
  8. package/coach/bin/coach_paths.py +33 -0
  9. package/coach/bin/coexistence_check.py +129 -0
  10. package/coach/bin/configure.py +245 -0
  11. package/coach/bin/cron_check.py +81 -0
  12. package/coach/bin/default_statusline.py +135 -0
  13. package/coach/bin/doctor.py +663 -0
  14. package/coach/bin/insights-llm.sh +264 -0
  15. package/coach/bin/insights.sh +163 -0
  16. package/coach/bin/insights_window.py +111 -0
  17. package/coach/bin/marker_io.py +154 -0
  18. package/coach/bin/merge.py +671 -0
  19. package/coach/bin/redact.py +86 -0
  20. package/coach/bin/render_env.py +148 -0
  21. package/coach/bin/reward_hints.py +87 -0
  22. package/coach/bin/run-insights.sh +20 -0
  23. package/coach/bin/run_with_lock.py +85 -0
  24. package/coach/bin/scoring.py +260 -0
  25. package/coach/bin/skill_inventory.py +215 -0
  26. package/coach/bin/stats.py +459 -0
  27. package/coach/bin/status.py +293 -0
  28. package/coach/bin/statusline_self_patch.py +205 -0
  29. package/coach/bin/statusline_variants.py +146 -0
  30. package/coach/bin/statusline_wrap.py +244 -0
  31. package/coach/bin/statusline_wrap_action.py +460 -0
  32. package/coach/bin/switch_to_plugin.py +256 -0
  33. package/coach/bin/themes.py +256 -0
  34. package/coach/bin/user_config.py +176 -0
  35. package/coach/bin/xp_accounting.py +98 -0
  36. package/coach/changelog.md +4 -0
  37. package/coach/default-statusline-command.sh +19 -0
  38. package/coach/default-statusline-wrap-command.sh +15 -0
  39. package/coach/profile.yaml +37 -0
  40. package/coach/tests/conftest.py +13 -0
  41. package/coach/tests/test_aggregate_facets.py +379 -0
  42. package/coach/tests/test_analyze_aggregate.py +153 -0
  43. package/coach/tests/test_analyze_redaction.py +105 -0
  44. package/coach/tests/test_analyze_strengths.py +165 -0
  45. package/coach/tests/test_bank_atomic_write.py +61 -0
  46. package/coach/tests/test_bank_concurrency.py +126 -0
  47. package/coach/tests/test_banner_themes.py +981 -0
  48. package/coach/tests/test_celebrate_dedup.py +409 -0
  49. package/coach/tests/test_coach_paths.py +50 -0
  50. package/coach/tests/test_coexistence_check.py +128 -0
  51. package/coach/tests/test_configure.py +258 -0
  52. package/coach/tests/test_cron_check.py +118 -0
  53. package/coach/tests/test_cron_nudge_hook.py +134 -0
  54. package/coach/tests/test_detection_parity.py +105 -0
  55. package/coach/tests/test_doctor.py +595 -0
  56. package/coach/tests/test_hook_bespoke_dispatch.py +288 -0
  57. package/coach/tests/test_hook_module_resolution.py +116 -0
  58. package/coach/tests/test_hook_relevance.py +996 -0
  59. package/coach/tests/test_hook_render_env.py +364 -0
  60. package/coach/tests/test_hook_session_id_guard.py +160 -0
  61. package/coach/tests/test_insights_llm.py +759 -0
  62. package/coach/tests/test_insights_llm_venv_path.py +109 -0
  63. package/coach/tests/test_insights_window.py +237 -0
  64. package/coach/tests/test_install.py +1150 -0
  65. package/coach/tests/test_install_pyyaml_fallback.py +142 -0
  66. package/coach/tests/test_marker_consumption.py +167 -0
  67. package/coach/tests/test_marker_writer_locking.py +305 -0
  68. package/coach/tests/test_merge.py +413 -0
  69. package/coach/tests/test_no_broken_mktemp.py +90 -0
  70. package/coach/tests/test_render_env.py +137 -0
  71. package/coach/tests/test_render_env_glyphs.py +119 -0
  72. package/coach/tests/test_reward_hints.py +59 -0
  73. package/coach/tests/test_scoring.py +147 -0
  74. package/coach/tests/test_session_start_weekly_trigger.py +92 -0
  75. package/coach/tests/test_skill_inventory.py +368 -0
  76. package/coach/tests/test_stats_hybrid.py +142 -0
  77. package/coach/tests/test_status_accounting.py +41 -0
  78. package/coach/tests/test_statusline_failsafe.py +70 -0
  79. package/coach/tests/test_statusline_self_patch.py +261 -0
  80. package/coach/tests/test_statusline_variants.py +110 -0
  81. package/coach/tests/test_statusline_wrap.py +196 -0
  82. package/coach/tests/test_statusline_wrap_action.py +408 -0
  83. package/coach/tests/test_switch_to_plugin.py +360 -0
  84. package/coach/tests/test_themes.py +104 -0
  85. package/coach/tests/test_user_config.py +160 -0
  86. package/coach/tests/test_wrap_announce_hook.py +130 -0
  87. package/coach/tests/test_xp_accounting.py +55 -0
  88. package/hooks/coach-session-start.py +536 -0
  89. package/hooks/coach-user-prompt.py +2288 -0
  90. package/install-launchd.sh +102 -0
  91. package/install.sh +597 -0
  92. package/launchd/com.local.claude-coach.plist.template +34 -0
  93. package/launchd/run-insights.sh +20 -0
  94. package/npm/coach-claw.js +259 -0
  95. package/package.json +52 -0
  96. package/requirements.txt +11 -0
  97. package/settings-snippet.json +31 -0
  98. package/skills/coach/SKILL.md +107 -0
  99. package/skills/coach-insights/SKILL.md +78 -0
  100. package/skills/config/SKILL.md +149 -0
@@ -0,0 +1,671 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Profile merge logic — the deterministic core of autonomous learning.
4
+
5
+ Takes the current `profile.yaml` and a JSON list of pattern detections from
6
+ this `/coach-insights` run, applies the safeguards the debate converged on:
7
+
8
+ - confidence math (+0.15 on recurrence, -0.05/day decay since last seen)
9
+ - 2-of-3 run debounce (candidate → probationary)
10
+ - 7-day probationary window (probationary → active)
11
+ - absence-based graduation (active entry missing 5 runs in a row → retire)
12
+ - low-confidence archive (confidence decay below floor → neutral archive)
13
+ - bounded cap (max 10 active; lowest confidence×priority evicted)
14
+ - atomic write (tempfile + os.replace) under flock
15
+
16
+ Inputs:
17
+ --profile <path> path to profile.yaml (will be mutated atomically)
18
+ --changelog <path> path to changelog.md (appended to)
19
+ --lock <path> flock path
20
+ --detections <path> JSON file: [{"id","name","nudge","examples",...}]
21
+ --run-id <str> stable identifier for this /coach-insights run (used in
22
+ per-entry run_appearances history and changelog)
23
+
24
+ Writes:
25
+ - profile.yaml (atomic replace)
26
+ - one new line in changelog.md describing adds/promotions/retirements
27
+ - exits 0 on success, prints changelog line to stdout for the caller
28
+ - non-zero exit on failure (/coach-insights skill then aborts, leaves profile intact)
29
+
30
+ This script does NOT call any LLM, does NOT read transcripts, does NOT judge
31
+ whether a detection is valid. All of that is the caller's responsibility.
32
+ The caller (the /coach-insights skill, running with a model in the loop) produces
33
+ the detections JSON; this script applies math and I/O.
34
+ """
35
+ from __future__ import annotations
36
+
37
+ import argparse
38
+ import fcntl
39
+ import json
40
+ import os
41
+ import sys
42
+ import tempfile
43
+ from datetime import datetime, timedelta, timezone
44
+ from pathlib import Path
45
+
46
+ import yaml
47
+
48
+ # Shared heuristic for annotating promoted patterns with a reward_hint.
49
+ # reward_hints.py lives in this same directory.
50
+ sys.path.insert(0, str(Path(__file__).resolve().parent))
51
+ from marker_io import atomic_marker_rmw_append # noqa: E402
52
+ from reward_hints import infer_reward_hint # noqa: E402
53
+ from xp_accounting import add_milestone_xp, normalize_profile_xp # noqa: E402
54
+
55
+
56
# --- Tunables (the "config" is the constants in this file + the hook) -------
# Confidence is a 0..1 score per entry; tier transitions and retirement all
# key off these thresholds. Changing any of them changes merge() behavior.
CONFIDENCE_ON_NEW = 0.20         # starting confidence for a brand-new detection
CONFIDENCE_BOOST = 0.15          # added when detected again in a run
CONFIDENCE_DECAY_PER_DAY = 0.05  # subtracted per day since last_seen_in_run
RETIRE_BELOW = 0.30              # active/probationary drops below → retire (neutral archive, no XP)
GC_CANDIDATE_AFTER_DAYS = 14     # candidates that never debounce → gc'd
DEBOUNCE_THRESHOLD = 2           # "2 of last 3 runs" promotes candidate
DEBOUNCE_WINDOW = 3              # history window size
PROBATIONARY_DAYS = 7            # days before probationary → active
RETIRE_AFTER_ABSENT_RUNS = 5     # active missing this many consecutive runs → retire
POSITIVE_GRADUATION_RUNS = 5     # positive entry detected this many runs in a row → master
MAX_ACTIVE = 10                  # hard cap on active tier
# ---------------------------------------------------------------------------
69
+
70
+
71
+ def _now() -> datetime:
72
+ return datetime.now(timezone.utc)
73
+
74
+
75
+ def _iso_date(dt: datetime) -> str:
76
+ return dt.date().isoformat()
77
+
78
+
79
+ def _iso_dt(dt: datetime) -> str:
80
+ return dt.isoformat()
81
+
82
+
83
+ def _parse(s):
84
+ if not s:
85
+ return None
86
+ if isinstance(s, datetime):
87
+ return s if s.tzinfo else s.replace(tzinfo=timezone.utc)
88
+ try:
89
+ ss = str(s)
90
+ if len(ss) == 10:
91
+ return datetime.fromisoformat(ss).replace(tzinfo=timezone.utc)
92
+ return datetime.fromisoformat(ss.replace("Z", "+00:00"))
93
+ except Exception:
94
+ return None
95
+
96
+
97
def load_profile(path: Path) -> dict:
    """Read ``profile.yaml``, returning a skeleton profile when absent.

    Guarantees the keys merge() relies on (``schema_version``,
    ``entries``, ``recent_runs``) exist even when the on-disk YAML is
    empty or partial.
    """
    if not path.exists():
        return {"schema_version": 1, "updated": None, "entries": [], "recent_runs": []}
    with path.open("r") as fh:
        loaded = yaml.safe_load(fh) or {}
    for key, default in (("schema_version", 1), ("entries", []), ("recent_runs", [])):
        loaded.setdefault(key, default)
    return loaded
106
+
107
+
108
def merge_skills_by_project(existing: dict, delta: dict) -> dict:
    """Fold a per-project skill-invocation delta into the accumulator.

    Both arguments are ``{project: {skill_id: count}}``-shaped. Counts
    are summed and project keys are unioned. A brand-new dict is
    returned — *existing* is never mutated. Non-numeric counts and
    non-dict project buckets are skipped so stray/garbled state cannot
    break the cron path.
    """
    merged: dict[str, dict[str, int]] = {}
    for project, counts in (existing or {}).items():
        if isinstance(counts, dict):
            merged[str(project)] = {
                str(skill): int(n)
                for skill, n in counts.items()
                if isinstance(n, (int, float))
            }
    for project, counts in (delta or {}).items():
        if not isinstance(counts, dict):
            continue
        target = merged.setdefault(str(project), {})
        for skill, n in counts.items():
            if isinstance(n, (int, float)):
                key = str(skill)
                target[key] = target.get(key, 0) + int(n)
    return merged
130
+
131
+
132
def atomic_write_yaml(path: Path, data: dict) -> None:
    """Durably replace *path* with the YAML serialization of *data*.

    The dump goes to a sibling tempfile (same directory, so the final
    os.replace stays on one filesystem), is flushed and fsynced, and is
    then atomically renamed over the target. Readers therefore only
    ever see a complete old or complete new profile, never a partial
    write.
    """
    tmp = tempfile.NamedTemporaryFile(
        "w", delete=False, dir=path.parent, prefix=".profile.", suffix=".tmp"
    )
    try:
        yaml.safe_dump(data, tmp, sort_keys=False, default_flow_style=False, allow_unicode=True)
        tmp.flush()
        # Force the bytes to disk before the rename so a crash between
        # replace and writeback can't leave an empty/truncated profile.
        os.fsync(tmp.fileno())
    finally:
        tmp.close()
    try:
        os.replace(tmp.name, path)
    except Exception:
        # os.replace can fail on cross-device renames or filesystem
        # quirks (rare — same dir + same fs is the design). Don't leave
        # an orphan .profile.*.tmp behind. Matches the cleanup pattern
        # in marker_io._atomic_write_under_lock and the hook's
        # _atomic_write_text.
        try:
            os.unlink(tmp.name)
        except Exception:
            pass
        raise
155
+
156
+
157
# Marker files consumed by the UserPromptSubmit hook. merge() appends to
# them so the next prompt can celebrate what changed this run. NOTE:
# main() re-points all three to sit next to --profile, so these defaults
# only apply to direct callers of merge().
GRADUATION_MARKER = Path.home() / ".claude" / "coach" / ".pending_graduation"
STREAK_REWARD_MARKER = Path.home() / ".claude" / "coach" / ".pending_streak_rewards"

# Mid-streak XP schedule: streak N → +xp. 5/5 is graduation (+5) and is
# handled by the graduation path, not here, so skip 5. Applies to BOTH
# directions — weaknesses tick off clean_streak_runs (absence), strengths
# tick off positive_run_streak (presence).
STREAK_XP_SCHEDULE = {1: 1, 2: 1, 3: 1, 4: 2}
REGRESSION_MARKER = Path.home() / ".claude" / "coach" / ".pending_regression"
166
+
167
+
168
def _append_graduation_marker(new_graduations: list[dict], now: datetime) -> None:
    """Append graduated-this-run entries to the marker the UserPromptSubmit
    hook reads. Locked + atomic via marker_io so a stale-snapshot reader
    can't clobber the new entries with its own atomic-replace.

    ``new_graduations`` is the slice of ``profile["graduated"]`` added
    during this merge; ``now`` timestamps the marker payload.
    """
    atomic_marker_rmw_append(GRADUATION_MARKER, "graduations", new_graduations, now)
173
+
174
+
175
def _append_streak_reward_marker(new_rewards: list[dict], now: datetime) -> None:
    """Mid-streak XP bumps. Written at /coach-insights time (one per pattern that
    ticked from streak N → N+1 for N in 0..3). Graduation (streak 4→5) is
    owned by the graduation marker, not this one.

    Uses the same locked read-modify-write append as the other markers.
    """
    atomic_marker_rmw_append(STREAK_REWARD_MARKER, "rewards", new_rewards, now)
180
+
181
+
182
def _append_regression_marker(new_regressions: list[dict], now: datetime) -> None:
    """Append regressed-this-run entries to the marker. Same locked
    read-modify-write semantics as the graduation marker, so concurrent
    writers can't drop each other's entries.
    """
    atomic_marker_rmw_append(REGRESSION_MARKER, "regressions", new_regressions, now)
186
+
187
+
188
def merge(profile: dict, detections: list[dict], run_id: str, now: datetime) -> list[str]:
    """Mutate `profile` in place. Return a list of changelog fragments.

    Applies the full lifecycle pass for one /coach-insights run:
    regression revocation, detection upsert + confidence boost,
    positive-streak graduation, decay/GC/retirement for absent entries,
    probationary→active promotion, and the MAX_ACTIVE cap.

    Args:
        profile: parsed profile.yaml dict; `entries`, `recent_runs`,
            `graduated`, `archived` and XP fields are updated in place.
        detections: detection dicts from this run (non-dict or id-less
            items are silently ignored).
        run_id: stable identifier for this run, recorded in per-entry
            `source_runs` and the profile-level `recent_runs` window.
        now: timezone-aware timestamp — injected for testability.

    Side effects: may append to the graduation / regression /
    streak-reward marker files via the `_append_*_marker` helpers.
    """
    normalize_profile_xp(profile)
    entries: list[dict] = profile.get("entries", [])
    recent_runs: list[str] = profile.get("recent_runs", [])
    recent_runs = (recent_runs + [run_id])[-DEBOUNCE_WINDOW:]
    profile["recent_runs"] = recent_runs

    by_id = {e["id"]: e for e in entries if isinstance(e, dict) and "id" in e}
    detected_ids = {d["id"] for d in detections if isinstance(d, dict) and "id" in d}

    # One-shot back-fill: pre-existing entries written before reward_hint
    # existed get annotated on first merge. Idempotent; explicit hand-edits
    # are preserved because infer_reward_hint only fires when the field is
    # missing/invalid.
    for e in entries:
        if not isinstance(e, dict):
            continue
        if not isinstance(e.get("reward_hint"), dict):
            e["reward_hint"] = infer_reward_hint(e)

    log_fragments: list[str] = []
    regressions_this_run: list[dict] = []
    streak_rewards_this_run: list[dict] = []
    # Mutable single-cell accumulator (list so the inner scope can write to
    # it without `nonlocal`). Holds total mid-streak XP to bank this run.
    _streak_xp_earned_this_run = [0]

    # 0. Regression check: any detection whose id matches a currently-graduated
    #    NEGATIVE pattern means the user backslid on a weakness they had
    #    already retired. Revoke the graduation so the +5 lifetime XP is
    #    removed and re-add the pattern to entries as probationary — they
    #    have to re-earn mastery the same way they earned it the first time.
    #    (Positive-graduation revocation on sustained absence is a separate,
    #    more complex detection path — not implemented here yet.)
    graduated_profile: list[dict] = profile.get("graduated", []) or []
    graduated_by_id = {
        g.get("id"): g
        for g in graduated_profile
        if isinstance(g, dict) and g.get("id")
    }
    for did in list(detected_ids):
        g_entry = graduated_by_id.get(did)
        if not g_entry:
            continue
        if g_entry.get("direction", "negative") != "negative":
            continue  # positive graduations are not revoked via detection presence
        # Revoke: remove from graduated, re-add to entries as probationary.
        graduated_profile.remove(g_entry)
        graduated_by_id.pop(did, None)
        re_entry = {
            "id": did,
            "name": g_entry.get("name", did),
            "tier": "probationary",
            "direction": "negative",
            "confidence": 0.40,  # moderate — we already know this pattern is real
            "priority": 3,
            "nudge": "",  # filled in by the detection processing loop below
            "examples": [],
            "first_seen": _iso_date(now),
            "last_seen_in_run": _iso_date(now),
            "last_fired": None,
            "promoted_at": _iso_date(now),
            "source_runs": [],  # will be bumped to include run_id below
            "source_session_ids": [],
            "total_occurrences": 0,
            "clean_streak_runs": 0,
            "positive_run_streak": 0,
            # reward_hint carried over from the graduated record if present;
            # otherwise inferred below once nudge is populated.
            "reward_hint": g_entry.get("reward_hint") or None,
        }
        entries.append(re_entry)
        by_id[did] = re_entry
        log_fragments.append(f"⚠️{did}(regressed:re-detected-after-graduation)")
        regressions_this_run.append({
            "id": did,
            "name": g_entry.get("name", did),
            "direction": "negative",
            "originally_graduated_at": g_entry.get("graduated_at"),
            "originally_graduated_reason": g_entry.get("graduated_reason"),
        })
    profile["graduated"] = graduated_profile

    # 1. Update or insert entries from detections
    for det in detections:
        if not isinstance(det, dict) or "id" not in det:
            continue
        eid = det["id"]
        entry = by_id.get(eid)
        if entry is None:
            # First sighting: enters the lifecycle at the candidate tier.
            entry = {
                "id": eid,
                "name": det.get("name", eid),
                "tier": "candidate",
                "direction": det.get("direction", "negative"),
                "confidence": CONFIDENCE_ON_NEW,
                "priority": int(det.get("priority", 3)),
                "nudge": det.get("nudge", "").strip(),
                "examples": det.get("examples", [])[:3],
                "first_seen": _iso_date(now),
                "last_seen_in_run": _iso_date(now),
                "last_fired": None,
                "promoted_at": None,
                "source_runs": [run_id],
                "source_session_ids": det.get("source_session_ids", [])[:5],
                "total_occurrences": 1,
                "clean_streak_runs": 0,  # for negative graduation (absence)
                "positive_run_streak": 1,  # for positive graduation (presence)
            }
            # Inferred reward hint — id + nudge are populated, so we can
            # annotate right away. Explicit hints from detection dict win.
            entry["reward_hint"] = (
                det.get("reward_hint") if isinstance(det.get("reward_hint"), dict)
                else infer_reward_hint(entry)
            )
            entries.append(entry)
            by_id[eid] = entry
            log_fragments.append(f"+{eid}(candidate)")
            continue

        # Existing entry re-detected
        old_tier = entry.get("tier", "candidate")
        # Preserve direction; default to 'negative' for back-compat with
        # entries written by earlier merge versions.
        entry.setdefault("direction", det.get("direction", "negative"))
        entry["confidence"] = min(1.0, float(entry.get("confidence", 0)) + CONFIDENCE_BOOST)
        entry["last_seen_in_run"] = _iso_date(now)
        entry["total_occurrences"] = int(entry.get("total_occurrences", 0)) + 1
        entry["clean_streak_runs"] = 0
        old_positive_streak = int(entry.get("positive_run_streak", 0))
        new_positive_streak = old_positive_streak + 1
        entry["positive_run_streak"] = new_positive_streak

        # Mid-streak strength reward — mirror of the weakness-path reward in
        # the absence block below. Fires only for positive patterns on ticks
        # 1-4; streak 5 is mastery graduation, rewarded separately (+5 via
        # the positive-graduation block).
        if (
            entry.get("direction") == "positive"
            and new_positive_streak in STREAK_XP_SCHEDULE
            and old_positive_streak < new_positive_streak
        ):
            _strength_xp = STREAK_XP_SCHEDULE[new_positive_streak]
            streak_rewards_this_run.append({
                "id": entry["id"],
                "name": entry.get("name", entry["id"]),
                "streak": new_positive_streak,
                "target": 5,
                "xp_awarded": _strength_xp,
                "direction": "positive",
            })
            _streak_xp_earned_this_run[0] += _strength_xp
        entry["nudge"] = det.get("nudge", entry.get("nudge", "")).strip() or entry.get("nudge", "")
        # Back-fill reward_hint if missing (covers old entries from before this
        # field existed, plus regression re-entries that get their nudge set
        # here for the first time). Explicit hand-set hints are preserved.
        if not isinstance(entry.get("reward_hint"), dict):
            entry["reward_hint"] = (
                det.get("reward_hint") if isinstance(det.get("reward_hint"), dict)
                else infer_reward_hint(entry)
            )
        # Merge examples, dedupe, keep most recent 3
        new_examples = det.get("examples", []) or []
        if new_examples:
            seen: set[str] = set()
            merged: list[str] = []
            for ex in list(new_examples) + list(entry.get("examples", []) or []):
                k = str(ex).strip()
                if k and k not in seen:
                    seen.add(k)
                    merged.append(k)
            entry["examples"] = merged[:3]
        src_runs = entry.get("source_runs", [])
        src_runs = (src_runs + [run_id])[-DEBOUNCE_WINDOW:]
        entry["source_runs"] = src_runs
        sids_combined = list(entry.get("source_session_ids", []) or []) + list(det.get("source_session_ids", []) or [])
        seen_sids: set[str] = set()
        sids_dedup: list[str] = []
        for s in sids_combined:
            k = str(s)
            if k and k not in seen_sids:
                seen_sids.add(k)
                sids_dedup.append(k)
        entry["source_session_ids"] = sids_dedup[-10:]

        # Candidate → probationary (2-of-3 debounce)
        if old_tier == "candidate":
            hits_in_window = len(set(src_runs) & set(recent_runs))
            if hits_in_window >= DEBOUNCE_THRESHOLD:
                entry["tier"] = "probationary"
                entry["promoted_at"] = _iso_date(now)
                log_fragments.append(f"↑{eid}(probationary)")

    graduated: list[dict] = profile.get("graduated", []) or []
    graduated_start_len = len(graduated)  # for capturing graduations-this-run at end
    archived: list[dict] = profile.get("archived", []) or []

    # 1b. Check for POSITIVE graduations — positive entries that hit the
    #     required consecutive-presence streak. Scan detected-this-run entries
    #     (they just got their streak bumped in the detection loop above).
    for entry in list(entries):
        if entry.get("direction") != "positive":
            continue
        if entry["id"] not in detected_ids:
            continue
        tier = entry.get("tier", "candidate")
        if tier == "candidate":
            continue  # must debounce through candidate/probationary first
        streak = int(entry.get("positive_run_streak", 0))
        if streak >= POSITIVE_GRADUATION_RUNS:
            entries.remove(entry)
            graduated.append({
                "id": entry["id"],
                "name": entry.get("name", entry["id"]),
                "direction": "positive",
                "first_seen": entry.get("first_seen"),
                "last_seen_in_run": entry.get("last_seen_in_run"),
                "graduated_at": _iso_date(now),
                "graduated_reason": f"present-{streak}-runs",
                "total_occurrences": int(entry.get("total_occurrences", 0)),
                "final_tier": tier,
            })
            log_fragments.append(f"🎓{entry['id']}(graduated:strength-present-{streak}-runs)")

    # 2. Apply decay and absence-based retirement for entries NOT detected this run
    for entry in list(entries):
        eid = entry["id"]
        if eid in detected_ids:
            continue
        # Decay confidence by days since last_seen_in_run
        last_seen = _parse(entry.get("last_seen_in_run")) or _parse(entry.get("first_seen")) or now
        days = max(0, (now - last_seen).days)
        entry["confidence"] = max(0.0, float(entry.get("confidence", 0)) - CONFIDENCE_DECAY_PER_DAY * days)
        # Absence bumps clean-streak (good for NEGATIVE graduation) and
        # resets positive-streak (bad for POSITIVE entries — they need
        # consecutive presence).
        old_streak = int(entry.get("clean_streak_runs", 0))
        new_streak = old_streak + 1
        entry["clean_streak_runs"] = new_streak
        entry["positive_run_streak"] = 0

        # Mid-streak milestone reward. Fires only on a fresh tick (old → new
        # crossed a reward threshold), only for negative patterns (positive
        # graduation path is separate), and only for streaks 1-4. Streak 5+
        # is graduation and is rewarded via the graduation marker downstream.
        if (
            entry.get("direction", "negative") == "negative"
            and new_streak in STREAK_XP_SCHEDULE
            and old_streak < new_streak  # always true here, belt-and-braces
        ):
            _streak_xp = STREAK_XP_SCHEDULE[new_streak]
            streak_rewards_this_run.append({
                "id": entry["id"],
                "name": entry.get("name", entry["id"]),
                "streak": new_streak,
                "target": 5,
                "xp_awarded": _streak_xp,
                "direction": "negative",
            })
            _streak_xp_earned_this_run[0] += _streak_xp

        direction = entry.get("direction", "negative")
        tier = entry.get("tier", "candidate")
        if direction == "positive":
            # Positive patterns don't retire via absence or low confidence —
            # they just stop being credited. We only skip the retirement
            # logic below for them. They can still GC as candidates.
            if tier == "candidate" and days >= GC_CANDIDATE_AFTER_DAYS:
                entries.remove(entry)
                log_fragments.append(f"-{eid}(gc:never-debounced)")
            continue
        # Re-set local tier for the negative-retirement block below
        tier = entry.get("tier", "candidate")

        # Candidate GC: never debounced and old — remove quietly, not a graduation
        if tier == "candidate" and days >= GC_CANDIDATE_AFTER_DAYS:
            entries.remove(entry)
            log_fragments.append(f"-{eid}(gc:never-debounced)")
            continue

        # Absence-based retirement: missed too many consecutive runs → GRADUATE.
        # Uses clean_streak_runs (bumped above, unbounded) rather than a
        # loop over recent_runs, which was capped at DEBOUNCE_WINDOW=3 and
        # so could never reach RETIRE_AFTER_ABSENT_RUNS=5.
        absent_streak = int(entry.get("clean_streak_runs", 0))
        if tier in ("active", "probationary") and absent_streak >= RETIRE_AFTER_ABSENT_RUNS:
            entries.remove(entry)
            graduated.append({
                "id": eid,
                "name": entry.get("name", eid),
                "direction": "negative",
                "first_seen": entry.get("first_seen"),
                "last_seen_in_run": entry.get("last_seen_in_run"),
                "graduated_at": _iso_date(now),
                "graduated_reason": f"absent-{absent_streak}-runs",
                "total_occurrences": int(entry.get("total_occurrences", 0)),
                "final_tier": tier,
            })
            log_fragments.append(f"🎓{eid}(graduated:absent-{absent_streak}-runs)")
            continue

        # Confidence-floor retirement is a neutral archive, not a graduation.
        # The pattern has decayed below trust threshold; that is weaker
        # evidence than a demonstrated 5-run clean streak, so it should not
        # award graduation XP or fire a graduation celebration.
        if entry["confidence"] < RETIRE_BELOW and tier != "candidate":
            entries.remove(entry)
            archived.append({
                "id": eid,
                "name": entry.get("name", eid),
                "direction": "negative",
                "first_seen": entry.get("first_seen"),
                "last_seen_in_run": entry.get("last_seen_in_run"),
                "archived_at": _iso_date(now),
                "archive_reason": "low-confidence",
                "total_occurrences": int(entry.get("total_occurrences", 0)),
                "final_tier": tier,
                "final_confidence": float(entry.get("confidence", 0) or 0),
            })
            log_fragments.append(f"-{eid}(archived:low-confidence)")
            continue

    profile["graduated"] = graduated
    profile["archived"] = archived
    normalize_profile_xp(profile)

    # 3. Promote probationary → active after PROBATIONARY_DAYS
    for entry in entries:
        if entry.get("tier") != "probationary":
            continue
        promoted = _parse(entry.get("promoted_at"))
        if promoted and (now - promoted).days >= PROBATIONARY_DAYS:
            entry["tier"] = "active"
            entry["promoted_at"] = _iso_date(now)
            log_fragments.append(f"↑{entry['id']}(active)")

    # 4. Cap enforcement — keep at most MAX_ACTIVE active entries
    active = [e for e in entries if e.get("tier") == "active"]
    if len(active) > MAX_ACTIVE:
        # Lowest confidence×priority goes first.
        active.sort(key=lambda e: float(e.get("confidence", 0)) * int(e.get("priority", 1)))
        to_evict = active[: len(active) - MAX_ACTIVE]
        for victim in to_evict:
            entries.remove(victim)
            log_fragments.append(f"-{victim['id']}(evicted:cap)")

    profile["updated"] = _iso_date(now)
    profile["entries"] = entries

    # Capture graduations from this run and write them to the
    # UserPromptSubmit marker so they can be celebrated.
    new_graduations = graduated[graduated_start_len:]
    if new_graduations:
        _append_graduation_marker(new_graduations, now)

    # Capture regressions from this run and write the regression marker.
    if regressions_this_run:
        _append_regression_marker(regressions_this_run, now)

    # Mid-streak milestone rewards: write marker + record the XP atomically
    # onto profile. Never fires for graduations (those earn +5 via
    # graduation_xp derived from current profile.graduated).
    if streak_rewards_this_run:
        _append_streak_reward_marker(streak_rewards_this_run, now)
        earned = int(_streak_xp_earned_this_run[0])
        if earned > 0:
            add_milestone_xp(profile, earned)
            log_fragments.append(f"+{earned}xp(mid-streak:{len(streak_rewards_this_run)})")
    else:
        normalize_profile_xp(profile)

    return log_fragments
560
+
561
+
562
def main() -> int:
    """CLI entry point: parse args, take the flock, merge, write atomically.

    Returns a process exit status: 0 on success (changelog line printed
    to stdout), 2 for a malformed detections file, 3 when the flock
    cannot be taken. Any other exception propagates, leaving the
    profile untouched (the caller treats non-zero as abort).
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--profile", required=True, type=Path)
    ap.add_argument("--changelog", required=True, type=Path)
    ap.add_argument("--lock", required=True, type=Path)
    ap.add_argument("--detections", required=True, type=Path)
    ap.add_argument("--run-id", required=True)
    ap.add_argument("--skill-hints", type=Path, default=None,
                    help="Optional JSON list; replaces profile.skill_hints snapshot. "
                         "Not subject to debounce/tier/decay — it's a static "
                         "installed-but-unused reference each run.")
    ap.add_argument("--skills-by-project-delta", type=Path, default=None,
                    help="Optional JSON {project: {skill_id: count}} delta "
                         "from this /coach-insights window. Accumulated into "
                         "profile.skills_by_project (the rolling per-project "
                         "invocation history that drives skill_inventory's "
                         "scope inference).")
    args = ap.parse_args()

    # Resolve marker output paths from the configured profile location
    # rather than the hardcoded ~/.claude/coach/ defaults at module
    # scope. Sandboxed runs (e.g. COACH_DIR_OVERRIDE in test_insights_llm.py)
    # pass --profile <tmp>/profile.yaml and need the .pending_*
    # markers to land under <tmp>/ too — pre-v0.5.1 the markers
    # leaked into the user's live install regardless of --profile.
    #
    # The module-level constants stay as defaults for direct callers
    # of merge() (e.g. the _marker_cleanup monkeypatch fixture in
    # test_merge.py); reassigning the globals here only affects the
    # CLI entry point.
    global GRADUATION_MARKER, STREAK_REWARD_MARKER, REGRESSION_MARKER
    coach_dir = args.profile.parent
    GRADUATION_MARKER = coach_dir / ".pending_graduation"
    STREAK_REWARD_MARKER = coach_dir / ".pending_streak_rewards"
    REGRESSION_MARKER = coach_dir / ".pending_regression"

    detections = json.loads(args.detections.read_text() or "[]")
    if not isinstance(detections, list):
        print("detections file must be a JSON array", file=sys.stderr)
        return 2

    # Optional inputs are best-effort: any parse failure degrades to
    # "not provided" rather than aborting the whole merge.
    skill_hints = None
    if args.skill_hints is not None:
        try:
            skill_hints = json.loads(args.skill_hints.read_text() or "[]")
            if not isinstance(skill_hints, list):
                skill_hints = None
        except Exception:
            skill_hints = None

    sbp_delta: dict = {}
    if args.skills_by_project_delta is not None:
        try:
            raw = json.loads(args.skills_by_project_delta.read_text() or "{}")
            if isinstance(raw, dict):
                sbp_delta = raw
        except Exception:
            sbp_delta = {}

    args.lock.parent.mkdir(parents=True, exist_ok=True)
    with args.lock.open("a+") as lockfile:
        try:
            # Exclusive lock: serializes concurrent /coach-insights runs.
            # Released automatically when the file handle closes.
            fcntl.flock(lockfile, fcntl.LOCK_EX)
        except OSError as e:
            print(f"flock failed: {e}", file=sys.stderr)
            return 3

        now = _now()
        profile = load_profile(args.profile)
        fragments = merge(profile, detections, args.run_id, now)

        if skill_hints is not None:
            # Snapshot replacement, not a merge — see --skill-hints help.
            prev = profile.get("skill_hints", []) or []
            prev_ids = {h.get("id") for h in prev if isinstance(h, dict)}
            new_ids = {h.get("id") for h in skill_hints if isinstance(h, dict)}
            added = new_ids - prev_ids
            dropped = prev_ids - new_ids
            profile["skill_hints"] = skill_hints
            if added:
                fragments.append(f"+hints:{len(added)}")
            if dropped:
                fragments.append(f"-hints:{len(dropped)}")

        if sbp_delta:
            existing_sbp = profile.get("skills_by_project") or {}
            updated_sbp = merge_skills_by_project(existing_sbp, sbp_delta)
            profile["skills_by_project"] = updated_sbp
            # Count (project, skill) pairs seen for the first time this run.
            new_pairs = sum(
                1 for proj, skills in sbp_delta.items()
                if isinstance(skills, dict)
                for sid in skills
                if sid not in (existing_sbp.get(proj) or {})
            )
            if new_pairs:
                fragments.append(f"+sbp:{new_pairs}")

        atomic_write_yaml(args.profile, profile)

        changelog_line = f"- {_iso_dt(now)}: run={args.run_id} " + (
            " ".join(fragments) if fragments else "(no changes)"
        )
        with args.changelog.open("a") as cl:
            cl.write(changelog_line + "\n")

        print(changelog_line)
        return 0
668
+
669
+
670
if __name__ == "__main__":
    # CLI entry: propagate main()'s integer status to the shell.
    raise SystemExit(main())