threadkeeper 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. threadkeeper/__init__.py +8 -0
  2. threadkeeper/_mcp.py +6 -0
  3. threadkeeper/_setup.py +299 -0
  4. threadkeeper/adapters/__init__.py +40 -0
  5. threadkeeper/adapters/_hook_helpers.py +72 -0
  6. threadkeeper/adapters/base.py +152 -0
  7. threadkeeper/adapters/claude_code.py +178 -0
  8. threadkeeper/adapters/claude_desktop.py +128 -0
  9. threadkeeper/adapters/codex.py +259 -0
  10. threadkeeper/adapters/copilot.py +195 -0
  11. threadkeeper/adapters/gemini.py +169 -0
  12. threadkeeper/adapters/vscode.py +144 -0
  13. threadkeeper/brief.py +735 -0
  14. threadkeeper/config.py +216 -0
  15. threadkeeper/curator.py +390 -0
  16. threadkeeper/db.py +474 -0
  17. threadkeeper/embeddings.py +232 -0
  18. threadkeeper/extract_daemon.py +125 -0
  19. threadkeeper/helpers.py +101 -0
  20. threadkeeper/i18n.py +342 -0
  21. threadkeeper/identity.py +237 -0
  22. threadkeeper/ingest.py +507 -0
  23. threadkeeper/lessons.py +170 -0
  24. threadkeeper/nudges.py +257 -0
  25. threadkeeper/process_health.py +202 -0
  26. threadkeeper/review_prompts.py +207 -0
  27. threadkeeper/search_proxy.py +160 -0
  28. threadkeeper/server.py +55 -0
  29. threadkeeper/shadow_review.py +358 -0
  30. threadkeeper/skill_watcher.py +96 -0
  31. threadkeeper/spawn_budget.py +246 -0
  32. threadkeeper/tools/__init__.py +2 -0
  33. threadkeeper/tools/concepts.py +111 -0
  34. threadkeeper/tools/consolidate.py +222 -0
  35. threadkeeper/tools/core_memory.py +109 -0
  36. threadkeeper/tools/correlation.py +116 -0
  37. threadkeeper/tools/curator.py +121 -0
  38. threadkeeper/tools/dialectic.py +359 -0
  39. threadkeeper/tools/dialog.py +131 -0
  40. threadkeeper/tools/distill.py +184 -0
  41. threadkeeper/tools/extract.py +411 -0
  42. threadkeeper/tools/graph.py +183 -0
  43. threadkeeper/tools/invariants.py +177 -0
  44. threadkeeper/tools/lessons.py +110 -0
  45. threadkeeper/tools/missed_spawns.py +142 -0
  46. threadkeeper/tools/peers.py +579 -0
  47. threadkeeper/tools/pickup.py +148 -0
  48. threadkeeper/tools/probes.py +251 -0
  49. threadkeeper/tools/process_health.py +90 -0
  50. threadkeeper/tools/session.py +34 -0
  51. threadkeeper/tools/shadow_review.py +106 -0
  52. threadkeeper/tools/skills.py +856 -0
  53. threadkeeper/tools/spawn.py +871 -0
  54. threadkeeper/tools/style.py +44 -0
  55. threadkeeper/tools/threads.py +299 -0
  56. threadkeeper-0.4.0.dist-info/METADATA +351 -0
  57. threadkeeper-0.4.0.dist-info/RECORD +61 -0
  58. threadkeeper-0.4.0.dist-info/WHEEL +5 -0
  59. threadkeeper-0.4.0.dist-info/entry_points.txt +2 -0
  60. threadkeeper-0.4.0.dist-info/licenses/LICENSE +21 -0
  61. threadkeeper-0.4.0.dist-info/top_level.txt +1 -0
threadkeeper/brief.py ADDED
@@ -0,0 +1,735 @@
1
+ """Brief rendering and dialog log helper.
2
+
3
+ `render_brief` builds the multi-section ctx string returned by the
4
+ `brief()` MCP tool: ctx header, core_memory, inbox, tasks_running,
5
+ live_peers, open/idle/closed threads, style, verbatim, query-relevant
6
+ hits, weak_spots, concepts, distill_pending, extract_pending,
7
+ pickup_top, evolve_pending, and the trailing user-facing reminder.
8
+
9
+ `_append_dialog_log` writes one line per cross-session signal to the
10
+ shared dialog log tailed by open_dialog_window().
11
+ """
12
+
13
+ import re
14
+ import sqlite3
15
+ import time
16
+ from datetime import datetime, timezone
17
+ from typing import Optional
18
+
19
+ from .config import SEMANTIC_AVAILABLE, DIALOG_LOG, TASK_LOG_DIR
20
+ from .helpers import fmt_age, q
21
+ from . import identity
22
+ from .identity import _detect_self_cid, _ensure_cursor
23
+ from .embeddings import _cosine_search
24
+
25
+
26
+ # Parallelism cue regex: matches user-language signals that work
27
+ # decomposes into multiple independent units. The actual locale bundles
28
+ # (English + Russian + count-plural-noun families) live in i18n.py.
29
+ from .i18n import SPAWN_CUE_RE as _SPAWN_CUE_RE # noqa: E402
30
+
31
+
32
def render_brief(conn: sqlite3.Connection, query: str = "", k: int = 6) -> str:
    """Render the multi-section plain-text brief returned by the brief() tool.

    Args:
        conn: Open sqlite3 connection whose rows support mapping access
            (every row below is read via ``row["col"]`` — assumes a
            Row-style row factory is configured by the caller; confirm).
        query: Optional free-text query. When non-empty a query-relevant
            section is appended: semantic cosine search when
            SEMANTIC_AVAILABLE, otherwise an FTS fallback over notes_fts.
        k: Maximum number of query hits to surface.

    Returns:
        The assembled brief as one newline-joined string.

    Side effects: advances nothing in the cursor table itself, but MAY
    INSERT 'spawn_hint_shown' / 'skill_hint_shown' rows into events (and
    commit) so the next render can detect repeatedly-ignored hints; also
    calls _refresh_tasks() best-effort to reap zombie child tasks.
    """
    now = int(time.time())
    out: list[str] = []

    # ── ctx ───────────────────────────────────────────────────────────────
    last = conn.execute(
        "SELECT started_at, ended_at FROM sessions "
        "WHERE id IS NOT ? ORDER BY started_at DESC LIMIT 1",
        (identity._session_id,),
    ).fetchone()
    last_str = "first" if last is None else fmt_age(now - (last["ended_at"] or last["started_at"]))
    now_iso = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%MZ")
    sem = "on" if SEMANTIC_AVAILABLE else "off"

    # live=N: events from OTHER sessions since this session's cursor
    _ensure_cursor(conn)
    cur_row = conn.execute(
        "SELECT last_event_id FROM cursors WHERE session_id=?", (identity._session_id,)
    ).fetchone()
    cur_id = cur_row["last_event_id"] if cur_row else 0
    fresh = conn.execute(
        "SELECT COUNT(*) c FROM events WHERE id > ? AND session_id != ?",
        (cur_id, identity._session_id),
    ).fetchone()["c"]

    self_cid = _detect_self_cid()
    out.append(
        f"ctx sess={identity._session_id or '-'} last={last_str} sem={sem} "
        f"live={fresh} cid={self_cid[:8] if self_cid else '-'} now={now_iso}"
    )

    # ── core_memory ───────────────────────────────────────────────────────
    # Letta-style RAM tier: always-shown, ordered by priority DESC. Use
    # sparingly — these are "what new-claude must know" lines.
    try:
        core_rows = conn.execute(
            "SELECT key, content, priority FROM core_memory "
            "ORDER BY priority DESC, key ASC"
        ).fetchall()
    except sqlite3.OperationalError:
        # table may not exist yet on older DBs — section is simply skipped
        core_rows = []
    if core_rows:
        out.append("")
        out.append("core_memory")
        for r in core_rows:
            snip = r["content"][:120].replace("\n", " ")
            if len(r["content"]) > 120:
                snip += "…"
            out.append(f" [P{r['priority']}] {r['key']}: {q(snip)}")

    # ── inbox (unread signals to me) ──────────────────────────────────────
    if self_cid:
        unread = conn.execute(
            "SELECT id, from_cid, to_cid, kind, content, created_at FROM signals "
            "WHERE (to_cid = ? OR to_cid IS NULL) AND from_cid != ? "
            "AND read_at IS NULL ORDER BY created_at DESC LIMIT 10",
            (self_cid, self_cid),
        ).fetchall()
        if unread:
            out.append("")
            out.append(f"inbox unread={len(unread)} (call inbox() to read+mark)")
            for s in unread[:5]:
                ago = fmt_age(now - s["created_at"])
                # NULL to_cid means a broadcast signal, rendered as "*"
                scope = "*" if s["to_cid"] is None else "→me"
                snip = s["content"][:90].replace("\n", " ")
                if len(s["content"]) > 90:
                    snip += "…"
                out.append(
                    f" #{s['id']} {scope} from={s['from_cid'][:8]} "
                    f"+{s['kind']} {ago}_ago {q(snip)}"
                )

    # ── tasks_running ─────────────────────────────────────────────────────
    # Only my own spawned children that are still alive. Refresh first so
    # zombies (parent died, child orphaned and reaped) get marked ended
    # instead of lingering as "running" forever.
    if self_cid:
        try:
            from .tools.spawn import _refresh_tasks
            _refresh_tasks(conn)
        except (sqlite3.OperationalError, NameError, ImportError):
            pass  # tolerate at startup before tools.spawn is imported
        running = conn.execute(
            "SELECT id, pid, prompt, started_at, spawned_cid FROM tasks "
            "WHERE parent_cid=? AND ended_at IS NULL "
            "ORDER BY started_at DESC LIMIT 5",
            (self_cid,),
        ).fetchall()
        if running:
            out.append("")
            out.append("tasks_running")
            for t in running:
                cid = (t["spawned_cid"] or "?")[:8]
                snip = t["prompt"][:60].replace("\n", " ")
                if len(t["prompt"]) > 60:
                    snip += "…"
                out.append(
                    f" {t['id']} pid={t['pid']} cid={cid} "
                    f"age={fmt_age(now - t['started_at'])} {q(snip)}"
                )

    # ── spawn_hint ────────────────────────────────────────────────────────
    # Behavioral nudge: agents systematically under-use spawn(). They read
    # "tool exists" but never reach for it as a parallelism primitive.
    # Surface a one-line trigger when conditions suggest decomposition would
    # help (work piling up, never-spawned this conversation, or explicit
    # parallel cue in user message).
    if self_cid:
        try:
            active_n = conn.execute(
                "SELECT COUNT(*) c FROM threads WHERE state='active'"
            ).fetchone()["c"]
            idle_n = conn.execute(
                "SELECT COUNT(*) c FROM threads WHERE state='idle'"
            ).fetchone()["c"]
            running_kids = conn.execute(
                "SELECT COUNT(*) c FROM tasks "
                "WHERE parent_cid=? AND ended_at IS NULL",
                (self_cid,),
            ).fetchone()["c"]
            ever_kids = conn.execute(
                "SELECT COUNT(*) c FROM tasks WHERE parent_cid=?",
                (self_cid,),
            ).fetchone()["c"]
        except sqlite3.OperationalError:
            active_n = idle_n = running_kids = ever_kids = 0

        # most recent user message within 10 min, scanned for parallel cues
        cue_hit = None
        try:
            row = conn.execute(
                "SELECT content FROM dialog_messages "
                "WHERE session_id=? AND role='user' AND created_at > ? "
                "AND content NOT LIKE '[tool_result]%' "
                "ORDER BY created_at DESC LIMIT 1",
                (self_cid, now - 600),
            ).fetchone()
            if row:
                m = _SPAWN_CUE_RE.search(row["content"])
                if m:
                    cue_hit = m.group(0)
        except sqlite3.OperationalError:
            pass

        # Show hint only when not already parallelizing AND there's a real signal.
        show = running_kids == 0 and (active_n >= 3 or idle_n >= 3 or cue_hit)
        if show:
            # Count consecutive hint-shows since the last actual spawn() for
            # this cid. Escalates phrasing when the agent keeps ignoring.
            consecutive_ignored = 0
            try:
                last_spawn_at = conn.execute(
                    "SELECT MAX(started_at) m FROM tasks WHERE parent_cid=?",
                    (self_cid,),
                ).fetchone()["m"] or 0
                consecutive_ignored = conn.execute(
                    "SELECT COUNT(*) c FROM events "
                    "WHERE kind='spawn_hint_shown' AND target=? "
                    "AND created_at > ?",
                    (self_cid, last_spawn_at),
                ).fetchone()["c"]
            except sqlite3.OperationalError:
                pass

            parts = [
                f"active={active_n}",
                f"idle={idle_n}",
                f"children={running_kids}",
            ]
            if ever_kids == 0:
                parts.append("never_spawned=1")
            if cue_hit:
                parts.append(f"user_cue={q(cue_hit)}")
            if consecutive_ignored >= 3:
                parts.append(f"ignored={consecutive_ignored}x")
            out.append("")
            out.append("spawn_hint " + " ".join(parts))

            warn = "⚠️ " if consecutive_ignored >= 3 else ""
            if cue_hit:
                out.append(
                    f" → {warn}user signaled decomposable work. DO NOT "
                    "answer linearly — spawn(prompt, role=...) each unit "
                    "NOW, sync via inbox/wait"
                )
            elif ever_kids == 0:
                out.append(
                    f" → {warn}never spawned this convo. BEFORE answering: "
                    "does request split into ≥2 independent units? if yes → "
                    "spawn(prompt, role=...) each, don't go serial"
                )
            else:
                out.append(
                    f" → {warn}work piling up. DECOMPOSE: "
                    "spawn(prompt, role=...) for each independent unit "
                    "before answering"
                )
            if consecutive_ignored >= 3:
                out.append(
                    f" ⚠️ hint shown {consecutive_ignored}× without spawn — "
                    "reflex is FAILING. next response must spawn() or "
                    "explain why decomp doesn't apply"
                )

            # Log this show so the next render_brief can detect repeated ignore.
            try:
                conn.execute(
                    "INSERT INTO events (session_id, kind, target, summary, "
                    "created_at) VALUES (?,?,?,?,?)",
                    (identity._session_id or "", "spawn_hint_shown",
                     self_cid, "", now),
                )
                conn.commit()
            except sqlite3.OperationalError:
                pass

    # ── live_peers ────────────────────────────────────────────────────────
    # Recent activity (last 5 min) from concurrent claude conversations.
    # Identity is jsonl conversation_id. Self is marked with `*`.
    peer_rows = conn.execute(
        "SELECT session_id, role, content, created_at FROM dialog_messages "
        "WHERE created_at > ? AND session_id IS NOT NULL AND session_id != '' "
        "AND content NOT LIKE '[tool_result]%' AND content NOT LIKE '[Image%' "
        "ORDER BY created_at DESC LIMIT 200",
        (now - 300,),
    ).fetchall()
    by_sess: dict[str, dict] = {}
    for r in peer_rows:
        sid = r["session_id"]
        d = by_sess.setdefault(sid, {"last_user": None, "last_at": 0, "msgs": 0})
        d["msgs"] += 1
        if r["created_at"] > d["last_at"]:
            d["last_at"] = r["created_at"]
        # rows arrive newest-first, so the first user row is the latest one
        if r["role"] == "user" and d["last_user"] is None:
            d["last_user"] = dict(r)
    if by_sess:
        ordered = sorted(by_sess.items(), key=lambda x: x[1]["last_at"], reverse=True)
        out.append("")
        out.append("live_peers (* = you)")
        for sid, d in ordered[:6]:
            marker = "*" if sid == self_cid else " "
            u = d["last_user"]
            if u:
                snip = u["content"][:80].replace("\n", " ")
                if len(u["content"]) > 80:
                    snip += "…"
                out.append(
                    f" {marker}{sid[:8]} u={q(snip)} "
                    f"u_age={fmt_age(now - u['created_at'])} msgs={d['msgs']}"
                )
            else:
                out.append(
                    f" {marker}{sid[:8]} (no user msg) msgs={d['msgs']} "
                    f"last={fmt_age(now - d['last_at'])}_ago"
                )

    # ── open ──────────────────────────────────────────────────────────────
    open_t = conn.execute(
        "SELECT * FROM threads WHERE state='active' ORDER BY last_touched_at DESC"
    ).fetchall()
    if open_t:
        out.append("")
        out.append("open")
        for t in open_t:
            parts = [f" {t['id']}", f"q={q(t['question'])}"]
            if t["parent_id"]:
                parts.append(f"p={t['parent_id']}")
            if t["last_move"]:
                parts.append(f"last={q(t['last_move'][:90])}")
            parts.append(f"age={fmt_age(now - t['opened_at'])}")
            out.append(" ".join(parts))

    # ── idle ──────────────────────────────────────────────────────────────
    idle_t = conn.execute(
        "SELECT * FROM threads WHERE state='idle' "
        "ORDER BY last_touched_at DESC LIMIT 5"
    ).fetchall()
    if idle_t:
        out.append("")
        out.append("idle")
        for t in idle_t:
            out.append(
                f" {t['id']} q={q(t['question'])} "
                f"dorm={fmt_age(now - t['last_touched_at'])}"
            )

    # ── closed (recent) ───────────────────────────────────────────────────
    closed_t = conn.execute(
        "SELECT * FROM threads WHERE state='closed' "
        "ORDER BY last_touched_at DESC LIMIT 3"
    ).fetchall()
    if closed_t:
        out.append("")
        out.append("closed_recent")
        for t in closed_t:
            out.append(f" {t['id']} out={q((t['outcome'] or '-')[:120])}")

    # ── memory_nudge ──────────────────────────────────────────────────────
    # Counter-driven (active push, not just passive surface): when N mutating
    # events have passed in this session without a memory save, escalate from
    # soft hint to demanding ⚠️. See threadkeeper/nudges.py for thresholds.
    try:
        from .nudges import compute_memory_nudge
        mem_nudge = compute_memory_nudge(conn, identity._session_id or "")
    except (sqlite3.OperationalError, ImportError):
        mem_nudge = None
    if mem_nudge:
        out.append("")
        out.append(mem_nudge)

    # ── skill_hint ────────────────────────────────────────────────────────
    # Behavioral nudge inspired by hermes-agent's Learning loop: after a rich
    # thread closes, the lessons inside it (insights + repeated moves) should
    # be materialized as a reusable Claude skill under ~/.claude/skills/,
    # not just sit in notes. Trigger only on threads recently closed AND
    # rich enough to be worth a class-level skill — never on one-off chatter.
    #
    # Rich = ≥5 notes total AND ≥2 of those tagged 'insight' or 'move'.
    # Recently_closed = closed within last 24h.
    # Suppress if a "skill_materialized" event already logged for the thread.
    if self_cid:
        try:
            rich_closed = conn.execute(
                "SELECT t.id, t.question, t.outcome, "
                " (SELECT COUNT(*) FROM notes n WHERE n.thread_id=t.id) AS n_total, "
                " (SELECT COUNT(*) FROM notes n WHERE n.thread_id=t.id "
                "  AND n.kind IN ('insight','move')) AS n_rich "
                "FROM threads t "
                "WHERE t.state='closed' AND t.last_touched_at > ? "
                " AND NOT EXISTS ("
                "   SELECT 1 FROM events e "
                "   WHERE e.kind='skill_materialized' AND e.target=t.id"
                " ) "
                "ORDER BY t.last_touched_at DESC LIMIT 5",
                (now - 86400,),
            ).fetchall()
        except sqlite3.OperationalError:
            rich_closed = []

        candidates = [r for r in rich_closed
                      if r["n_total"] >= 5 and r["n_rich"] >= 2]
        if candidates:
            # Count consecutive shows since the last skill_materialized event
            # for this cid — escalates if the agent keeps ignoring.
            try:
                last_mat_at = conn.execute(
                    "SELECT MAX(created_at) m FROM events "
                    "WHERE kind='skill_materialized' AND session_id=?",
                    (identity._session_id or "",),
                ).fetchone()["m"] or 0
                consecutive_ignored = conn.execute(
                    "SELECT COUNT(*) c FROM events "
                    "WHERE kind='skill_hint_shown' AND target=? "
                    "AND created_at > ?",
                    (self_cid, last_mat_at),
                ).fetchone()["c"]
            except sqlite3.OperationalError:
                consecutive_ignored = 0

            top = candidates[0]
            out.append("")
            parts = [
                f"n={top['n_total']}",
                f"rich={top['n_rich']}",
            ]
            if consecutive_ignored >= 3:
                parts.append(f"ignored={consecutive_ignored}x")
            out.append("skill_hint " + " ".join(parts))
            warn = "⚠️ " if consecutive_ignored >= 3 else ""
            out.append(
                f" → {warn}closed thread is rich (≥5 notes, ≥2 insight/move). "
                "MATERIALIZE: invoke skill-creator to write ~/.claude/skills/"
                "<class-level-name>/SKILL.md from the insights — don't let "
                "learnings sit only in notes"
            )
            if consecutive_ignored >= 3:
                out.append(
                    f" ⚠️ hint shown {consecutive_ignored}× without "
                    "materialization — next reply must invoke skill-creator "
                    "or explain why the thread isn't class-level"
                )

            # Log this show so the next render_brief can detect repeated ignore.
            try:
                conn.execute(
                    "INSERT INTO events (session_id, kind, target, summary, "
                    "created_at) VALUES (?,?,?,?,?)",
                    (identity._session_id or "", "skill_hint_shown",
                     self_cid, top["id"], now),
                )
                conn.commit()
            except sqlite3.OperationalError:
                pass

    # ── skill_nudge ───────────────────────────────────────────────────────
    # Counter-driven companion to skill_hint: skill_hint reads thread state,
    # this one reads session event-counter. Together they catch "we have a
    # rich thread, materialize it" (hint) AND "we've been working hard
    # without saving any skill — likely missing something" (nudge).
    try:
        from .nudges import compute_skill_nudge
        sk_nudge = compute_skill_nudge(conn, identity._session_id or "")
    except (sqlite3.OperationalError, ImportError):
        sk_nudge = None
    if sk_nudge:
        out.append("")
        out.append(sk_nudge)

    # ── consulted_skills (this session) ───────────────────────────────────
    # Surface which skills the agent actually invoked / viewed in the
    # current session, plus any user-judgment outcomes ('helped' /
    # 'partial' / 'wrong'). Drives the patch-loop: if a recently-
    # consulted skill turned out 'wrong', the agent should PATCH it
    # before forgetting context, not next session.
    try:
        sess = identity._session_id or ""
        consulted = conn.execute(
            "SELECT target, kind, summary FROM events "
            "WHERE session_id = ? "
            " AND kind IN ('skill_view', 'skill_use', 'skill_patch', "
            "              'skill_create', 'skill_outcome') "
            "ORDER BY created_at ASC",
            (sess,),
        ).fetchall()
    except sqlite3.OperationalError:
        consulted = []
    if consulted:
        # Group by skill name, collect kinds + outcomes.
        per_skill: dict[str, dict] = {}
        for r in consulted:
            tgt = r["target"] or "?"
            slot = per_skill.setdefault(
                tgt, {"used": 0, "viewed": 0, "patched": 0, "created": 0,
                      "outcomes": []},
            )
            kind = r["kind"]
            if kind == "skill_use":
                slot["used"] += 1
            elif kind == "skill_view":
                slot["viewed"] += 1
            elif kind == "skill_patch":
                slot["patched"] += 1
            elif kind == "skill_create":
                slot["created"] += 1
            elif kind == "skill_outcome" and r["summary"]:
                slot["outcomes"].append(r["summary"])
        out.append("")
        out.append("consulted_skills")
        for tgt in sorted(per_skill.keys()):
            s = per_skill[tgt]
            parts: list[str] = []
            if s["created"]:
                parts.append(f"created×{s['created']}")
            if s["viewed"]:
                parts.append(f"viewed×{s['viewed']}")
            if s["used"]:
                parts.append(f"used×{s['used']}")
            if s["patched"]:
                parts.append(f"patched×{s['patched']}")
            if s["outcomes"]:
                # Compact outcome tally
                tally: dict[str, int] = {}
                for o in s["outcomes"]:
                    tally[o] = tally.get(o, 0) + 1
                for o, n in tally.items():
                    parts.append(f"{o}×{n}")
            out.append(f" {tgt}: {' '.join(parts)}")

    # ── style ─────────────────────────────────────────────────────────────
    style_rows = conn.execute("SELECT key, value FROM style").fetchall()
    if style_rows:
        out.append("")
        out.append("style " + " ".join(f"{r['key']}={r['value']}" for r in style_rows))

    # ── verbatim (last 5, chronological) ──────────────────────────────────
    qt = conn.execute(
        "SELECT speaker, content FROM verbatim ORDER BY created_at DESC LIMIT 5"
    ).fetchall()
    if qt:
        out.append("")
        out.append("verbatim")
        # fetched newest-first; reversed() renders them chronologically
        for r in reversed(qt):
            out.append(f" {r['speaker']}> {q(r['content'][:200])}")

    # ── relevant_to_query (only if query passed) ──────────────────────────
    if query:
        if SEMANTIC_AVAILABLE:
            hits = _cosine_search(conn, query, k)
            if hits:
                out.append("")
                out.append(f"relevant q={q(query[:80])}")
                for r in hits:
                    snip = r["content"][:160].replace("\n", " ")
                    if len(r["content"]) > 160:
                        snip += "…"
                    out.append(
                        f" {r['thread_id'] or '-'} {r['kind']} "
                        f"s={r['score']:.2f} {q(snip)}"
                    )
        else:
            try:
                rows = conn.execute(
                    "SELECT n.thread_id, n.kind, n.content FROM notes_fts f "
                    "JOIN notes n ON f.rowid=n.id WHERE notes_fts MATCH ? LIMIT ?",
                    (query, k),
                ).fetchall()
                if rows:
                    out.append("")
                    out.append(f"fts q={q(query[:80])}")
                    for r in rows:
                        snip = r["content"][:160].replace("\n", " ")
                        out.append(f" {r['thread_id'] or '-'} {r['kind']} {q(snip)}")
            except sqlite3.OperationalError:
                pass

    # ── weak_spots ────────────────────────────────────────────────────────
    # Top categories with high recent failure rate, plus categories that have
    # registered probes but never been tested in this DB. Skip if both empty.
    try:
        weak = conn.execute(
            "SELECT category, fail_rate_7d, attempts, last_at FROM reliability "
            "WHERE fail_rate_7d IS NOT NULL AND attempts >= 3 "
            "ORDER BY fail_rate_7d DESC LIMIT 3"
        ).fetchall()
        unknown = conn.execute(
            "SELECT DISTINCT p.category FROM probes p "
            "LEFT JOIN reliability r ON r.category = p.category "
            "WHERE p.enabled = 1 AND r.category IS NULL LIMIT 3"
        ).fetchall()
    except sqlite3.OperationalError:
        weak, unknown = [], []
    if weak or unknown:
        out.append("")
        out.append("weak_spots")
        for r in weak:
            age = fmt_age(now - r["last_at"]) if r["last_at"] else "?"
            out.append(
                f" {r['category']} fail7d={r['fail_rate_7d']:.2f} "
                f"n={r['attempts']} last={age}_ago"
            )
        for r in unknown:
            out.append(f" {r['category']} (never_tested)")

    # ── concepts (high-confidence, recent) ─────────────────────────────────
    try:
        cs = conn.execute(
            "SELECT id, description FROM concepts "
            "WHERE confidence='high' "
            "ORDER BY registered_at DESC LIMIT 3"
        ).fetchall()
    except sqlite3.OperationalError:
        cs = []
    if cs:
        out.append("")
        out.append("concepts (high-conf)")
        for c in cs:
            snip = c["description"][:140].replace("\n", " ")
            out.append(f" {c['id']} {q(snip)}")

    # ── user_model (dialectic) ────────────────────────────────────────────
    # Honcho-inspired dialectic snapshot: medium+high confidence claims about
    # the user, grouped by domain. Excludes low/disputed; never inferred from
    # one signal — claims earn confidence through accumulating evidence.
    try:
        syn_rows = conn.execute(
            "SELECT id, claim, domain, confidence, "
            " support_count, contradict_count "
            "FROM user_dialectic "
            "WHERE state='active' AND confidence IN ('medium','high') "
            "ORDER BY "
            " CASE confidence WHEN 'high' THEN 0 ELSE 1 END, "
            " (support_count - contradict_count) DESC, "
            " domain ASC "
            "LIMIT 12"
        ).fetchall()
    except sqlite3.OperationalError:
        syn_rows = []
    if syn_rows:
        # Group by domain inline (keep total ≤ 10 lines incl. headers).
        out.append("")
        out.append("user_model (dialectic)")
        grouped: dict[str, list] = {}
        order: list[str] = []
        for r in syn_rows:
            key = r["domain"] or "other"
            if key not in grouped:
                grouped[key] = []
                order.append(key)
            grouped[key].append(r)
        line_budget = 8
        for dom in order:
            if line_budget <= 0:
                break
            out.append(f" [{dom}]")
            line_budget -= 1
            for r in grouped[dom]:
                if line_budget <= 0:
                    break
                tag = "★" if r["confidence"] == "high" else "·"
                snip = r["claim"][:140].replace("\n", " ")
                if len(r["claim"]) > 140:
                    snip += "…"
                out.append(f" {tag} {snip}")
                line_budget -= 1

    # ── distill_pending (votes >= 2) ───────────────────────────────────────
    try:
        ds = conn.execute(
            "SELECT id, kind, vote_sum FROM distill "
            "WHERE vote_sum >= 2 AND exported_at IS NULL "
            "ORDER BY vote_sum DESC LIMIT 3"
        ).fetchall()
    except sqlite3.OperationalError:
        ds = []
    if ds:
        out.append("")
        out.append("distill_pending (vote≥2)")
        for d in ds:
            out.append(
                f" {d['id']} {d['kind']} votes={d['vote_sum']:.1f}"
            )

    # ── extract_pending ────────────────────────────────────────────────────
    try:
        ex_pending = conn.execute(
            "SELECT COUNT(*) c FROM extract_candidates WHERE status='pending'"
        ).fetchone()["c"]
    except sqlite3.OperationalError:
        ex_pending = 0
    if ex_pending > 0:
        out.append("")
        out.append(
            f"extract_pending n={ex_pending} (review_candidates / "
            f"accept_candidate to materialize)"
        )

    # ── pickup_top ────────────────────────────────────────────────────────
    # Surface the single oldest unclaimed unresolved thread as a hint that
    # a free-context session could pick it up. Only show when no high-value
    # current work (i.e. fewer than 3 active threads).
    try:
        active_count = conn.execute(
            "SELECT COUNT(*) c FROM threads WHERE state='active'"
        ).fetchone()["c"]
        if active_count < 3:
            top = conn.execute(
                "SELECT id, question, last_touched_at FROM threads "
                "WHERE state IN ('active','idle') AND claimed_at IS NULL "
                "AND last_touched_at <= ? "
                "ORDER BY last_touched_at ASC LIMIT 1",
                (now - 3 * 86400,),
            ).fetchone()
            if top:
                out.append("")
                out.append("pickup_top")
                out.append(
                    f" {top['id']} idle={fmt_age(now - top['last_touched_at'])} "
                    f"q={q(top['question'][:120])}"
                )
    except sqlite3.OperationalError:
        pass

    # ── evolve hints ──────────────────────────────────────────────────────
    pend = conn.execute(
        "SELECT suggestion FROM evolve WHERE applied=0 "
        "ORDER BY created_at DESC LIMIT 3"
    ).fetchall()
    if pend:
        out.append("")
        out.append("evolve_pending")
        for e in pend:
            out.append(f" {q(e['suggestion'][:200])}")

    # ── footer reminder: IDs are tool-call internals only ─────────────────
    # Evolve_pending #1 noted that brief's T-codes/cids leak into user-facing
    # replies when claude paraphrases. Loud trailing reminder beats quiet
    # style line buried mid-brief.
    out.append("")
    out.append(
        "⚠️ user-facing: paraphrase plain. Do NOT cite internal IDs above "
        "(thread T-codes, cids, signal #ids, session s_codes, task tk_codes, "
        "probe P-codes) when replying to the user — those are tool-call only."
    )

    return "\n".join(out)
716
+
717
+
718
def _append_dialog_log(from_cid: Optional[str], to_cid: Optional[str],
                       kind: str, content: str) -> None:
    """Append one formatted line per cross-session signal to the shared
    dialog log (tailed by open_dialog_window() for the live view).

    Best-effort: any OSError (missing dir race, disk full, permission)
    is swallowed — logging must never break the signal path.
    """
    try:
        TASK_LOG_DIR.mkdir(parents=True, exist_ok=True)
        stamp = datetime.now().strftime("%H:%M:%S")
        sender = (from_cid or "?")[:8]
        receiver = (to_cid or "*")[:8]
        # Keep each signal on a single log line: fold newlines, cap length.
        flat = content.replace("\n", " ⏎ ")
        flat = flat if len(flat) <= 280 else flat[:280] + "…"
        record = f"[{stamp}] {sender} → {receiver:<8} [{kind:<9}] {flat}\n"
        with DIALOG_LOG.open("a", encoding="utf-8") as handle:
            handle.write(record)
    except OSError:
        pass