@goondocks/myco 0.6.4 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (288) hide show
  1. package/.claude-plugin/marketplace.json +2 -3
  2. package/.claude-plugin/plugin.json +3 -3
  3. package/CONTRIBUTING.md +37 -30
  4. package/README.md +64 -28
  5. package/bin/myco-run +2 -0
  6. package/dist/agent-run-EFICNTAU.js +34 -0
  7. package/dist/agent-run-EFICNTAU.js.map +1 -0
  8. package/dist/agent-tasks-RXJ7Z5NG.js +180 -0
  9. package/dist/agent-tasks-RXJ7Z5NG.js.map +1 -0
  10. package/dist/chunk-2T7RPVPP.js +116 -0
  11. package/dist/chunk-2T7RPVPP.js.map +1 -0
  12. package/dist/chunk-3K5WGSJ4.js +165 -0
  13. package/dist/chunk-3K5WGSJ4.js.map +1 -0
  14. package/dist/chunk-46PWOKSI.js +26 -0
  15. package/dist/chunk-46PWOKSI.js.map +1 -0
  16. package/dist/chunk-4LPQ26CK.js +277 -0
  17. package/dist/chunk-4LPQ26CK.js.map +1 -0
  18. package/dist/chunk-5PEUFJ6U.js +92 -0
  19. package/dist/chunk-5PEUFJ6U.js.map +1 -0
  20. package/dist/chunk-5VZ52A4T.js +136 -0
  21. package/dist/chunk-5VZ52A4T.js.map +1 -0
  22. package/dist/chunk-BUSP3OJB.js +103 -0
  23. package/dist/chunk-BUSP3OJB.js.map +1 -0
  24. package/dist/chunk-D7TYRPRM.js +7312 -0
  25. package/dist/chunk-D7TYRPRM.js.map +1 -0
  26. package/dist/chunk-DCXRSSBP.js +22 -0
  27. package/dist/chunk-DCXRSSBP.js.map +1 -0
  28. package/dist/chunk-E4VLWIJC.js +2 -0
  29. package/dist/chunk-FFAYUQ5N.js +39 -0
  30. package/dist/chunk-FFAYUQ5N.js.map +1 -0
  31. package/dist/chunk-IB76KGBY.js +2 -0
  32. package/dist/chunk-JMJJEQ3P.js +486 -0
  33. package/dist/chunk-JMJJEQ3P.js.map +1 -0
  34. package/dist/{chunk-N33KUCFP.js → chunk-JTYZRPX5.js} +1 -9
  35. package/dist/chunk-JTYZRPX5.js.map +1 -0
  36. package/dist/{chunk-NLUE6CYG.js → chunk-JYOOJCPQ.js} +33 -17
  37. package/dist/chunk-JYOOJCPQ.js.map +1 -0
  38. package/dist/{chunk-Z74SDEKE.js → chunk-KB4DGYIY.js} +91 -9
  39. package/dist/chunk-KB4DGYIY.js.map +1 -0
  40. package/dist/{chunk-ERG2IEWX.js → chunk-KH64DHOY.js} +3 -7413
  41. package/dist/chunk-KH64DHOY.js.map +1 -0
  42. package/dist/chunk-KV4OC4H3.js +498 -0
  43. package/dist/chunk-KV4OC4H3.js.map +1 -0
  44. package/dist/chunk-KYLDNM7H.js +66 -0
  45. package/dist/chunk-KYLDNM7H.js.map +1 -0
  46. package/dist/chunk-LPUQPDC2.js +19 -0
  47. package/dist/chunk-LPUQPDC2.js.map +1 -0
  48. package/dist/chunk-M5XWW7UI.js +97 -0
  49. package/dist/chunk-M5XWW7UI.js.map +1 -0
  50. package/dist/chunk-MHSCMET3.js +275 -0
  51. package/dist/chunk-MHSCMET3.js.map +1 -0
  52. package/dist/chunk-MYX5NCRH.js +45 -0
  53. package/dist/chunk-MYX5NCRH.js.map +1 -0
  54. package/dist/chunk-OXZSXYAT.js +877 -0
  55. package/dist/chunk-OXZSXYAT.js.map +1 -0
  56. package/dist/chunk-PB6TOLRQ.js +35 -0
  57. package/dist/chunk-PB6TOLRQ.js.map +1 -0
  58. package/dist/chunk-PT5IC642.js +162 -0
  59. package/dist/chunk-PT5IC642.js.map +1 -0
  60. package/dist/chunk-QIK2XSDQ.js +187 -0
  61. package/dist/chunk-QIK2XSDQ.js.map +1 -0
  62. package/dist/chunk-RJ6ZQKG5.js +26 -0
  63. package/dist/chunk-RJ6ZQKG5.js.map +1 -0
  64. package/dist/{chunk-YIQLYIHW.js → chunk-TRUJLI6K.js} +29 -43
  65. package/dist/chunk-TRUJLI6K.js.map +1 -0
  66. package/dist/chunk-U3IBO3O3.js +41 -0
  67. package/dist/chunk-U3IBO3O3.js.map +1 -0
  68. package/dist/{chunk-7WHF2OIZ.js → chunk-UBZPD4HN.js} +25 -7
  69. package/dist/chunk-UBZPD4HN.js.map +1 -0
  70. package/dist/{chunk-HIN3UVOG.js → chunk-V7XG6V6C.js} +20 -11
  71. package/dist/chunk-V7XG6V6C.js.map +1 -0
  72. package/dist/chunk-WGTCA2NU.js +84 -0
  73. package/dist/chunk-WGTCA2NU.js.map +1 -0
  74. package/dist/{chunk-O6PERU7U.js → chunk-XNOCTDHF.js} +2 -2
  75. package/dist/chunk-YDN4OM33.js +80 -0
  76. package/dist/chunk-YDN4OM33.js.map +1 -0
  77. package/dist/cli-ODLFRIYS.js +128 -0
  78. package/dist/cli-ODLFRIYS.js.map +1 -0
  79. package/dist/client-EYOTW3JU.js +19 -0
  80. package/dist/client-MXRNQ5FI.js +13 -0
  81. package/dist/{config-IBS6KOLQ.js → config-UR5BSGVX.js} +21 -34
  82. package/dist/config-UR5BSGVX.js.map +1 -0
  83. package/dist/detect-H5OPI7GD.js +17 -0
  84. package/dist/detect-H5OPI7GD.js.map +1 -0
  85. package/dist/detect-providers-Q42OD4OS.js +26 -0
  86. package/dist/detect-providers-Q42OD4OS.js.map +1 -0
  87. package/dist/doctor-JLKTXDEH.js +258 -0
  88. package/dist/doctor-JLKTXDEH.js.map +1 -0
  89. package/dist/executor-ONSDHPGX.js +1441 -0
  90. package/dist/executor-ONSDHPGX.js.map +1 -0
  91. package/dist/init-6GWY345B.js +198 -0
  92. package/dist/init-6GWY345B.js.map +1 -0
  93. package/dist/init-wizard-UONLDYLI.js +294 -0
  94. package/dist/init-wizard-UONLDYLI.js.map +1 -0
  95. package/dist/llm-BV3QNVRD.js +17 -0
  96. package/dist/llm-BV3QNVRD.js.map +1 -0
  97. package/dist/loader-SH67XD54.js +28 -0
  98. package/dist/loader-SH67XD54.js.map +1 -0
  99. package/dist/loader-XVXKZZDH.js +18 -0
  100. package/dist/loader-XVXKZZDH.js.map +1 -0
  101. package/dist/{chunk-H7PRCVGQ.js → logs-QZVYF6FP.js} +74 -5
  102. package/dist/logs-QZVYF6FP.js.map +1 -0
  103. package/dist/main-BMCL7CPO.js +4393 -0
  104. package/dist/main-BMCL7CPO.js.map +1 -0
  105. package/dist/openai-embeddings-C265WRNK.js +14 -0
  106. package/dist/openai-embeddings-C265WRNK.js.map +1 -0
  107. package/dist/openrouter-U6VFCRX2.js +14 -0
  108. package/dist/openrouter-U6VFCRX2.js.map +1 -0
  109. package/dist/post-compact-OWFSOITU.js +26 -0
  110. package/dist/post-compact-OWFSOITU.js.map +1 -0
  111. package/dist/post-tool-use-DOUM7CGQ.js +56 -0
  112. package/dist/post-tool-use-DOUM7CGQ.js.map +1 -0
  113. package/dist/post-tool-use-failure-SG3C7PE6.js +28 -0
  114. package/dist/post-tool-use-failure-SG3C7PE6.js.map +1 -0
  115. package/dist/pre-compact-3J33CHXQ.js +25 -0
  116. package/dist/pre-compact-3J33CHXQ.js.map +1 -0
  117. package/dist/provider-check-3WBPZADE.js +12 -0
  118. package/dist/provider-check-3WBPZADE.js.map +1 -0
  119. package/dist/registry-J4XTWARS.js +25 -0
  120. package/dist/registry-J4XTWARS.js.map +1 -0
  121. package/dist/resolution-events-TFEQPVKS.js +12 -0
  122. package/dist/resolution-events-TFEQPVKS.js.map +1 -0
  123. package/dist/resolve-3FEUV462.js +9 -0
  124. package/dist/resolve-3FEUV462.js.map +1 -0
  125. package/dist/{restart-XCMILOL5.js → restart-2VM33WOB.js} +10 -6
  126. package/dist/{restart-XCMILOL5.js.map → restart-2VM33WOB.js.map} +1 -1
  127. package/dist/search-ZGQR5MDE.js +91 -0
  128. package/dist/search-ZGQR5MDE.js.map +1 -0
  129. package/dist/{server-6UDN35QN.js → server-6KMBJCHZ.js} +308 -517
  130. package/dist/server-6KMBJCHZ.js.map +1 -0
  131. package/dist/session-Z2FXDDG6.js +68 -0
  132. package/dist/session-Z2FXDDG6.js.map +1 -0
  133. package/dist/session-end-FLVX32LE.js +38 -0
  134. package/dist/session-end-FLVX32LE.js.map +1 -0
  135. package/dist/session-start-UCLK7PXE.js +169 -0
  136. package/dist/session-start-UCLK7PXE.js.map +1 -0
  137. package/dist/setup-digest-4KDSXAIV.js +15 -0
  138. package/dist/setup-digest-4KDSXAIV.js.map +1 -0
  139. package/dist/setup-llm-GKMCHURK.js +81 -0
  140. package/dist/setup-llm-GKMCHURK.js.map +1 -0
  141. package/dist/src/agent/definitions/agent.yaml +35 -0
  142. package/dist/src/agent/definitions/tasks/digest-only.yaml +84 -0
  143. package/dist/src/agent/definitions/tasks/extract-only.yaml +87 -0
  144. package/dist/src/agent/definitions/tasks/full-intelligence.yaml +472 -0
  145. package/dist/src/agent/definitions/tasks/graph-maintenance.yaml +92 -0
  146. package/dist/src/agent/definitions/tasks/review-session.yaml +132 -0
  147. package/dist/src/agent/definitions/tasks/supersession-sweep.yaml +86 -0
  148. package/dist/src/agent/definitions/tasks/title-summary.yaml +88 -0
  149. package/dist/src/agent/prompts/agent.md +121 -0
  150. package/dist/src/agent/prompts/orchestrator.md +91 -0
  151. package/dist/src/cli.js +1 -8
  152. package/dist/src/cli.js.map +1 -1
  153. package/dist/src/daemon/main.js +1 -8
  154. package/dist/src/daemon/main.js.map +1 -1
  155. package/dist/src/hooks/post-tool-use.js +3 -50
  156. package/dist/src/hooks/post-tool-use.js.map +1 -1
  157. package/dist/src/hooks/session-end.js +3 -32
  158. package/dist/src/hooks/session-end.js.map +1 -1
  159. package/dist/src/hooks/session-start.js +2 -8
  160. package/dist/src/hooks/session-start.js.map +1 -1
  161. package/dist/src/hooks/stop.js +3 -42
  162. package/dist/src/hooks/stop.js.map +1 -1
  163. package/dist/src/hooks/user-prompt-submit.js +3 -53
  164. package/dist/src/hooks/user-prompt-submit.js.map +1 -1
  165. package/dist/src/mcp/server.js +1 -8
  166. package/dist/src/mcp/server.js.map +1 -1
  167. package/dist/src/prompts/digest-system.md +1 -1
  168. package/dist/src/symbionts/manifests/claude-code.yaml +16 -0
  169. package/dist/src/symbionts/manifests/cursor.yaml +14 -0
  170. package/dist/stats-IUJPZSVZ.js +94 -0
  171. package/dist/stats-IUJPZSVZ.js.map +1 -0
  172. package/dist/stop-XRQLLXST.js +42 -0
  173. package/dist/stop-XRQLLXST.js.map +1 -0
  174. package/dist/stop-failure-2CAJJKRG.js +26 -0
  175. package/dist/stop-failure-2CAJJKRG.js.map +1 -0
  176. package/dist/subagent-start-MWWQTZMQ.js +26 -0
  177. package/dist/subagent-start-MWWQTZMQ.js.map +1 -0
  178. package/dist/subagent-stop-PJXYGRXB.js +28 -0
  179. package/dist/subagent-stop-PJXYGRXB.js.map +1 -0
  180. package/dist/task-completed-4LFRJVGI.js +27 -0
  181. package/dist/task-completed-4LFRJVGI.js.map +1 -0
  182. package/dist/ui/assets/index-DZrElonz.js +744 -0
  183. package/dist/ui/assets/index-TkeiYbZB.css +1 -0
  184. package/dist/ui/favicon.svg +7 -7
  185. package/dist/ui/fonts/Inter-Variable.woff2 +0 -0
  186. package/dist/ui/fonts/JetBrainsMono-Variable.woff2 +0 -0
  187. package/dist/ui/fonts/Newsreader-Italic-Variable.woff2 +0 -0
  188. package/dist/ui/fonts/Newsreader-Variable.woff2 +0 -0
  189. package/dist/ui/index.html +2 -2
  190. package/dist/user-prompt-submit-KSM3AR6P.js +59 -0
  191. package/dist/user-prompt-submit-KSM3AR6P.js.map +1 -0
  192. package/dist/{verify-TOWQHPBX.js → verify-UDAYVX37.js} +17 -22
  193. package/dist/verify-UDAYVX37.js.map +1 -0
  194. package/dist/{version-36RVCQA6.js → version-KLBN4HZT.js} +3 -4
  195. package/dist/version-KLBN4HZT.js.map +1 -0
  196. package/hooks/hooks.json +82 -5
  197. package/package.json +6 -3
  198. package/skills/myco/SKILL.md +10 -10
  199. package/skills/myco/references/cli-usage.md +15 -13
  200. package/skills/myco/references/vault-status.md +3 -3
  201. package/skills/myco/references/wisdom.md +4 -4
  202. package/skills/myco-curate/SKILL.md +86 -0
  203. package/dist/chunk-2ZIBCEYO.js +0 -113
  204. package/dist/chunk-2ZIBCEYO.js.map +0 -1
  205. package/dist/chunk-4RMSHZE4.js +0 -107
  206. package/dist/chunk-4RMSHZE4.js.map +0 -1
  207. package/dist/chunk-4XVKZ3WA.js +0 -1078
  208. package/dist/chunk-4XVKZ3WA.js.map +0 -1
  209. package/dist/chunk-6FQISQNA.js +0 -61
  210. package/dist/chunk-6FQISQNA.js.map +0 -1
  211. package/dist/chunk-7WHF2OIZ.js.map +0 -1
  212. package/dist/chunk-ERG2IEWX.js.map +0 -1
  213. package/dist/chunk-FPRXMJLT.js +0 -56
  214. package/dist/chunk-FPRXMJLT.js.map +0 -1
  215. package/dist/chunk-GENQ5QGP.js +0 -37
  216. package/dist/chunk-GENQ5QGP.js.map +0 -1
  217. package/dist/chunk-H7PRCVGQ.js.map +0 -1
  218. package/dist/chunk-HIN3UVOG.js.map +0 -1
  219. package/dist/chunk-HYVT345Y.js +0 -159
  220. package/dist/chunk-HYVT345Y.js.map +0 -1
  221. package/dist/chunk-J4D4CROB.js +0 -143
  222. package/dist/chunk-J4D4CROB.js.map +0 -1
  223. package/dist/chunk-MDLSAFPP.js +0 -99
  224. package/dist/chunk-MDLSAFPP.js.map +0 -1
  225. package/dist/chunk-N33KUCFP.js.map +0 -1
  226. package/dist/chunk-NL6WQO56.js +0 -65
  227. package/dist/chunk-NL6WQO56.js.map +0 -1
  228. package/dist/chunk-NLUE6CYG.js.map +0 -1
  229. package/dist/chunk-P723N2LP.js +0 -147
  230. package/dist/chunk-P723N2LP.js.map +0 -1
  231. package/dist/chunk-QLUE3BUL.js +0 -161
  232. package/dist/chunk-QLUE3BUL.js.map +0 -1
  233. package/dist/chunk-QN4W3JUA.js +0 -43
  234. package/dist/chunk-QN4W3JUA.js.map +0 -1
  235. package/dist/chunk-RGVBGTD6.js +0 -21
  236. package/dist/chunk-RGVBGTD6.js.map +0 -1
  237. package/dist/chunk-TWSTAVLO.js +0 -132
  238. package/dist/chunk-TWSTAVLO.js.map +0 -1
  239. package/dist/chunk-UP4P4OAA.js +0 -4423
  240. package/dist/chunk-UP4P4OAA.js.map +0 -1
  241. package/dist/chunk-YIQLYIHW.js.map +0 -1
  242. package/dist/chunk-YTFXA4RX.js +0 -86
  243. package/dist/chunk-YTFXA4RX.js.map +0 -1
  244. package/dist/chunk-Z74SDEKE.js.map +0 -1
  245. package/dist/cli-IHILSS6N.js +0 -97
  246. package/dist/cli-IHILSS6N.js.map +0 -1
  247. package/dist/client-AGFNR2S4.js +0 -12
  248. package/dist/config-IBS6KOLQ.js.map +0 -1
  249. package/dist/curate-3D4GHKJH.js +0 -78
  250. package/dist/curate-3D4GHKJH.js.map +0 -1
  251. package/dist/detect-providers-XEP4QA3R.js +0 -35
  252. package/dist/detect-providers-XEP4QA3R.js.map +0 -1
  253. package/dist/digest-7HLJXL77.js +0 -85
  254. package/dist/digest-7HLJXL77.js.map +0 -1
  255. package/dist/init-ARQ53JOR.js +0 -109
  256. package/dist/init-ARQ53JOR.js.map +0 -1
  257. package/dist/logs-IENORIYR.js +0 -84
  258. package/dist/logs-IENORIYR.js.map +0 -1
  259. package/dist/main-6AGPIMH2.js +0 -5715
  260. package/dist/main-6AGPIMH2.js.map +0 -1
  261. package/dist/rebuild-Q2ACEB6F.js +0 -64
  262. package/dist/rebuild-Q2ACEB6F.js.map +0 -1
  263. package/dist/reprocess-CDEFGQOV.js +0 -79
  264. package/dist/reprocess-CDEFGQOV.js.map +0 -1
  265. package/dist/search-7W25SKCB.js +0 -120
  266. package/dist/search-7W25SKCB.js.map +0 -1
  267. package/dist/server-6UDN35QN.js.map +0 -1
  268. package/dist/session-F326AWCH.js +0 -44
  269. package/dist/session-F326AWCH.js.map +0 -1
  270. package/dist/session-start-K6IGAC7H.js +0 -192
  271. package/dist/session-start-K6IGAC7H.js.map +0 -1
  272. package/dist/setup-digest-X5PN27F4.js +0 -15
  273. package/dist/setup-llm-S5OHQJXK.js +0 -15
  274. package/dist/src/prompts/classification.md +0 -43
  275. package/dist/stats-TTSDXGJV.js +0 -58
  276. package/dist/stats-TTSDXGJV.js.map +0 -1
  277. package/dist/templates-XPRBOWCE.js +0 -38
  278. package/dist/templates-XPRBOWCE.js.map +0 -1
  279. package/dist/ui/assets/index-08wKT7wS.css +0 -1
  280. package/dist/ui/assets/index-CMSMi4Jb.js +0 -369
  281. package/dist/verify-TOWQHPBX.js.map +0 -1
  282. package/skills/setup/SKILL.md +0 -174
  283. package/skills/setup/references/model-recommendations.md +0 -83
  284. /package/dist/{client-AGFNR2S4.js.map → chunk-E4VLWIJC.js.map} +0 -0
  285. /package/dist/{setup-digest-X5PN27F4.js.map → chunk-IB76KGBY.js.map} +0 -0
  286. /package/dist/{chunk-O6PERU7U.js.map → chunk-XNOCTDHF.js.map} +0 -0
  287. /package/dist/{setup-llm-S5OHQJXK.js.map → client-EYOTW3JU.js.map} +0 -0
  288. /package/dist/{version-36RVCQA6.js.map → client-MXRNQ5FI.js.map} +0 -0
@@ -0,0 +1,472 @@
1
+ # =============================================================================
2
+ # Built-in Task: Full Intelligence (Phased)
3
+ # =============================================================================
4
+ # Default task for: myco-agent
5
+ #
6
+ # Complete intelligence pipeline using phased execution. Each phase is a
7
+ # separate query() call with scoped tools and turn limits. The executor
8
+ # controls the loop — the LLM cannot skip phases.
9
+ # =============================================================================
10
+
11
+ name: full-intelligence
12
+ displayName: Full Intelligence
13
+ description: >
14
+ Complete intelligence pass over all unprocessed session data. Extracts
15
+ observations as spores, updates session summaries, consolidates related
16
+ spores into wisdom, builds knowledge graph entities and edges, and
17
+ regenerates digest extracts.
18
+ agent: myco-agent
19
+ isDefault: true
20
+ maxTurns: 130
21
+ timeoutSeconds: 1800
22
+ model: claude-sonnet-4-6
23
+
24
+ orchestrator:
25
+ enabled: true
26
+ model: claude-sonnet-4-6
27
+ maxTurns: 3
28
+
29
+ contextQueries:
30
+ pre-planning:
31
+ - tool: vault_unprocessed
32
+ queryTemplate: ""
33
+ limit: 5
34
+ purpose: "Check if there are unprocessed batches"
35
+ required: false
36
+ - tool: vault_state
37
+ queryTemplate: ""
38
+ limit: 10
39
+ purpose: "Get agent cursor state"
40
+ required: false
41
+ - tool: vault_spores
42
+ queryTemplate: ""
43
+ limit: 20
44
+ purpose: "Review recent spores for consolidation candidates"
45
+ required: false
46
+
47
+ prompt: >
48
+ You are executing one phase of a multi-phase intelligence pipeline.
49
+ Each phase has its own scoped tools — only use the tools available to you.
50
+ Do NOT report on tools or work outside your current phase.
51
+ Focus on quality over quantity — one precise observation is worth more
52
+ than ten vague ones.
53
+
54
+ phases:
55
+ - name: read-state
56
+ # Root phase — no dependsOn
57
+ prompt: |
58
+ Read the current vault state to determine what needs processing.
59
+
60
+ 1. Call `vault_state` to get your cursor (`last_processed_batch_id`)
61
+ 2. Call `vault_unprocessed` with `after_id` set to your cursor
62
+
63
+ If no unprocessed batches exist, report via `vault_report` with
64
+ action "skip" and reason "no unprocessed batches".
65
+ tools:
66
+ - vault_state
67
+ - vault_unprocessed
68
+ - vault_spores
69
+ - vault_report
70
+ maxTurns: 6
71
+ required: true
72
+
73
+ - name: extract
74
+ dependsOn: [read-state]
75
+ prompt: |
76
+ Extract observations from unprocessed batches as spores.
77
+ CRITICAL: the vault must stay sharp, not bloated. Supersede, don't duplicate.
78
+
79
+ If the prior phase reported no unprocessed batches, call `vault_report`
80
+ with action "skip" and reason "no unprocessed batches" and finish.
81
+
82
+ ## Step 1: Read batches and identify observations (budget: ~10 turns)
83
+
84
+ PASS 1: Read all batches via `vault_unprocessed`. For each, note candidate
85
+ insights as a mental list. Focus on genuine insights, not activity logs.
86
+
87
+ PASS 2: Group similar insights into topics. Batches often repeat the same
88
+ theme — group before searching to minimize tool calls.
89
+
90
+ ## Step 2: Search and create/supersede (budget: ~20 turns)
91
+
92
+ For each TOPIC you identified (not each batch — group first):
93
+ 1. Call `vault_search_semantic` with the topic to check existing coverage
94
+ 2. If a similar spore exists:
95
+ - If your observation adds meaningful new detail: create the new
96
+ spore, then supersede the old via `vault_resolve_spore`
97
+ action "supersede" with new_spore_id
98
+ - If your observation adds nothing new: skip it entirely
99
+ 3. If no similar spore exists: create it as new
100
+ 4. Mark the source batches as processed via `vault_mark_processed`
101
+
102
+ ## Step 3: Update cursor (budget: 1 turn)
103
+
104
+ Call `vault_set_state` to update `last_processed_batch_id`.
105
+
106
+ ## Quality rules
107
+ - One observation per spore — specific, not vague
108
+ - Include session_id, prompt_batch_id, importance 1-10, tags
109
+ - Supersede rather than duplicate — the vault gets sharper, not bigger
110
+ - Group batches by topic to minimize search calls
111
+ - If approaching the turn budget, stop and move on
112
+ tools:
113
+ - vault_unprocessed
114
+ - vault_spores
115
+ - vault_search_fts
116
+ - vault_search_semantic
117
+ - vault_create_spore
118
+ - vault_resolve_spore
119
+ - vault_mark_processed
120
+ - vault_set_state
121
+ - vault_report
122
+ maxTurns: 35
123
+ required: true
124
+
125
+ - name: summarize
126
+ dependsOn: [read-state]
127
+ prompt: |
128
+ Update session titles and summaries for sessions touched during extraction.
129
+
130
+ ## Step 1: Find sessions with new activity (budget: 2 turns)
131
+
132
+ 1. Call `vault_unprocessed` — group batches by session_id. Each session
133
+ with unprocessed batches needs its summary updated.
134
+ 2. Call `vault_sessions` to get existing titles/summaries for those sessions.
135
+
136
+ If no sessions have new activity, report "skip" and finish.
137
+
138
+ ## Step 2: Update each session (budget: 5 turns)
139
+
140
+ For each session with new batches:
141
+ 1. Read the EXISTING title and summary — context from prior runs.
142
+ 2. Read the NEW prompt batches (from Step 1) — user_prompt + response_summary.
143
+ This is your PRIMARY source. Understand the full arc of new work.
144
+ 3. Generate an UPDATED title and summary incorporating both existing
145
+ context and new activity.
146
+
147
+ ## Step 3: Write updates (budget: 2 turns)
148
+
149
+ Call `vault_update_session` with BOTH title and summary.
150
+
151
+ ### Title Rules (CRITICAL)
152
+
153
+ The title describes WHAT WAS ACCOMPLISHED, not what was asked.
154
+ - Under 80 chars, sentence case
155
+ - Synthesize from the full arc of prompt batches, not just the first one
156
+ - NEVER use file paths, directory names, or working directory
157
+ - NEVER copy the user's first message as the title
158
+ - Good: "Wave-based parallel executor and per-task provider config"
159
+ - Bad: "/git-worktree" (directory path, not a title)
160
+
161
+ ### Summary Rules
162
+
163
+ 2-4 sentences. Rich in detail — session summaries are embedded and used
164
+ for semantic search. Include: what was built/fixed, key files touched,
165
+ tools used, outcomes. Focus on what was accomplished across the FULL
166
+ session, not just the first or last prompt.
167
+
168
+ Report via `vault_report` with action "summary" for each update.
169
+ If no updates needed, report action "skip" with reason.
170
+ tools:
171
+ - vault_sessions
172
+ - vault_unprocessed
173
+ - vault_spores
174
+ - vault_update_session
175
+ - vault_report
176
+ maxTurns: 10
177
+ required: false
178
+
179
+ - name: consolidate
180
+ dependsOn: [extract]
181
+ prompt: |
182
+ Consolidate related spores into wisdom and clean up redundancy.
183
+ The vault must get SHARPER over time, not just bigger.
184
+
185
+ ## Step 1: Find clusters (budget: 5 turns)
186
+
187
+ 1. Call `vault_spores` with each `observation_type` filter (gotcha, decision,
188
+ discovery, trade_off, bug_fix) to list spores by type
189
+ 2. Call `vault_spores` with status "active" to see the full set
190
+ 3. Look for 2+ spores covering the same topic or component
191
+ 4. For candidate clusters, call `vault_search_semantic` to confirm
192
+ semantic similarity
193
+
194
+ ## Decision tree
195
+
196
+ - **3+ related active spores on same topic** → create wisdom, consolidate all
197
+ - **Exactly 2 related spores** → keep the better one, supersede the weaker
198
+ - **1 spore (no duplicates)** → no action
199
+
200
+ ## Step 2: Create wisdom (budget: 8 turns)
201
+
202
+ When you find 3+ related active spores on the same topic:
203
+ 1. Create a wisdom spore via `vault_create_spore`:
204
+ - observation_type: "wisdom"
205
+ - properties: '{"consolidated_from": ["id-1", "id-2", "id-3"]}'
206
+ - Content MUST preserve ALL specific details from sources —
207
+ file paths, error messages, concrete values. Wisdom is a
208
+ comprehensive reference, not a vague summary.
209
+ 2. Resolve EACH source via `vault_resolve_spore` action "consolidate"
210
+ — this removes them from search results and context injection
211
+
212
+ ## Step 3: Supersede stale spores (budget: 5 turns)
213
+
214
+ Also look for spores that are outdated or contradicted by newer ones:
215
+ - If two spores describe the same thing but one is more recent/detailed,
216
+ supersede the older one via `vault_resolve_spore` action "supersede"
217
+ - Superseded spores stay in the DB but are removed from search and
218
+ context injection — this keeps the vault relevant
219
+
220
+ Skip spores with status "consolidated" or "superseded" to prevent cycles.
221
+
222
+ If `vault_search_semantic` returns no results (embedding unavailable), report
223
+ action "skip" and move on.
224
+ tools:
225
+ - vault_spores
226
+ - vault_search_semantic
227
+ - vault_create_spore
228
+ - vault_resolve_spore
229
+ - vault_report
230
+ maxTurns: 22
231
+ required: false
232
+
233
+ - name: graph
234
+ dependsOn: [extract]
235
+ prompt: |
236
+ Build the knowledge graph: create entities, then link spores to them.
237
+
238
+ The graph has two layers:
239
+ - Lineage (automatic): spore→session, spore→batch — already done
240
+ - Semantic (your job): spore→entity, entity→entity — you create these
241
+
242
+ ## Step 1: Review spores and existing entities (2 turns)
243
+
244
+ 1. Call `vault_spores` with status "active" to see current spores
245
+ 2. Call `vault_entities` to see existing graph entities
246
+ 3. Call `vault_edges` to see existing relationships
247
+
248
+ ## Step 2: Create entities (budget: 6 turns)
249
+
250
+ For components/concepts/people that appear across multiple spores:
251
+ - Call `vault_create_entity` with type: component | concept | person
252
+ - Focus on entities that help understand the system architecture
253
+ - Prefer fewer well-defined entities over many vague ones
254
+
255
+ ## Step 3: Link spores to entities (budget: 12 turns) — CRITICAL
256
+
257
+ For EACH entity (newly created AND existing unlinked ones):
258
+ 1. Call `vault_search_semantic` with the entity name to find related spores
259
+ 2. Before creating an edge, call `vault_edges` with `source_id` and
260
+ `target_id` to verify it does not already exist
261
+ 3. For each relevant search result, call `vault_create_edge`:
262
+ - type: REFERENCES
263
+ - source_id: the spore ID, source_type: spore
264
+ - target_id: the entity ID, target_type: entity
265
+ 4. Entities without edges are invisible — always link after creating
266
+
267
+ ## Step 4: Structural edges between entities (remaining turns)
268
+
269
+ For architectural dependencies between entities:
270
+ - DEPENDS_ON (entity→entity): "daemon" depends on "SQLite"
271
+ - AFFECTS (spore→entity): a gotcha that impacts a component
272
+
273
+ Do NOT create lineage edges (FROM_SESSION, EXTRACTED_FROM, etc.)
274
+
275
+ Call `vault_report` with action "graph" reporting:
276
+ - Entities created: N
277
+ - Spore→entity REFERENCES edges: N
278
+ - Entity→entity structural edges: N
279
+ tools:
280
+ - vault_spores
281
+ - vault_sessions
282
+ - vault_search_semantic
283
+ - vault_entities
284
+ - vault_edges
285
+ - vault_create_entity
286
+ - vault_create_edge
287
+ - vault_report
288
+ maxTurns: 25
289
+ required: false
290
+
291
+ # ---- Digest: assess + 3 parallel tier writes ----
292
+ # The assess phase gathers context, then each tier runs independently
293
+ # in the same wave via Promise.allSettled().
294
+
295
+ - name: digest-assess
296
+ dependsOn: [consolidate]
297
+ prompt: |
298
+ Assess current digest state and gather material for tier updates.
299
+ The 3 digest tier phases run in parallel after you — your summary
300
+ is their primary input. Be thorough.
301
+
302
+ ## Step 1: Check current state (budget: 2 turns)
303
+
304
+ Call `vault_read_digest` (no tier param) to see current digest state.
305
+ Note each tier's `generated_at` timestamp — this tells you how fresh
306
+ each tier is. Review the prior phase results (in your context):
307
+ - How many spores were created/superseded?
308
+ - How many entities/edges were added?
309
+
310
+ If changes are minor (< 3 new spores, 0 entity changes), call
311
+ `vault_report` with action "skip" and reason explaining why the
312
+ current digest is still sufficient. STOP.
313
+
314
+ ## Step 2: Gather new material (budget: 5 turns)
315
+
316
+ Search for material relevant to what changed:
317
+ 1. Call `vault_search_semantic` with themes from newly created/superseded
318
+ spores and new entity names
319
+ 2. Call `vault_spores` to see recently created observations
320
+ 3. Call `vault_sessions` for recent session context
321
+
322
+ ## Step 3: Produce detailed findings
323
+
324
+ Your final response is critical — the per-tier phases see it as their
325
+ primary context (they cannot call vault_spores or vault_sessions).
326
+ Structure your summary as:
327
+
328
+ **New material to integrate:**
329
+ - List each new insight, decision, pattern, gotcha with enough detail
330
+ that a tier writer can incorporate it without re-searching
331
+ - Include specific spore content, not just "3 new spores were created"
332
+
333
+ **Themes and connections:**
334
+ - What overarching themes connect the new material?
335
+ - What existing digest sections are affected?
336
+
337
+ **Per-tier freshness and guidance:**
338
+ For each tier, state whether it should UPDATE or SKIP based on both
339
+ the volume of new material AND the tier's `generated_at` freshness:
340
+ - A tier written < 30 minutes ago with only minor changes → SKIP
341
+ - A tier written hours ago or with significant changes → UPDATE
342
+ - Larger tiers (10000) tolerate more frequent updates since
343
+ they have room for incremental additions
344
+ - Smaller tiers (1500) should only update for material that
345
+ genuinely changes the executive picture
346
+
347
+ Per-tier directives:
348
+ - 10000: UPDATE/SKIP + what to add (any new content)
349
+ - 5000: UPDATE/SKIP + what to add (trade-offs and patterns)
350
+ - 1500: UPDATE/SKIP + what to add (only critical changes)
351
+ tools:
352
+ - vault_spores
353
+ - vault_sessions
354
+ - vault_search_semantic
355
+ - vault_read_digest
356
+ - vault_report
357
+ maxTurns: 8
358
+ required: true
359
+
360
+ - name: digest-10000
361
+ dependsOn: [digest-assess]
362
+ prompt: |
363
+ Update digest tier 10000 — Full institutional knowledge.
364
+ Budget: 7 turns (1 read + 1-2 search + 1 write + buffer).
365
+
366
+ This is the largest tier (~10,000 tokens). Update if any new content
367
+ was found. The digest-assess phase summary in your context contains
368
+ the material to integrate and per-tier guidance.
369
+
370
+ 1. Call `vault_read_digest` with tier 10000 to get current content
371
+ 2. Call `vault_search_semantic` for themes identified in the assess
372
+ findings — gather specific details to weave in
373
+ 3. Integrate new material into the existing digest
374
+ 4. Call `vault_write_digest` with the updated content
375
+
376
+ The existing digest is your baseline — integrate new material, don't
377
+ rewrite from scratch. Preserve well-crafted existing content.
378
+
379
+ Prioritize: recent insights > active decisions > unresolved gotchas >
380
+ architectural patterns > historical context.
381
+
382
+ If this tier genuinely has no new material to integrate, call
383
+ `vault_report` with action "skip" for this tier.
384
+ tools:
385
+ - vault_search_semantic
386
+ - vault_read_digest
387
+ - vault_write_digest
388
+ - vault_report
389
+ maxTurns: 7
390
+ required: false
391
+
392
+ - name: digest-5000
393
+ dependsOn: [digest-assess]
394
+ prompt: |
395
+ Update digest tier 5000 — Deep onboarding.
396
+ Budget: 5 turns (1 read + 1 search + 1 write + buffer).
397
+
398
+ This tier (~5,000 tokens) focuses on trade-offs and patterns. The
399
+ digest-assess phase summary in your context contains the material
400
+ to integrate and per-tier guidance.
401
+
402
+ 1. Call `vault_read_digest` with tier 5000 to get current content
403
+ 2. Optionally call `vault_search_semantic` for additional context
404
+ 3. Integrate new material from the assess findings
405
+ 4. Call `vault_write_digest` with the updated content
406
+
407
+ The existing digest is your baseline — integrate new material, don't
408
+ rewrite from scratch. Preserve well-crafted existing content.
409
+
410
+ Prioritize: recent insights > active decisions > unresolved gotchas >
411
+ architectural patterns > historical context.
412
+
413
+ If this tier genuinely has no new material to integrate, call
414
+ `vault_report` with action "skip" for this tier.
415
+ tools:
416
+ - vault_search_semantic
417
+ - vault_read_digest
418
+ - vault_write_digest
419
+ - vault_report
420
+ maxTurns: 5
421
+ required: false
422
+
423
+ - name: digest-1500
424
+ dependsOn: [digest-assess]
425
+ prompt: |
426
+ Update digest tier 1500 — Executive briefing.
427
+ Budget: 3 turns (1 read + 1 write + buffer).
428
+
429
+ This is the most compressed tier (~1,500 tokens). Only update if
430
+ important new decisions, gotchas, or critical changes were found.
431
+ The digest-assess phase summary in your context contains the material
432
+ to integrate and per-tier guidance.
433
+
434
+ 1. Call `vault_read_digest` with tier 1500 to get current content
435
+ 2. Integrate new material from the assess findings — be very selective,
436
+ only critical changes belong at this tier
437
+ 3. Call `vault_write_digest` with the updated content
438
+
439
+ The existing digest is your baseline — integrate new material, don't
440
+ rewrite from scratch. Preserve well-crafted existing content.
441
+
442
+ Prioritize: recent insights > active decisions > unresolved gotchas >
443
+ architectural patterns > historical context.
444
+
445
+ If this tier genuinely has no new material to integrate, call
446
+ `vault_report` with action "skip" for this tier.
447
+ tools:
448
+ - vault_read_digest
449
+ - vault_write_digest
450
+ - vault_report
451
+ maxTurns: 3
452
+ required: false
453
+
454
+ - name: report
455
+ dependsOn: [extract, summarize, consolidate, graph, digest-assess, digest-10000, digest-5000, digest-1500]
456
+ skipPriorContext: true
457
+ prompt: |
458
+ Summarize what was done across all phases.
459
+
460
+ Call `vault_report` with action "complete" and these details:
461
+ - Batches processed: N
462
+ - Spores created: N (by type)
463
+ - Sessions updated: N
464
+ - Wisdom created: N
465
+ - Entities created: N, edges created: N
466
+ - Digest tiers written: N
467
+
468
+ This report MUST be the last tool call of the run.
469
+ tools:
470
+ - vault_report
471
+ maxTurns: 3
472
+ required: true
@@ -0,0 +1,92 @@
1
+ # =============================================================================
2
+ # Built-in Task: Graph Maintenance
3
+ # =============================================================================
4
+ # Scan existing spores to build/update entities and semantic edges.
5
+ # Does not extract new spores or regenerate digest.
6
+ # =============================================================================
7
+
8
+ name: graph-maintenance
9
+ displayName: Graph Maintenance
10
+ description: >
11
+ Scan existing spores and sessions to update the knowledge graph. Adds
12
+ missing entities and semantic edges, merges duplicates. Does not extract
13
+ new spores or regenerate digest.
14
+ agent: myco-agent
15
+ isDefault: false
16
+ maxTurns: 40
17
+ timeoutSeconds: 480
18
+
19
+ prompt: |
20
+ Maintain the knowledge graph from existing spores. Budget: ~35 turns.
21
+
22
+ ## Phase 1 — Inventory (budget: 3 turns)
23
+
24
+ 1. Call `vault_state` for context
25
+ 2. Call `vault_spores` with status "active" to see current observations
26
+ 3. Call `vault_entities` to see existing entities
27
+ 4. Call `vault_edges` to check existing relationships
28
+
29
+ ## Phase 2 — Entity Discovery (budget: 15 turns)
30
+
31
+ Call `vault_entities` to see existing entities before creating new ones.
32
+
33
+ Scan active spores for entity candidates:
34
+ 1. For each candidate: count distinct `session_id` values across spores
35
+ mentioning it. Only create if referenced in 2+ sessions AND 3+ spores.
36
+ 2. Check `vault_entities` results — if the entity already exists, skip it
37
+ 3. Call `vault_create_entity` for each valid NEW candidate:
38
+ - type: component | concept | person
39
+ - name: specific, named thing (not abstract categories)
40
+ 4. Good: "DaemonClient", "cursor-based pagination", "Chris"
41
+ 5. Bad: "testing phase", "technical debt", "code quality"
42
+
43
+ ## Phase 3 — Semantic Edges (budget: 10 turns)
44
+
45
+ Link spores and entities with semantic edges. For each entity:
46
+ 1. Call `vault_search_semantic` with the entity name to find spores that discuss it
47
+ 2. For each relevant spore found, first call `vault_edges` with source_id
48
+ and target_id to verify the edge does not already exist — avoid creating
49
+ duplicate edges for the same (source, type, target) triple
50
+ 3. If no such edge exists, call `vault_create_edge`:
51
+ - type: REFERENCES
52
+ - source_id: the spore ID, source_type: spore
53
+ - target_id: the entity ID, target_type: entity
54
+
55
+ Also create structural edges between entities:
56
+ - DEPENDS_ON (entity→entity): architectural dependencies
57
+ - AFFECTS (spore→entity): when an observation directly impacts a component
58
+ - RELATES_TO: general semantic connections
59
+
60
+ Always include source_type and target_type on every edge.
61
+ Do NOT create lineage edges (FROM_SESSION, EXTRACTED_FROM, etc.).
62
+ Entities without any edges are invisible in the graph — always link after
63
+ creating.
64
+
65
+ ## Phase 4 — Deduplication (budget: 5 turns)
66
+
67
+ Call `vault_entities` to list all entities. Look for entities of the same
68
+ type with similar names (e.g., "DaemonClient" and "daemon-client", or
69
+ "SQLite" and "sqlite-vec"):
70
+ 1. Compare entity names within each type for near-duplicates
71
+ 2. Use `vault_search_semantic` to confirm semantic similarity if unsure
72
+ 3. If duplicates found, treat the better-named one as canonical and report
73
+ the duplicate pair via `vault_report` so it can be merged later
74
+
75
+ ## Phase 5 — Report (budget: 2 turns)
76
+
77
+ Call `vault_report` with action "graph":
78
+ - Entities created: N (by type)
79
+ - Edges created: N (by type)
80
+ - Duplicates found: N
81
+
82
+ toolOverrides:
83
+ - vault_spores
84
+ - vault_sessions
85
+ - vault_search_semantic
86
+ - vault_state
87
+ - vault_entities
88
+ - vault_edges
89
+ - vault_create_entity
90
+ - vault_create_edge
91
+ - vault_set_state
92
+ - vault_report
@@ -0,0 +1,132 @@
1
+ # =============================================================================
2
+ # Built-in Task: Review Session
3
+ # =============================================================================
4
+ # Targeted end-to-end pass over a single session: extract spores, build
5
+ # graph entities and edges, run supersession checks, update title/summary.
6
+ # =============================================================================
7
+
8
+ name: review-session
9
+ displayName: Review Session
10
+ description: >
11
+ Process a single session end-to-end. Extracts spores from all prompt
12
+ batches in the session, builds graph entities and edges, runs supersession
13
+ checks, and generates the session title and summary.
14
+ agent: myco-agent
15
+ isDefault: false
16
+ maxTurns: 40
17
+ timeoutSeconds: 480
18
+
19
+ prompt: |
20
+ Review the specified session end-to-end. Budget: ~35 turns.
21
+
22
+ Target session: {{session_id}}
23
+
24
+ ## Phase 1 — Read State (budget: 3 turns)
25
+
26
+ 1. Call `vault_state` to get current cursor and context
27
+ 2. Call `vault_sessions` to find the target session and its current
28
+ title/summary/processed status
29
+ 3. Call `vault_unprocessed` to get all unprocessed batches — note which
30
+ ones belong to the target session (by matching session_id in the batch data)
31
+
32
+ If no unprocessed batches belong to this session, call `vault_report` with
33
+ action "skip" and reason "no unprocessed batches for session", then stop.
34
+ Only process batches from the target session — ignore batches from other sessions.
35
+
36
+ ## Phase 2 — Extract Spores (budget: 15 turns)
37
+
38
+ Extract observations from all unprocessed batches in this session.
39
+ CRITICAL: the vault must stay sharp, not bloated — supersede, don't duplicate.
40
+
41
+ 1. Read all batch content first — understand the full scope of the session
42
+ 2. Group candidate observations by TOPIC across all batches
43
+ 3. Call `vault_spores` to see existing spores for context
44
+
45
+ For each TOPIC group:
46
+ 1. Call `vault_search_semantic` with the topic to find existing spores that
47
+ already cover this topic
48
+ 2. If a similar spore exists:
49
+ - If your observation adds meaningful new detail: call `vault_create_spore`
50
+ then supersede the old via `vault_resolve_spore` action "supersede"
51
+ with new_spore_id
52
+ - If your observation adds nothing new: skip it entirely
53
+ 3. If no similar spore exists: call `vault_create_spore` with session_id,
54
+ prompt_batch_id, importance 1-10, and relevant tags
55
+
56
+ After processing all batches: call `vault_mark_processed` for each batch,
57
+ then call `vault_set_state` to update the cursor.
58
+
59
+ ## Phase 3 — Graph (budget: 10 turns)
60
+
61
+ Build entities and semantic edges for knowledge discovered in this session.
62
+
63
+ 1. Call `vault_spores` to review the spores just created plus existing ones
64
+ 2. Call `vault_entities` and `vault_edges` to see what already exists
65
+ 3. Check `vault_spores` for cross-session references before creating entities.
66
+ Only create entities referenced in 2+ sessions. For a single-session review,
67
+ this means the entity must also appear in spores from OTHER sessions.
68
+ 4. For valid entity candidates:
69
+ - Call `vault_create_entity` with type: component | concept | person
70
+ - Good: "DaemonClient", "cursor-based pagination" — specific named things
71
+ - Bad: "testing phase", "code quality" — abstract categories
72
+ 5. For each entity (new and existing relevant ones):
73
+ - Call `vault_search_semantic` with the entity name to find related spores
74
+ - Before creating, call `vault_edges` with source_id and target_id to
75
+ check if the edge already exists
76
+ - Call `vault_create_edge` with type REFERENCES, source_type spore,
77
+ target_type entity for each relevant spore
78
+ 6. Create structural edges: DEPENDS_ON (entity→entity), AFFECTS (spore→entity)
79
+
80
+ Always include source_type and target_type on every edge.
81
+ Do NOT create lineage edges (FROM_SESSION, EXTRACTED_FROM, etc.).
82
+
83
+ ## Phase 4 — Supersession Check (budget: 3 turns)
84
+
85
+ Scan active spores for newly introduced redundancy:
86
+ 1. Call `vault_spores` with status "active" to see the current set
87
+ 2. For any spores that are now outdated or contradicted by the newly created
88
+ ones, call `vault_resolve_spore` with action "supersede" or "archive"
89
+ 3. Only supersede when the new information genuinely replaces the old;
90
+ when in doubt, keep both
91
+
92
+ ## Phase 5 — Title & Summary (budget: 3 turns)
93
+
94
+ 1. Call `vault_sessions` to check the current title and summary
95
+ 2. Review the prompt batches you processed in Phase 2 — use user_prompt
96
+ and response_summary as your PRIMARY source for the title and summary.
97
+ Call `vault_spores` with `session_id` as supplemental context.
98
+ 3. Call `vault_update_session` with BOTH title and summary
99
+
100
+ Title: describes WHAT WAS ACCOMPLISHED (under 80 chars, sentence case).
101
+ NEVER use file paths, directory names, or the user's first message as title.
102
+ Good: "SQLite migration with FTS5 and vector embeddings"
103
+ Bad: "/git-worktree", "Help me fix the bug in..."
104
+ Summary: 2-4 sentences on key work done + outcomes.
105
+
106
+ ## Phase 6 — Report (budget: 1 turn)
107
+
108
+ Call `vault_report` with action "complete":
109
+ - Session reviewed: {{session_id}}
110
+ - Batches processed: N
111
+ - Spores created: N (by type)
112
+ - Spores superseded: N
113
+ - Entities created: N, edges created: N
114
+ - Title/summary updated: yes/no
115
+
116
+ toolOverrides:
117
+ - vault_unprocessed
118
+ - vault_spores
119
+ - vault_sessions
120
+ - vault_search_fts
121
+ - vault_search_semantic
122
+ - vault_entities
123
+ - vault_edges
124
+ - vault_create_spore
125
+ - vault_resolve_spore
126
+ - vault_create_entity
127
+ - vault_create_edge
128
+ - vault_update_session
129
+ - vault_mark_processed
130
+ - vault_set_state
131
+ - vault_state
132
+ - vault_report