@goondocks/myco 0.18.0 → 0.18.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128) hide show
  1. package/dist/{agent-run-2NFYMQXW.js → agent-run-I4O2K2CK.js} +4 -4
  2. package/dist/{agent-tasks-MEIYLXGN.js → agent-tasks-UOW5BQIB.js} +4 -4
  3. package/dist/{chunk-JMOUFG6Y.js → chunk-44PZCAYS.js} +47 -5
  4. package/dist/chunk-44PZCAYS.js.map +1 -0
  5. package/dist/{chunk-JDI4DPWD.js → chunk-C3EGL5JX.js} +632 -145
  6. package/dist/chunk-C3EGL5JX.js.map +1 -0
  7. package/dist/{chunk-OW433Q4C.js → chunk-CURS2TNP.js} +44 -3
  8. package/dist/chunk-CURS2TNP.js.map +1 -0
  9. package/dist/{chunk-FABWUX5G.js → chunk-DPSLJ242.js} +16 -2
  10. package/dist/chunk-DPSLJ242.js.map +1 -0
  11. package/dist/{chunk-DLFDBKEV.js → chunk-LSP5HYOO.js} +17 -14
  12. package/dist/chunk-LSP5HYOO.js.map +1 -0
  13. package/dist/{chunk-VOCGURV7.js → chunk-N75GMQGA.js} +3 -3
  14. package/dist/{chunk-U7GJTVSX.js → chunk-RIDSOQDR.js} +20 -6
  15. package/dist/chunk-RIDSOQDR.js.map +1 -0
  16. package/dist/{chunk-KWTOCJLB.js → chunk-TCSVDQF5.js} +1128 -193
  17. package/dist/chunk-TCSVDQF5.js.map +1 -0
  18. package/dist/{chunk-55QEICRO.js → chunk-TLK46KKD.js} +2 -2
  19. package/dist/{chunk-NZI7WBZI.js → chunk-TOER6RNC.js} +21 -1
  20. package/dist/chunk-TOER6RNC.js.map +1 -0
  21. package/dist/{chunk-7OYXB2NM.js → chunk-TZAXQKO6.js} +5 -1
  22. package/dist/chunk-TZAXQKO6.js.map +1 -0
  23. package/dist/{chunk-EO2RQW4S.js → chunk-W7WENJ6F.js} +2 -2
  24. package/dist/{chunk-BUIR3JWM.js → chunk-XWOQL4XN.js} +2 -2
  25. package/dist/{chunk-PFWIPRF6.js → chunk-YZPI2Y3E.js} +2 -2
  26. package/dist/{cli-IIMBALPV.js → cli-D3TJYJ2U.js} +35 -35
  27. package/dist/{client-VZCUISHZ.js → client-4LLEXLVK.js} +3 -3
  28. package/dist/{detect-GEM3NVK6.js → detect-SZ2KDUF4.js} +2 -2
  29. package/dist/{doctor-QYD34X7Q.js → doctor-KCTXPX5D.js} +6 -6
  30. package/dist/{executor-NSPRTH4M.js → executor-UYIZC3L5.js} +83 -275
  31. package/dist/executor-UYIZC3L5.js.map +1 -0
  32. package/dist/{init-WYYL44KZ.js → init-QFNBKKDC.js} +7 -7
  33. package/dist/{llm-KEDHK3TQ.js → llm-SMA5ZEAW.js} +2 -2
  34. package/dist/{main-6PY3ITQ5.js → main-5THODR77.js} +427 -196
  35. package/dist/main-5THODR77.js.map +1 -0
  36. package/dist/{open-HRFMJDQX.js → open-7737CSPN.js} +4 -4
  37. package/dist/{post-compact-HT24YMAN.js → post-compact-2TJ5FPZH.js} +6 -6
  38. package/dist/{post-tool-use-DENRI5WB.js → post-tool-use-FRTSICC3.js} +5 -5
  39. package/dist/{post-tool-use-failure-A6SNJX42.js → post-tool-use-failure-KYO2NCNB.js} +6 -6
  40. package/dist/{pre-compact-3Q4BALCL.js → pre-compact-J6GCJEJR.js} +6 -6
  41. package/dist/{remove-YB5A6HY2.js → remove-3WZZC7AX.js} +5 -5
  42. package/dist/{restart-RGDVHELZ.js → restart-HUHEFOXU.js} +5 -5
  43. package/dist/{search-WOHT3G55.js → search-ZGN3LDXG.js} +5 -5
  44. package/dist/{server-6SUNYDV7.js → server-PTXLVVEE.js} +3 -3
  45. package/dist/{session-W3SKRFRV.js → session-7VV3IQMO.js} +5 -5
  46. package/dist/{session-end-OUTY7AFF.js → session-end-SMU55UCM.js} +5 -5
  47. package/dist/{session-start-5MB3LFOA.js → session-start-NIMWEOIZ.js} +16 -11
  48. package/dist/{session-start-5MB3LFOA.js.map → session-start-NIMWEOIZ.js.map} +1 -1
  49. package/dist/{setup-llm-ZMYGIQX5.js → setup-llm-7S3VPAPN.js} +4 -4
  50. package/dist/src/agent/definitions/tasks/extract-only.yaml +1 -1
  51. package/dist/src/agent/definitions/tasks/full-intelligence.yaml +10 -0
  52. package/dist/src/agent/definitions/tasks/skill-evolve.yaml +163 -49
  53. package/dist/src/agent/definitions/tasks/skill-generate.yaml +44 -27
  54. package/dist/src/agent/definitions/tasks/skill-survey.yaml +132 -138
  55. package/dist/src/agent/definitions/tasks/supersession-sweep.yaml +1 -1
  56. package/dist/src/cli.js +1 -1
  57. package/dist/src/daemon/main.js +1 -1
  58. package/dist/src/hooks/post-tool-use.js +1 -1
  59. package/dist/src/hooks/session-end.js +1 -1
  60. package/dist/src/hooks/session-start.js +1 -1
  61. package/dist/src/hooks/stop.js +1 -1
  62. package/dist/src/hooks/user-prompt-submit.js +1 -1
  63. package/dist/src/mcp/server.js +1 -1
  64. package/dist/src/symbionts/manifests/codex.yaml +45 -7
  65. package/dist/src/worker/src/index.ts +8 -2
  66. package/dist/src/worker/src/schema.ts +2 -0
  67. package/dist/{stats-DGI6B3HX.js → stats-GEOQ2DFF.js} +5 -5
  68. package/dist/{stop-YGHODSP7.js → stop-7AKYBJJ2.js} +5 -5
  69. package/dist/{stop-failure-7IJTPJ6W.js → stop-failure-NLE2EURG.js} +6 -6
  70. package/dist/{subagent-start-ZBQ5PJB5.js → subagent-start-LBNZF2TG.js} +6 -6
  71. package/dist/{subagent-stop-N2TDQU2D.js → subagent-stop-B2Z5GYAB.js} +6 -6
  72. package/dist/{task-completed-BDLMRSBB.js → task-completed-PO5TETJ7.js} +6 -6
  73. package/dist/{team-2ZFGTSIN.js → team-DPNP2RN7.js} +3 -3
  74. package/dist/ui/assets/{index-DtT9_nlT.js → index-CiI1fwas.js} +2 -2
  75. package/dist/ui/index.html +1 -1
  76. package/dist/{update-STLAN7LR.js → update-WBWB5URU.js} +5 -5
  77. package/dist/{user-prompt-submit-4IBFUYQ3.js → user-prompt-submit-IZJC3NV7.js} +11 -8
  78. package/dist/user-prompt-submit-IZJC3NV7.js.map +1 -0
  79. package/dist/{verify-EJYPO7QA.js → verify-FNSP62I3.js} +2 -2
  80. package/dist/{version-YPBIKH77.js → version-QEVU66NT.js} +2 -2
  81. package/package.json +7 -7
  82. package/dist/chunk-7OYXB2NM.js.map +0 -1
  83. package/dist/chunk-DLFDBKEV.js.map +0 -1
  84. package/dist/chunk-FABWUX5G.js.map +0 -1
  85. package/dist/chunk-JDI4DPWD.js.map +0 -1
  86. package/dist/chunk-JMOUFG6Y.js.map +0 -1
  87. package/dist/chunk-KWTOCJLB.js.map +0 -1
  88. package/dist/chunk-NZI7WBZI.js.map +0 -1
  89. package/dist/chunk-OW433Q4C.js.map +0 -1
  90. package/dist/chunk-U7GJTVSX.js.map +0 -1
  91. package/dist/executor-NSPRTH4M.js.map +0 -1
  92. package/dist/main-6PY3ITQ5.js.map +0 -1
  93. package/dist/user-prompt-submit-4IBFUYQ3.js.map +0 -1
  94. /package/dist/{agent-run-2NFYMQXW.js.map → agent-run-I4O2K2CK.js.map} +0 -0
  95. /package/dist/{agent-tasks-MEIYLXGN.js.map → agent-tasks-UOW5BQIB.js.map} +0 -0
  96. /package/dist/{chunk-VOCGURV7.js.map → chunk-N75GMQGA.js.map} +0 -0
  97. /package/dist/{chunk-55QEICRO.js.map → chunk-TLK46KKD.js.map} +0 -0
  98. /package/dist/{chunk-EO2RQW4S.js.map → chunk-W7WENJ6F.js.map} +0 -0
  99. /package/dist/{chunk-BUIR3JWM.js.map → chunk-XWOQL4XN.js.map} +0 -0
  100. /package/dist/{chunk-PFWIPRF6.js.map → chunk-YZPI2Y3E.js.map} +0 -0
  101. /package/dist/{cli-IIMBALPV.js.map → cli-D3TJYJ2U.js.map} +0 -0
  102. /package/dist/{client-VZCUISHZ.js.map → client-4LLEXLVK.js.map} +0 -0
  103. /package/dist/{detect-GEM3NVK6.js.map → detect-SZ2KDUF4.js.map} +0 -0
  104. /package/dist/{doctor-QYD34X7Q.js.map → doctor-KCTXPX5D.js.map} +0 -0
  105. /package/dist/{init-WYYL44KZ.js.map → init-QFNBKKDC.js.map} +0 -0
  106. /package/dist/{llm-KEDHK3TQ.js.map → llm-SMA5ZEAW.js.map} +0 -0
  107. /package/dist/{open-HRFMJDQX.js.map → open-7737CSPN.js.map} +0 -0
  108. /package/dist/{post-compact-HT24YMAN.js.map → post-compact-2TJ5FPZH.js.map} +0 -0
  109. /package/dist/{post-tool-use-DENRI5WB.js.map → post-tool-use-FRTSICC3.js.map} +0 -0
  110. /package/dist/{post-tool-use-failure-A6SNJX42.js.map → post-tool-use-failure-KYO2NCNB.js.map} +0 -0
  111. /package/dist/{pre-compact-3Q4BALCL.js.map → pre-compact-J6GCJEJR.js.map} +0 -0
  112. /package/dist/{remove-YB5A6HY2.js.map → remove-3WZZC7AX.js.map} +0 -0
  113. /package/dist/{restart-RGDVHELZ.js.map → restart-HUHEFOXU.js.map} +0 -0
  114. /package/dist/{search-WOHT3G55.js.map → search-ZGN3LDXG.js.map} +0 -0
  115. /package/dist/{server-6SUNYDV7.js.map → server-PTXLVVEE.js.map} +0 -0
  116. /package/dist/{session-W3SKRFRV.js.map → session-7VV3IQMO.js.map} +0 -0
  117. /package/dist/{session-end-OUTY7AFF.js.map → session-end-SMU55UCM.js.map} +0 -0
  118. /package/dist/{setup-llm-ZMYGIQX5.js.map → setup-llm-7S3VPAPN.js.map} +0 -0
  119. /package/dist/{stats-DGI6B3HX.js.map → stats-GEOQ2DFF.js.map} +0 -0
  120. /package/dist/{stop-YGHODSP7.js.map → stop-7AKYBJJ2.js.map} +0 -0
  121. /package/dist/{stop-failure-7IJTPJ6W.js.map → stop-failure-NLE2EURG.js.map} +0 -0
  122. /package/dist/{subagent-start-ZBQ5PJB5.js.map → subagent-start-LBNZF2TG.js.map} +0 -0
  123. /package/dist/{subagent-stop-N2TDQU2D.js.map → subagent-stop-B2Z5GYAB.js.map} +0 -0
  124. /package/dist/{task-completed-BDLMRSBB.js.map → task-completed-PO5TETJ7.js.map} +0 -0
  125. /package/dist/{team-2ZFGTSIN.js.map → team-DPNP2RN7.js.map} +0 -0
  126. /package/dist/{update-STLAN7LR.js.map → update-WBWB5URU.js.map} +0 -0
  127. /package/dist/{verify-EJYPO7QA.js.map → verify-FNSP62I3.js.map} +0 -0
  128. /package/dist/{version-YPBIKH77.js.map → version-QEVU66NT.js.map} +0 -0
@@ -1,15 +1,16 @@
1
1
  name: skill-survey
2
2
  displayName: Skill Candidate Survey
3
3
  description: >-
4
- Analyze vault knowledge for procedural skill candidates. Identifies
5
- clusters of related sessions, spores, and plans that describe how
6
- to accomplish tasks in this project.
4
+ Analyze vault knowledge for procedural domain candidates. Identifies
5
+ broad categories of recurring developer work — not individual micro-
6
+ procedures that warrant guided skill context.
7
7
  agent: myco-agent
8
8
  prompt: >-
9
- Survey the vault knowledge graph for procedural skill candidates.
9
+ Survey the vault knowledge graph for procedural domain candidates.
10
+ The instruction contains pre-assembled vault context.
10
11
  isDefault: false
11
12
  model: claude-sonnet-4-6
12
- maxTurns: 65
13
+ maxTurns: 35
13
14
  timeoutSeconds: 600
14
15
  schedule:
15
16
  enabled: true
@@ -17,167 +18,160 @@ schedule:
17
18
  runIn:
18
19
  - idle
19
20
  phases:
20
- - name: explore-spores
21
+ - name: explore
21
22
  prompt: |
22
- Identify procedural patterns from spores and entities.
23
- You are one of three parallel exploration phases — focus only on spores.
24
- Budget is tight — be efficient, not exhaustive.
23
+ The instruction contains pre-assembled vault context: digest,
24
+ recent wisdom spores, decisions, gotchas, sessions, and the
25
+ current skill inventory. Read it carefully — this is your
26
+ primary input.
25
27
 
26
- 1. Read the digest (vault_read_digest) for a high-level overview.
27
- This gives you the landscape in one call.
28
+ Your goal: identify PROCEDURAL DOMAINS — broad categories of
29
+ recurring work where a developer benefits from guided context.
28
30
 
29
- 2. Query wisdom spores (vault_spores, observation_type: wisdom).
30
- These are highest signal — synthesized from multiple sessions.
31
+ ## What is a procedural domain?
31
32
 
32
- 3. Query decisions and gotchas (vault_spores, limit 30 each).
33
- One page per type is sufficient — don't paginate exhaustively.
33
+ A domain groups related procedures that share prerequisite
34
+ knowledge. Think "extending the daemon infrastructure" (covering
35
+ PowerManager jobs, MCP tools, config writes, notification
36
+ wiring) — NOT "registering a PowerManager job" alone.
34
37
 
35
- 4. Check vault_entities for high-mention components.
38
+ Good domain examples:
39
+ - "Vault schema and data layer extension" (migrations, tables,
40
+ queries, FTS indexes, constants)
41
+ - "Agent pipeline task authoring" (YAML anatomy, phases,
42
+ scheduling, parameter injection, fault tolerance)
43
+ - "Symbiont integration lifecycle" (manifests, hooks, config
44
+ directories, capture rules, installer registration)
36
45
 
37
- Group findings by procedural topic ("how to do X").
38
- Stop when you have a clear picture — diminishing returns
39
- from additional queries waste turns.
40
- tools:
41
- - vault_spores
42
- - vault_entities
43
- - vault_edges
44
- - vault_read_digest
45
- maxTurns: 15
46
- required: true
47
- readOnly: true
46
+ Bad examples (too narrow — these are SECTIONS within a domain):
47
+ - "How to register a PowerManager job"
48
+ - "Adding an FTS5 index"
49
+ - "Writing a symbiont capture rule"
48
50
 
49
- - name: explore-sessions
50
- prompt: |
51
- Identify procedural workflows from session history.
52
- You are one of three parallel exploration phases — focus only on sessions.
53
- Budget is tight — be efficient, not exhaustive.
51
+ ## Scale guidance
54
52
 
55
- 1. List recent sessions (vault_sessions, limit 20). Read summaries
56
- to identify multi-step workflows and recurring implementation patterns.
53
+ 20-30 broad skills for a complex project is the right order
54
+ of magnitude. If a topic would be a section heading within a
55
+ broader skill, it is not its own candidate.
57
56
 
58
- 2. Do 2-3 targeted FTS searches for procedural keywords:
59
- vault_search_fts with queries like "migration step", "configure install",
60
- "refactor debug". Combine related terms in ONE query rather than
61
- searching each keyword separately.
57
+ ## Process
62
58
 
63
- Group findings by procedural topic ("how to do X").
64
- Stop when you have a clear picture — you don't need to search
65
- every possible keyword.
66
- tools:
67
- - vault_sessions
68
- - vault_search_fts
69
- maxTurns: 10
70
- required: true
71
- readOnly: true
59
+ 1. Read the pre-assembled context for orientation.
72
60
 
73
- - name: explore-plans
74
- prompt: |
75
- Identify procedures from plans and artifacts.
76
- You are one of three parallel exploration phases — focus only on plans.
77
- Budget is tight — be efficient, not exhaustive.
61
+ 2. Use vault tools for TARGETED follow-up on promising
62
+ clusters. The baseline gives you direction — follow threads
63
+ that suggest a procedural domain exists.
78
64
 
79
- Plans contain explicit step-by-step procedures and design decisions.
80
- They are often the most directly skill-ready content.
65
+ Good follow-up queries:
66
+ - vault_search_fts to find additional sessions touching
67
+ a domain you spotted in the digest
68
+ - vault_spores to read full content of high-signal spores
69
+ summarized in the baseline
70
+ - vault_entities for components with high mention counts
71
+ that might anchor a domain
81
72
 
82
- 1. Use vault_search_semantic with 2-3 broad queries to find plans:
83
- - "implementation plan design architecture"
84
- - "migration setup configuration deployment"
85
- Combine related concepts in each query. Do NOT search each
86
- keyword separately.
73
+ Do NOT exhaustively paginate or search every keyword.
74
+ You have ~12 tool calls — use them purposefully.
87
75
 
88
- 2. If semantic search returns thin results, do ONE targeted FTS
89
- search for a specific procedural term.
76
+ 3. Group findings into candidate domains. For each domain,
77
+ note: the core theme, which procedures it covers, and
78
+ which source items provide evidence.
90
79
 
91
- Group findings by procedural topic ("how to do X").
92
- Stop when you have coverage — more queries have diminishing returns.
80
+ Store your domain clusters in working notes for the next phase.
93
81
  tools:
94
- - vault_search_semantic
82
+ - vault_spores
83
+ - vault_entities
84
+ - vault_edges
85
+ - vault_sessions
95
86
  - vault_search_fts
96
- maxTurns: 22
87
+ - vault_search_semantic
88
+ maxTurns: 15
97
89
  required: true
98
90
  readOnly: true
99
91
 
100
- - name: evaluate
92
+ - name: synthesize-evaluate
93
+ model: claude-sonnet-4-6
101
94
  prompt: |
102
- Three parallel exploration phases have completed — one explored
103
- spores and entities, one explored sessions, one explored plans.
104
- Their findings are in the prior context.
95
+ The explore phase identified procedural domain clusters from
96
+ the vault. Now evaluate each and create candidates for domains
97
+ that pass all criteria.
105
98
 
106
- BEFORE creating any candidates, you MUST load the dedup context:
107
- 1. vault_skill_records (action: list) — all active skills on disk
99
+ BEFORE creating any candidates, load the dedup context:
100
+ 1. vault_skill_records (action: list) — all active skills
108
101
  2. vault_skill_candidates (action: list) — all existing candidates
109
- Keep both lists for reference throughout this phase.
110
-
111
- For each procedural cluster identified, evaluate ALL of these
112
- criteria. A candidate must pass EVERY one:
113
-
114
- 1. PROCEDURE TEST: Is this "how to do X in this project"?
115
- "What is X" or "X exists" is knowledge, not a skill. Skip it.
116
-
117
- 2. REPEATABILITY TEST: Will someone need to do this again?
118
- Skills are repeatable how-to's for recurring tasks — things
119
- a developer will do more than once as the project grows.
120
- One-time fixes, bug patches, version-specific workarounds,
121
- and already-applied migration steps are NOT skills. They are
122
- vault knowledge (spores/wisdom) that should stay as spores.
123
- Ask: "if a new contributor joined tomorrow, would they ever
124
- need to follow this procedure?" If the answer is "no, it was
125
- done once and the result is permanent," it is not a skill.
126
- Examples of SKILLS: adding a new symbiont, creating a DB
127
- migration, registering an MCP tool, authoring an agent task.
128
- NOT skills: fixing a specific bug, applying a one-off schema
129
- change, adapting to a framework update, addressing a one-off
130
- behavioral change in a dependency.
131
-
132
- 3. CROSS-SESSION EVIDENCE: Knowledge from 2+ sessions, 3+ source
133
- items across sessions, spores, and plans.
134
-
135
- 4. DEDUPLICATION (mandatory failures here create real problems):
136
- Check BOTH lists from step 0 above.
137
- - If an active skill record's name or description covers the
138
- same procedure, do NOT create a duplicate. Skip it entirely.
139
- - If a non-dismissed candidate covers the same topic, UPDATE
140
- that candidate with new evidence instead of creating a new one.
141
- - If a generated/dismissed candidate covered this, it was
142
- already handled. Do not re-create it.
143
-
144
- 5. CONFIDENCE SCORE (0.0-1.0):
145
- - Knowledge density (more sources = higher)
102
+ Keep both lists for reference.
103
+
104
+ ## Evaluation criteria
105
+
106
+ Each candidate must pass ALL of these at the DOMAIN level:
107
+
108
+ 1. PROCEDURE TEST: Does this domain describe "how to do a
109
+ family of related tasks in this project"? A domain about
110
+ "what X is" or "X exists" is knowledge, not a skill.
111
+
112
+ 2. REPEATABILITY TEST: Will developers need these procedures
113
+ again as the project grows? Domains covering extension
114
+ points (adding new X, extending Y) are strong candidates.
115
+ One-time fixes and bug patches are NOT skills.
116
+
117
+ 3. BREADTH TEST: Does this domain cover 2+ distinct procedures
118
+ that share prerequisite knowledge? A single procedure is
119
+ too narrow — it should be a section within a broader domain.
120
+ If you can't imagine 3+ major section headings, it's not
121
+ broad enough.
122
+
123
+ 4. CROSS-SESSION EVIDENCE: Knowledge from 2+ sessions and 3+
124
+ source items (spores, sessions, plans).
125
+
126
+ 5. DEDUPLICATION: Check BOTH lists from step 0.
127
+ - If an active skill already covers this domain, skip.
128
+ - If the domain SUBSUMES existing narrow skills (the domain
129
+ is broader and the narrow skills are sections within it),
130
+ create the candidate with `supersedes` listing the narrow
131
+ skill names as a JSON array. The dedup gate exempts
132
+ superseded skills from vocabulary overlap checks.
133
+ - If a non-dismissed candidate covers the same domain,
134
+ UPDATE it with new evidence instead of creating.
135
+ - Dismissed candidates: the gate allows creation with a
136
+ warning. If the dismissed topic is a narrow subset of
137
+ your broader domain, proceed.
138
+
139
+ 6. CONFIDENCE SCORE (0.0-1.0):
140
+ - Domain breadth (more procedures covered = higher)
146
141
  - Cross-session evidence (wider = higher)
147
- - Presence of wisdom spores (higher quality signal)
148
- - Presence of plans with step-by-step procedures (higher)
149
-
150
- For each valid candidate, use vault_skill_candidates (action: create)
151
- with topic, rationale, confidence, and source_ids (JSON array of
152
- {id, type} objects referencing the spores, sessions, and plans).
153
-
154
- IMPORTANT: Write the rationale field as markdown. Include these
155
- sections in every rationale:
156
- - **Procedure verdict**: PASS/FAIL
157
- - **Repeatability verdict**: PASS/FAIL with justification
158
- - **Cross-session evidence**: session IDs and source count
159
- - **Existing coverage**: list the skills/candidates you checked
160
- and why this is genuinely new (not "NONE" — name what you checked)
161
- Use **bold** for verdicts, numbered lists for steps, `code` for
162
- file paths and function names. The rationale is displayed directly
163
- to the user in the dashboard.
164
-
165
- If an existing candidate has new evidence, use vault_skill_candidates
166
- (action: update) to increase confidence and add source_ids.
167
-
168
- Dismiss candidates whose underlying knowledge has been superseded
169
- (action: update, status: dismissed).
170
-
171
- Report a summary of candidates created, updated, and dismissed
172
- using vault_report.
142
+ - Wisdom spore presence (higher quality signal)
143
+ - Plan presence (step-by-step procedures = higher)
144
+
145
+ ## Creating candidates
146
+
147
+ Use vault_skill_candidates (action: create) with:
148
+ - topic: the domain name (e.g., "Vault Schema and Data Layer Extension")
149
+ - rationale: markdown with these sections:
150
+ - **Domain scope**: what procedures this covers (bulleted list)
151
+ - **Procedure verdict**: PASS/FAIL
152
+ - **Repeatability verdict**: PASS/FAIL with justification
153
+ - **Breadth verdict**: PASS/FAIL with section count estimate
154
+ - **Cross-session evidence**: session IDs and source count
155
+ - **Existing coverage**: skills/candidates checked and why this
156
+ is new or broader (name what you checked, not "NONE")
157
+ - **Supersedes**: which existing narrow skills this replaces
158
+ (if any)
159
+ - confidence: 0.0-1.0
160
+ - source_ids: JSON array of {id, type} objects
161
+ - supersedes: JSON array of skill names this replaces (or omit)
162
+
163
+ If an existing candidate has new evidence, use action: update.
164
+
165
+ Dismiss candidates whose underlying knowledge has been
166
+ superseded (action: update, status: dismissed).
167
+
168
+ Report a summary via vault_report.
173
169
  tools:
174
170
  - vault_skill_candidates
175
171
  - vault_skill_records
176
172
  - vault_set_state
177
173
  - vault_report
178
- maxTurns: 12
174
+ maxTurns: 15
179
175
  required: true
180
176
  dependsOn:
181
- - explore-spores
182
- - explore-sessions
183
- - explore-plans
177
+ - explore
@@ -13,7 +13,7 @@ description: >
13
13
  Records resolution events for audit trail.
14
14
  agent: myco-agent
15
15
  isDefault: false
16
- model: claude-sonnet-4-6
16
+ model: claude-haiku-4-5-20251001
17
17
  maxTurns: 30
18
18
  timeoutSeconds: 300
19
19
 
package/dist/src/cli.js CHANGED
@@ -2,5 +2,5 @@
2
2
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
3
3
 
4
4
  // src/entries/cli.ts
5
- await import("../cli-IIMBALPV.js");
5
+ await import("../cli-D3TJYJ2U.js");
6
6
  //# sourceMappingURL=cli.js.map
@@ -1,6 +1,6 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
 
3
3
  // src/entries/daemon.ts
4
- var { main } = await import("../../main-6PY3ITQ5.js");
4
+ var { main } = await import("../../main-5THODR77.js");
5
5
  await main();
6
6
  //# sourceMappingURL=main.js.map
@@ -1,6 +1,6 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
 
3
3
  // src/entries/post-tool-use.ts
4
- var { main } = await import("../../post-tool-use-DENRI5WB.js");
4
+ var { main } = await import("../../post-tool-use-FRTSICC3.js");
5
5
  await main();
6
6
  //# sourceMappingURL=post-tool-use.js.map
@@ -1,6 +1,6 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
 
3
3
  // src/entries/session-end.ts
4
- var { main } = await import("../../session-end-OUTY7AFF.js");
4
+ var { main } = await import("../../session-end-SMU55UCM.js");
5
5
  await main();
6
6
  //# sourceMappingURL=session-end.js.map
@@ -1,6 +1,6 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
 
3
3
  // src/entries/session-start.ts
4
- var { main } = await import("../../session-start-5MB3LFOA.js");
4
+ var { main } = await import("../../session-start-NIMWEOIZ.js");
5
5
  await main();
6
6
  //# sourceMappingURL=session-start.js.map
@@ -1,6 +1,6 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
 
3
3
  // src/entries/stop.ts
4
- var { main } = await import("../../stop-YGHODSP7.js");
4
+ var { main } = await import("../../stop-7AKYBJJ2.js");
5
5
  await main();
6
6
  //# sourceMappingURL=stop.js.map
@@ -1,6 +1,6 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
 
3
3
  // src/entries/user-prompt-submit.ts
4
- var { main } = await import("../../user-prompt-submit-4IBFUYQ3.js");
4
+ var { main } = await import("../../user-prompt-submit-IZJC3NV7.js");
5
5
  await main();
6
6
  //# sourceMappingURL=user-prompt-submit.js.map
@@ -1,6 +1,6 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
 
3
3
  // src/entries/mcp-server.ts
4
- var { main } = await import("../../server-6SUNYDV7.js");
4
+ var { main } = await import("../../server-PTXLVVEE.js");
5
5
  await main();
6
6
  //# sourceMappingURL=server.js.map
@@ -11,6 +11,8 @@ hookFields:
11
11
  lastResponse: last_assistant_message
12
12
  capture:
13
13
  planDirs: []
14
+ planTags:
15
+ - proposed_plan
14
16
  rules:
15
17
  # Ephemeral sub-invocation filter (structural, two-layer defense).
16
18
  #
@@ -51,13 +53,49 @@ capture:
51
53
  action: drop
52
54
  reason: ephemeral-sub-invocation
53
55
 
54
- # NOTE: the VS Code Codex extension wraps user prompts with an IDE
55
- # context preamble (`# Context from my IDE setup:` … `## My request
56
- # for Codex:`). Stripping that belongs in a follow-up that keys on
57
- # the transcript's `session_meta.payload.originator == "codex_vscode"`
58
- # signal rather than matching preamble text, per Chris's correctness
59
- # bar against fragile text matching. Leaving unfiltered for now —
60
- # session data is preserved, just shown with the wrapper.
56
+ # Layer 3 sub-agent thread spawn filter (structural).
57
+ #
58
+ # Codex spawns sub-agents (thread_spawn) for code review, testing,
59
+ # etc. These write real transcript files but are NOT user-initiated
60
+ # sessions. Their session_meta has:
61
+ # "source": {"subagent": {"thread_spawn": {...}}}
62
+ # while user sessions have "source": "vscode" or similar strings.
63
+ #
64
+ # This condition reads the transcript's first JSON line and checks
65
+ # the dot-path field — a structural signal that won't drift.
66
+ - event: session_start
67
+ scope: this_agent
68
+ when:
69
+ transcript_meta_field_exists: source.subagent
70
+ action: drop
71
+ reason: subagent-thread-spawn
72
+
73
+ # Layer 4 — safety net for sub-agent prompts that slip past
74
+ # SessionStart (e.g. transcript not yet flushed).
75
+ - event: user_prompt
76
+ scope: this_agent
77
+ when:
78
+ transcript_meta_field_exists: source.subagent
79
+ action: drop
80
+ reason: subagent-thread-spawn
81
+
82
+ # Codex Desktop wraps user prompts with a file-mention preamble when
83
+ # screenshots or files are attached:
84
+ # "# Files mentioned by the user:\n## <filename>: <path>\n## My request for Codex:\n<actual prompt>"
85
+ # Strip the preamble so the captured prompt contains only the user's text.
86
+ - event: user_prompt
87
+ scope: this_agent
88
+ when:
89
+ prompt_contains: "## My request for Codex:"
90
+ action: rewrite_prompt
91
+ extract_after: "## My request for Codex:\n"
92
+ reason: codex-desktop-file-preamble
93
+
94
+ # NOTE: the VS Code Codex extension uses a different preamble
95
+ # (`# Context from my IDE setup:` … `## My request for Codex:`).
96
+ # Keying on the shared `## My request for Codex:` marker above
97
+ # should handle both Desktop and VS Code variants. If the VS Code
98
+ # preamble diverges, add a separate rule keyed on its specific marker.
61
99
  registration:
62
100
  hooksTarget: .codex/hooks.json
63
101
  mcpTarget: .codex/config.toml
@@ -32,6 +32,7 @@ const EMBEDDABLE_TABLES: Record<string, string> = {
32
32
  sessions: 'summary',
33
33
  plans: 'content',
34
34
  artifacts: 'content',
35
+ skill_records: 'description',
35
36
  };
36
37
 
37
38
  /** All tables the sync endpoint accepts records for. */
@@ -269,7 +270,11 @@ async function handleSync(request: Request, env: Env): Promise<Response> {
269
270
  if (table === 'spores' && record.data.status === 'superseded') {
270
271
  embeddingTasks.push(() => deleteVector(env, table, id, machine_id));
271
272
  } else {
272
- embeddingTasks.push(() => embedAndUpsert(env, table, id, machine_id, textContent));
273
+ // Include domain-specific metadata for richer search results
274
+ const extra: Record<string, string> = {};
275
+ if (table === 'skill_records' && record.data.name) extra.name = record.data.name as string;
276
+ if (table === 'spores' && record.data.observation_type) extra.observation_type = record.data.observation_type as string;
277
+ embeddingTasks.push(() => embedAndUpsert(env, table, id, machine_id, textContent, extra));
273
278
  }
274
279
  }
275
280
  }
@@ -311,6 +316,7 @@ async function embedAndUpsert(
311
316
  id: string,
312
317
  machineId: string,
313
318
  text: string,
319
+ extra?: Record<string, string>,
314
320
  ): Promise<void> {
315
321
  const vector = await embedText(env.AI, text);
316
322
  const vid = vectorId(table, id, machineId);
@@ -318,7 +324,7 @@ async function embedAndUpsert(
318
324
  {
319
325
  id: vid,
320
326
  values: vector,
321
- metadata: { table, id, machine_id: machineId },
327
+ metadata: { table, id, machine_id: machineId, ...extra },
322
328
  },
323
329
  ]);
324
330
  }
@@ -198,6 +198,7 @@ const SKILL_CANDIDATES_TABLE = `
198
198
  status TEXT NOT NULL DEFAULT 'identified',
199
199
  source_ids TEXT NOT NULL DEFAULT '[]',
200
200
  skill_id TEXT,
201
+ supersedes TEXT,
201
202
  approved_at INTEGER,
202
203
  created_at INTEGER NOT NULL,
203
204
  updated_at INTEGER NOT NULL,
@@ -301,6 +302,7 @@ export async function initD1Schema(db: D1Database): Promise<void> {
301
302
  const migrations = [
302
303
  'ALTER TABLE skill_usage ADD COLUMN synced_at INTEGER',
303
304
  'ALTER TABLE skill_candidates ADD COLUMN approved_at INTEGER',
305
+ 'ALTER TABLE skill_candidates ADD COLUMN supersedes TEXT',
304
306
  ];
305
307
  for (const sql of migrations) {
306
308
  try {
@@ -1,17 +1,17 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  gatherStats
4
- } from "./chunk-DLFDBKEV.js";
4
+ } from "./chunk-LSP5HYOO.js";
5
5
  import {
6
6
  initVaultDb
7
- } from "./chunk-55QEICRO.js";
7
+ } from "./chunk-TLK46KKD.js";
8
8
  import "./chunk-SAKJMNSR.js";
9
9
  import "./chunk-WYOE4IAX.js";
10
10
  import "./chunk-CML4MCYF.js";
11
11
  import "./chunk-2V7HR7HB.js";
12
12
  import "./chunk-MYX5NCRH.js";
13
- import "./chunk-BUIR3JWM.js";
14
- import "./chunk-EO2RQW4S.js";
13
+ import "./chunk-XWOQL4XN.js";
14
+ import "./chunk-W7WENJ6F.js";
15
15
  import "./chunk-LPUQPDC2.js";
16
16
  import "./chunk-CKJAWZQE.js";
17
17
  import "./chunk-E7NUADTQ.js";
@@ -92,4 +92,4 @@ function formatUptime(seconds) {
92
92
  export {
93
93
  run
94
94
  };
95
- //# sourceMappingURL=stats-DGI6B3HX.js.map
95
+ //# sourceMappingURL=stats-GEOQ2DFF.js.map
@@ -2,15 +2,15 @@ import { createRequire as __cr } from 'node:module'; const require = __cr(import
2
2
  import {
3
3
  normalizeHookInput,
4
4
  readStdin
5
- } from "./chunk-PFWIPRF6.js";
5
+ } from "./chunk-YZPI2Y3E.js";
6
6
  import {
7
7
  resolveVaultDir
8
8
  } from "./chunk-5ZT2Q6P5.js";
9
9
  import {
10
10
  DaemonClient
11
- } from "./chunk-BUIR3JWM.js";
12
- import "./chunk-EO2RQW4S.js";
13
- import "./chunk-FABWUX5G.js";
11
+ } from "./chunk-XWOQL4XN.js";
12
+ import "./chunk-W7WENJ6F.js";
13
+ import "./chunk-DPSLJ242.js";
14
14
  import "./chunk-LPUQPDC2.js";
15
15
  import "./chunk-CKJAWZQE.js";
16
16
  import "./chunk-E7NUADTQ.js";
@@ -45,4 +45,4 @@ async function main() {
45
45
  export {
46
46
  main
47
47
  };
48
- //# sourceMappingURL=stop-YGHODSP7.js.map
48
+ //# sourceMappingURL=stop-7AKYBJJ2.js.map
@@ -1,13 +1,13 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  sendEvent
4
- } from "./chunk-VOCGURV7.js";
4
+ } from "./chunk-N75GMQGA.js";
5
5
  import "./chunk-V7XG6V6C.js";
6
- import "./chunk-PFWIPRF6.js";
6
+ import "./chunk-YZPI2Y3E.js";
7
7
  import "./chunk-5ZT2Q6P5.js";
8
- import "./chunk-BUIR3JWM.js";
9
- import "./chunk-EO2RQW4S.js";
10
- import "./chunk-FABWUX5G.js";
8
+ import "./chunk-XWOQL4XN.js";
9
+ import "./chunk-W7WENJ6F.js";
10
+ import "./chunk-DPSLJ242.js";
11
11
  import "./chunk-LPUQPDC2.js";
12
12
  import "./chunk-CKJAWZQE.js";
13
13
  import "./chunk-E7NUADTQ.js";
@@ -27,4 +27,4 @@ async function main() {
27
27
  export {
28
28
  main
29
29
  };
30
- //# sourceMappingURL=stop-failure-7IJTPJ6W.js.map
30
+ //# sourceMappingURL=stop-failure-NLE2EURG.js.map
@@ -1,13 +1,13 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  sendEvent
4
- } from "./chunk-VOCGURV7.js";
4
+ } from "./chunk-N75GMQGA.js";
5
5
  import "./chunk-V7XG6V6C.js";
6
- import "./chunk-PFWIPRF6.js";
6
+ import "./chunk-YZPI2Y3E.js";
7
7
  import "./chunk-5ZT2Q6P5.js";
8
- import "./chunk-BUIR3JWM.js";
9
- import "./chunk-EO2RQW4S.js";
10
- import "./chunk-FABWUX5G.js";
8
+ import "./chunk-XWOQL4XN.js";
9
+ import "./chunk-W7WENJ6F.js";
10
+ import "./chunk-DPSLJ242.js";
11
11
  import "./chunk-LPUQPDC2.js";
12
12
  import "./chunk-CKJAWZQE.js";
13
13
  import "./chunk-E7NUADTQ.js";
@@ -27,4 +27,4 @@ async function main() {
27
27
  export {
28
28
  main
29
29
  };
30
- //# sourceMappingURL=subagent-start-ZBQ5PJB5.js.map
30
+ //# sourceMappingURL=subagent-start-LBNZF2TG.js.map