@goondocks/myco 0.3.7 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121) hide show
  1. package/.claude-plugin/marketplace.json +1 -1
  2. package/.claude-plugin/plugin.json +1 -1
  3. package/README.md +9 -4
  4. package/commands/init.md +63 -39
  5. package/commands/setup-llm.md +69 -44
  6. package/commands/status.md +28 -10
  7. package/dist/{chunk-YFG2O5HR.js → chunk-2GJFTIWX.js} +2 -2
  8. package/dist/{chunk-ISCT2SI6.js → chunk-6UJWI4IW.js} +7359 -60
  9. package/dist/chunk-6UJWI4IW.js.map +1 -0
  10. package/dist/{chunk-PA3VMINE.js → chunk-AK6GNLPV.js} +6 -1
  11. package/dist/chunk-AK6GNLPV.js.map +1 -0
  12. package/dist/{chunk-JKOALBZC.js → chunk-BNIYWCST.js} +2 -2
  13. package/dist/{chunk-AWF3M57N.js → chunk-FPEDTLQ6.js} +9 -9
  14. package/dist/{chunk-AWF3M57N.js.map → chunk-FPEDTLQ6.js.map} +1 -1
  15. package/dist/{chunk-QWU7QLZI.js → chunk-I7PNZEBO.js} +10 -10
  16. package/dist/chunk-I7PNZEBO.js.map +1 -0
  17. package/dist/{chunk-7WNE22W7.js → chunk-IVS5MYBL.js} +3 -3
  18. package/dist/{chunk-7WNE22W7.js.map → chunk-IVS5MYBL.js.map} +1 -1
  19. package/dist/{chunk-7VPJK56U.js → chunk-JBD5KP5G.js} +31 -16
  20. package/dist/chunk-JBD5KP5G.js.map +1 -0
  21. package/dist/chunk-MIU3DKLN.js +37 -0
  22. package/dist/chunk-MIU3DKLN.js.map +1 -0
  23. package/dist/{chunk-NYAWCMRZ.js → chunk-OUFSLZTX.js} +4 -4
  24. package/dist/chunk-P7RNAYU7.js +242 -0
  25. package/dist/chunk-P7RNAYU7.js.map +1 -0
  26. package/dist/chunk-T7OC6GH5.js +99 -0
  27. package/dist/chunk-T7OC6GH5.js.map +1 -0
  28. package/dist/chunk-TBRZAJ7W.js +135 -0
  29. package/dist/chunk-TBRZAJ7W.js.map +1 -0
  30. package/dist/chunk-UKWO26VI.js +147 -0
  31. package/dist/chunk-UKWO26VI.js.map +1 -0
  32. package/dist/{chunk-FFQNE6CT.js → chunk-V2OWD2VV.js} +45 -31
  33. package/dist/chunk-V2OWD2VV.js.map +1 -0
  34. package/dist/chunk-WBT5DWGC.js +49 -0
  35. package/dist/chunk-WBT5DWGC.js.map +1 -0
  36. package/dist/{chunk-LR7RQCOB.js → chunk-XCPQHC4X.js} +2 -2
  37. package/dist/{chunk-CCIV47S4.js → chunk-XHWIIU5D.js} +8 -9
  38. package/dist/chunk-XHWIIU5D.js.map +1 -0
  39. package/dist/{chunk-ZBNT6E22.js → chunk-ZCBL5HER.js} +2 -2
  40. package/dist/{cli-3WQSDSW6.js → cli-IGZA3TZC.js} +23 -17
  41. package/dist/cli-IGZA3TZC.js.map +1 -0
  42. package/dist/{client-5T4M42UQ.js → client-5SUO2UYH.js} +5 -5
  43. package/dist/{config-MD4XMLUS.js → config-5FGLQGCW.js} +4 -4
  44. package/dist/{detect-providers-LNOLBICR.js → detect-providers-5FU3BN5Q.js} +3 -3
  45. package/dist/{init-RALMQKOQ.js → init-M3GDZRKI.js} +51 -60
  46. package/dist/init-M3GDZRKI.js.map +1 -0
  47. package/dist/{main-S3WSUF5T.js → main-3JSO25IZ.js} +657 -228
  48. package/dist/main-3JSO25IZ.js.map +1 -0
  49. package/dist/{rebuild-JW6BCHHZ.js → rebuild-MW4GCY6Z.js} +10 -10
  50. package/dist/rebuild-MW4GCY6Z.js.map +1 -0
  51. package/dist/{reprocess-SNXFNKBN.js → reprocess-SWRFIIDZ.js} +18 -18
  52. package/dist/reprocess-SWRFIIDZ.js.map +1 -0
  53. package/dist/{restart-YE2IGOYT.js → restart-5UY2KV54.js} +6 -6
  54. package/dist/{search-2HMG3ON7.js → search-IYVMRZU2.js} +9 -9
  55. package/dist/{server-JM3TM7D2.js → server-FSUSHJ3Y.js} +77 -54
  56. package/dist/{server-JM3TM7D2.js.map → server-FSUSHJ3Y.js.map} +1 -1
  57. package/dist/{session-5GI2YU6R.js → session-QF6MILAC.js} +2 -2
  58. package/dist/{session-start-2UEEEO52.js → session-start-YB4A4PZB.js} +29 -28
  59. package/dist/session-start-YB4A4PZB.js.map +1 -0
  60. package/dist/setup-digest-6TK5SPS6.js +15 -0
  61. package/dist/setup-llm-UGZBURZJ.js +15 -0
  62. package/dist/setup-llm-UGZBURZJ.js.map +1 -0
  63. package/dist/src/cli.js +4 -4
  64. package/dist/src/daemon/main.js +4 -4
  65. package/dist/src/hooks/post-tool-use.js +5 -5
  66. package/dist/src/hooks/session-end.js +5 -5
  67. package/dist/src/hooks/session-start.js +4 -4
  68. package/dist/src/hooks/stop.js +7 -7
  69. package/dist/src/hooks/user-prompt-submit.js +5 -5
  70. package/dist/src/hooks/user-prompt-submit.js.map +1 -1
  71. package/dist/src/mcp/server.js +4 -4
  72. package/dist/src/prompts/classification.md +1 -0
  73. package/dist/src/prompts/digest-10000.md +74 -0
  74. package/dist/src/prompts/digest-1500.md +25 -0
  75. package/dist/src/prompts/digest-3000.md +32 -0
  76. package/dist/src/prompts/digest-5000.md +43 -0
  77. package/dist/src/prompts/digest-system.md +32 -0
  78. package/dist/src/prompts/extraction.md +11 -10
  79. package/dist/src/prompts/summary.md +11 -1
  80. package/dist/src/prompts/title.md +1 -1
  81. package/dist/{stats-IOWXG576.js → stats-IVIXIKTS.js} +12 -12
  82. package/dist/stats-IVIXIKTS.js.map +1 -0
  83. package/dist/{verify-7MWOV72E.js → verify-WEGRM4W2.js} +6 -6
  84. package/dist/{version-S7MHLD5P.js → version-5B2TWXQJ.js} +4 -4
  85. package/dist/version-5B2TWXQJ.js.map +1 -0
  86. package/package.json +1 -1
  87. package/skills/myco/SKILL.md +20 -20
  88. package/skills/myco/references/wisdom.md +14 -14
  89. package/skills/rules/SKILL.md +4 -4
  90. package/dist/chunk-7VPJK56U.js.map +0 -1
  91. package/dist/chunk-BA23DROX.js +0 -160
  92. package/dist/chunk-BA23DROX.js.map +0 -1
  93. package/dist/chunk-CCIV47S4.js.map +0 -1
  94. package/dist/chunk-EF4JVH24.js +0 -7299
  95. package/dist/chunk-EF4JVH24.js.map +0 -1
  96. package/dist/chunk-FFQNE6CT.js.map +0 -1
  97. package/dist/chunk-ISCT2SI6.js.map +0 -1
  98. package/dist/chunk-PA3VMINE.js.map +0 -1
  99. package/dist/chunk-QWU7QLZI.js.map +0 -1
  100. package/dist/chunk-YMYJ7FNH.js +0 -19
  101. package/dist/chunk-YMYJ7FNH.js.map +0 -1
  102. package/dist/cli-3WQSDSW6.js.map +0 -1
  103. package/dist/init-RALMQKOQ.js.map +0 -1
  104. package/dist/main-S3WSUF5T.js.map +0 -1
  105. package/dist/rebuild-JW6BCHHZ.js.map +0 -1
  106. package/dist/reprocess-SNXFNKBN.js.map +0 -1
  107. package/dist/session-start-2UEEEO52.js.map +0 -1
  108. package/dist/stats-IOWXG576.js.map +0 -1
  109. /package/dist/{chunk-YFG2O5HR.js.map → chunk-2GJFTIWX.js.map} +0 -0
  110. /package/dist/{chunk-JKOALBZC.js.map → chunk-BNIYWCST.js.map} +0 -0
  111. /package/dist/{chunk-NYAWCMRZ.js.map → chunk-OUFSLZTX.js.map} +0 -0
  112. /package/dist/{chunk-LR7RQCOB.js.map → chunk-XCPQHC4X.js.map} +0 -0
  113. /package/dist/{chunk-ZBNT6E22.js.map → chunk-ZCBL5HER.js.map} +0 -0
  114. /package/dist/{client-5T4M42UQ.js.map → client-5SUO2UYH.js.map} +0 -0
  115. /package/dist/{config-MD4XMLUS.js.map → config-5FGLQGCW.js.map} +0 -0
  116. /package/dist/{detect-providers-LNOLBICR.js.map → detect-providers-5FU3BN5Q.js.map} +0 -0
  117. /package/dist/{restart-YE2IGOYT.js.map → restart-5UY2KV54.js.map} +0 -0
  118. /package/dist/{search-2HMG3ON7.js.map → search-IYVMRZU2.js.map} +0 -0
  119. /package/dist/{session-5GI2YU6R.js.map → session-QF6MILAC.js.map} +0 -0
  120. /package/dist/{version-S7MHLD5P.js.map → setup-digest-6TK5SPS6.js.map} +0 -0
  121. /package/dist/{verify-7MWOV72E.js.map → verify-WEGRM4W2.js.map} +0 -0
@@ -12,7 +12,7 @@
12
12
  "source": {
13
13
  "source": "npm",
14
14
  "package": "@goondocks/myco",
15
- "version": "0.3.6"
15
+ "version": "0.4.0"
16
16
  },
17
17
  "description": "Collective agent intelligence — captures session knowledge and serves it back via MCP",
18
18
  "license": "MIT",
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "myco",
3
- "version": "0.3.7",
3
+ "version": "0.4.1",
4
4
  "description": "Collective agent intelligence — captures session knowledge and serves it back to your team via MCP",
5
5
  "author": {
6
6
  "name": "goondocks-co",
package/README.md CHANGED
@@ -37,9 +37,10 @@ Myco captures everything your AI agents do — sessions, decisions, plans, disco
37
37
  myco_search("how did we handle auth?") → semantically matched sessions, decisions, and linked context
38
38
  myco_recall("migration plan") → full decision history with session lineage
39
39
  myco_remember(observation) → persist a discovery for the team
40
+ myco_context(tier: 3000) → pre-computed project understanding, instantly available
40
41
  ```
41
42
 
42
- **For humans** — open the vault in Obsidian and browse the intelligence graph visually. Sessions link to plans, plans link to decisions, decisions link to memories. It's all Markdown with backlinks — your team's connected knowledge, navigable and searchable.
43
+ **For humans** — open the vault in Obsidian and browse the intelligence graph visually. Sessions link to plans, plans link to decisions, decisions link to spores. It's all Markdown with backlinks — your team's connected knowledge, navigable and searchable.
43
44
 
44
45
  **For teams** — the vault is a Git-friendly directory of Markdown files. Share it through your existing Git workflow.
45
46
 
@@ -47,7 +48,11 @@ myco_remember(observation) → persist a discovery for the team
47
48
 
48
49
  ### Capture
49
50
 
50
- A background daemon reads your agent's conversation transcript after each turn — the full dialogue including prompts, AI responses, tool calls, and screenshots. Observations (decisions, gotchas, discoveries) are extracted automatically via a local LLM and written as linked vault notes.
51
+ A background daemon reads your agent's conversation transcript after each turn — the full dialogue including prompts, AI responses, tool calls, and screenshots. Observations called **spores** (decisions, gotchas, discoveries, trade-offs, bug fixes) are extracted automatically via a local LLM and written as linked vault notes.
52
+
53
+ ### Digest
54
+
55
+ A **continuous reasoning engine** runs inside the daemon, periodically synthesizing all accumulated knowledge into tiered context extracts. These pre-computed summaries give agents an instant, rich understanding of the project at session start — no searching required. Four tiers serve different needs: executive briefing (1.5K tokens), team standup (3K), deep onboarding (5K), and institutional knowledge (10K).
51
56
 
52
57
  ### Index
53
58
 
@@ -55,11 +60,11 @@ Every note is indexed for both keyword search (SQLite FTS5) and semantic search
55
60
 
56
61
  ### Serve
57
62
 
58
- An MCP server exposes the vault to any agent runtime. Relevant memories are injected into every prompt automatically no manual lookup needed. Agents build on your team's accumulated knowledge without being told to.
63
+ An MCP server exposes the vault to any agent runtime. The digest extract is injected at session start for immediate context, and relevant spores are injected per-prompt for targeted intelligence. Agents build on your team's accumulated knowledge without being told to.
59
64
 
60
65
  ### Connect
61
66
 
62
- Sessions link to plans. Plans link to decisions. Decisions link to memories. Obsidian backlinks and metadata create a navigable graph of your team's institutional knowledge. Open the vault in [Obsidian](https://obsidian.md) to browse it visually, or let agents traverse it via MCP tools.
67
+ Sessions link to plans. Plans link to decisions. Decisions link to spores. Obsidian backlinks and metadata create a navigable graph of your team's institutional knowledge. Open the vault in [Obsidian](https://obsidian.md) to browse it visually, or let agents traverse it via MCP tools.
63
68
 
64
69
  ### Multi-agent
65
70
 
package/commands/init.md CHANGED
@@ -9,20 +9,24 @@ Guide the user through setup using the composable CLI commands. **Do NOT create
9
9
 
10
10
  **Ask each question one at a time using AskUserQuestion with selectable options.** Wait for the user's answer before proceeding to the next question. Do NOT combine multiple questions into one message.
11
11
 
12
- ## Step 1: Detect available providers
12
+ The streamlined setup asks just four questions: vault location, provider, model, and embedding model. One model handles everything — hooks, extraction, summaries, and digest — sized for the most demanding task (digestion). Advanced configuration is available via CLI commands after init.
13
13
 
14
- Run the provider detection command to see what's available:
14
+ ## Step 1: Detect available providers and system capabilities
15
+
16
+ Run the provider detection command and detect system RAM:
15
17
 
16
18
  ```bash
17
19
  node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js detect-providers
18
20
  ```
19
21
 
22
+ Detect RAM:
23
+ - **macOS**: `sysctl -n hw.memsize` (bytes → GB)
24
+ - **Linux**: parse `/proc/meminfo` for `MemTotal`
25
+
20
26
  Parse the JSON output. This tells you which providers are running and what models are available.
21
27
 
22
28
  ## Step 2: Choose vault location
23
29
 
24
- Ask the user:
25
-
26
30
  **Question:** "Where would you like to store the Myco vault?"
27
31
 
28
32
  **Options:**
@@ -30,69 +34,89 @@ Ask the user:
30
34
  - "Centralized (~/.myco/vaults/<project-name>/)" — vault stays outside the repo, good for public repos or personal use
31
35
  - "Custom path" — specify your own location
32
36
 
33
- If the user picks "Custom path", ask them to type the path.
37
+ ## Step 3: Choose provider and model
38
+
39
+ **Question:** "Which LLM provider and model?"
40
+
41
+ List only providers where `available` is `true`. Recommend a model sized for digest based on detected RAM:
42
+
43
+ | RAM | Recommended Model | Digest Context |
44
+ |-----|-------------------|----------------|
45
+ | **64GB+** | `qwen3.5:35b` (MoE, recommended) | 65536 |
46
+ | **32–64GB** | `qwen3.5:27b` | 32768 |
47
+ | **16–32GB** | `qwen3.5:latest` (~10B) | 16384 |
48
+ | **8–16GB** | `qwen3.5:4b` | 8192 |
34
49
 
35
- ## Step 3: Choose LLM provider
50
+ The same model handles hooks (at 8K context), extraction, summaries, and digest (at the larger context from the table). No separate model configuration needed.
36
51
 
37
- Using the detected providers from Step 1, ask the user:
52
+ If the model isn't installed, offer to pull it:
53
+ - **Ollama**: `ollama pull qwen3.5`
54
+ - **LM Studio**: search for `qwen3.5` in the model browser
38
55
 
39
- **Question:** "Which LLM provider for summarization?"
56
+ ## Step 4: Choose embedding model
40
57
 
41
- **Options:** List only providers where `available` is `true`, with recommended models. Example:
42
- - "Ollama — gpt-oss (recommended)"
43
- - "LM Studio — openai/gpt-oss-20b"
44
- - "Anthropic"
58
+ **Question:** "Which embedding model?"
45
59
 
46
- After the user picks a provider, ask them to choose a specific model from that provider's model list (from the detect-providers output).
60
+ **Options:** List only providers that support embeddings (Anthropic does not):
61
+ - **Ollama** — list available embedding models. If none are available, offer to pull one (e.g., `bge-m3` or `nomic-embed-text`).
62
+ - **LM Studio** — filter the model list for names containing `text-embedding`. If none are available, guide the user to search for and download an embedding model through LM Studio's model browser.
47
63
 
48
- ## Step 4: Choose embedding provider
64
+ If no embedding models are available on the chosen provider, help the user get one before proceeding.
49
65
 
50
- Ask the user:
66
+ ## Step 5: Choose digest inject tier
51
67
 
52
- **Question:** "Which embedding provider?"
68
+ **Question:** "How much context should the agent receive at session start?"
53
69
 
54
- **Options:** List only providers where `available` is `true` and that support embeddings (Anthropic does not). Example:
55
- - "Ollama — bge-m3 (recommended)"
56
- - "LM Studio — text-embedding-bge-m3"
70
+ Based on RAM, present the recommended tiers:
57
71
 
58
- After the user picks a provider, ask them to choose a specific embedding model.
72
+ | RAM | Options | Default |
73
+ |-----|---------|---------|
74
+ | **64GB+** | 1500, 3000, 5000, 10000 | 3000 |
75
+ | **32–64GB** | 1500, 3000, 5000 | 3000 |
76
+ | **16–32GB** | 1500, 3000 | 1500 |
77
+ | **8–16GB** | 1500 | 1500 |
59
78
 
60
- If the recommended embedding model isn't available, offer to pull it:
61
- - **Ollama**: `ollama pull bge-m3`
79
+ **Options:**
80
+ - "1500 — executive briefing (fastest, lightest)"
81
+ - "3000 — team standup (recommended)"
82
+ - "5000 — deep onboarding"
83
+ - "10000 — institutional knowledge (richest)"
84
+
85
+ This controls what gets auto-injected at the start of every session. Agents can always request a different tier on-demand via the `myco_context` tool.
62
86
 
63
- ## Step 5: Run init with all gathered inputs
87
+ ## Step 6: Run init and configure
64
88
 
65
- Pass everything to the init command in a single call:
89
+ Create the vault and apply settings:
66
90
 
67
91
  ```bash
92
+ # Create vault structure and base config
68
93
  node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js init \
69
94
  --vault <chosen-path> \
70
95
  --llm-provider <provider> \
71
96
  --llm-model <model> \
72
- --llm-url <base-url> \
73
- --embedding-provider <provider> \
74
- --embedding-model <model> \
75
- --embedding-url <base-url>
76
- ```
97
+ --embedding-provider <embedding-provider> \
98
+ --embedding-model <embedding-model>
77
99
 
78
- The CLI creates the vault structure, writes myco.yaml, .gitignore, _dashboard.md, initializes the FTS index, and configures MYCO_VAULT_DIR if the vault is external.
79
-
80
- ## Step 6: Verify connectivity
100
+ # Set digest context window and inject tier based on user choices
101
+ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-digest \
102
+ --context-window <from-ram-table> \
103
+ --inject-tier <chosen-tier>
104
+ ```
81
105
 
82
- Run the verify command to confirm providers are reachable:
106
+ ## Step 7: Verify connectivity
83
107
 
84
108
  ```bash
85
109
  node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js verify
86
110
  ```
87
111
 
88
- If verification fails, help the user troubleshoot (check if the provider is running, model is loaded, etc.).
89
-
90
- ## Step 7: Display summary
112
+ If verification fails, help the user troubleshoot.
91
113
 
92
- Show the user a setup summary table:
114
+ ## Step 8: Display summary
93
115
 
94
116
  | Setting | Value |
95
117
  |---------|-------|
96
118
  | Vault path | `<resolved path>` |
97
- | LLM provider | `<provider>` / `<model>` |
98
- | Embedding provider | `<provider>` / `<model>` |
119
+ | Provider | `<provider>` / `<model>` |
120
+ | Embedding | `<embedding-provider>` / `<embedding-model>` |
121
+ | Digest | enabled (context: `<context-window>`) |
122
+ | RAM detected | `<X>` GB |
@@ -7,11 +7,13 @@ description: Configure or change the intelligence backend (Ollama, LM Studio, or
7
7
 
8
8
  Guide the user through configuring their intelligence backend. This command can be run at any time to change providers or models.
9
9
 
10
+ The streamlined setup asks just three questions: provider, model, and embedding model. One model handles everything — hooks, extraction, summaries, and digest — at different context windows per request. Advanced configuration is available via the CLI for power users.
11
+
10
12
  ## Prerequisites
11
13
 
12
14
  Read the existing `myco.yaml` from the vault directory to show current settings before making changes.
13
15
 
14
- ## Step 1: Detect available providers
16
+ ## Step 1: Detect available providers and system capabilities
15
17
 
16
18
  Check which providers are reachable:
17
19
 
@@ -19,66 +21,62 @@ Check which providers are reachable:
19
21
  - **LM Studio** — fetch `http://localhost:1234/v1/models`, list model names
20
22
  - **Anthropic** — check if `ANTHROPIC_API_KEY` is set in the environment
21
23
 
22
- Report which are available and which are not.
23
-
24
- ## Step 2: Choose LLM provider
24
+ Detect system RAM for recommendations:
25
+ - **macOS**: `sysctl -n hw.memsize` (bytes → GB)
26
+ - **Linux**: parse `/proc/meminfo` for `MemTotal`
25
27
 
26
- Ask the user to select from available providers:
28
+ Report which providers are available and the detected RAM.
27
29
 
28
- - **Ollama** list available models
29
- - **LM Studio** — list available models
30
- - **Anthropic** — verify API key works, default model `claude-haiku-4-5-20251001`
30
+ ## Step 2: Choose provider and model
31
31
 
32
- Recommended summarization models by hardware tier:
32
+ Ask the user to select from available providers. After picking a provider, recommend a model sized for digest (the most demanding task). The same model handles hooks and extraction at smaller context windows automatically.
33
33
 
34
- | Tier | Models | RAM |
35
- |------|--------|-----|
36
- | **High** | `gpt-oss` (~20B), `gemma3:27b`, `qwen3.5:14b` | 16GB+ |
37
- | **Mid** | `qwen3.5:8b`, `gemma3:12b` | 8GB+ |
38
- | **Light** | `gemma3:4b`, `qwen3.5:4b` | 4GB+ |
34
+ Recommended models by hardware tier — Qwen 3.5 is preferred for its strong instruction-following and synthesis quality:
39
35
 
40
- Any instruction-tuned model that handles JSON output works. Prefer what the user already has loaded.
36
+ | RAM | Model | Context for Digest |
37
+ |-----|-------|--------------------|
38
+ | **64GB+** | `qwen3.5:35b` (MoE, recommended) | 65536 |
39
+ | **32–64GB** | `qwen3.5:27b` | 32768 |
40
+ | **16–32GB** | `qwen3.5:latest` (~10B) | 16384 |
41
+ | **8–16GB** | `qwen3.5:4b` | 8192 |
41
42
 
42
- For local providers (Ollama, LM Studio), also configure:
43
- - `context_window` — ask or accept default of 8192
44
- - `max_tokens` — ask or accept default of 1024
43
+ Any instruction-tuned model that handles JSON output works. Prefer what the user already has loaded, but recommend Qwen 3.5 if they're starting fresh.
45
44
 
46
45
  If the chosen model isn't installed, offer to pull it:
47
- - **Ollama**: `ollama pull gpt-oss` (pulls latest tag automatically)
48
- - **LM Studio**: `lms get openai/gpt-oss-20b` (uses `owner/model` format)
46
+ - **Ollama**: `ollama pull qwen3.5` (pulls latest tag automatically)
47
+ - **LM Studio**: search for `qwen3.5` in the model browser
49
48
 
50
- These settings do not apply to Anthropic (API-managed).
49
+ ## Step 3: Choose embedding model
51
50
 
52
- ## Step 3: Choose embedding provider
51
+ Ask the user to select an embedding model — **Anthropic is not an option** (it doesn't support embeddings):
53
52
 
54
- Ask the user to select from available providers **Anthropic is not an option** (it doesn't support embeddings):
53
+ - **Ollama** — list available embedding models. If none are available, offer to pull one (e.g., `bge-m3` or `nomic-embed-text`).
54
+ - **LM Studio** — filter the model list for names containing `text-embedding`. If none are available, guide the user to search for and download an embedding model through LM Studio's model browser.
55
55
 
56
- - **Ollama** (recommended for embeddings) list available models, recommend **`bge-m3`** or `nomic-embed-text`
57
- - **LM Studio** — possible but not recommended for embeddings; better suited for LLM work
56
+ If no embedding models are available on the chosen provider, help the user get one before proceeding.
58
57
 
59
- If the embedding model isn't installed: `ollama pull bge-m3`
60
-
61
- **Important:** If the user changes the embedding model, the vector index must be rebuilt. Warn them:
58
+ **Important:** If the user changes the embedding model, warn them:
62
59
  > "Changing the embedding model will require a full rebuild of the vector index. Run `node dist/src/cli.js rebuild` after this change."
63
60
 
64
- ## Step 4: Update `myco.yaml`
65
-
66
- Write both `intelligence.llm` and `intelligence.embedding` sections with all values explicit:
67
-
68
- ```yaml
69
- intelligence:
70
- llm:
71
- provider: ollama
72
- model: gpt-oss
73
- base_url: http://localhost:11434
74
- context_window: 8192
75
- max_tokens: 1024
76
- embedding:
77
- provider: ollama
78
- model: bge-m3
79
- base_url: http://localhost:11434
61
+ ## Step 4: Apply settings
62
+
63
+ Use the CLI commands to write settings deterministically. The context window for the main LLM stays at 8192 (hooks don't need more). The digest context window is set based on the RAM tier recommendation.
64
+
65
+ ```bash
66
+ # Set provider and model
67
+ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-llm \
68
+ --llm-provider <provider> \
69
+ --llm-model <model> \
70
+ --embedding-provider <embedding-provider> \
71
+ --embedding-model <embedding-model>
72
+
73
+ # Set digest context window based on RAM tier (model inherits from main LLM)
74
+ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-digest \
75
+ --context-window <from-ram-table>
80
76
  ```
81
77
 
78
+ Only pass flags the user explicitly changed — Zod defaults handle the rest.
79
+
82
80
  If migrating from a v1 config (has `backend: local/cloud` structure), bump `version` to `2` and rewrite the entire intelligence section. The loader auto-maps `provider: haiku` to `anthropic`.
83
81
 
84
82
  ## Step 5: Verify and restart
@@ -87,3 +85,30 @@ If migrating from a v1 config (has `backend: local/cloud` structure), bump `vers
87
85
  2. Test the embedding provider with a test embedding
88
86
  3. Restart the daemon to pick up the new config: `node dist/src/cli.js restart`
89
87
  4. Report success or issues found
88
+
89
+ ## Advanced Configuration
90
+
91
+ For power users who want fine-grained control, all settings are available via CLI:
92
+
93
+ ```bash
94
+ # Separate digest model (e.g., larger model on LM Studio)
95
+ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-digest \
96
+ --provider lm-studio \
97
+ --model "qwen/qwen3.5-35b-a3b" \
98
+ --context-window 65536 \
99
+ --gpu-kv-cache false
100
+
101
+ # Custom tiers and injection
102
+ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-digest \
103
+ --tiers 1500,3000,5000,10000 \
104
+ --inject-tier 3000
105
+
106
+ # Capture token budgets
107
+ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-digest \
108
+ --extraction-tokens 2048 \
109
+ --summary-tokens 1024
110
+
111
+ # View current settings
112
+ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-llm --show
113
+ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-digest --show
114
+ ```
@@ -39,14 +39,24 @@ Query the FTS index for counts:
39
39
  | Metric | How to check |
40
40
  |--------|-------------|
41
41
  | Sessions | `index.query({ type: 'session' }).length` |
42
- | Memories | `index.query({ type: 'memory' }).length` |
42
+ | Spores | `index.query({ type: 'spore' }).length` |
43
43
  | Plans | `index.query({ type: 'plan' }).length` |
44
44
  | Artifacts | `index.query({ type: 'artifact' }).length` |
45
45
  | Embeddings | Vector index count |
46
46
 
47
- Also report memory breakdown by observation type (decision, gotcha, trade_off, etc.).
47
+ Also report spore breakdown by observation type (decision, gotcha, trade_off, etc.).
48
48
 
49
- ## Step 5: Intelligence backend health
49
+ ## Step 5: Digest status
50
+
51
+ Check the digest system state:
52
+
53
+ - **Enabled/disabled**: read `digest.enabled` from `myco.yaml`
54
+ - **Extracts**: list which tier files exist in `vault/digest/` (extract-1500.md, etc.) with file sizes and generated timestamps
55
+ - **Last cycle**: read last line of `vault/digest/trace.jsonl` — report cycle ID, timestamp, tiers generated, substrate count, duration
56
+ - **Metabolism**: report configured tiers, inject tier, and context window
57
+ - **Digest model**: if `digest.intelligence.model` is set, show it; otherwise note "inherits from main LLM"
58
+
59
+ ## Step 6: Intelligence backend health
50
60
 
51
61
  Test connectivity to the configured providers:
52
62
 
@@ -54,7 +64,7 @@ Test connectivity to the configured providers:
54
64
  - **Embedding provider**: call `isAvailable()` — report reachable or not
55
65
  - If either is unreachable, suggest running `/myco-setup-llm`
56
66
 
57
- ## Step 6: Pending issues
67
+ ## Step 7: Pending issues
58
68
 
59
69
  Check for problems:
60
70
 
@@ -63,13 +73,13 @@ Check for problems:
63
73
  - **Missing vectors**: does `vectors.db` exist? If not, embeddings are disabled
64
74
  - **Lineage**: does `lineage.json` exist? Report link count if so
65
75
 
66
- ## Step 7: Recent activity
76
+ ## Step 8: Recent activity
67
77
 
68
78
  Show the 3 most recent sessions with:
69
79
  - Session ID (short form)
70
80
  - Title
71
81
  - Started/ended timestamps
72
- - Number of memories extracted
82
+ - Number of spores extracted
73
83
  - Parent session (if lineage detected)
74
84
 
75
85
  ## Output format
@@ -92,18 +102,26 @@ Sessions: 1 active
92
102
 
93
103
  --- Vault ---
94
104
  Sessions: 12
95
- Memories: 183 (67 decision, 34 gotcha, 32 trade_off, 20 discovery, 19 bug_fix, 1 cross-cutting)
105
+ Spores: 183 (67 decision, 34 gotcha, 32 trade_off, 20 discovery, 19 bug_fix, 1 cross-cutting)
96
106
  Plans: 0
97
107
  Artifacts: 8
98
108
  Vectors: 224
99
109
 
110
+ --- Digest ---
111
+ Enabled: yes
112
+ Tiers: [1500, 3000, 5000, 10000]
113
+ Inject: 3000 (auto-inject at session start)
114
+ Model: gpt-oss (inherited from main LLM)
115
+ Last cycle: dc-a1b2c3 (2 min ago, 4 tiers, 12 notes, 45s)
116
+ Extracts: 1500 (1.1KB), 3000 (4.5KB), 5000 (6.9KB), 10000 (9.6KB)
117
+
100
118
  --- Lineage ---
101
119
  Links: 5 (3 clear, 1 inferred, 1 semantic_similarity)
102
120
 
103
121
  --- Recent Sessions ---
104
- 1. [abc123] "Auth redesign session" (2h 15m, 5 memories)
105
- 2. [def456] "Bug fix for CORS" (45m, 2 memories, parent: abc123)
106
- 3. [ghi789] "Config cleanup" (20m, 1 memory)
122
+ 1. [abc123] "Auth redesign session" (2h 15m, 5 spores)
123
+ 2. [def456] "Bug fix for CORS" (45m, 2 spores, parent: abc123)
124
+ 3. [ghi789] "Config cleanup" (20m, 1 spore)
107
125
 
108
126
  --- Issues ---
109
127
  None found.
@@ -1,7 +1,7 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  AgentRegistry
4
- } from "./chunk-JKOALBZC.js";
4
+ } from "./chunk-BNIYWCST.js";
5
5
 
6
6
  // src/version.ts
7
7
  import fs from "fs";
@@ -30,4 +30,4 @@ function readVersionFrom(dir) {
30
30
  export {
31
31
  getPluginVersion
32
32
  };
33
- //# sourceMappingURL=chunk-YFG2O5HR.js.map
33
+ //# sourceMappingURL=chunk-2GJFTIWX.js.map