@goondocks/myco 0.4.3 → 0.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90) hide show
  1. package/.claude-plugin/marketplace.json +1 -1
  2. package/.claude-plugin/plugin.json +1 -1
  3. package/dist/chunk-2AMAOSRF.js +105 -0
  4. package/dist/chunk-2AMAOSRF.js.map +1 -0
  5. package/dist/chunk-3F63SFZZ.js +381 -0
  6. package/dist/chunk-3F63SFZZ.js.map +1 -0
  7. package/dist/{chunk-WBT5DWGC.js → chunk-42R7KVAW.js} +2 -2
  8. package/dist/{chunk-GFBG73P4.js → chunk-5FIIK27E.js} +3 -3
  9. package/dist/{chunk-XCPQHC4X.js → chunk-6CAKKNGD.js} +2 -2
  10. package/dist/{chunk-I7PNZEBO.js → chunk-6LTNFMXO.js} +12 -1
  11. package/dist/{chunk-I7PNZEBO.js.map → chunk-6LTNFMXO.js.map} +1 -1
  12. package/dist/{chunk-V2OWD2VV.js → chunk-DKHYIA2V.js} +24 -146
  13. package/dist/chunk-DKHYIA2V.js.map +1 -0
  14. package/dist/{chunk-BNIYWCST.js → chunk-EQVQEFOA.js} +2 -2
  15. package/dist/{chunk-FPEDTLQ6.js → chunk-JJL6AMDA.js} +3 -101
  16. package/dist/chunk-JJL6AMDA.js.map +1 -0
  17. package/dist/{chunk-OUFSLZTX.js → chunk-KDWBZSOB.js} +21 -9
  18. package/dist/chunk-KDWBZSOB.js.map +1 -0
  19. package/dist/{chunk-67R6EMYD.js → chunk-OPO47BVS.js} +31 -52
  20. package/dist/chunk-OPO47BVS.js.map +1 -0
  21. package/dist/{chunk-IYFKPSRP.js → chunk-OSZRLHIJ.js} +3 -3
  22. package/dist/chunk-PD7LV22R.js +150 -0
  23. package/dist/chunk-PD7LV22R.js.map +1 -0
  24. package/dist/{chunk-JBD5KP5G.js → chunk-TDLQBGKA.js} +6 -2
  25. package/dist/chunk-TDLQBGKA.js.map +1 -0
  26. package/dist/{chunk-2GJFTIWX.js → chunk-TK2ZYIAL.js} +2 -2
  27. package/dist/{chunk-ZCBL5HER.js → chunk-XIIVIMFC.js} +2 -2
  28. package/dist/{cli-PMOFCZQL.js → cli-WOM4Z2Z4.js} +21 -18
  29. package/dist/cli-WOM4Z2Z4.js.map +1 -0
  30. package/dist/{client-5SUO2UYH.js → client-XCNF6NFT.js} +5 -5
  31. package/dist/{detect-providers-IRL2TTLK.js → detect-providers-CQSPTW2B.js} +3 -3
  32. package/dist/digest-WTS6S4XP.js +96 -0
  33. package/dist/digest-WTS6S4XP.js.map +1 -0
  34. package/dist/{init-NUF5UBUJ.js → init-VPLUEULI.js} +5 -5
  35. package/dist/{main-2XEBVUR6.js → main-OGXH6XWO.js} +230 -575
  36. package/dist/main-OGXH6XWO.js.map +1 -0
  37. package/dist/{rebuild-E6YFIRYZ.js → rebuild-Z4YUY6HT.js} +8 -7
  38. package/dist/{rebuild-E6YFIRYZ.js.map → rebuild-Z4YUY6HT.js.map} +1 -1
  39. package/dist/{reprocess-7G7KQWCN.js → reprocess-DMGPZTLC.js} +91 -20
  40. package/dist/reprocess-DMGPZTLC.js.map +1 -0
  41. package/dist/{restart-ABW4ZK3P.js → restart-QCQQ55KX.js} +6 -6
  42. package/dist/{search-MPD7SFK6.js → search-ACEFQOUW.js} +6 -6
  43. package/dist/{server-NZLZRITH.js → server-BQ3DWKZ6.js} +16 -14
  44. package/dist/{server-NZLZRITH.js.map → server-BQ3DWKZ6.js.map} +1 -1
  45. package/dist/{session-start-YB4A4PZB.js → session-start-BXRTKS4X.js} +6 -6
  46. package/dist/{setup-digest-K732MGOJ.js → setup-digest-EJXSQGZ5.js} +5 -5
  47. package/dist/{setup-llm-XCCH5LYD.js → setup-llm-P3MLWUDR.js} +5 -5
  48. package/dist/src/cli.js +4 -4
  49. package/dist/src/daemon/main.js +4 -4
  50. package/dist/src/hooks/post-tool-use.js +5 -5
  51. package/dist/src/hooks/session-end.js +5 -5
  52. package/dist/src/hooks/session-start.js +4 -4
  53. package/dist/src/hooks/stop.js +6 -6
  54. package/dist/src/hooks/stop.js.map +1 -1
  55. package/dist/src/hooks/user-prompt-submit.js +5 -5
  56. package/dist/src/mcp/server.js +4 -4
  57. package/dist/src/prompts/extraction.md +1 -1
  58. package/dist/src/prompts/summary.md +1 -11
  59. package/dist/{stats-6G7SN5YZ.js → stats-3FAP5FKV.js} +5 -5
  60. package/dist/{verify-JFHQH55Z.js → verify-3FTCOULE.js} +4 -4
  61. package/dist/{version-5B2TWXQJ.js → version-AL67JH7X.js} +4 -4
  62. package/package.json +1 -1
  63. package/skills/setup/SKILL.md +56 -28
  64. package/skills/setup/references/model-recommendations.md +49 -43
  65. package/dist/chunk-67R6EMYD.js.map +0 -1
  66. package/dist/chunk-FPEDTLQ6.js.map +0 -1
  67. package/dist/chunk-JBD5KP5G.js.map +0 -1
  68. package/dist/chunk-OUFSLZTX.js.map +0 -1
  69. package/dist/chunk-V2OWD2VV.js.map +0 -1
  70. package/dist/cli-PMOFCZQL.js.map +0 -1
  71. package/dist/main-2XEBVUR6.js.map +0 -1
  72. package/dist/reprocess-7G7KQWCN.js.map +0 -1
  73. /package/dist/{chunk-WBT5DWGC.js.map → chunk-42R7KVAW.js.map} +0 -0
  74. /package/dist/{chunk-GFBG73P4.js.map → chunk-5FIIK27E.js.map} +0 -0
  75. /package/dist/{chunk-XCPQHC4X.js.map → chunk-6CAKKNGD.js.map} +0 -0
  76. /package/dist/{chunk-BNIYWCST.js.map → chunk-EQVQEFOA.js.map} +0 -0
  77. /package/dist/{chunk-IYFKPSRP.js.map → chunk-OSZRLHIJ.js.map} +0 -0
  78. /package/dist/{chunk-2GJFTIWX.js.map → chunk-TK2ZYIAL.js.map} +0 -0
  79. /package/dist/{chunk-ZCBL5HER.js.map → chunk-XIIVIMFC.js.map} +0 -0
  80. /package/dist/{client-5SUO2UYH.js.map → client-XCNF6NFT.js.map} +0 -0
  81. /package/dist/{detect-providers-IRL2TTLK.js.map → detect-providers-CQSPTW2B.js.map} +0 -0
  82. /package/dist/{init-NUF5UBUJ.js.map → init-VPLUEULI.js.map} +0 -0
  83. /package/dist/{restart-ABW4ZK3P.js.map → restart-QCQQ55KX.js.map} +0 -0
  84. /package/dist/{search-MPD7SFK6.js.map → search-ACEFQOUW.js.map} +0 -0
  85. /package/dist/{session-start-YB4A4PZB.js.map → session-start-BXRTKS4X.js.map} +0 -0
  86. /package/dist/{setup-digest-K732MGOJ.js.map → setup-digest-EJXSQGZ5.js.map} +0 -0
  87. /package/dist/{setup-llm-XCCH5LYD.js.map → setup-llm-P3MLWUDR.js.map} +0 -0
  88. /package/dist/{stats-6G7SN5YZ.js.map → stats-3FAP5FKV.js.map} +0 -0
  89. /package/dist/{verify-JFHQH55Z.js.map → verify-3FTCOULE.js.map} +0 -0
  90. /package/dist/{version-5B2TWXQJ.js.map → version-AL67JH7X.js.map} +0 -0
@@ -12,21 +12,21 @@ import {
12
12
  import "./chunk-6UJWI4IW.js";
13
13
  import {
14
14
  readStdin
15
- } from "./chunk-ZCBL5HER.js";
15
+ } from "./chunk-XIIVIMFC.js";
16
16
  import {
17
17
  DaemonClient
18
- } from "./chunk-OUFSLZTX.js";
19
- import "./chunk-2GJFTIWX.js";
18
+ } from "./chunk-KDWBZSOB.js";
19
+ import "./chunk-TK2ZYIAL.js";
20
20
  import {
21
21
  resolveVaultDir
22
22
  } from "./chunk-N33KUCFP.js";
23
- import "./chunk-BNIYWCST.js";
23
+ import "./chunk-EQVQEFOA.js";
24
24
  import {
25
25
  CONTEXT_PLAN_PREVIEW_CHARS,
26
26
  CONTEXT_SESSION_PREVIEW_CHARS,
27
27
  CONTEXT_SPORE_PREVIEW_CHARS,
28
28
  estimateTokens
29
- } from "./chunk-JBD5KP5G.js";
29
+ } from "./chunk-TDLQBGKA.js";
30
30
  import "./chunk-PZUWP5VK.js";
31
31
 
32
32
  // src/context/relevance.ts
@@ -189,4 +189,4 @@ async function main() {
189
189
  }
190
190
  }
191
191
  main();
192
- //# sourceMappingURL=session-start-YB4A4PZB.js.map
192
+ //# sourceMappingURL=session-start-BXRTKS4X.js.map
@@ -2,14 +2,14 @@ import { createRequire as __cr } from 'node:module'; const require = __cr(import
2
2
  import {
3
3
  run
4
4
  } from "./chunk-UKWO26VI.js";
5
- import "./chunk-GFBG73P4.js";
5
+ import "./chunk-5FIIK27E.js";
6
6
  import "./chunk-SAKJMNSR.js";
7
- import "./chunk-67R6EMYD.js";
7
+ import "./chunk-OPO47BVS.js";
8
8
  import "./chunk-6UJWI4IW.js";
9
- import "./chunk-BNIYWCST.js";
10
- import "./chunk-JBD5KP5G.js";
9
+ import "./chunk-EQVQEFOA.js";
10
+ import "./chunk-TDLQBGKA.js";
11
11
  import "./chunk-PZUWP5VK.js";
12
12
  export {
13
13
  run
14
14
  };
15
- //# sourceMappingURL=setup-digest-K732MGOJ.js.map
15
+ //# sourceMappingURL=setup-digest-EJXSQGZ5.js.map
@@ -2,14 +2,14 @@ import { createRequire as __cr } from 'node:module'; const require = __cr(import
2
2
  import {
3
3
  run
4
4
  } from "./chunk-T7OC6GH5.js";
5
- import "./chunk-GFBG73P4.js";
5
+ import "./chunk-5FIIK27E.js";
6
6
  import "./chunk-SAKJMNSR.js";
7
- import "./chunk-67R6EMYD.js";
7
+ import "./chunk-OPO47BVS.js";
8
8
  import "./chunk-6UJWI4IW.js";
9
- import "./chunk-BNIYWCST.js";
10
- import "./chunk-JBD5KP5G.js";
9
+ import "./chunk-EQVQEFOA.js";
10
+ import "./chunk-TDLQBGKA.js";
11
11
  import "./chunk-PZUWP5VK.js";
12
12
  export {
13
13
  run
14
14
  };
15
- //# sourceMappingURL=setup-llm-XCCH5LYD.js.map
15
+ //# sourceMappingURL=setup-llm-P3MLWUDR.js.map
package/dist/src/cli.js CHANGED
@@ -2,12 +2,12 @@
2
2
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
3
3
  import {
4
4
  ensureNativeDeps
5
- } from "../chunk-XCPQHC4X.js";
6
- import "../chunk-BNIYWCST.js";
7
- import "../chunk-JBD5KP5G.js";
5
+ } from "../chunk-6CAKKNGD.js";
6
+ import "../chunk-EQVQEFOA.js";
7
+ import "../chunk-TDLQBGKA.js";
8
8
  import "../chunk-PZUWP5VK.js";
9
9
 
10
10
  // src/entries/cli.ts
11
11
  ensureNativeDeps();
12
- await import("../cli-PMOFCZQL.js");
12
+ await import("../cli-WOM4Z2Z4.js");
13
13
  //# sourceMappingURL=cli.js.map
@@ -1,13 +1,13 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  ensureNativeDeps
4
- } from "../../chunk-XCPQHC4X.js";
5
- import "../../chunk-BNIYWCST.js";
6
- import "../../chunk-JBD5KP5G.js";
4
+ } from "../../chunk-6CAKKNGD.js";
5
+ import "../../chunk-EQVQEFOA.js";
6
+ import "../../chunk-TDLQBGKA.js";
7
7
  import "../../chunk-PZUWP5VK.js";
8
8
 
9
9
  // src/entries/daemon.ts
10
10
  ensureNativeDeps();
11
- var { main } = await import("../../main-2XEBVUR6.js");
11
+ var { main } = await import("../../main-OGXH6XWO.js");
12
12
  await main();
13
13
  //# sourceMappingURL=main.js.map
@@ -4,18 +4,18 @@ import {
4
4
  } from "../../chunk-HIN3UVOG.js";
5
5
  import {
6
6
  readStdin
7
- } from "../../chunk-ZCBL5HER.js";
7
+ } from "../../chunk-XIIVIMFC.js";
8
8
  import {
9
9
  DaemonClient
10
- } from "../../chunk-OUFSLZTX.js";
11
- import "../../chunk-2GJFTIWX.js";
10
+ } from "../../chunk-KDWBZSOB.js";
11
+ import "../../chunk-TK2ZYIAL.js";
12
12
  import {
13
13
  resolveVaultDir
14
14
  } from "../../chunk-N33KUCFP.js";
15
- import "../../chunk-BNIYWCST.js";
15
+ import "../../chunk-EQVQEFOA.js";
16
16
  import {
17
17
  TOOL_OUTPUT_PREVIEW_CHARS
18
- } from "../../chunk-JBD5KP5G.js";
18
+ } from "../../chunk-TDLQBGKA.js";
19
19
  import "../../chunk-PZUWP5VK.js";
20
20
 
21
21
  // src/hooks/post-tool-use.ts
@@ -1,16 +1,16 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  readStdin
4
- } from "../../chunk-ZCBL5HER.js";
4
+ } from "../../chunk-XIIVIMFC.js";
5
5
  import {
6
6
  DaemonClient
7
- } from "../../chunk-OUFSLZTX.js";
8
- import "../../chunk-2GJFTIWX.js";
7
+ } from "../../chunk-KDWBZSOB.js";
8
+ import "../../chunk-TK2ZYIAL.js";
9
9
  import {
10
10
  resolveVaultDir
11
11
  } from "../../chunk-N33KUCFP.js";
12
- import "../../chunk-BNIYWCST.js";
13
- import "../../chunk-JBD5KP5G.js";
12
+ import "../../chunk-EQVQEFOA.js";
13
+ import "../../chunk-TDLQBGKA.js";
14
14
  import "../../chunk-PZUWP5VK.js";
15
15
 
16
16
  // src/hooks/session-end.ts
@@ -1,12 +1,12 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  ensureNativeDeps
4
- } from "../../chunk-XCPQHC4X.js";
5
- import "../../chunk-BNIYWCST.js";
6
- import "../../chunk-JBD5KP5G.js";
4
+ } from "../../chunk-6CAKKNGD.js";
5
+ import "../../chunk-EQVQEFOA.js";
6
+ import "../../chunk-TDLQBGKA.js";
7
7
  import "../../chunk-PZUWP5VK.js";
8
8
 
9
9
  // src/entries/session-start.ts
10
10
  ensureNativeDeps();
11
- await import("../../session-start-YB4A4PZB.js");
11
+ await import("../../session-start-BXRTKS4X.js");
12
12
  //# sourceMappingURL=session-start.js.map
@@ -5,16 +5,16 @@ import {
5
5
  import "../../chunk-6UJWI4IW.js";
6
6
  import {
7
7
  readStdin
8
- } from "../../chunk-ZCBL5HER.js";
8
+ } from "../../chunk-XIIVIMFC.js";
9
9
  import {
10
10
  DaemonClient
11
- } from "../../chunk-OUFSLZTX.js";
12
- import "../../chunk-2GJFTIWX.js";
11
+ } from "../../chunk-KDWBZSOB.js";
12
+ import "../../chunk-TK2ZYIAL.js";
13
13
  import {
14
14
  resolveVaultDir
15
15
  } from "../../chunk-N33KUCFP.js";
16
- import "../../chunk-BNIYWCST.js";
17
- import "../../chunk-JBD5KP5G.js";
16
+ import "../../chunk-EQVQEFOA.js";
17
+ import "../../chunk-TDLQBGKA.js";
18
18
  import "../../chunk-PZUWP5VK.js";
19
19
 
20
20
  // src/hooks/stop.ts
@@ -29,7 +29,7 @@ async function main() {
29
29
  if (!sessionId) return;
30
30
  const config = loadConfig(VAULT_DIR);
31
31
  const client = new DaemonClient(VAULT_DIR);
32
- await client.ensureRunning();
32
+ await client.ensureRunning({ checkStale: false });
33
33
  await client.post("/events/stop", {
34
34
  session_id: sessionId,
35
35
  user: config.team.user || void 0,
@@ -1 +1 @@
1
- {"version":3,"sources":["../../../src/hooks/stop.ts"],"sourcesContent":["import { DaemonClient } from './client.js';\nimport { readStdin } from './read-stdin.js';\nimport { loadConfig } from '../config/loader.js';\nimport { resolveVaultDir } from '../vault/resolve.js';\nimport fs from 'node:fs';\nimport path from 'node:path';\n\nasync function main() {\n const VAULT_DIR = resolveVaultDir();\n if (!fs.existsSync(path.join(VAULT_DIR, 'myco.yaml'))) return;\n\n try {\n const input = JSON.parse(await readStdin());\n const sessionId = input.session_id ?? process.env.MYCO_SESSION_ID;\n if (!sessionId) return;\n\n const config = loadConfig(VAULT_DIR);\n const client = new DaemonClient(VAULT_DIR);\n\n await client.ensureRunning();\n\n // Pass transcript_path and last_assistant_message from Claude Code.\n // These are provided by the hook system and eliminate the need to\n // scan directories or mine the transcript for the AI response.\n await client.post('/events/stop', {\n session_id: sessionId,\n user: config.team.user || undefined,\n transcript_path: input.transcript_path,\n last_assistant_message: input.last_assistant_message,\n });\n } catch (error) {\n process.stderr.write(`[myco] stop error: ${(error as Error).message}\\n`);\n }\n}\n\nmain();\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAIA,OAAO,QAAQ;AACf,OAAO,UAAU;AAEjB,eAAe,OAAO;AACpB,QAAM,YAAY,gBAAgB;AAClC,MAAI,CAAC,GAAG,WAAW,KAAK,KAAK,WAAW,WAAW,CAAC,EAAG;AAEvD,MAAI;AACF,UAAM,QAAQ,KAAK,MAAM,MAAM,UAAU,CAAC;AAC1C,UAAM,YAAY,MAAM,cAAc,QAAQ,IAAI;AAClD,QAAI,CAAC,UAAW;AAEhB,UAAM,SAAS,WAAW,SAAS;AACnC,UAAM,SAAS,IAAI,aAAa,SAAS;AAEzC,UAAM,OAAO,cAAc;AAK3B,UAAM,OAAO,KAAK,gBAAgB;AAAA,MAChC,YAAY;AAAA,MACZ,MAAM,OAAO,KAAK,QAAQ;AAAA,MAC1B,iBAAiB,MAAM;AAAA,MACvB,wBAAwB,MAAM;AAAA,IAChC,CAAC;AAAA,EACH,SAAS,OAAO;AACd,YAAQ,OAAO,MAAM,sBAAuB,MAAgB,OAAO;AAAA,CAAI;AAAA,EACzE;AACF;AAEA,KAAK;","names":[]}
1
+ {"version":3,"sources":["../../../src/hooks/stop.ts"],"sourcesContent":["import { DaemonClient } from './client.js';\nimport { readStdin } from './read-stdin.js';\nimport { loadConfig } from '../config/loader.js';\nimport { resolveVaultDir } from '../vault/resolve.js';\nimport fs from 'node:fs';\nimport path from 'node:path';\n\nasync function main() {\n const VAULT_DIR = resolveVaultDir();\n if (!fs.existsSync(path.join(VAULT_DIR, 'myco.yaml'))) return;\n\n try {\n const input = JSON.parse(await readStdin());\n const sessionId = input.session_id ?? process.env.MYCO_SESSION_ID;\n if (!sessionId) return;\n\n const config = loadConfig(VAULT_DIR);\n const client = new DaemonClient(VAULT_DIR);\n\n await client.ensureRunning({ checkStale: false });\n\n // Pass transcript_path and last_assistant_message from Claude Code.\n // These are provided by the hook system and eliminate the need to\n // scan directories or mine the transcript for the AI response.\n await client.post('/events/stop', {\n session_id: sessionId,\n user: config.team.user || undefined,\n transcript_path: input.transcript_path,\n last_assistant_message: input.last_assistant_message,\n });\n } catch (error) {\n process.stderr.write(`[myco] stop error: ${(error as Error).message}\\n`);\n }\n}\n\nmain();\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAIA,OAAO,QAAQ;AACf,OAAO,UAAU;AAEjB,eAAe,OAAO;AACpB,QAAM,YAAY,gBAAgB;AAClC,MAAI,CAAC,GAAG,WAAW,KAAK,KAAK,WAAW,WAAW,CAAC,EAAG;AAEvD,MAAI;AACF,UAAM,QAAQ,KAAK,MAAM,MAAM,UAAU,CAAC;AAC1C,UAAM,YAAY,MAAM,cAAc,QAAQ,IAAI;AAClD,QAAI,CAAC,UAAW;AAEhB,UAAM,SAAS,WAAW,SAAS;AACnC,UAAM,SAAS,IAAI,aAAa,SAAS;AAEzC,UAAM,OAAO,cAAc,EAAE,YAAY,MAAM,CAAC;AAKhD,UAAM,OAAO,KAAK,gBAAgB;AAAA,MAChC,YAAY;AAAA,MACZ,MAAM,OAAO,KAAK,QAAQ;AAAA,MAC1B,iBAAiB,MAAM;AAAA,MACvB,wBAAwB,MAAM;AAAA,IAChC,CAAC;AAAA,EACH,SAAS,OAAO;AACd,YAAQ,OAAO,MAAM,sBAAuB,MAAgB,OAAO;AAAA,CAAI;AAAA,EACzE;AACF;AAEA,KAAK;","names":[]}
@@ -4,16 +4,16 @@ import {
4
4
  } from "../../chunk-HIN3UVOG.js";
5
5
  import {
6
6
  readStdin
7
- } from "../../chunk-ZCBL5HER.js";
7
+ } from "../../chunk-XIIVIMFC.js";
8
8
  import {
9
9
  DaemonClient
10
- } from "../../chunk-OUFSLZTX.js";
11
- import "../../chunk-2GJFTIWX.js";
10
+ } from "../../chunk-KDWBZSOB.js";
11
+ import "../../chunk-TK2ZYIAL.js";
12
12
  import {
13
13
  resolveVaultDir
14
14
  } from "../../chunk-N33KUCFP.js";
15
- import "../../chunk-BNIYWCST.js";
16
- import "../../chunk-JBD5KP5G.js";
15
+ import "../../chunk-EQVQEFOA.js";
16
+ import "../../chunk-TDLQBGKA.js";
17
17
  import "../../chunk-PZUWP5VK.js";
18
18
 
19
19
  // src/hooks/user-prompt-submit.ts
@@ -1,13 +1,13 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  ensureNativeDeps
4
- } from "../../chunk-XCPQHC4X.js";
5
- import "../../chunk-BNIYWCST.js";
6
- import "../../chunk-JBD5KP5G.js";
4
+ } from "../../chunk-6CAKKNGD.js";
5
+ import "../../chunk-EQVQEFOA.js";
6
+ import "../../chunk-TDLQBGKA.js";
7
7
  import "../../chunk-PZUWP5VK.js";
8
8
 
9
9
  // src/entries/mcp-server.ts
10
10
  ensureNativeDeps();
11
- var { main } = await import("../../server-NZLZRITH.js");
11
+ var { main } = await import("../../server-BQ3DWKZ6.js");
12
12
  await main();
13
13
  //# sourceMappingURL=server.js.map
@@ -1,5 +1,5 @@
1
1
  You are analyzing a coding session buffer for session "{{sessionId}}".
2
- You have a budget of ~{{maxTokens}} tokens for your response. Use it generously — richer, more detailed observations are more valuable than brief ones.
2
+ You have a budget of ~{{maxTokens}} tokens for your response.
3
3
 
4
4
  ## Events ({{eventCount}} total)
5
5
  {{toolSummary}}
@@ -1,19 +1,9 @@
1
1
  You are summarizing a coding session for user "{{user}}" (session "{{sessionId}}").
2
- You have a budget of ~{{maxTokens}} tokens. Use the full budget to produce a rich, detailed narrative.
3
2
 
4
3
  ## Session Content
5
4
  {{content}}
6
5
 
7
6
  ## Task
8
- Write a detailed narrative summary of this session. This summary will be used by the digest engine to synthesize project understanding, so richness and accuracy matter more than brevity.
9
-
10
- Cover:
11
- - **What was accomplished** — features built, bugs fixed, refactors completed
12
- - **Key decisions made** — what was chosen and why, including alternatives that were rejected
13
- - **Problems encountered** — what went wrong, how it was debugged, what the root cause was
14
- - **Discoveries and learnings** — anything surprising or non-obvious that was learned
15
- - **Current state** — where things stand at the end of the session, what's next
16
-
17
- Focus on outcomes and reasoning rather than individual tool calls. Include enough context that someone reading this summary months later would understand what happened and why.
7
+ Write a concise summary of this session in 2-4 sentences. Cover what was accomplished, key decisions made, and where things stand. Skip routine details — focus on outcomes someone would need to know months later.
18
8
 
19
9
  Respond with plain text only, no JSON or markdown fences.
@@ -7,11 +7,11 @@ import {
7
7
  } from "./chunk-AK6GNLPV.js";
8
8
  import {
9
9
  isProcessAlive
10
- } from "./chunk-GFBG73P4.js";
10
+ } from "./chunk-5FIIK27E.js";
11
11
  import "./chunk-SAKJMNSR.js";
12
- import "./chunk-67R6EMYD.js";
13
- import "./chunk-BNIYWCST.js";
14
- import "./chunk-JBD5KP5G.js";
12
+ import "./chunk-OPO47BVS.js";
13
+ import "./chunk-EQVQEFOA.js";
14
+ import "./chunk-TDLQBGKA.js";
15
15
  import "./chunk-PZUWP5VK.js";
16
16
 
17
17
  // src/cli/stats.ts
@@ -74,4 +74,4 @@ Vectors: error \u2014 ${e.message}`);
74
74
  export {
75
75
  run
76
76
  };
77
- //# sourceMappingURL=stats-6G7SN5YZ.js.map
77
+ //# sourceMappingURL=stats-3FAP5FKV.js.map
@@ -2,13 +2,13 @@ import { createRequire as __cr } from 'node:module'; const require = __cr(import
2
2
  import {
3
3
  createEmbeddingProvider,
4
4
  createLlmProvider
5
- } from "./chunk-IYFKPSRP.js";
6
- import "./chunk-67R6EMYD.js";
5
+ } from "./chunk-OSZRLHIJ.js";
6
+ import "./chunk-OPO47BVS.js";
7
7
  import {
8
8
  loadConfig
9
9
  } from "./chunk-TBRZAJ7W.js";
10
10
  import "./chunk-6UJWI4IW.js";
11
- import "./chunk-JBD5KP5G.js";
11
+ import "./chunk-TDLQBGKA.js";
12
12
  import "./chunk-PZUWP5VK.js";
13
13
 
14
14
  // src/cli/verify.ts
@@ -47,4 +47,4 @@ async function run(_args, vaultDir) {
47
47
  export {
48
48
  run
49
49
  };
50
- //# sourceMappingURL=verify-JFHQH55Z.js.map
50
+ //# sourceMappingURL=verify-3FTCOULE.js.map
@@ -1,11 +1,11 @@
1
1
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
2
2
  import {
3
3
  getPluginVersion
4
- } from "./chunk-2GJFTIWX.js";
5
- import "./chunk-BNIYWCST.js";
6
- import "./chunk-JBD5KP5G.js";
4
+ } from "./chunk-TK2ZYIAL.js";
5
+ import "./chunk-EQVQEFOA.js";
6
+ import "./chunk-TDLQBGKA.js";
7
7
  import "./chunk-PZUWP5VK.js";
8
8
  export {
9
9
  getPluginVersion
10
10
  };
11
- //# sourceMappingURL=version-5B2TWXQJ.js.map
11
+ //# sourceMappingURL=version-AL67JH7X.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@goondocks/myco",
3
- "version": "0.4.3",
3
+ "version": "0.4.4",
4
4
  "description": "Collective agent intelligence — Claude Code plugin",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -39,14 +39,14 @@ For RAM detection, run the appropriate command for the OS:
39
39
 
40
40
  Use the RAM value to determine the recommended tier from `references/model-recommendations.md`:
41
41
 
42
- | RAM | Recommended Intelligence Model | Digest Context Window | Default Inject Tier |
43
- |-----|--------------------------------|-----------------------|---------------------|
44
- | 64GB+ | `qwen3.5:35b` | 65536 | 3000 |
45
- | 32–64GB | `qwen3.5:27b` | 32768 | 3000 |
46
- | 16–32GB | `qwen3.5:latest` (~10B) | 16384 | 1500 |
47
- | 8–16GB | `qwen3.5:4b` | 8192 | 1500 |
42
+ | RAM | Processor Model | Digest Model | Digest Context | Inject Tier |
43
+ |-----|----------------|--------------|----------------|-------------|
44
+ | 64GB+ | `qwen3.5:latest` | `qwen3.5:35b` | 65536 | 3000 |
45
+ | 48GB | `qwen3.5:latest` | `qwen3.5:27b` | 32768 | 3000 |
46
+ | 32GB | `qwen3.5:4b` | `qwen3.5:latest` | 16384 | 1500 |
47
+ | 16GB | `qwen3.5:4b` | `qwen3.5:4b` | 8192 | 1500 |
48
48
 
49
- Record: detected RAM (GB), recommended model, digest context window, and default inject tier. You will use these as defaults in the questions below.
49
+ Record: detected RAM (GB), recommended processor model, recommended digest model, digest context window, and default inject tier.
50
50
 
51
51
  ## Step 3: Ask Questions
52
52
 
@@ -62,40 +62,54 @@ Use AskUserQuestion to ask the user where to store the vault. Present three choi
62
62
 
63
63
  Record the resolved vault path.
64
64
 
65
- ### Question 2: Provider and Model
65
+ ### Question 2: Processor Model (extraction, summaries, titles)
66
66
 
67
- From the `detect-providers` output, list only providers where `available` is `true`. Present them as choices. For each available provider, list its available models.
67
+ Present the recommended processor model from the RAM table as the default. Show available models from the detected providers, grouped by provider.
68
68
 
69
- Pre-select the recommended model from Step 2 if it appears in the list. If the recommended model is not installed:
69
+ Explain: "The processor model handles session extraction, summaries, and titles. Smaller, faster models work well here speed matters more than depth."
70
70
 
71
- - **Ollama:** offer to run `ollama pull <recommended-model>` before continuing. Ask "Pull now or choose a different model?"
72
- - **LM Studio:** tell the user to open LM Studio, search for `<recommended-model>`, and download it. Offer to wait or to let the user choose a different available model.
71
+ If the recommended model is not installed:
73
72
 
74
- Record the chosen provider and model.
73
+ - **Ollama:** offer to run `ollama pull <recommended-model>` before continuing.
74
+ - **LM Studio:** tell the user to download it from the model browser.
75
75
 
76
- ### Question 3: Embedding Model
76
+ Record the chosen provider and processor model.
77
77
 
78
- List embedding models from the chosen provider. Exclude Anthropic — it does not support embeddings.
78
+ ### Question 3: Digest Model (vault synthesis)
79
+
80
+ Present the recommended digest model from the RAM table as the default. Show available models from the detected providers.
81
+
82
+ Explain: "The digest model synthesizes your vault into context extracts. Larger models produce better results here — quality matters more than speed. This can be the same as the processor model on smaller machines."
83
+
84
+ If the recommended model is not installed, offer to pull/download as above.
85
+
86
+ Record the chosen provider and digest model.
87
+
88
+ ### Question 4: Embedding Model
89
+
90
+ List embedding models from available providers. Exclude Anthropic — it does not support embeddings.
79
91
 
80
92
  If no embedding models are installed:
81
93
 
82
94
  - **Ollama:** offer to run `ollama pull bge-m3`. If the user accepts, run it before continuing.
83
- - **LM Studio:** tell the user to search for and download an embedding model (suggest `bge-m3` or any model with `text-embedding` in the name).
95
+ - **LM Studio:** tell the user to search for and download an embedding model.
84
96
 
85
97
  Recommend `bge-m3` as the default. Record the chosen embedding provider and model.
86
98
 
87
- ### Question 4: Inject Tier
99
+ ### Question 5: Inject Tier
88
100
 
89
- Show the inject tier options appropriate for the detected RAM, with the default pre-selected:
101
+ Ask the user which coding agent they primarily use, then recommend an inject tier based on the agent's context window (see `references/model-recommendations.md`). Show all tiers:
90
102
 
91
103
  | Tier | Description |
92
104
  |------|-------------|
93
105
  | 1500 | Executive briefing — fastest, lightest |
94
- | 3000 | Team standup — recommended for most setups |
95
- | 5000 | Deep onboarding |
96
- | 10000 | Institutional knowledge — richest context |
106
+ | 3000 | Team standup — balanced context |
107
+ | 5000 | Deep onboarding — good for 200K agents |
108
+ | 10000 | Institutional knowledge — richest, best for 1M+ agents |
109
+
110
+ Explain: "The inject tier controls how much vault context is injected at session start. Larger tiers give the agent more project history but use more of its context window. All tiers work regardless of local hardware."
97
111
 
98
- Show only the tiers available for the user's RAM tier (per the table in Step 2). Pre-select the default. Tell the user: "Agents can always request a different tier on-demand via the `myco_context` MCP tool."
112
+ Pre-select the default based on the agent's context window. Tell the user: "Agents can always request a different tier on-demand via the `myco_context` MCP tool."
99
113
 
100
114
  Record the chosen inject tier.
101
115
 
@@ -106,14 +120,16 @@ Run the following commands in sequence, substituting the recorded values. Show e
106
120
  ```bash
107
121
  node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js init \
108
122
  --vault <chosen-vault-path> \
109
- --llm-provider <provider> \
110
- --llm-model <model> \
123
+ --llm-provider <processor-provider> \
124
+ --llm-model <processor-model> \
111
125
  --embedding-provider <embedding-provider> \
112
126
  --embedding-model <embedding-model>
113
127
  ```
114
128
 
115
129
  ```bash
116
130
  node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js setup-digest \
131
+ --provider <digest-provider> \
132
+ --model <digest-model> \
117
133
  --context-window <digest-context-window-from-ram-table> \
118
134
  --inject-tier <chosen-inject-tier>
119
135
  ```
@@ -124,16 +140,28 @@ node ${CLAUDE_PLUGIN_ROOT}/dist/src/cli.js verify
124
140
 
125
141
  If any command fails, report the error and stop. Do not continue to the next command on failure. Show the full error output to the user and ask how to proceed.
126
142
 
127
- ## Step 5: Report
143
+ ## Step 5: Ollama Performance Tips
144
+
145
+ If the user is using Ollama, recommend adding these to their Ollama service configuration:
146
+
147
+ ```
148
+ OLLAMA_FLASH_ATTENTION=1 # Required for KV cache quantization
149
+ OLLAMA_KV_CACHE_TYPE=q8_0 # Halves KV cache memory
150
+ ```
151
+
152
+ Explain: "These settings halve the memory used for large context windows, making digest much more efficient. They're Ollama-wide settings — on macOS, add them to your Ollama launchd plist."
153
+
154
+ ## Step 6: Report
128
155
 
129
156
  Display a summary table:
130
157
 
131
158
  | Setting | Value |
132
159
  |---------|-------|
133
160
  | Vault path | `<resolved path>` |
134
- | Provider | `<provider>` / `<model>` |
161
+ | Processor | `<provider>` / `<processor-model>` |
162
+ | Digest | `<provider>` / `<digest-model>` (context: `<context-window>`) |
135
163
  | Embedding | `<embedding-provider>` / `<embedding-model>` |
136
- | Digest | enabled (context: `<context-window>`, inject: `<inject-tier>`) |
164
+ | Inject tier | `<inject-tier>` |
137
165
  | RAM detected | `<X>` GB |
138
166
 
139
167
  Tell the user: "Myco is ready. Start a new session to begin capturing knowledge."
@@ -143,4 +171,4 @@ Tell the user: "Myco is ready. Start a new session to begin capturing knowledge.
143
171
  - All writes via CLI commands — never read or modify `myco.yaml` directly.
144
172
  - All provider detection via `detect-providers` — no raw HTTP calls to provider APIs.
145
173
  - One question at a time — do not batch questions or present them together.
146
- - Two model choices in guided setup: intelligence model and embedding model. For a separate dedicated digestion model, direct the user to run `setup-digest` with a `--provider` and `--model` flag after setup completes (see `references/model-recommendations.md` Advanced section).
174
+ - Three model choices in guided setup: processor, digest, and embedding.