@sparkleideas/claude-flow-patch 3.1.0-alpha.44.patch.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107) hide show
  1. package/AGENTS.md +162 -0
  2. package/CLAUDE.md +458 -0
  3. package/README.md +306 -0
  4. package/bin/claude-flow-patch.mjs +148 -0
  5. package/check-patches.sh +176 -0
  6. package/lib/categories.json +15 -0
  7. package/lib/common.py +92 -0
  8. package/lib/discover.mjs +181 -0
  9. package/package.json +85 -0
  10. package/patch/010-CF-001-doctor-yaml/README.md +11 -0
  11. package/patch/010-CF-001-doctor-yaml/fix.py +20 -0
  12. package/patch/010-CF-001-doctor-yaml/sentinel +1 -0
  13. package/patch/020-CF-002-config-export-yaml/README.md +11 -0
  14. package/patch/020-CF-002-config-export-yaml/fix.py +130 -0
  15. package/patch/020-CF-002-config-export-yaml/sentinel +1 -0
  16. package/patch/030-DM-001-daemon-log-zero/README.md +12 -0
  17. package/patch/030-DM-001-daemon-log-zero/fix.py +37 -0
  18. package/patch/030-DM-001-daemon-log-zero/sentinel +1 -0
  19. package/patch/040-DM-002-cpu-load-threshold/README.md +11 -0
  20. package/patch/040-DM-002-cpu-load-threshold/fix.py +6 -0
  21. package/patch/040-DM-002-cpu-load-threshold/sentinel +1 -0
  22. package/patch/050-DM-003-macos-freemem/README.md +11 -0
  23. package/patch/050-DM-003-macos-freemem/fix.py +7 -0
  24. package/patch/050-DM-003-macos-freemem/sentinel +1 -0
  25. package/patch/060-DM-004-preload-worker-stub/README.md +11 -0
  26. package/patch/060-DM-004-preload-worker-stub/fix.py +34 -0
  27. package/patch/060-DM-004-preload-worker-stub/sentinel +1 -0
  28. package/patch/070-DM-005-consolidation-worker-stub/README.md +11 -0
  29. package/patch/070-DM-005-consolidation-worker-stub/fix.py +46 -0
  30. package/patch/070-DM-005-consolidation-worker-stub/sentinel +1 -0
  31. package/patch/080-EM-001-embedding-ignores-config/README.md +11 -0
  32. package/patch/080-EM-001-embedding-ignores-config/fix.py +111 -0
  33. package/patch/080-EM-001-embedding-ignores-config/sentinel +1 -0
  34. package/patch/090-EM-002-transformers-cache-eacces/README.md +11 -0
  35. package/patch/090-EM-002-transformers-cache-eacces/fix.sh +12 -0
  36. package/patch/090-EM-002-transformers-cache-eacces/sentinel +1 -0
  37. package/patch/100-GV-001-hnsw-ghost-vectors/README.md +11 -0
  38. package/patch/100-GV-001-hnsw-ghost-vectors/fix.py +34 -0
  39. package/patch/100-GV-001-hnsw-ghost-vectors/sentinel +1 -0
  40. package/patch/110-HK-001-post-edit-file-path/README.md +44 -0
  41. package/patch/110-HK-001-post-edit-file-path/fix.py +23 -0
  42. package/patch/110-HK-001-post-edit-file-path/sentinel +1 -0
  43. package/patch/120-HK-002-hooks-tools-stub/README.md +36 -0
  44. package/patch/120-HK-002-hooks-tools-stub/fix.py +155 -0
  45. package/patch/120-HK-002-hooks-tools-stub/sentinel +1 -0
  46. package/patch/130-HK-003-metrics-hardcoded/README.md +30 -0
  47. package/patch/130-HK-003-metrics-hardcoded/fix.py +82 -0
  48. package/patch/130-HK-003-metrics-hardcoded/sentinel +1 -0
  49. package/patch/140-HW-001-stdin-hang/README.md +11 -0
  50. package/patch/140-HW-001-stdin-hang/fix.py +6 -0
  51. package/patch/140-HW-001-stdin-hang/sentinel +1 -0
  52. package/patch/150-HW-002-failures-swallowed/README.md +11 -0
  53. package/patch/150-HW-002-failures-swallowed/fix.py +42 -0
  54. package/patch/150-HW-002-failures-swallowed/sentinel +1 -0
  55. package/patch/160-HW-003-aggressive-intervals/README.md +11 -0
  56. package/patch/160-HW-003-aggressive-intervals/fix.py +16 -0
  57. package/patch/160-HW-003-aggressive-intervals/sentinel +1 -0
  58. package/patch/170-IN-001-intelligence-stub/README.md +64 -0
  59. package/patch/170-IN-001-intelligence-stub/fix.py +70 -0
  60. package/patch/170-IN-001-intelligence-stub/sentinel +1 -0
  61. package/patch/180-MM-001-memory-persist-path/README.md +27 -0
  62. package/patch/180-MM-001-memory-persist-path/fix.py +54 -0
  63. package/patch/180-MM-001-memory-persist-path/sentinel +1 -0
  64. package/patch/190-NS-001-discovery-default-namespace/README.md +16 -0
  65. package/patch/190-NS-001-discovery-default-namespace/fix.py +68 -0
  66. package/patch/190-NS-001-discovery-default-namespace/sentinel +2 -0
  67. package/patch/200-NS-002-targeted-require-namespace/README.md +19 -0
  68. package/patch/200-NS-002-targeted-require-namespace/fix.py +158 -0
  69. package/patch/200-NS-002-targeted-require-namespace/sentinel +2 -0
  70. package/patch/210-NS-003-namespace-typo-pattern/README.md +15 -0
  71. package/patch/210-NS-003-namespace-typo-pattern/fix.py +23 -0
  72. package/patch/210-NS-003-namespace-typo-pattern/sentinel +1 -0
  73. package/patch/220-RS-001-better-sqlite3-node24/README.md +54 -0
  74. package/patch/220-RS-001-better-sqlite3-node24/fix.py +22 -0
  75. package/patch/220-RS-001-better-sqlite3-node24/rebuild.sh +31 -0
  76. package/patch/220-RS-001-better-sqlite3-node24/sentinel +2 -0
  77. package/patch/230-RV-001-force-learn-tick/README.md +31 -0
  78. package/patch/230-RV-001-force-learn-tick/fix.py +14 -0
  79. package/patch/230-RV-001-force-learn-tick/sentinel +2 -0
  80. package/patch/240-RV-002-trajectory-load/README.md +28 -0
  81. package/patch/240-RV-002-trajectory-load/fix.py +14 -0
  82. package/patch/240-RV-002-trajectory-load/sentinel +2 -0
  83. package/patch/250-RV-003-trajectory-stats-sync/README.md +31 -0
  84. package/patch/250-RV-003-trajectory-stats-sync/fix.py +18 -0
  85. package/patch/250-RV-003-trajectory-stats-sync/sentinel +2 -0
  86. package/patch/260-SG-001-init-settings/README.md +29 -0
  87. package/patch/260-SG-001-init-settings/fix.py +143 -0
  88. package/patch/260-SG-001-init-settings/sentinel +4 -0
  89. package/patch/270-SG-003-init-helpers-all-paths/README.md +60 -0
  90. package/patch/270-SG-003-init-helpers-all-paths/fix.py +164 -0
  91. package/patch/270-SG-003-init-helpers-all-paths/sentinel +3 -0
  92. package/patch/280-UI-001-intelligence-stats-crash/README.md +11 -0
  93. package/patch/280-UI-001-intelligence-stats-crash/fix.py +57 -0
  94. package/patch/280-UI-001-intelligence-stats-crash/sentinel +1 -0
  95. package/patch/290-UI-002-neural-status-not-loaded/README.md +11 -0
  96. package/patch/290-UI-002-neural-status-not-loaded/fix.py +19 -0
  97. package/patch/290-UI-002-neural-status-not-loaded/sentinel +1 -0
  98. package/patch/300-DM-006-log-rotation/README.md +11 -0
  99. package/patch/300-DM-006-log-rotation/fix.py +58 -0
  100. package/patch/300-DM-006-log-rotation/sentinel +1 -0
  101. package/patch/310-HW-004-runwithtimeout-orphan/README.md +11 -0
  102. package/patch/310-HW-004-runwithtimeout-orphan/fix.py +10 -0
  103. package/patch/310-HW-004-runwithtimeout-orphan/sentinel +1 -0
  104. package/patch-all.sh +203 -0
  105. package/repair-post-init.sh +245 -0
  106. package/scripts/update-docs.mjs +208 -0
  107. package/scripts/upstream-log.mjs +257 -0
@@ -0,0 +1,34 @@
1
+ # DM-004: Preload worker stub + workers missing from defaults
2
+ # Adds missing workers to DEFAULT_WORKERS and implements real preload
3
+ patch("11: add missing workers to defaults",
4
+ WD,
5
+ " { type: 'document', intervalMs: 60 * 60 * 1000, offsetMs: 0, priority: 'low', description: 'Auto-documentation', enabled: false },\n];",
6
+ """ { type: 'document', intervalMs: 60 * 60 * 1000, offsetMs: 0, priority: 'low', description: 'Auto-documentation', enabled: false },
7
+ { type: 'ultralearn', intervalMs: 0, offsetMs: 0, priority: 'normal', description: 'Deep knowledge acquisition (headless, manual trigger)', enabled: false },
8
+ { type: 'deepdive', intervalMs: 4 * 60 * 60 * 1000, offsetMs: 0, priority: 'low', description: 'Deep code analysis', enabled: false },
9
+ { type: 'refactor', intervalMs: 4 * 60 * 60 * 1000, offsetMs: 0, priority: 'low', description: 'Refactoring suggestions', enabled: false },
10
+ { type: 'benchmark', intervalMs: 2 * 60 * 60 * 1000, offsetMs: 0, priority: 'low', description: 'Performance benchmarking', enabled: false },
11
+ { type: 'preload', intervalMs: 10 * 60 * 1000, offsetMs: 0, priority: 'high', description: 'Embedding model + HNSW preload', enabled: true },
12
+ ];""")
13
+
14
+ patch("11: real preload worker",
15
+ WD,
16
+ """ async runPreloadWorkerLocal() {
17
+ return {
18
+ timestamp: new Date().toISOString(),
19
+ mode: 'local',
20
+ resourcesPreloaded: 0,
21
+ cacheStatus: 'active',
22
+ };
23
+ }""",
24
+ """ async runPreloadWorkerLocal() {
25
+ const result = { timestamp: new Date().toISOString(), mode: 'local', resourcesPreloaded: 0, cacheStatus: 'active' };
26
+ try {
27
+ const mi = await import('../memory/memory-initializer.js');
28
+ const modelResult = await mi.loadEmbeddingModel({ verbose: false });
29
+ if (modelResult.success) { result.resourcesPreloaded++; result.embeddingModel = modelResult.modelName; }
30
+ const hnswResult = await mi.getHNSWIndex();
31
+ if (hnswResult) { result.resourcesPreloaded++; result.hnswEntries = hnswResult.entries?.size ?? 0; }
32
+ } catch (e) { result.error = e?.message || String(e); }
33
+ return result;
34
+ }""")
@@ -0,0 +1 @@
1
+ grep "loadEmbeddingModel" services/worker-daemon.js
@@ -0,0 +1,11 @@
1
+ # DM-005: Consolidation worker stub (no decay/rebuild)
2
+ **Severity**: Enhancement
3
+ **GitHub**: [#1140](https://github.com/ruvnet/claude-flow/issues/1140)
4
+ ## Root Cause
5
+ The consolidation worker was a stub writing `{patternsConsolidated: 0}` to a JSON file. No actual memory consolidation occurred.
6
+ ## Fix
7
+ Call `applyTemporalDecay()` to reduce confidence of stale patterns, then `clearHNSWIndex()` + `getHNSWIndex({ forceRebuild: true })` to rebuild the index with current data.
8
+ ## Files Patched
9
+ - services/worker-daemon.js
10
+ ## Ops
11
+ 1 op in fix.py
@@ -0,0 +1,46 @@
1
+ # DM-005: Consolidation worker stub (no decay/rebuild)
2
+ # Makes consolidate worker actually call pattern decay + HNSW rebuild
3
+ patch("12: real consolidate worker",
4
+ WD,
5
+ """ async runConsolidateWorker() {
6
+ // Memory consolidation - clean up old patterns
7
+ const consolidateFile = join(this.projectRoot, '.claude-flow', 'metrics', 'consolidation.json');
8
+ const metricsDir = join(this.projectRoot, '.claude-flow', 'metrics');
9
+ if (!existsSync(metricsDir)) {
10
+ mkdirSync(metricsDir, { recursive: true });
11
+ }
12
+ const result = {
13
+ timestamp: new Date().toISOString(),
14
+ patternsConsolidated: 0,
15
+ memoryCleaned: 0,
16
+ duplicatesRemoved: 0,
17
+ };
18
+ writeFileSync(consolidateFile, JSON.stringify(result, null, 2));
19
+ return result;
20
+ }""",
21
+ """ async runConsolidateWorker() {
22
+ const consolidateFile = join(this.projectRoot, '.claude-flow', 'metrics', 'consolidation.json');
23
+ const metricsDir = join(this.projectRoot, '.claude-flow', 'metrics');
24
+ if (!existsSync(metricsDir)) {
25
+ mkdirSync(metricsDir, { recursive: true });
26
+ }
27
+ const result = {
28
+ timestamp: new Date().toISOString(),
29
+ patternsConsolidated: 0,
30
+ memoryCleaned: 0,
31
+ duplicatesRemoved: 0,
32
+ };
33
+ try {
34
+ const mi = await import('../memory/memory-initializer.js');
35
+ // 1. Apply temporal decay (reduce confidence of stale patterns)
36
+ const decayResult = await mi.applyTemporalDecay();
37
+ if (decayResult?.success) result.patternsConsolidated = decayResult.patternsDecayed || 0;
38
+ // 2. Rebuild HNSW index with current data
39
+ mi.clearHNSWIndex();
40
+ const hnsw = await mi.getHNSWIndex({ forceRebuild: true });
41
+ if (hnsw) result.hnswRebuilt = hnsw.entries?.size ?? 0;
42
+ result.memoryCleaned = 1;
43
+ } catch (e) { result.error = e?.message || String(e); }
44
+ writeFileSync(consolidateFile, JSON.stringify(result, null, 2));
45
+ return result;
46
+ }""")
@@ -0,0 +1 @@
1
+ grep "applyTemporalDecay" services/worker-daemon.js
@@ -0,0 +1,11 @@
1
+ # EM-001: Embedding system ignores project config (model + HNSW dims)
2
+ **Severity**: High
3
+ **GitHub**: [#1143](https://github.com/ruvnet/claude-flow/issues/1143)
4
+ ## Root Cause
5
+ `loadEmbeddingModel()` hardcodes `Xenova/all-MiniLM-L6-v2` (384-dim). Projects configured with a different model via `embeddings.json` (e.g. `all-mpnet-base-v2` 768-dim) still get MiniLM 384-dim vectors. HNSW index also hardcodes 384 dimensions, causing dimension mismatch when the actual model produces 768-dim vectors. Every search falls back to brute-force SQLite.
6
+ ## Fix
7
+ Read model name and dimensions from `.claude-flow/embeddings.json` at load time. Fall back to all-MiniLM-L6-v2 (384-dim) if no config exists. Delete stale persistent HNSW files on forceRebuild. Guard metadata loading and early-return to skip on forceRebuild.
8
+ ## Files Patched
9
+ - memory/memory-initializer.js
10
+ ## Ops
11
+ 6 ops in fix.py (merged from old patches 8 + 9)
@@ -0,0 +1,111 @@
1
+ # EM-001: Embedding system ignores project config (model + HNSW dims)
2
+ # Merged from old patches 8 (config-driven model) + 9 (HNSW dimension fix)
3
+
4
+ # --- Old Patch 8: Config-driven embedding model loader ---
5
+ patch("8: config-driven model",
6
+ MI,
7
+ """ // Try to import @xenova/transformers for ONNX embeddings
8
+ const transformers = await import('@xenova/transformers').catch(() => null);
9
+ if (transformers) {
10
+ if (verbose) {
11
+ console.log('Loading ONNX embedding model (all-MiniLM-L6-v2)...');
12
+ }
13
+ // Use small, fast model for local embeddings
14
+ const { pipeline } = transformers;
15
+ const embedder = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
16
+ embeddingModelState = {
17
+ loaded: true,
18
+ model: embedder,
19
+ tokenizer: null,
20
+ dimensions: 384 // MiniLM-L6 produces 384-dim vectors
21
+ };
22
+ return {
23
+ success: true,
24
+ dimensions: 384,
25
+ modelName: 'all-MiniLM-L6-v2',
26
+ loadTime: Date.now() - startTime
27
+ };
28
+ }""",
29
+ """ // Patch 8: Read embedding model from project config instead of hardcoding
30
+ let modelName = 'all-MiniLM-L6-v2';
31
+ let modelDimensions = 384;
32
+ try {
33
+ const embConfigPath = path.join(process.cwd(), '.claude-flow', 'embeddings.json');
34
+ if (fs.existsSync(embConfigPath)) {
35
+ const embConfig = JSON.parse(fs.readFileSync(embConfigPath, 'utf-8'));
36
+ if (embConfig.model) {
37
+ modelName = embConfig.model;
38
+ modelDimensions = embConfig.dimension || 768;
39
+ }
40
+ }
41
+ } catch { /* use defaults */ }
42
+ const xenovaModel = modelName.startsWith('Xenova/') ? modelName : `Xenova/${modelName}`;
43
+ // Try to import @xenova/transformers for ONNX embeddings
44
+ const transformers = await import('@xenova/transformers').catch(() => null);
45
+ if (transformers) {
46
+ if (verbose) {
47
+ console.log(`Loading ONNX embedding model (${modelName})...`);
48
+ }
49
+ const { pipeline } = transformers;
50
+ const embedder = await pipeline('feature-extraction', xenovaModel);
51
+ embeddingModelState = {
52
+ loaded: true,
53
+ model: embedder,
54
+ tokenizer: null,
55
+ dimensions: modelDimensions
56
+ };
57
+ return {
58
+ success: true,
59
+ dimensions: modelDimensions,
60
+ modelName: modelName,
61
+ loadTime: Date.now() - startTime
62
+ };
63
+ }""")
64
+
65
+ # --- Old Patch 9: HNSW dimension fix ---
66
+ patch("9: HNSW dim default",
67
+ MI,
68
+ " const dimensions = options?.dimensions ?? 384;",
69
+ """ // Patch 9: Read dims from embeddings.json
70
+ let dimensions = options?.dimensions;
71
+ if (!dimensions) {
72
+ try {
73
+ const embConfigPath = path.join(process.cwd(), '.claude-flow', 'embeddings.json');
74
+ if (fs.existsSync(embConfigPath)) {
75
+ const embConfig = JSON.parse(fs.readFileSync(embConfigPath, 'utf-8'));
76
+ dimensions = embConfig.dimension || 768;
77
+ }
78
+ } catch { /* ignore */ }
79
+ dimensions = dimensions || 768;
80
+ }""")
81
+
82
+ patch("9: HNSW stale cleanup",
83
+ MI,
84
+ """ const dbPath = options?.dbPath || path.join(swarmDir, 'memory.db');
85
+ // Create HNSW index with persistent storage""",
86
+ """ const dbPath = options?.dbPath || path.join(swarmDir, 'memory.db');
87
+ // Patch 9: delete stale persistent files on forceRebuild
88
+ if (options?.forceRebuild) {
89
+ try:
90
+ if (fs.existsSync(hnswPath)) fs.unlinkSync(hnswPath);
91
+ catch {}
92
+ try:
93
+ if (fs.existsSync(metadataPath)) fs.unlinkSync(metadataPath);
94
+ catch {}
95
+ }
96
+ // Create HNSW index with persistent storage""".replace("try:", "try {").replace("catch {}", "} catch {}"))
97
+
98
+ patch("9: HNSW metadata guard",
99
+ MI,
100
+ " if (fs.existsSync(metadataPath)) {",
101
+ " if (!options?.forceRebuild && fs.existsSync(metadataPath)) {")
102
+
103
+ patch("9: HNSW early-return guard",
104
+ MI,
105
+ " if (existingLen > 0 && entries.size > 0) {",
106
+ " if (existingLen > 0 && entries.size > 0 && !options?.forceRebuild) {")
107
+
108
+ patch("9: HNSW status default",
109
+ MI,
110
+ "dimensions: hnswIndex?.dimensions ?? 384",
111
+ "dimensions: hnswIndex?.dimensions ?? 768")
@@ -0,0 +1 @@
1
+ grep "embeddings.json" memory/memory-initializer.js
@@ -0,0 +1,11 @@
1
+ # EM-002: @xenova/transformers cache EACCES
2
+ **Severity**: Medium
3
+ **GitHub**: [#1144](https://github.com/ruvnet/claude-flow/issues/1144)
4
+ ## Root Cause
5
+ Global `npm install` creates root-owned directories. `@xenova/transformers` tries to write model cache next to its `package.json`, failing with EACCES.
6
+ ## Fix
7
+ Create `.cache` directory with open permissions. Alternative: set `TRANSFORMERS_CACHE` env var.
8
+ ## Files Patched
9
+ - N/A (filesystem permissions, not code)
10
+ ## Ops
11
+ fix.sh (chmod, not Python)
@@ -0,0 +1,12 @@
1
+ #!/bin/bash
2
+ # EM-002: @xenova/transformers cache EACCES on global install
3
+ # Global npm install creates root-owned dirs. Model cache write fails.
4
+
5
+ TRANSFORMERS_DIR=$(npm root -g)/claude-flow/node_modules/@xenova/transformers
6
+ if [ -d "$TRANSFORMERS_DIR" ]; then
7
+ sudo mkdir -p "$TRANSFORMERS_DIR/.cache"
8
+ sudo chmod 777 "$TRANSFORMERS_DIR/.cache"
9
+ echo " Applied: EM-002 transformers cache permissions"
10
+ else
11
+ echo " SKIP: EM-002 — @xenova/transformers not in global install"
12
+ fi
@@ -0,0 +1,11 @@
1
+ # GV-001: HNSW ghost vectors persist after memory delete
2
+ **Severity**: Medium
3
+ **GitHub**: [#1122](https://github.com/ruvnet/claude-flow/issues/1122)
4
+ ## Root Cause
5
+ `deleteEntry()` soft-deletes the SQLite row but never removes the vector from the in-memory HNSW index or its persisted metadata. The search code (`searchHNSWIndex`) iterates HNSW results and looks up each ID in `hnswIndex.entries` Map — ghost vectors match but return stale metadata (key, namespace, content) because the Map entry was never removed.
6
+ ## Fix
7
+ After the SQLite soft-delete, remove the entry from `hnswIndex.entries` Map and save updated metadata. The HNSW vector DB (`@ruvector/core`) doesn't support point removal, but the search code already skips entries missing from the Map (`if (!entry) continue`), so removing from the Map is sufficient to suppress ghost results.
8
+ ## Files Patched
9
+ - memory/memory-initializer.js
10
+ ## Ops
11
+ 1 op in fix.py
@@ -0,0 +1,34 @@
1
+ # GV-001: Remove HNSW ghost vectors on memory delete
2
+ # GitHub: #1122
3
+ # After SQLite soft-delete, remove entry from persisted HNSW metadata file
4
+ # and in-memory map. Each CLI invocation is a fresh process so hnswIndex is
5
+ # usually null — the file-based cleanup is the primary path.
6
+ # 1 op
7
+
8
+ patch("GV-001: remove HNSW entry on delete",
9
+ MI,
10
+ """ // Get remaining count
11
+ const countResult = db.exec(`SELECT COUNT(*) FROM memory_entries WHERE status = 'active'`);
12
+ const remainingEntries = countResult[0]?.values?.[0]?.[0] || 0;
13
+ // Save updated database""",
14
+ """ // Remove ghost vector from HNSW metadata file
15
+ const entryId = String(checkResult[0].values[0][0]);
16
+ try {
17
+ const swarmDir = path.join(process.cwd(), '.swarm');
18
+ const metadataPath = path.join(swarmDir, 'hnsw.metadata.json');
19
+ if (fs.existsSync(metadataPath)) {
20
+ const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8'));
21
+ const filtered = metadata.filter(([id]) => id !== entryId);
22
+ if (filtered.length < metadata.length) {
23
+ fs.writeFileSync(metadataPath, JSON.stringify(filtered));
24
+ }
25
+ }
26
+ } catch { /* best-effort */ }
27
+ // Also clear in-memory index if loaded
28
+ if (hnswIndex?.entries?.has(entryId)) {
29
+ hnswIndex.entries.delete(entryId);
30
+ }
31
+ // Get remaining count
32
+ const countResult = db.exec(`SELECT COUNT(*) FROM memory_entries WHERE status = 'active'`);
33
+ const remainingEntries = countResult[0]?.values?.[0]?.[0] || 0;
34
+ // Save updated database""")
@@ -0,0 +1 @@
1
+ grep "hnswIndex.entries.delete" memory/memory-initializer.js
@@ -0,0 +1,44 @@
1
+ # HK-001: post-edit hook records file_path as "unknown"
2
+
3
+ **Severity**: Medium
4
+ **GitHub**: [#1155](https://github.com/ruvnet/claude-flow/issues/1155)
5
+
6
+ ## Root Cause
7
+
8
+ The `hook-handler.cjs` template (in `helpers-generator.js`) reads the edited file
9
+ path from `process.env.TOOL_INPUT_file_path`. However, Claude Code's PostToolUse
10
+ hooks do **not** set individual `TOOL_INPUT_*` environment variables. Instead, tool
11
+ input is delivered via **stdin** as a JSON object:
12
+
13
+ ```json
14
+ {
15
+ "tool_name": "Edit",
16
+ "tool_input": { "file_path": "/path/to/file", ... },
17
+ "tool_response": { ... },
18
+ ...
19
+ }
20
+ ```
21
+
22
+ Because the env var is always empty, `recordEdit()` in `intelligence.cjs` logs
23
+ every edit as `file: "unknown"`. The data is consumed by `consolidate()` at session
24
+ end for edit-count analytics — cosmetic but wrong.
25
+
26
+ ## Fix
27
+
28
+ Two changes in `init/helpers-generator.js`:
29
+
30
+ 1. **Add stdin parsing** after the `argv` line — read and parse the JSON that
31
+ Claude Code pipes to PostToolUse hook commands.
32
+ 2. **Update post-edit handler** — read `stdinData.tool_input.file_path` instead
33
+ of `process.env.TOOL_INPUT_file_path`.
34
+
35
+ The `prompt` fallback for the `route` handler is also updated to prefer
36
+ `stdinData.tool_input.command` over the env var.
37
+
38
+ ## Files Patched
39
+
40
+ - init/helpers-generator.js
41
+
42
+ ## Ops
43
+
44
+ 2 ops in fix.py
@@ -0,0 +1,23 @@
1
+ # HK-001: post-edit hook records file_path as "unknown"
2
+ # Claude Code passes tool input via stdin JSON, not TOOL_INPUT_* env vars.
3
+ # 2 ops: add stdin parsing (incl. prompt-fallback fix), fix post-edit file extraction.
4
+
5
+ patch("HK-001a: add stdin parsing",
6
+ HELPERS_GEN,
7
+ """'const [,, command, ...args] = process.argv;',
8
+ "const prompt = process.env.PROMPT || process.env.TOOL_INPUT_command || args.join(' ') || '';",""",
9
+ """'const [,, command, ...args] = process.argv;',
10
+ '',
11
+ '// Read stdin JSON from Claude Code hooks (provides tool_input, tool_name, etc.)',
12
+ 'let stdinData = {};',
13
+ 'try {',
14
+ " const raw = require(\\'fs\\').readFileSync(0, \\'utf-8\\').trim();",
15
+ " if (raw) stdinData = JSON.parse(raw);",
16
+ '} catch (e) { /* stdin may be empty or non-JSON */ }',
17
+ '',
18
+ "const prompt = process.env.PROMPT || (stdinData.tool_input && stdinData.tool_input.command) || args.join(' ') || '';",""")
19
+
20
+ patch("HK-001b: post-edit read file_path from stdin",
21
+ HELPERS_GEN,
22
+ """" var file = process.env.TOOL_INPUT_file_path || args[0] || '';",""",
23
+ """" var file = (stdinData.tool_input && stdinData.tool_input.file_path) || args[0] || '';",""")
@@ -0,0 +1 @@
1
+ grep "stdinData" init/helpers-generator.js
@@ -0,0 +1,36 @@
1
+ # HK-002: MCP hook handlers are stubs that don't persist data
2
+
3
+ **Severity**: High
4
+ **GitHub**: [#1058](https://github.com/ruvnet/claude-flow/issues/1058)
5
+
6
+ ## Root Cause
7
+
8
+ Three MCP hook handlers in `hooks-tools.js` return success responses but **never persist any data**:
9
+
10
+ 1. **`hooksPostEdit`** (line 512) -- Returns `{recorded: true}` but has no database INSERT
11
+ 2. **`hooksPostCommand`** (line 568) -- Same, claims recorded but stores nothing
12
+ 3. **`hooksPostTask`** (line 886) -- Returns fake random duration and hardcoded pattern counts
13
+
14
+ The store function `getRealStoreFunction()` (line 23) already exists in the file and is used correctly by other handlers (`hooks_intelligence_trajectory-end`, `hooks_intelligence_pattern-store`). These three just never call it.
15
+
16
+ ## Fix
17
+
18
+ Patch each handler to call `getRealStoreFunction()` and persist to appropriate namespaces:
19
+ - `hooksPostEdit` -> namespace: `edits`
20
+ - `hooksPostCommand` -> namespace: `commands`
21
+ - `hooksPostTask` -> namespace: `tasks`
22
+
23
+ ## Impact Without Patch
24
+
25
+ - Edit patterns never stored -- can't learn from file edits
26
+ - Command history lost -- can't learn from command outcomes
27
+ - Task outcomes not tracked -- SONA learning has no data
28
+ - Misleading metrics -- statusline shows fake pattern counts
29
+
30
+ ## Files Patched
31
+
32
+ - `mcp-tools/hooks-tools.js`
33
+
34
+ ## Ops
35
+
36
+ 3 ops in fix.py
@@ -0,0 +1,155 @@
1
+ # HK-002: MCP hook handlers are stubs that don't persist data
2
+ # GitHub: #1058
3
+ # Restored from deleted HK-001 (commit 95a6a23)
4
+
5
+ # HK-002a: hooksPostEdit - add persistence
6
+ patch("HK-002a: hooksPostEdit persistence",
7
+ MCP_HOOKS,
8
+ """ handler: async (params) => {
9
+ const filePath = params.filePath;
10
+ const success = params.success !== false;
11
+ return {
12
+ recorded: true,
13
+ filePath,
14
+ success,
15
+ timestamp: new Date().toISOString(),
16
+ learningUpdate: success ? 'pattern_reinforced' : 'pattern_adjusted',
17
+ };
18
+ },
19
+ };
20
+ export const hooksPreCommand""",
21
+ """ handler: async (params) => {
22
+ const filePath = params.filePath;
23
+ const success = params.success !== false;
24
+ const agent = params.agent || 'unknown';
25
+ const timestamp = new Date().toISOString();
26
+ const editId = `edit-${Date.now()}-${Math.random().toString(36).substring(7)}`;
27
+ // HK-002a: Actually persist the edit record
28
+ const storeFn = await getRealStoreFunction();
29
+ let storeResult = { success: false };
30
+ if (storeFn) {
31
+ try {
32
+ storeResult = await storeFn({
33
+ key: editId,
34
+ value: JSON.stringify({ filePath, success, agent, timestamp }),
35
+ namespace: 'edits',
36
+ generateEmbeddingFlag: true,
37
+ tags: [success ? 'success' : 'failure', 'edit', agent],
38
+ });
39
+ } catch (e) { storeResult = { success: false, error: String(e) }; }
40
+ }
41
+ return {
42
+ recorded: storeResult.success,
43
+ filePath,
44
+ success,
45
+ timestamp,
46
+ learningUpdate: success ? 'pattern_reinforced' : 'pattern_adjusted',
47
+ };
48
+ },
49
+ };
50
+ export const hooksPreCommand""")
51
+
52
+ # HK-002b: hooksPostCommand - add persistence
53
+ patch("HK-002b: hooksPostCommand persistence",
54
+ MCP_HOOKS,
55
+ """ handler: async (params) => {
56
+ const command = params.command;
57
+ const exitCode = params.exitCode || 0;
58
+ return {
59
+ recorded: true,
60
+ command,
61
+ exitCode,
62
+ success: exitCode === 0,
63
+ timestamp: new Date().toISOString(),
64
+ };
65
+ },
66
+ };
67
+ export const hooksRoute""",
68
+ """ handler: async (params) => {
69
+ const command = params.command;
70
+ const exitCode = params.exitCode || 0;
71
+ const success = exitCode === 0;
72
+ const timestamp = new Date().toISOString();
73
+ const cmdId = `cmd-${Date.now()}-${Math.random().toString(36).substring(7)}`;
74
+ // HK-002b: Actually persist the command record
75
+ const storeFn = await getRealStoreFunction();
76
+ let storeResult = { success: false };
77
+ if (storeFn) {
78
+ try {
79
+ storeResult = await storeFn({
80
+ key: cmdId,
81
+ value: JSON.stringify({ command, exitCode, success, timestamp }),
82
+ namespace: 'commands',
83
+ generateEmbeddingFlag: true,
84
+ tags: [success ? 'success' : 'failure', 'command'],
85
+ });
86
+ } catch (e) { storeResult = { success: false, error: String(e) }; }
87
+ }
88
+ return {
89
+ recorded: storeResult.success,
90
+ command,
91
+ exitCode,
92
+ success,
93
+ timestamp,
94
+ };
95
+ },
96
+ };
97
+ export const hooksRoute""")
98
+
99
+ # HK-002c: hooksPostTask - add persistence, remove fake random data
100
+ patch("HK-002c: hooksPostTask persistence",
101
+ MCP_HOOKS,
102
+ """ handler: async (params) => {
103
+ const taskId = params.taskId;
104
+ const success = params.success !== false;
105
+ const quality = params.quality || (success ? 0.85 : 0.3);
106
+ return {
107
+ taskId,
108
+ success,
109
+ duration: Math.floor(Math.random() * 300) + 60, // 1-6 minutes in seconds
110
+ learningUpdates: {
111
+ patternsUpdated: success ? 2 : 1,
112
+ newPatterns: success ? 1 : 0,
113
+ trajectoryId: `traj-${Date.now()}`,
114
+ },
115
+ quality,
116
+ timestamp: new Date().toISOString(),
117
+ };
118
+ },
119
+ };
120
+ // Explain hook""",
121
+ """ handler: async (params) => {
122
+ const taskId = params.taskId;
123
+ const success = params.success !== false;
124
+ const agent = params.agent || 'unknown';
125
+ const quality = params.quality || (success ? 0.85 : 0.3);
126
+ const timestamp = new Date().toISOString();
127
+ // HK-002c: Actually persist the task record
128
+ const storeFn = await getRealStoreFunction();
129
+ let storeResult = { success: false };
130
+ if (storeFn) {
131
+ try {
132
+ storeResult = await storeFn({
133
+ key: `task-${taskId}`,
134
+ value: JSON.stringify({ taskId, success, agent, quality, timestamp }),
135
+ namespace: 'tasks',
136
+ generateEmbeddingFlag: true,
137
+ tags: [success ? 'success' : 'failure', 'task', agent],
138
+ });
139
+ } catch (e) { storeResult = { success: false, error: String(e) }; }
140
+ }
141
+ return {
142
+ taskId,
143
+ success,
144
+ recorded: storeResult.success,
145
+ learningUpdates: {
146
+ patternsUpdated: storeResult.success ? 1 : 0,
147
+ newPatterns: storeResult.success ? 1 : 0,
148
+ trajectoryId: `task-${taskId}`,
149
+ },
150
+ quality,
151
+ timestamp,
152
+ };
153
+ },
154
+ };
155
+ // Explain hook""")
@@ -0,0 +1 @@
1
+ grep "HK-002a" mcp-tools/hooks-tools.js
@@ -0,0 +1,30 @@
1
+ # HK-003: hooks_metrics MCP handler returns hardcoded fake data
2
+
3
+ **Severity**: High
4
+ **GitHub**: [#1158](https://github.com/ruvnet/claude-flow/issues/1158)
5
+
6
+ ## Root Cause
7
+
8
+ The `hooksMetrics` handler in `mcp-tools/hooks-tools.js` returns a static
9
+ object literal with fake values (15 patterns, 87% routing accuracy, 128
10
+ commands executed). It never reads from any persistence layer.
11
+
12
+ This is the same class of defect as HK-002 (hook handlers returning fake
13
+ data without persisting), but for the metrics/dashboard endpoint.
14
+
15
+ ## Fix
16
+
17
+ Replace the hardcoded return with a function that:
18
+ 1. Reads `.swarm/sona-patterns.json` for pattern counts, confidence, and routing stats
19
+ 2. Reads `.ruvector/intelligence.json` for trajectory/command counts and success rates
20
+ 3. Computes actual metrics from the persisted data
21
+ 4. Falls back to zeros when files don't exist
22
+ 5. Preserves the static performance targets (those are design goals, not metrics)
23
+
24
+ ## Files Patched
25
+
26
+ - `mcp-tools/hooks-tools.js`
27
+
28
+ ## Ops
29
+
30
+ 1 op in fix.py