specmem-hardwicksoftware 3.7.30 → 3.7.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bootstrap.cjs CHANGED
@@ -4919,6 +4919,25 @@ async function autoInstallThisMf() {
4919
4919
  // Non-fatal - MCP server will retry
4920
4920
  }
4921
4921
 
4922
+ // Acquire socket lock so statusbar/health checks can detect us
4923
+ const projectPath_uf = getProjectPath();
4924
+ const lockAcquired_uf = tryAcquireSocketLock(projectPath_uf);
4925
+ if (lockAcquired_uf) {
4926
+ writeProjectPidFile(projectPath_uf, process.pid);
4927
+ writeInstanceState(projectPath_uf, {
4928
+ pid: process.pid,
4929
+ projectPath: projectPath_uf,
4930
+ projectHash: hashProjectPath(projectPath_uf),
4931
+ startTime: new Date().toISOString(),
4932
+ status: 'running',
4933
+ bootstrapVersion: '1.0.0',
4934
+ mode: 'ultra-fast'
4935
+ });
4936
+ startupLog('Ultra-fast path: socket lock acquired, PID file written');
4937
+ } else {
4938
+ startupLog('Ultra-fast path: could not acquire socket lock (non-fatal)');
4939
+ }
4940
+
4922
4941
  // Start server BEFORE any other operations
4923
4942
  // The server handles its own deferred initialization
4924
4943
  // CRITICAL: startServer() is now async and imports the ES module directly
@@ -55,6 +55,14 @@
55
55
  {
56
56
  "matcher": "Read",
57
57
  "hooks": [
58
+ {
59
+ "type": "command",
60
+ "command": "node /root/.claude/hooks/specmem-search-enforcer.cjs",
61
+ "timeout": 2,
62
+ "env": {
63
+ "SPECMEM_PROJECT_PATH": "${cwd}"
64
+ }
65
+ },
58
66
  {
59
67
  "type": "command",
60
68
  "command": "node /root/.claude/hooks/team-comms-enforcer.cjs",
@@ -84,6 +92,14 @@
84
92
  {
85
93
  "matcher": "Edit",
86
94
  "hooks": [
95
+ {
96
+ "type": "command",
97
+ "command": "node /root/.claude/hooks/specmem-search-enforcer.cjs",
98
+ "timeout": 2,
99
+ "env": {
100
+ "SPECMEM_PROJECT_PATH": "${cwd}"
101
+ }
102
+ },
87
103
  {
88
104
  "type": "command",
89
105
  "command": "node /root/.claude/hooks/team-comms-enforcer.cjs",
@@ -97,6 +113,11 @@
97
113
  {
98
114
  "matcher": "Write",
99
115
  "hooks": [
116
+ {
117
+ "type": "command",
118
+ "command": "node /root/.claude/hooks/specmem-search-enforcer.cjs",
119
+ "timeout": 2
120
+ },
100
121
  {
101
122
  "type": "command",
102
123
  "command": "node /root/.claude/hooks/team-comms-enforcer.cjs",
@@ -110,6 +131,11 @@
110
131
  {
111
132
  "matcher": "Grep",
112
133
  "hooks": [
134
+ {
135
+ "type": "command",
136
+ "command": "node /root/.claude/hooks/specmem-search-enforcer.cjs",
137
+ "timeout": 2
138
+ },
113
139
  {
114
140
  "type": "command",
115
141
  "command": "node /root/.claude/hooks/team-comms-enforcer.cjs",
@@ -147,6 +173,11 @@
147
173
  {
148
174
  "matcher": "Glob",
149
175
  "hooks": [
176
+ {
177
+ "type": "command",
178
+ "command": "node /root/.claude/hooks/specmem-search-enforcer.cjs",
179
+ "timeout": 2
180
+ },
150
181
  {
151
182
  "type": "command",
152
183
  "command": "node /root/.claude/hooks/team-comms-enforcer.cjs",
@@ -184,6 +215,14 @@
184
215
  {
185
216
  "matcher": "Bash",
186
217
  "hooks": [
218
+ {
219
+ "type": "command",
220
+ "command": "node /root/.claude/hooks/specmem-search-enforcer.cjs",
221
+ "timeout": 2,
222
+ "env": {
223
+ "SPECMEM_PROJECT_PATH": "${cwd}"
224
+ }
225
+ },
187
226
  {
188
227
  "type": "command",
189
228
  "command": "node /root/.claude/hooks/team-comms-enforcer.cjs",
@@ -306,6 +345,66 @@
306
345
  }
307
346
  ],
308
347
  "PostToolUse": [
348
+ {
349
+ "matcher": "Grep",
350
+ "hooks": [
351
+ {
352
+ "type": "command",
353
+ "command": "node /root/.claude/hooks/specmem-search-tracker.cjs",
354
+ "timeout": 5
355
+ }
356
+ ]
357
+ },
358
+ {
359
+ "matcher": "Glob",
360
+ "hooks": [
361
+ {
362
+ "type": "command",
363
+ "command": "node /root/.claude/hooks/specmem-search-tracker.cjs",
364
+ "timeout": 5
365
+ }
366
+ ]
367
+ },
368
+ {
369
+ "matcher": "Read",
370
+ "hooks": [
371
+ {
372
+ "type": "command",
373
+ "command": "node /root/.claude/hooks/specmem-search-tracker.cjs",
374
+ "timeout": 5
375
+ }
376
+ ]
377
+ },
378
+ {
379
+ "matcher": "mcp__specmem__find_memory",
380
+ "hooks": [
381
+ {
382
+ "type": "command",
383
+ "command": "node /root/.claude/hooks/specmem-search-tracker.cjs",
384
+ "timeout": 5
385
+ }
386
+ ]
387
+ },
388
+ {
389
+ "matcher": "mcp__specmem__find_code_pointers",
390
+ "hooks": [
391
+ {
392
+ "type": "command",
393
+ "command": "node /root/.claude/hooks/specmem-search-tracker.cjs",
394
+ "timeout": 5
395
+ }
396
+ ]
397
+ },
398
+ {
399
+ "matcher": "mcp__specmem__drill_down",
400
+ "hooks": [
401
+ {
402
+ "type": "command",
403
+ "command": "node /root/.claude/hooks/specmem-search-tracker.cjs",
404
+ "timeout": 5
405
+ }
406
+ ]
407
+ },
309
408
  {
310
409
  "matcher": "Task",
311
410
  "hooks": [
@@ -0,0 +1,229 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * SPECMEM SEARCH ENFORCER - PreToolUse Hook
4
+ * ==========================================
5
+ *
6
+ * HARD BLOCKS agents that skip SpecMem semantic search.
7
+ *
8
+ * Rules:
9
+ * 1. Agents CANNOT do ANYTHING until they've called find_memory or find_code_pointers at least once
10
+ * 2. Every 3 searches (Grep/Glob/Read), agents MUST call find_code_pointers again
11
+ * 3. After find_code_pointers, agents MUST drill_down before continuing
12
+ * 4. 2nd search in a cycle: WARNING injected
13
+ * 5. 3rd search in a cycle: HARD BLOCK (deny)
14
+ * 6. Tool calls and other non-search tools DO NOT reset the counter
15
+ * 7. Main session (non-agent) gets suggestions, not blocks
16
+ *
17
+ * State: /tmp/specmem-search-enforcer-{session}.json
18
+ */
19
+
20
+ const fs = require('fs');
21
+ const path = require('path');
22
+
23
+ // --- Agent detection (inline, no require chain issues) ---
24
+ function isAgent() {
25
+ const markers = [
26
+ process.env.CLAUDE_AGENT === 'true',
27
+ process.env.CLAUDE_AGENT_TYPE,
28
+ process.env.TASK_ID,
29
+ (process.env.CLAUDE_WORKTREE || '').length > 0,
30
+ (process.env.CLAUDE_SESSION_ID || '').includes('task-'),
31
+ ];
32
+ return markers.some(Boolean);
33
+ }
34
+
35
+ // --- Config ---
36
+ const SEARCH_TOOLS = ['Grep', 'Glob', 'Read', 'Bash'];
37
+ const WRITE_TOOLS = ['Edit', 'Write'];
38
+ const ALL_BLOCKED_TOOLS = [...SEARCH_TOOLS, ...WRITE_TOOLS];
39
+
40
+ const SPECMEM_SEARCH_TOOLS = [
41
+ 'mcp__specmem__find_memory',
42
+ 'mcp__specmem__find_code_pointers',
43
+ 'mcp__specmem__smart_search',
44
+ ];
45
+
46
+ const SPECMEM_DRILLDOWN_TOOLS = [
47
+ 'mcp__specmem__drill_down',
48
+ 'mcp__specmem__get_memory',
49
+ 'mcp__specmem__get_memory_by_id',
50
+ 'mcp__specmem__getMemoryFull',
51
+ ];
52
+
53
+ const SPECMEM_CODE_POINTER_TOOLS = [
54
+ 'mcp__specmem__find_code_pointers',
55
+ ];
56
+
57
+ const SEARCH_CYCLE_LIMIT = 3; // block on 3rd search
58
+ const WARN_AT = 2; // warn on 2nd search
59
+
60
+ // --- State management ---
61
+ function getStateFile() {
62
+ const sessionId = process.env.CLAUDE_SESSION_ID || process.env.TASK_ID || 'default';
63
+ const sanitized = sessionId.replace(/[^a-zA-Z0-9_-]/g, '_');
64
+ return `/tmp/specmem-search-enforcer-${sanitized}.json`;
65
+ }
66
+
67
+ function getState() {
68
+ try {
69
+ const f = getStateFile();
70
+ if (fs.existsSync(f)) {
71
+ const data = JSON.parse(fs.readFileSync(f, 'utf-8'));
72
+ // Expire after 30 min
73
+ if (data.timestamp && (Date.now() - data.timestamp > 30 * 60 * 1000)) {
74
+ return freshState();
75
+ }
76
+ return data;
77
+ }
78
+ } catch (e) {
79
+ try { fs.unlinkSync(getStateFile()); } catch (_) {}
80
+ }
81
+ return freshState();
82
+ }
83
+
84
+ function freshState() {
85
+ return {
86
+ hasUsedSpecmemSearch: false,
87
+ searchesSinceLastCodePointers: 0,
88
+ pendingDrilldown: false,
89
+ lastCodePointersQuery: null,
90
+ timestamp: Date.now(),
91
+ };
92
+ }
93
+
94
+ function saveState(state) {
95
+ try {
96
+ state.timestamp = Date.now();
97
+ fs.writeFileSync(getStateFile(), JSON.stringify(state, null, 2));
98
+ } catch (e) { /* silent */ }
99
+ }
100
+
101
+ // --- stdin reader with timeout ---
102
+ function readStdinWithTimeout(timeoutMs = 5000) {
103
+ return new Promise((resolve) => {
104
+ let input = '';
105
+ const timer = setTimeout(() => {
106
+ process.stdin.destroy();
107
+ resolve(input);
108
+ }, timeoutMs);
109
+ process.stdin.setEncoding('utf8');
110
+ process.stdin.on('data', (chunk) => { input += chunk; });
111
+ process.stdin.on('end', () => { clearTimeout(timer); resolve(input); });
112
+ process.stdin.on('error', () => { clearTimeout(timer); resolve(input); });
113
+ });
114
+ }
115
+
116
+ // --- Main ---
117
+ async function main() {
118
+ const inputData = await readStdinWithTimeout(5000);
119
+
120
+ try {
121
+ const hookData = JSON.parse(inputData);
122
+ const toolName = hookData.tool_name || '';
123
+ const toolInput = hookData.tool_input || {};
124
+ const state = getState();
125
+
126
+ // --- SpecMem search tool used (find_memory, find_code_pointers, smart_search) ---
127
+ if (SPECMEM_SEARCH_TOOLS.includes(toolName)) {
128
+ state.hasUsedSpecmemSearch = true;
129
+
130
+ // find_code_pointers resets the search counter AND sets drilldown pending
131
+ if (SPECMEM_CODE_POINTER_TOOLS.includes(toolName)) {
132
+ state.searchesSinceLastCodePointers = 0;
133
+ state.pendingDrilldown = true;
134
+ state.lastCodePointersQuery = toolInput.query || '(unknown)';
135
+ }
136
+
137
+ saveState(state);
138
+ process.exit(0); // allow
139
+ }
140
+
141
+ // --- Drilldown tool used (drill_down, get_memory, etc) ---
142
+ if (SPECMEM_DRILLDOWN_TOOLS.includes(toolName)) {
143
+ state.pendingDrilldown = false;
144
+ saveState(state);
145
+ process.exit(0); // allow
146
+ }
147
+
148
+ // --- Non-agent: suggest only, never block ---
149
+ if (!isAgent()) {
150
+ process.exit(0); // allow everything for main session
151
+ }
152
+
153
+ // === AGENT ENFORCEMENT BELOW ===
154
+
155
+ // --- Rule 1: Agent hasn't used ANY specmem search yet -> HARD BLOCK everything ---
156
+ if (!state.hasUsedSpecmemSearch && ALL_BLOCKED_TOOLS.includes(toolName)) {
157
+ const output = {
158
+ hookSpecificOutput: {
159
+ hookEventName: 'PreToolUse',
160
+ permissionDecision: 'deny',
161
+ permissionDecisionReason: `BLOCKED: You MUST call find_memory or find_code_pointers BEFORE using ${toolName}. No Read/Write/Grep/Glob/Edit/Bash allowed until you search SpecMem first. Run: mcp__specmem__find_code_pointers({query: "your task description"})`
162
+ }
163
+ };
164
+ console.log(JSON.stringify(output));
165
+ process.exit(0);
166
+ }
167
+
168
+ // --- Rule 3: Pending drilldown after find_code_pointers -> BLOCK until drilled ---
169
+ if (state.pendingDrilldown && ALL_BLOCKED_TOOLS.includes(toolName)) {
170
+ const output = {
171
+ hookSpecificOutput: {
172
+ hookEventName: 'PreToolUse',
173
+ permissionDecision: 'deny',
174
+ permissionDecisionReason: `BLOCKED: You ran find_code_pointers("${state.lastCodePointersQuery}") but haven't drilled down into the results yet. You MUST call drill_down({drilldownID: N}) or get_memory({id: "ID"}) before using ${toolName}. Drill into the results first!`
175
+ }
176
+ };
177
+ console.log(JSON.stringify(output));
178
+ process.exit(0);
179
+ }
180
+
181
+ // --- Count searches for cycle enforcement ---
182
+ if (SEARCH_TOOLS.includes(toolName)) {
183
+ state.searchesSinceLastCodePointers++;
184
+ saveState(state);
185
+
186
+ // Rule 5: 3rd search -> HARD BLOCK
187
+ if (state.searchesSinceLastCodePointers >= SEARCH_CYCLE_LIMIT) {
188
+ const output = {
189
+ hookSpecificOutput: {
190
+ hookEventName: 'PreToolUse',
191
+ permissionDecision: 'deny',
192
+ permissionDecisionReason: `BLOCKED: You've done ${state.searchesSinceLastCodePointers} searches without calling find_code_pointers. Every 3 searches you MUST call mcp__specmem__find_code_pointers to refresh your semantic context. Do it now before continuing.`
193
+ }
194
+ };
195
+ console.log(JSON.stringify(output));
196
+ process.exit(0);
197
+ }
198
+
199
+ // Rule 4: 2nd search -> WARNING
200
+ if (state.searchesSinceLastCodePointers >= WARN_AT) {
201
+ const output = {
202
+ hookSpecificOutput: {
203
+ hookEventName: 'PreToolUse',
204
+ permissionDecision: 'allow',
205
+ permissionDecisionReason: `WARNING: ${state.searchesSinceLastCodePointers}/${SEARCH_CYCLE_LIMIT} searches used. You MUST call find_code_pointers before your next search or you'll be blocked. Consider running it now.`,
206
+ additionalContext: `\u26a0\ufe0f SEARCH LIMIT WARNING: ${state.searchesSinceLastCodePointers}/${SEARCH_CYCLE_LIMIT} searches since last find_code_pointers. Next search WILL BE BLOCKED. Run mcp__specmem__find_code_pointers now.`
207
+ }
208
+ };
209
+ console.log(JSON.stringify(output));
210
+ process.exit(0);
211
+ }
212
+ }
213
+
214
+ // --- Write tools increment search counter too (they shouldn't write blind) ---
215
+ if (WRITE_TOOLS.includes(toolName)) {
216
+ // Don't count writes toward search limit, but they're allowed if we passed the checks above
217
+ saveState(state);
218
+ }
219
+
220
+ // Allow everything else (Task, ToolSearch, MCP tools, etc)
221
+ process.exit(0);
222
+
223
+ } catch (error) {
224
+ // Parse failure = allow (don't break the session)
225
+ process.exit(0);
226
+ }
227
+ }
228
+
229
+ main().catch(() => process.exit(0));
@@ -0,0 +1,71 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * SPECMEM SEARCH TRACKER - PostToolUse Hook
4
+ * ===========================================
5
+ *
6
+ * Tracks when agents use SpecMem semantic tools (find_memory, find_code_pointers, drill_down)
7
+ * and resets the search counter so enforcer unblocks.
8
+ *
9
+ * Also tracks Grep/Glob calls to increment search counter.
10
+ *
11
+ * AGENTS ONLY - main session skipped.
12
+ */
13
+
14
+ const fs = require('fs');
15
+ const path = require('path');
16
+
17
+ // Agent detection
18
+ function isAgent() {
19
+ const e = process.env;
20
+ if (e.CLAUDE_AGENT === '1' || e.CLAUDE_AGENT === 'true') return true;
21
+ if (e.TASK_ID || e.AGENT_ID || e.WORKTREE_PATH) return true;
22
+ if (e.CLAUDE_CODE_ENTRYPOINT === 'task') return true;
23
+ const ppid = e.CLAUDE_PARENT_PID || e.PARENT_PID;
24
+ if (ppid && ppid !== '1' && ppid !== String(process.pid)) return true;
25
+ return false;
26
+ }
27
+
28
+ function main() {
29
+ if (!isAgent()) {
30
+ console.log(JSON.stringify({ permissionDecision: 'allow' }));
31
+ return;
32
+ }
33
+
34
+ const toolName = process.env.TOOL_NAME || '';
35
+ const sessionId = process.env.SESSION_ID || process.env.CLAUDE_SESSION_ID || process.ppid?.toString() || 'unknown';
36
+ const stateDir = '/tmp/specmem-enforcer';
37
+ const stateFile = path.join(stateDir, `${sessionId}.json`);
38
+
39
+ try { fs.mkdirSync(stateDir, { recursive: true }); } catch {}
40
+
41
+ let state = { searchCount: 0, specmemUsed: false, drilldownRequired: false, drilldownDone: false };
42
+ try { state = JSON.parse(fs.readFileSync(stateFile, 'utf8')); } catch {}
43
+
44
+ const isSpecmemSearch = /find_memory|find_code_pointers/i.test(toolName);
45
+ const isDrilldown = /drill_down/i.test(toolName);
46
+ const isSearchTool = /^(Grep|Glob)$/i.test(toolName);
47
+
48
+ if (isSpecmemSearch) {
49
+ state.specmemUsed = true;
50
+ state.searchCount = 0; // Reset search counter
51
+ // find_code_pointers requires drill_down after
52
+ if (/find_code_pointers/i.test(toolName)) {
53
+ state.drilldownRequired = true;
54
+ state.drilldownDone = false;
55
+ }
56
+ }
57
+
58
+ if (isDrilldown) {
59
+ state.drilldownDone = true;
60
+ state.drilldownRequired = false;
61
+ }
62
+
63
+ if (isSearchTool) {
64
+ state.searchCount = (state.searchCount || 0) + 1;
65
+ }
66
+
67
+ try { fs.writeFileSync(stateFile, JSON.stringify(state)); } catch {}
68
+ console.log(JSON.stringify({ permissionDecision: 'allow' }));
69
+ }
70
+
71
+ main();
package/dist/config.js CHANGED
@@ -837,24 +837,19 @@ export function loadConfig() {
837
837
  const parsedUrl = parseDatabaseUrl();
838
838
  // Priority: DATABASE_URL > ENV VAR > .specmemrc > default
839
839
  // Per-project isolation still applies if DATABASE_URL not set
840
- // Container mode: use unix socket dir as host for postgres connection
841
- // When SPECMEM_CONTAINER_MODE is set, or container run dir has postgres socket
842
- // FIX: Try both {projectPath}/specmem/run and {projectPath}/run to handle
843
- // the case where projectPath IS the specmem dir (avoids specmem/specmem/run)
840
+ // Container mode: postgres via unix socket in specmem/run/ (bind-mounted from container /data/run)
841
+ // Socket appears after container starts; the PG run dir must exist first, and the socket arrives once PG is ready
844
842
  let containerRunDir = path.join(projectPath, 'specmem', 'run');
845
- const containerSocketExists = (() => {
846
- try {
847
- if (fs.existsSync(path.join(containerRunDir, '.s.PGSQL.5432'))) return true;
848
- // Fallback: projectPath might BE the specmem dir
849
- const altRunDir = path.join(projectPath, 'run');
850
- if (fs.existsSync(path.join(altRunDir, '.s.PGSQL.5432'))) {
851
- containerRunDir = altRunDir;
852
- return true;
853
- }
854
- return false;
855
- } catch { return false; }
856
- })();
843
+ // Also check projectPath/run in case projectPath IS the specmem dir
844
+ if (!fs.existsSync(containerRunDir) && fs.existsSync(path.join(projectPath, 'run'))) {
845
+ containerRunDir = path.join(projectPath, 'run');
846
+ }
847
+ const containerSocketExists = fs.existsSync(path.join(containerRunDir, '.s.PGSQL.5432'));
857
848
  const isContainerMode = process.env['SPECMEM_CONTAINER_MODE'] === 'true' || containerSocketExists;
849
+ if (isContainerMode) {
850
+ // Ensure socket directory exists on host — container bind-mounts here
851
+ try { fs.mkdirSync(containerRunDir, { recursive: true }); } catch (e) { /* may already exist */ }
852
+ }
858
853
  const defaultDbHost = isContainerMode ? containerRunDir : 'localhost';
859
854
  const dbHost = parsedUrl?.host || process.env['SPECMEM_DB_HOST'] || getRcValue(rc, 'database.host', defaultDbHost);
860
855
  const dbPort = parsedUrl?.port || projectDbPort;
@@ -17,10 +17,10 @@ types.setTypeParser(20, (val) => {
17
17
  return Number.isSafeInteger(n) ? n : BigInt(val);
18
18
  }); // bigint - safe for values > 2^53
19
19
  const DEFAULT_POOL_SETTINGS = {
20
- maxConnections: 20, // safe default - each project creates its own pool, 100 would exhaust PG max_connections
21
- minConnections: 5, // keep some warm connections fr
20
+ maxConnections: 6, // tuned for 4-core 8GB laptop - 20 was exhausting PG under concurrent tool calls
21
+ minConnections: 2, // keep a couple warm, don't hog connections
22
22
  idleTimeoutMs: 30000, // 30 sec timeout on idle connections
23
- connectionTimeoutMs: 30000, // 30 sec to establish connection
23
+ connectionTimeoutMs: 10000, // 10 sec to establish connection - fail fast under load
24
24
  statementTimeoutMs: 30000, // 30 sec statement timeout
25
25
  queryTimeoutMs: 60000, // 1 min query timeout for thicc queries
26
26
  healthCheckIntervalMs: 30000, // health check every 30 sec
package/dist/index.js CHANGED
@@ -1608,11 +1608,28 @@ class LocalEmbeddingProvider {
1608
1608
  * Runs in background to not block embedding requests
1609
1609
  */
1610
1610
  tryRestartContainer() {
1611
- // Container mode: brain container manages embedding server do NOT interfere.
1612
- // Self-healing attempts override sandboxSocketPath to wrong path (embeddings.sock vs embed.sock)
1613
- // and try to start conflicting Docker containers, causing CPU/RAM waste and socket confusion.
1611
+ // Container mode: restart the brain container instead of spawning a new process
1614
1612
  if (process.env.SPECMEM_CONTAINER_MODE === 'true') {
1615
- logger.debug('container mode active — brain manages embedding server, skipping self-heal restart');
1613
+ const now = Date.now();
1614
+ if (now - this.lastRestartAttempt < LocalEmbeddingProvider.RESTART_COOLDOWN_MS) {
1615
+ logger.debug('container restart cooldown active, skipping');
1616
+ return;
1617
+ }
1618
+ this.lastRestartAttempt = now;
1619
+ try {
1620
+ const { getContainerManager } = require('./container/containerManager.js');
1621
+ const projectPath = process.env.SPECMEM_PROJECT_PATH || process.cwd();
1622
+ const cm = getContainerManager(projectPath);
1623
+ logger.info({ projectPath }, '[LocalEmbeddingProvider] Restarting brain container...');
1624
+ cm.start().then(() => {
1625
+ logger.info('[LocalEmbeddingProvider] Brain container restarted');
1626
+ this.restartAttempts = 0;
1627
+ }).catch(err => {
1628
+ logger.error({ error: err?.message }, '[LocalEmbeddingProvider] Brain container restart failed');
1629
+ });
1630
+ } catch (err) {
1631
+ logger.error({ error: err?.message }, '[LocalEmbeddingProvider] Failed to get container manager');
1632
+ }
1616
1633
  return;
1617
1634
  }
1618
1635
  const now = Date.now();
@@ -2313,8 +2313,22 @@ export class EmbeddingServerManager extends EventEmitter {
2313
2313
  logger.warn({
2314
2314
  failures: this.config.maxFailuresBeforeRestart,
2315
2315
  restartCount: this.restartCount,
2316
- }, '[EmbeddingServerManager] Too many consecutive failures in container mode - cannot restart container process from host');
2316
+ }, '[EmbeddingServerManager] Too many consecutive failures in container mode - attempting container restart');
2317
2317
  this.emit('unhealthy_container', { failures: this.config.maxFailuresBeforeRestart });
2318
+ // FIX: Auto-restart brain container when embedding is dead
2319
+ try {
2320
+ const { getContainerManager } = require('../container/containerManager.js');
2321
+ const projectPath = process.env['SPECMEM_PROJECT_PATH'] || process.cwd();
2322
+ const cm = getContainerManager(projectPath);
2323
+ logger.info({ projectPath }, '[EmbeddingServerManager] Restarting brain container...');
2324
+ await cm.start();
2325
+ logger.info('[EmbeddingServerManager] Brain container restarted successfully');
2326
+ this.isRunning = true;
2327
+ this.startTime = Date.now();
2328
+ } catch (containerErr) {
2329
+ logger.error({ error: containerErr?.message || containerErr },
2330
+ '[EmbeddingServerManager] Failed to restart brain container');
2331
+ }
2318
2332
  }
2319
2333
  else {
2320
2334
  logger.warn({
@@ -424,12 +424,30 @@ export class MCPProtocolHandler {
424
424
  }
425
425
  }
426
426
  /**
427
- * batch handle multiple tool calls - for efficiency
427
+ * batch handle multiple tool calls - with concurrency limit
428
+ * prevents overwhelming the db pool when claude fires 5+ calls at once
428
429
  */
429
430
  async handleBatchToolCalls(calls) {
430
431
  const results = [];
431
- // process in parallel for speed
432
- const promises = calls.map(async (call) => {
432
+ // inline concurrency limiter - no npm deps needed
433
+ // max 2 concurrent to leave headroom on 4-core/8GB systems
434
+ const _limitConcurrency = (concurrency) => {
435
+ let active = 0;
436
+ const queue = [];
437
+ const next = () => {
438
+ while (active < concurrency && queue.length > 0) {
439
+ active++;
440
+ const { fn, resolve, reject } = queue.shift();
441
+ fn().then(resolve, reject).finally(() => { active--; next(); });
442
+ }
443
+ };
444
+ return (fn) => new Promise((resolve, reject) => {
445
+ queue.push({ fn, resolve, reject });
446
+ next();
447
+ });
448
+ };
449
+ const limit = _limitConcurrency(2);
450
+ const promises = calls.map((call) => limit(async () => {
433
451
  try {
434
452
  const result = await this.handleToolCall(call.name, call.args);
435
453
  return { name: call.name, result };
@@ -440,7 +458,7 @@ export class MCPProtocolHandler {
440
458
  error: error instanceof Error ? error.message : 'unknown error'
441
459
  };
442
460
  }
443
- });
461
+ }));
444
462
  const settled = await Promise.allSettled(promises);
445
463
  for (const result of settled) {
446
464
  if (result.status === 'fulfilled') {
@@ -236,8 +236,9 @@ export class SpecMemServer {
236
236
  this.announceToOnStartup();
237
237
  // Auto-start Codebook Learner (resource-capped background service)
238
238
  this._startCodebookLearner();
239
- // Auto-trigger background codebase indexing (populates code_definitions)
240
- // Runs deferred (10s delay) so it doesn't block MCP startup
239
+ // NOTE: _triggerCodebaseIndexing() is also called from deferredInitPromise.then()
240
+ // after DB migrations complete (which create codebase_files table).
241
+ // This early call may no-op if DB not ready yet — the post-DB call is the critical one.
241
242
  this._triggerCodebaseIndexing();
242
243
  };
243
244
  // get that db connection no cap
@@ -1285,6 +1286,14 @@ export class SpecMemServer {
1285
1286
  await this.initializeMiniCOTServerManager();
1286
1287
  startupLog('Mini COT server manager initialized');
1287
1288
  logger.info('SpecMem MCP server fully initialized — all components ready');
1289
+ // FIX: Trigger codebase indexing AFTER DB init (migrations create codebase_files table)
1290
+ // Previously only called in oninitialized which fires BEFORE deferred DB init,
1291
+ // causing checkCodebaseIndexStatus to see missing table → needsReindex=false → skip
1292
+ try {
1293
+ await this._triggerCodebaseIndexing();
1294
+ } catch (indexErr) {
1295
+ logger.warn({ error: indexErr?.message }, 'Post-DB-init codebase indexing failed (non-fatal)');
1296
+ }
1288
1297
  // Run initial sync on startup — ensures codebase is fresh when Claude Code launches
1289
1298
  await this._runStartupSync();
1290
1299
  // Start idle sync timer — auto-syncs when no tool calls for 60s
@@ -1354,7 +1363,11 @@ export class SpecMemServer {
1354
1363
  const checkResult = await CheckSyncStatus.execute({ detailed: false }, wm);
1355
1364
  const syncScore = checkResult?.syncScore ?? 100;
1356
1365
  // Only resync if drift detected (score < 100)
1357
- if (syncScore < 100) {
1366
+ // Skip resync if indexing is still pending (syncScore === -1)
1367
+ if (checkResult?.indexingPending) {
1368
+ process.stderr.write(`[SPECMEM IDLE-SYNC] Indexing still in progress, skipping resync\n`);
1369
+ }
1370
+ else if (syncScore < 100) {
1358
1371
  process.stderr.write(`[SPECMEM IDLE-SYNC] Drift detected (${syncScore}%), resyncing...\n`);
1359
1372
  const resyncResult = await ForceResync.execute({ dryRun: false }, wm);
1360
1373
  const added = resyncResult?.stats?.filesAdded ?? 0;
@@ -94,32 +94,30 @@ _cacheCleanupTimer.unref();
94
94
  /**
95
95
  * Get the project-scoped embedding cache
96
96
  */
97
- // HIGH-4: Simple lock to prevent concurrent eviction
98
- let _evictionInProgress = false;
97
+ // HIGH-4: Eviction uses while-loop to guarantee room before creating new entry.
98
+ // Old boolean _evictionInProgress flag was broken: when flag was true, eviction was
99
+ // skipped but new entry was still created at line 125, exceeding the 20-project limit.
99
100
  function getProjectEmbeddingCache() {
100
101
  const project = getProjectPath();
101
102
  _EMBEDDING_CACHE_ACCESS_TIMES.set(project, Date.now());
102
103
  if (!_EMBEDDING_CACHE_BY_PROJECT.has(project)) {
103
- // HIGH-4: Changed > to >= to evict at limit, not after. Added lock to prevent concurrent eviction.
104
- if (_EMBEDDING_CACHE_BY_PROJECT.size >= 20 && !_evictionInProgress) {
105
- _evictionInProgress = true;
106
- try {
107
- // Evict the least recently accessed project cache
108
- let oldestProject = null;
109
- let oldestTime = Infinity;
110
- for (const [p, t] of _EMBEDDING_CACHE_ACCESS_TIMES) {
111
- if (t < oldestTime) {
112
- oldestTime = t;
113
- oldestProject = p;
114
- }
104
+ // Evict until there's room - loop guarantees we never exceed limit
105
+ while (_EMBEDDING_CACHE_BY_PROJECT.size >= 20) {
106
+ let oldestProject = null;
107
+ let oldestTime = Infinity;
108
+ for (const [p, t] of _EMBEDDING_CACHE_ACCESS_TIMES) {
109
+ if (t < oldestTime && p !== project) {
110
+ oldestTime = t;
111
+ oldestProject = p;
115
112
  }
116
- if (oldestProject) {
117
- _EMBEDDING_CACHE_BY_PROJECT.delete(oldestProject);
118
- _EMBEDDING_CACHE_ACCESS_TIMES.delete(oldestProject);
119
- __debugLog('[MCP DEBUG]', Date.now(), 'CACHE_PROJECT_EVICTED', { evictedProject: oldestProject, reason: 'max_projects_reached' });
120
- }
121
- } finally {
122
- _evictionInProgress = false;
113
+ }
114
+ if (oldestProject) {
115
+ _EMBEDDING_CACHE_BY_PROJECT.delete(oldestProject);
116
+ _EMBEDDING_CACHE_ACCESS_TIMES.delete(oldestProject);
117
+ __debugLog('[MCP DEBUG]', Date.now(), 'CACHE_PROJECT_EVICTED', { evictedProject: oldestProject, reason: 'max_projects_reached' });
118
+ } else {
119
+ // Safety: no evictable project found (all entries are current project?), break to avoid infinite loop
120
+ break;
123
121
  }
124
122
  }
125
123
  _EMBEDDING_CACHE_BY_PROJECT.set(project, new Map());
@@ -27,7 +27,10 @@ export class CheckSyncStatus {
27
27
  const driftReport = await watcherManager.checkSync();
28
28
  // build summary message
29
29
  let summary;
30
- if (driftReport.inSync) {
30
+ if (driftReport.indexingPending) {
31
+ summary = `Codebase indexing in progress — sync score not yet available. ${driftReport.totalFiles} files on disk awaiting indexing.`;
32
+ }
33
+ else if (driftReport.inSync) {
31
34
  summary = `Everything is in sync! ${driftReport.upToDate} files are up to date.`;
32
35
  }
33
36
  else {
@@ -45,7 +48,8 @@ export class CheckSyncStatus {
45
48
  }
46
49
  const result = {
47
50
  inSync: driftReport.inSync,
48
- syncScore: driftReport.syncScore,
51
+ syncScore: driftReport.indexingPending ? -1 : driftReport.syncScore,
52
+ indexingPending: !!driftReport.indexingPending,
49
53
  driftPercentage: driftReport.driftPercentage,
50
54
  summary,
51
55
  stats: {
@@ -70,10 +74,12 @@ export class CheckSyncStatus {
70
74
  contentMismatch: driftReport.contentMismatch
71
75
  };
72
76
  }
73
- // Update statusbar sync score live
74
- try {
75
- await watcherManager.writeSyncScore(driftReport.syncScore);
76
- } catch (e) { /* non-critical */ }
77
+ // Update statusbar sync score live (skip if indexing pending — don't write -1)
78
+ if (!driftReport.indexingPending) {
79
+ try {
80
+ await watcherManager.writeSyncScore(driftReport.syncScore);
81
+ } catch (e) { /* non-critical */ }
82
+ }
77
83
  logger.info({ inSync: driftReport.inSync, syncScore: driftReport.syncScore }, 'sync check complete');
78
84
  // Build human readable response
79
85
  const drifted = driftReport.missingFromMcp.length + driftReport.missingFromDisk.length + driftReport.contentMismatch.length;
@@ -99,7 +105,8 @@ export class CheckSyncStatus {
99
105
  if (more > 0) detailLines += `\n ... and ${more} more`;
100
106
  }
101
107
  }
102
- const message = `Sync Score: ${Math.round(driftReport.syncScore * 100)}%
108
+ const displayScore = driftReport.indexingPending ? 'Pending' : `${Math.round(driftReport.syncScore * 100)}%`;
109
+ const message = `Sync Score: ${displayScore}
103
110
  ${summary}
104
111
 
105
112
  Stats:
@@ -55,6 +55,11 @@ export class WatchForChangesNoCap {
55
55
  debounceCleanupTimer = null;
56
56
  // FIX 7.14: Track pending flush promises so stop() can await them
57
57
  pendingFlushPromises = new Set();
58
+ // PERF: Batch-level debounce — collect per-file handler results into batches
59
+ // so git operations changing many files don't fire hundreds of individual handler calls
60
+ _batchTimer = null;
61
+ _batchQueue = [];
62
+ _batchDebounceMs = 500; // collect events for 500ms before dispatching batch
58
63
  // stats tracking
59
64
  stats = {
60
65
  filesWatched: 0,
@@ -75,6 +80,37 @@ export class WatchForChangesNoCap {
75
80
  verbose: config.verbose ?? false
76
81
  };
77
82
  }
83
+ /**
84
+ * _enqueueBatchEvent - batch-level debounce for handler calls
85
+ *
86
+ * Instead of calling changeHandler() immediately per-file, queue events
87
+ * and dispatch the entire batch after 500ms of quiet. This prevents
88
+ * git operations (checkout, merge, rebase) from firing hundreds of
89
+ * individual handler calls that each trigger sync/DB work.
90
+ */
91
+ _enqueueBatchEvent(event) {
92
+ this._batchQueue.push(event);
93
+ if (this._batchTimer) clearTimeout(this._batchTimer);
94
+ this._batchTimer = setTimeout(async () => {
95
+ this._batchTimer = null;
96
+ const batch = this._batchQueue.splice(0);
97
+ if (batch.length === 0 || !this.changeHandler) return;
98
+ const batchSize = batch.length;
99
+ if (batchSize > 5) {
100
+ logger.info({ batchSize }, `batch debounce: dispatching ${batchSize} file events as batch`);
101
+ }
102
+ // Process each event in the batch sequentially
103
+ // (changeHandler expects individual events)
104
+ for (const evt of batch) {
105
+ try {
106
+ await this.changeHandler(evt);
107
+ }
108
+ catch (err) {
109
+ logger.error({ error: err, path: evt.path }, 'batch handler error for file');
110
+ }
111
+ }
112
+ }, this._batchDebounceMs);
113
+ }
78
114
  /**
79
115
  * startWatching - fires up the file watcher
80
116
  *
@@ -189,7 +225,7 @@ export class WatchForChangesNoCap {
189
225
  depth: undefined, // watch all depths
190
226
  // debouncing built into chokidar
191
227
  awaitWriteFinish: {
192
- stabilityThreshold: 300, // wait 300ms for file to stop changing
228
+ stabilityThreshold: 500, // wait 500ms for file to stop changing (reduced CPU from rapid fire events)
193
229
  pollInterval: 100 // check every 100ms
194
230
  },
195
231
  // dont follow symlinks (security)
@@ -240,6 +276,18 @@ export class WatchForChangesNoCap {
240
276
  await Promise.allSettled([...this.pendingFlushPromises]);
241
277
  this.pendingFlushPromises.clear();
242
278
  }
279
+ // PERF: Clear batch timer and flush pending batch events
280
+ if (this._batchTimer) {
281
+ clearTimeout(this._batchTimer);
282
+ this._batchTimer = null;
283
+ }
284
+ if (this._pendingBatchEvents.length > 0 && this.changeHandler) {
285
+ // Flush remaining batch events before shutdown
286
+ const batch = this._pendingBatchEvents.splice(0);
287
+ for (const evt of batch) {
288
+ try { await this.changeHandler(evt); } catch { /* shutting down */ }
289
+ }
290
+ }
243
291
  // FIX MED-13: Cancel all debounced handlers before clearing to prevent memory leaks
244
292
  // The debounce library's clear() method cancels pending timer execution
245
293
  for (const handler of this.debouncedHandlers.values()) {
@@ -381,7 +429,9 @@ export class WatchForChangesNoCap {
381
429
  if (latestEvent) {
382
430
  // Update timestamp to reflect when we actually process the event
383
431
  latestEvent.timestamp = new Date();
384
- await this.changeHandler(latestEvent);
432
+ // PERF: Route through batch debounce instead of calling handler directly
433
+ // This prevents git operations (200+ files) from firing 200 individual handler calls
434
+ this._enqueueBatchEvent(latestEvent);
385
435
  this.stats.eventsProcessed++;
386
436
  this.stats.lastEventTime = new Date();
387
437
  }
@@ -435,7 +485,8 @@ export class WatchForChangesNoCap {
435
485
  const flushPromise = Promise.resolve().then(async () => {
436
486
  try {
437
487
  latestEvent.timestamp = new Date();
438
- await this.changeHandler(latestEvent);
488
+ // PERF: Route through batch debounce
489
+ this._enqueueBatchEvent(latestEvent);
439
490
  this.stats.eventsProcessed++;
440
491
  this.stats.lastEventTime = new Date();
441
492
  }
@@ -479,23 +530,9 @@ export class WatchForChangesNoCap {
479
530
  const latestEvent = this.pendingEventData.get(key);
480
531
  if (latestEvent && this.changeHandler) {
481
532
  this.pendingEventData.delete(key);
482
- // FIX 7.14: Track flush promise so stop() can await it
483
- const flushPromise = Promise.resolve().then(async () => {
484
- try {
485
- latestEvent.timestamp = new Date();
486
- await this.changeHandler(latestEvent);
487
- this.stats.eventsProcessed++;
488
- this.stats.lastEventTime = new Date();
489
- }
490
- catch (error) {
491
- this.stats.errors++;
492
- logger.error({ error, event: latestEvent }, 'error processing stale debounce entry');
493
- }
494
- finally {
495
- this.pendingFlushPromises.delete(flushPromise);
496
- }
497
- });
498
- this.pendingFlushPromises.add(flushPromise);
533
+ // PERF: Route stale entries through batch debounce too
534
+ latestEvent.timestamp = new Date();
535
+ this._enqueueBatchEvent(latestEvent);
499
536
  }
500
537
  else {
501
538
  this.pendingEventData.delete(key);
@@ -119,22 +119,26 @@ export class AreWeStillInSync {
119
119
  const totalFiles = diskFiles.length;
120
120
  const totalMemories = mcpFiles.length;
121
121
  const totalDrift = missingFromMcp.length + missingFromDisk.length + contentMismatch.length;
122
+ // FIX: If codebase_files is empty but disk files exist, indexing hasn't completed yet.
123
+ // Don't report 0% sync — that's misleading. Report indexing-pending state instead.
124
+ const indexingPending = totalMemories === 0 && totalFiles > 0;
122
125
  // Sync score = what % of disk files are correctly synced in MCP
123
126
  // Deleted-from-disk files are cleanup work, not sync failures
124
127
  const totalItems = totalFiles || 1;
125
- const driftPercentage = totalItems > 0 ? (totalDrift / totalItems) * 100 : 0;
126
- const syncScore = totalItems > 0 ? upToDate / totalItems : 1;
128
+ const driftPercentage = indexingPending ? 0 : (totalItems > 0 ? (totalDrift / totalItems) * 100 : 0);
129
+ const syncScore = indexingPending ? -1 : (totalItems > 0 ? upToDate / totalItems : 1);
127
130
  const report = {
128
- inSync: totalDrift === 0,
131
+ inSync: indexingPending ? false : totalDrift === 0,
129
132
  lastChecked: new Date(),
130
133
  totalFiles,
131
134
  totalMemories,
132
- missingFromMcp,
135
+ missingFromMcp: indexingPending ? [] : missingFromMcp,
133
136
  missingFromDisk,
134
- contentMismatch,
135
- upToDate,
137
+ contentMismatch: indexingPending ? [] : contentMismatch,
138
+ upToDate: indexingPending ? 0 : upToDate,
136
139
  driftPercentage,
137
- syncScore
140
+ syncScore,
141
+ indexingPending
138
142
  };
139
143
  this.lastSyncCheck = report.lastChecked;
140
144
  this.lastCheckTime = startTime; // FIX 7.03: Record check time for mtime optimization
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "specmem-hardwicksoftware",
3
- "version": "3.7.30",
3
+ "version": "3.7.31",
4
4
  "type": "module",
5
5
  "description": "Your Claude Code sessions don't have to start from scratch anymore — SpecMem gives your AI real memory. It won't forget your conversations, your code, or your architecture decisions between sessions. That's the whole point. Semantic code indexing that actually works: TypeScript, JavaScript, Python, Go, Rust, Java, Kotlin, C, C++, HTML and more. It doesn't just track functions — it gets classes, methods, fields, constants, enums, macros, imports, structs, the whole codebase graph. There's chat memory too, powered by pgvector embeddings. You've also got token compression, team coordination, multi-agent comms, and file watching built in. 74+ MCP tools. Runs on PostgreSQL + Docker. It's kind of a big deal. justcalljon.pro",
6
6
  "main": "dist/index.js",
@@ -947,9 +947,14 @@ function adjustPgAuth() {
947
947
  // Backup and modify
948
948
  run(`sudo cp ${pgHbaPath} ${pgHbaPath}.backup`);
949
949
 
950
- // Add password auth for our user
950
+ // Add password auth for our user — check if already exists to prevent duplicates
951
951
  const authLine = `host ${DB_CONFIG.name} ${DB_CONFIG.user} 127.0.0.1/32 md5`;
952
- run(`echo '${authLine}' | sudo tee -a ${pgHbaPath}`);
952
+ const alreadyExists = run(`sudo grep -qF '${authLine}' ${pgHbaPath}`, { silent: true });
953
+ if (!alreadyExists.success) {
954
+ run(`echo '${authLine}' | sudo tee -a ${pgHbaPath}`);
955
+ } else {
956
+ log.info('pg_hba.conf auth line already present, skipping');
957
+ }
953
958
 
954
959
  // Reload PostgreSQL
955
960
  run('sudo systemctl reload postgresql 2>/dev/null || sudo -u postgres pg_ctl reload');
@@ -8799,9 +8799,14 @@ CREATE INDEX IF NOT EXISTS idx_embedding_queue_project ON embedding_queue (proje
8799
8799
  ? path.join(specmemPkg, 'mcp-proxy.cjs')
8800
8800
  : path.join(specmemPkg, 'bootstrap.cjs');
8801
8801
  // Container mode: postgres via unix socket in specmem/run/, user=specmem, trust auth
8802
+ // Socket dir bind-mounted to /data/run in container — PG socket appears here after container starts
8802
8803
  // Legacy mode: postgres on localhost:5432, legacy credentials
8803
8804
  const isContainerMode = process.env.SPECMEM_CONTAINER_MODE === 'true';
8804
8805
  const runDir = path.join(projectPath, 'specmem', 'run');
8806
+ if (isContainerMode) {
8807
+ // Ensure socket directory exists on host — container bind-mounts dataDir:/data
8808
+ try { fs.mkdirSync(runDir, { recursive: true }); } catch (e) { /* may already exist */ }
8809
+ }
8805
8810
  const dbEnv = isContainerMode ? {
8806
8811
  SPECMEM_DB_HOST: runDir,
8807
8812
  SPECMEM_DB_PORT: "5432",
@@ -13,9 +13,10 @@
13
13
  "cpus": 8
14
14
  },
15
15
  "embedding": {
16
- "batchSize": 24,
16
+ "batchSize": 64,
17
17
  "maxConcurrent": 5,
18
- "timeout": 45000
18
+ "timeout": 45000,
19
+ "throttleDelayMs": 50
19
20
  },
20
21
  "watcher": {
21
22
  "debounceMs": 750,
@@ -33,13 +34,13 @@
33
34
  "maxChunks": 75
34
35
  },
35
36
  "resources": {
36
- "cpuMin": 20,
37
- "cpuMax": 40,
37
+ "cpuMin": 10,
38
+ "cpuMax": 45,
38
39
  "cpuCoreMin": 1,
39
40
  "cpuCoreMax": 4,
40
41
  "ramMinMb": 4000,
41
- "ramMaxMb": 11500,
42
- "updatedAt": "2026-02-21T22:33:46.533Z"
42
+ "ramMaxMb": 13500,
43
+ "updatedAt": "2026-02-24T11:56:33.760Z"
43
44
  },
44
45
  "resourcePool": {
45
46
  "embedding": {
@@ -79,5 +80,24 @@
79
80
  "description": "Adaptive batch sizing based on CPU/RAM"
80
81
  },
81
82
  "enabledAt": "2026-02-12T23:19:17.948Z"
83
+ },
84
+ "powerMode": {
85
+ "level": "high",
86
+ "description": "Max Performance - for 16GB+ RAM systems",
87
+ "lazyLoading": false,
88
+ "diskCache": false,
89
+ "diskCacheMaxMb": 0,
90
+ "aggressiveCleanup": false,
91
+ "idleUnloadSeconds": 0,
92
+ "batchSize": 32,
93
+ "throttleDelayMs": 50,
94
+ "setAt": "2026-02-24T11:57:23.121Z"
95
+ },
96
+ "heavyOps": {
97
+ "enabled": true,
98
+ "enabledAt": "2026-02-24T11:57:35.508Z",
99
+ "originalBatchSize": 32,
100
+ "batchSizeMultiplier": 2,
101
+ "throttleReduction": 0.2
82
102
  }
83
103
  }
@@ -1,6 +1,6 @@
1
1
  ; ============================================
2
2
  ; SPECMEM BRAIN CONTAINER - DYNAMIC SUPERVISORD CONFIG
3
- ; Generated by specmem-init at 2026-02-22T17:39:56.598Z
3
+ ; Generated by specmem-init at 2026-02-24T11:53:24.816Z
4
4
  ; Thread counts from model-config.json resourcePool
5
5
  ; ============================================
6
6
 
@@ -7,5 +7,17 @@
7
7
  "serviceMode": {
8
8
  "enabled": false,
9
9
  "disabledAt": "2026-02-18T21:38:50.526Z"
10
+ },
11
+ "powerMode": {
12
+ "level": "high",
13
+ "description": "Max Performance - for 16GB+ RAM systems",
14
+ "lazyLoading": false,
15
+ "diskCache": false,
16
+ "diskCacheMaxMb": 0,
17
+ "aggressiveCleanup": false,
18
+ "idleUnloadSeconds": 0,
19
+ "batchSize": 32,
20
+ "throttleDelayMs": 50,
21
+ "setAt": "2026-02-24T11:57:23.121Z"
10
22
  }
11
23
  }