@yemi33/minions 0.1.1747 → 0.1.1749

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,13 +1,21 @@
1
1
  # Changelog
2
2
 
3
- ## 0.1.1747 (2026-05-06)
3
+ ## 0.1.1749 (2026-05-06)
4
+
5
+ ### Features
6
+ - lock CC/doc session writes via mutateJsonFileLocked (#2126)
7
+ - lock meeting state RMW via mutateMeeting helper (#2125)
8
+ - bounds-validate parseCronField min/max (#2124)
9
+ - replace JSON.parse(safeRead) swallow with safeJsonArr in archiveList (#2123)
4
10
 
5
11
  ### Fixes
6
- - treat null branch-change as unconfirmed (#2096) (#2115)
12
+ - readBody aborts on >1MB and stops accumulating (P-c1read-7b3c) (#2121)
13
+ - guard w.title.replace against null/undefined titles (#2120)
7
14
 
8
- ### Other
9
- - test(dashboard): add unit tests for meeting/pipeline/work-item helpers (#2117)
10
- - test(timeout): add unit tests for parse helpers and runtime capability check (#2116)
15
+ ## 0.1.1746 (2026-05-06)
16
+
17
+ ### Fixes
18
+ - treat null branch-change as unconfirmed (#2096) (#2115)
11
19
 
12
20
  ## 0.1.1745 (2026-05-06)
13
21
 
package/dashboard.js CHANGED
@@ -268,6 +268,30 @@ function resolveWorkItemsCreateTarget(projectName, projects = PROJECTS) {
268
268
  wiPath: targetProject ? shared.projectWorkItemsPath(targetProject) : path.join(MINIONS_DIR, 'work-items.json'),
269
269
  };
270
270
  }
271
+
272
+ /**
273
+ * Aggregate archived work items from the central archive plus every project
274
+ * archive. Each item is tagged with `_source` (`'central'` or the project name)
275
+ * so the UI can group/filter. Reads via `safeJsonArr` — a corrupt archive
276
+ * surfaces a logged parse failure and contributes zero items, instead of
277
+ * throwing 500 or silently dropping the file.
278
+ *
279
+ * Exported for testing (P-h3arch-8c19).
280
+ */
281
+ function collectArchivedWorkItems(minionsDir = MINIONS_DIR, projects = PROJECTS) {
282
+ const archived = [];
283
+ const centralPath = path.join(minionsDir, 'work-items-archive.json');
284
+ for (const item of safeJsonArr(centralPath)) {
285
+ archived.push({ ...item, _source: 'central' });
286
+ }
287
+ for (const project of projects) {
288
+ const archPath = shared.projectWorkItemsPath(project).replace('.json', '-archive.json');
289
+ for (const item of safeJsonArr(archPath)) {
290
+ archived.push({ ...item, _source: project.name });
291
+ }
292
+ }
293
+ return archived;
294
+ }
271
295
  function linkPullRequestForTracking({ url, title, project: projectName, autoObserve, context, workItemId }, config = CONFIG, options = {}) {
272
296
  if (!url) {
273
297
  const err = new Error('url required');
@@ -1137,8 +1161,14 @@ function _filterCcTabSessions(sessions) {
1137
1161
  }
1138
1162
 
1139
1163
  function _readCcTabSessions({ prune = true } = {}) {
1140
- const sessions = _filterCcTabSessions(shared.safeJsonArr(CC_SESSIONS_PATH));
1141
- if (prune) safeWrite(CC_SESSIONS_PATH, sessions);
1164
+ if (!prune) return _filterCcTabSessions(shared.safeJsonArr(CC_SESSIONS_PATH));
1165
+ // P-c2sess-1d8e: read+filter+write atomically under the file lock so a
1166
+ // concurrent tab upsert/delete cannot lose entries to last-write-wins.
1167
+ let sessions;
1168
+ mutateJsonFileLocked(CC_SESSIONS_PATH, (raw) => {
1169
+ sessions = _filterCcTabSessions(raw);
1170
+ return sessions;
1171
+ }, { defaultValue: [] });
1142
1172
  return sessions;
1143
1173
  }
1144
1174
 
@@ -2148,6 +2178,7 @@ async function executeDocChatActions(actions) {
2148
2178
  // Session store for doc modals — keyed by filePath or title, persisted to disk
2149
2179
  const CC_SESSIONS_PATH = path.join(ENGINE_DIR, 'cc-sessions.json');
2150
2180
  const DOC_SESSIONS_PATH = path.join(ENGINE_DIR, 'doc-sessions.json');
2181
+ const CC_SESSION_PATH = path.join(ENGINE_DIR, 'cc-session.json');
2151
2182
  const DOC_SESSION_TTL_MS = shared.ENGINE_DEFAULTS.docSessionTtlMs;
2152
2183
  const DOC_SESSION_MAX_ENTRIES = shared.ENGINE_DEFAULTS.docSessionMaxEntries;
2153
2184
  const docSessions = new Map(); // key → { sessionId, lastActiveAt, turnCount }
@@ -2196,7 +2227,8 @@ function persistDocSessions() {
2196
2227
  pruneDocSessions();
2197
2228
  const obj = {};
2198
2229
  for (const [key, s] of docSessions) obj[key] = s;
2199
- safeWrite(DOC_SESSIONS_PATH, obj);
2230
+ // P-c2sess-1d8e: lock against engine/cleanup.js's cap-trim RMW.
2231
+ mutateJsonFileLocked(DOC_SESSIONS_PATH, () => obj, { defaultValue: {} });
2200
2232
  }
2201
2233
 
2202
2234
  const _docSessionPruneTimer = setInterval(() => {
@@ -2261,7 +2293,7 @@ function updateSession(store, key, sessionId, existing) {
2261
2293
  turnCount: (existing ? ccSession.turnCount : 0) + 1,
2262
2294
  _promptHash: _ccPromptHash,
2263
2295
  };
2264
- safeWrite(path.join(ENGINE_DIR, 'cc-session.json'), ccSession);
2296
+ mutateJsonFileLocked(CC_SESSION_PATH, () => ccSession, { defaultValue: {} });
2265
2297
  } else if (key) {
2266
2298
  const prev = docSessions.get(key);
2267
2299
  docSessions.set(key, {
@@ -2335,7 +2367,7 @@ async function ccCall(message, { store = 'cc', sessionKey, extraContext, label =
2335
2367
  // Invalidate the dead session so future calls don't try to resume it
2336
2368
  if (store === 'cc') {
2337
2369
  ccSession = { sessionId: null, createdAt: null, lastActiveAt: null, turnCount: 0 };
2338
- safeWrite(path.join(ENGINE_DIR, 'cc-session.json'), ccSession);
2370
+ mutateJsonFileLocked(CC_SESSION_PATH, () => ccSession, { defaultValue: {} });
2339
2371
  } else if (sessionKey) {
2340
2372
  docSessions.delete(sessionKey);
2341
2373
  schedulePersistDocSessions();
@@ -2423,7 +2455,7 @@ async function ccCallStreaming(message, { store = 'cc', sessionKey, extraContext
2423
2455
  sessionId = null;
2424
2456
  if (store === 'cc') {
2425
2457
  ccSession = { sessionId: null, createdAt: null, lastActiveAt: null, turnCount: 0 };
2426
- safeWrite(path.join(ENGINE_DIR, 'cc-session.json'), ccSession);
2458
+ mutateJsonFileLocked(CC_SESSION_PATH, () => ccSession, { defaultValue: {} });
2427
2459
  } else if (sessionKey) {
2428
2460
  docSessions.delete(sessionKey);
2429
2461
  schedulePersistDocSessions();
@@ -2713,12 +2745,31 @@ async function ccDocCallStreaming({ message, document, title, filePath, selectio
2713
2745
  function readBody(req) {
2714
2746
  return new Promise((resolve, reject) => {
2715
2747
  let body = '';
2748
+ // P-c1read-7b3c: aborted closure flag prevents OOM from a misbehaving local
2749
+ // client streaming forever after rejection. The data handler MUST early-return
2750
+ // when aborted is true so no further chunks are appended.
2751
+ let aborted = false;
2716
2752
  const timeout = setTimeout(() => {
2753
+ // Set aborted FIRST so any late-arriving chunk (already in flight) is
2754
+ // dropped by the data handler instead of growing body unbounded.
2755
+ aborted = true;
2717
2756
  req.destroy();
2718
2757
  reject(new Error('Request body timeout after 30s'));
2719
2758
  }, 30000);
2720
- req.on('data', chunk => { body += chunk; if (body.length > 1e6) { clearTimeout(timeout); reject(new Error('Too large')); } });
2759
+ req.on('data', chunk => {
2760
+ if (aborted) return;
2761
+ body += chunk;
2762
+ if (body.length > 1e6) {
2763
+ // Order matters: set aborted first so any in-flight chunk early-returns,
2764
+ // then clear the timer, tear down the socket, and surface the failure.
2765
+ aborted = true;
2766
+ clearTimeout(timeout);
2767
+ req.destroy();
2768
+ reject(new Error('Too large'));
2769
+ }
2770
+ });
2721
2771
  req.on('end', () => {
2772
+ if (aborted) return;
2722
2773
  clearTimeout(timeout);
2723
2774
  let parsed;
2724
2775
  try { parsed = JSON.parse(body); } catch(e) { reject(e); return; }
@@ -2732,7 +2783,11 @@ function readBody(req) {
2732
2783
  }
2733
2784
  resolve(parsed);
2734
2785
  });
2735
- req.on('error', (e) => { clearTimeout(timeout); reject(e); });
2786
+ req.on('error', (e) => {
2787
+ if (aborted) return;
2788
+ clearTimeout(timeout);
2789
+ reject(e);
2790
+ });
2736
2791
  });
2737
2792
  }
2738
2793
 
@@ -3296,18 +3351,10 @@ const server = http.createServer(async (req, res) => {
3296
3351
 
3297
3352
  async function handleWorkItemsArchiveList(req, res) {
3298
3353
  try {
3299
- let allArchived = [];
3300
- // Central archive
3301
- const centralPath = path.join(MINIONS_DIR, 'work-items-archive.json');
3302
- const central = safeRead(centralPath);
3303
- if (central) { try { allArchived.push(...JSON.parse(central).map(i => ({ ...i, _source: 'central' }))); } catch {} }
3304
- // Project archives
3305
- for (const project of PROJECTS) {
3306
- const archPath = shared.projectWorkItemsPath(project).replace('.json', '-archive.json');
3307
- const content = safeRead(archPath);
3308
- if (content) { try { allArchived.push(...JSON.parse(content).map(i => ({ ...i, _source: project.name }))); } catch {} }
3309
- }
3310
- return jsonReply(res, 200, allArchived);
3354
+ // collectArchivedWorkItems uses safeJsonArr (typed default + logged parse
3355
+ // failure), so a corrupt archive file is surfaced via console.error and
3356
+ // contributes zero items instead of taking down the whole listing.
3357
+ return jsonReply(res, 200, collectArchivedWorkItems(MINIONS_DIR, PROJECTS));
3311
3358
  } catch (e) { console.error('Archive fetch error:', e.message); return jsonReply(res, e.statusCode || 500, { error: e.message }); }
3312
3359
  }
3313
3360
 
@@ -5390,7 +5437,8 @@ What would you like to discuss or change? When you're happy, say "approve" and I
5390
5437
  try { if (live.abortFn) live.abortFn(); } catch {}
5391
5438
  _clearCcLiveStream(tabId);
5392
5439
  }
5393
- safeWrite(path.join(ENGINE_DIR, 'cc-session.json'), ccSession);
5440
+ // P-c2sess-1d8e: lock single-session reset against concurrent updateSession writes.
5441
+ mutateJsonFileLocked(CC_SESSION_PATH, () => ccSession, { defaultValue: {} });
5394
5442
  return jsonReply(res, 200, { ok: true });
5395
5443
  }
5396
5444
 
@@ -5419,9 +5467,12 @@ What would you like to discuss or change? When you're happy, say "approve" and I
5419
5467
  async function handleCCSessionDelete(req, res, match) {
5420
5468
  const id = match?.[1];
5421
5469
  if (!id) return jsonReply(res, 400, { error: 'id required' });
5422
- const sessions = _readCcTabSessions();
5423
- const filtered = sessions.filter(s => s.id !== id);
5424
- safeWrite(CC_SESSIONS_PATH, filtered);
5470
+ // P-c2sess-1d8e: one locked RMW so a concurrent upsert from the streaming
5471
+ // handler cannot resurrect the deleted tab between read and write.
5472
+ mutateJsonFileLocked(CC_SESSIONS_PATH, (raw) => {
5473
+ const sessions = _filterCcTabSessions(raw);
5474
+ return sessions.filter(s => s.id !== id);
5475
+ }, { defaultValue: [] });
5425
5476
  return jsonReply(res, 200, { ok: true });
5426
5477
  }
5427
5478
 
@@ -5773,20 +5824,24 @@ What would you like to discuss or change? When you're happy, say "approve" and I
5773
5824
  const _persistTabId = body.tabId;
5774
5825
  if (_persistTabId && responseSessionId) {
5775
5826
  try {
5776
- const sessions = _readCcTabSessions();
5777
- const existing = sessions.find(s => s.id === _persistTabId);
5827
+ // P-c2sess-1d8e: one locked RMW so concurrent multi-tab streams can't
5828
+ // race on read+modify+write — both upsert paths share the lock.
5778
5829
  const preview = (body.message || '').slice(0, 80);
5779
- if (existing) {
5780
- existing.sessionId = responseSessionId;
5781
- existing.lastActiveAt = new Date(now).toISOString();
5782
- existing.turnCount = sessionReset ? 1 : (existing.turnCount || 0) + 1;
5783
- existing.preview = preview;
5784
- existing._promptHash = _ccPromptHash;
5785
- existing.runtime = currentRuntime;
5786
- } else {
5787
- sessions.push({ id: _persistTabId, title: (body.message || 'New chat').slice(0, 40), sessionId: responseSessionId, createdAt: new Date(now).toISOString(), lastActiveAt: new Date(now).toISOString(), turnCount: 1, preview, _promptHash: _ccPromptHash, runtime: currentRuntime });
5788
- }
5789
- safeWrite(CC_SESSIONS_PATH, sessions);
5830
+ mutateJsonFileLocked(CC_SESSIONS_PATH, (raw) => {
5831
+ const sessions = _filterCcTabSessions(raw);
5832
+ const existing = sessions.find(s => s.id === _persistTabId);
5833
+ if (existing) {
5834
+ existing.sessionId = responseSessionId;
5835
+ existing.lastActiveAt = new Date(now).toISOString();
5836
+ existing.turnCount = sessionReset ? 1 : (existing.turnCount || 0) + 1;
5837
+ existing.preview = preview;
5838
+ existing._promptHash = _ccPromptHash;
5839
+ existing.runtime = currentRuntime;
5840
+ } else {
5841
+ sessions.push({ id: _persistTabId, title: (body.message || 'New chat').slice(0, 40), sessionId: responseSessionId, createdAt: new Date(now).toISOString(), lastActiveAt: new Date(now).toISOString(), turnCount: 1, preview, _promptHash: _ccPromptHash, runtime: currentRuntime });
5842
+ }
5843
+ return sessions;
5844
+ }, { defaultValue: [] });
5790
5845
  } catch { /* non-critical */ }
5791
5846
  }
5792
5847
 
@@ -7329,6 +7384,7 @@ What would you like to discuss or change? When you're happy, say "approve" and I
7329
7384
  // Production entry points use the closures directly; tests import via require('./dashboard').
7330
7385
  module.exports = {
7331
7386
  getMcpServers,
7387
+ readBody,
7332
7388
  _filterCcTabSessions,
7333
7389
  _getVersionCheckInterval,
7334
7390
  _parseWatchInterval,
@@ -7345,6 +7401,7 @@ module.exports = {
7345
7401
  _findDuplicateWorkItemCreate: findDuplicateWorkItemCreate,
7346
7402
  _createWorkItemWithDedup: createWorkItemWithDedup,
7347
7403
  _resolveWorkItemsCreateTarget: resolveWorkItemsCreateTarget,
7404
+ _collectArchivedWorkItems: collectArchivedWorkItems,
7348
7405
  _createPipelineFromAction: createPipelineFromAction,
7349
7406
  executeCCActions,
7350
7407
  buildCCStatePreamble,
package/engine/cleanup.js CHANGED
@@ -687,21 +687,21 @@ async function runCleanup(config, verbose = false) {
687
687
  // silently invalidate live chat tabs the user expects to keep.
688
688
  cleaned.ccSessions = 0;
689
689
 
690
- // 10b. Prune doc-chat sessions — cap at 100 entries, remove oldest beyond cap
690
+ // 10b. Prune doc-chat sessions — cap at 100 entries, remove oldest beyond cap.
691
+ // P-c2sess-1d8e: read+sort+write must run atomically under the file lock so a
692
+ // concurrent dashboard persistDocSessions can't race the cap-trim.
691
693
  cleaned.docSessions = 0;
692
694
  try {
693
695
  const docSessionsPath = path.join(ENGINE_DIR, 'doc-sessions.json');
694
- const docSessions = safeJson(docSessionsPath);
695
- if (docSessions && typeof docSessions === 'object') {
696
+ const DOC_SESSIONS_CAP = 100;
697
+ mutateJsonFileLocked(docSessionsPath, (docSessions) => {
698
+ if (!docSessions || typeof docSessions !== 'object' || Array.isArray(docSessions)) return docSessions;
696
699
  const entries = Object.entries(docSessions);
697
- const DOC_SESSIONS_CAP = 100;
698
- if (entries.length > DOC_SESSIONS_CAP) {
699
- entries.sort((a, b) => new Date(b.lastActiveAt || 0) - new Date(a.lastActiveAt || 0));
700
- const keep = Object.fromEntries(entries.slice(0, DOC_SESSIONS_CAP));
701
- cleaned.docSessions = entries.length - DOC_SESSIONS_CAP;
702
- safeWrite(docSessionsPath, keep);
703
- }
704
- }
700
+ if (entries.length <= DOC_SESSIONS_CAP) return docSessions;
701
+ entries.sort((a, b) => new Date(b.lastActiveAt || 0) - new Date(a.lastActiveAt || 0));
702
+ cleaned.docSessions = entries.length - DOC_SESSIONS_CAP;
703
+ return Object.fromEntries(entries.slice(0, DOC_SESSIONS_CAP));
704
+ }, { defaultValue: {}, skipWriteIfUnchanged: true });
705
705
  } catch (e) { log('warn', 'prune doc-sessions: ' + e.message); }
706
706
 
707
707
  // 11. Cap cooldowns.json — keep at most 500 entries (on top of 24h TTL in cooldown.js)
@@ -1,5 +1,5 @@
1
1
  {
2
2
  "runtime": "copilot",
3
3
  "models": null,
4
- "cachedAt": "2026-05-06T15:03:08.443Z"
4
+ "cachedAt": "2026-05-06T18:54:37.862Z"
5
5
  }
@@ -134,8 +134,8 @@ function checkPlanCompletion(meta, config) {
134
134
  uniquePrs.length ? `- **${uniquePrs.length}** PR(s) created` : '',
135
135
  ``,
136
136
  `## Items`,
137
- ...doneItems.map(w => `- [done] ${w.id}: ${w.title.replace('Implement: ', '')}`),
138
- ...failedItems.map(w => `- [failed] ${w.id}: ${w.title.replace('Implement: ', '')}${w.failReason ? ' — ' + w.failReason : ''}`),
137
+ ...doneItems.map(w => `- [done] ${w.id}: ${(w.title || 'Untitled').replace('Implement: ', '')}`),
138
+ ...failedItems.map(w => `- [failed] ${w.id}: ${(w.title || 'Untitled').replace('Implement: ', '')}${w.failReason ? ' — ' + w.failReason : ''}`),
139
139
  uniquePrs.length ? `\n## Pull Requests` : '',
140
140
  ...uniquePrs.map(pr => `- ${pr.id}: ${pr.title || ''} ${pr.url || ''}`),
141
141
  ].filter(Boolean).join('\n');
@@ -170,7 +170,7 @@ function checkPlanCompletion(meta, config) {
170
170
  const id = 'PL-' + shared.uid();
171
171
  const featureBranch = plan.feature_branch;
172
172
  const mainBranch = shared.resolveMainBranch(primaryProject.localPath, primaryProject.mainBranch);
173
- const itemSummary = doneItems.map(w => '- ' + w.id + ': ' + w.title.replace('Implement: ', '')).join('\n');
173
+ const itemSummary = doneItems.map(w => '- ' + w.id + ': ' + (w.title || 'Untitled').replace('Implement: ', '')).join('\n');
174
174
  mutateWorkItems(wiPath, workItems => {
175
175
  if (workItems.some(w => w.sourcePlan === planFile && w.itemType === 'pr')) return workItems;
176
176
  workItems.push({
@@ -255,7 +255,7 @@ function checkPlanCompletion(meta, config) {
255
255
  const itemsWithCriteria = doneItems.map(w => {
256
256
  const planItem = plan.missing_features?.find(f => f.id === w.id);
257
257
  const criteria = (planItem?.acceptance_criteria || []).map(c => ` - ${c}`).join('\n');
258
- return `### ${w.id}: ${w.title.replace('Implement: ', '')}\n${criteria ? '**Acceptance Criteria:**\n' + criteria : ''}`;
258
+ return `### ${w.id}: ${(w.title || 'Untitled').replace('Implement: ', '')}\n${criteria ? '**Acceptance Criteria:**\n' + criteria : ''}`;
259
259
  }).join('\n\n');
260
260
 
261
261
  const prSummary = uniquePrs.map(pr =>
package/engine/meeting.js CHANGED
@@ -6,7 +6,7 @@
6
6
  const fs = require('fs');
7
7
  const path = require('path');
8
8
  const shared = require('./shared');
9
- const { safeJson, safeWrite, safeRead, uid, log, ts, ENGINE_DEFAULTS, WORK_TYPE, DISPATCH_RESULT } = shared;
9
+ const { safeJson, uid, log, ts, ENGINE_DEFAULTS, WORK_TYPE, DISPATCH_RESULT } = shared;
10
10
  const queries = require('./queries');
11
11
  const { getDispatch, getConfig } = queries;
12
12
  const { renderPlaybook } = require('./playbook');
@@ -349,9 +349,57 @@ function getMeeting(id) {
349
349
  return m;
350
350
  }
351
351
 
352
- function saveMeeting(meeting) {
352
+ /**
353
+ * Read-modify-write helper for meetings/<id>.json under a file lock.
354
+ *
355
+ * Mirrors the mutateDispatch / mutateWorkItems / mutatePullRequests pattern.
356
+ * Use this for ANY change to a meeting's persisted state — bare safeWrite
357
+ * loses concurrent agent findings (every meeting round writes from a
358
+ * separate agent process).
359
+ *
360
+ * `fn` receives the parsed meeting object (with default fields populated like
361
+ * getMeeting), or `null` when the file is absent. Return the mutated meeting
362
+ * to persist it; return `null`/`undefined` to skip the write (the underlying
363
+ * mutateJsonFileLocked handles the no-op via skipWriteIfUnchanged).
364
+ *
365
+ * CRITICAL: keep `fn` fast. Never spawn agents, kill processes, run git
366
+ * commands, or `await` inside the callback — the lock is held for the
367
+ * duration of the synchronous call. Do that work BEFORE or AFTER mutateMeeting.
368
+ */
369
+ function mutateMeeting(id, fn) {
353
370
  if (!fs.existsSync(MEETINGS_DIR)) fs.mkdirSync(MEETINGS_DIR, { recursive: true });
354
- safeWrite(path.join(MEETINGS_DIR, meeting.id + '.json'), meeting);
371
+ const filePath = path.join(MEETINGS_DIR, id + '.json');
372
+ let userResult;
373
+ shared.mutateJsonFileLocked(filePath, (data) => {
374
+ const isMeeting = data && typeof data === 'object' && !Array.isArray(data) && data.id;
375
+ const meeting = isMeeting ? data : null;
376
+ if (meeting) {
377
+ // Match getMeeting()'s default-field normalization.
378
+ if (!meeting.findings) meeting.findings = {};
379
+ if (!meeting.debate) meeting.debate = {};
380
+ if (!meeting.humanNotes) meeting.humanNotes = [];
381
+ if (!meeting.participants) meeting.participants = [];
382
+ if (!meeting.transcript) meeting.transcript = [];
383
+ if (!meeting.roundFailures || typeof meeting.roundFailures !== 'object') meeting.roundFailures = {};
384
+ }
385
+ userResult = fn(meeting);
386
+ if (userResult === undefined || userResult === null) {
387
+ // Skip-write: return original data so JSON.stringify equality holds
388
+ // and mutateJsonFileLocked's skipWriteIfUnchanged guard takes effect.
389
+ return data;
390
+ }
391
+ return userResult;
392
+ }, { defaultValue: {}, skipWriteIfUnchanged: true });
393
+ return userResult === undefined ? null : userResult;
394
+ }
395
+
396
+ /**
397
+ * Persist a meeting object as-is. Thin wrapper over mutateMeeting so every
398
+ * write goes through the file lock — covers the create-new-file path
399
+ * (createMeeting) and any tests that pre-seed meeting state.
400
+ */
401
+ function saveMeeting(meeting) {
402
+ return mutateMeeting(meeting.id, () => meeting);
355
403
  }
356
404
 
357
405
  function createMeeting({ title, agenda, participants }) {
@@ -529,92 +577,98 @@ function discoverMeetingWork(config) {
529
577
  * Called from runPostCompletionHooks when type === 'meeting'.
530
578
  */
531
579
  function collectMeetingFindings(meetingId, agentId, roundName, output, structuredCompletion = null, expectedRound = null, completionInfo = {}) {
532
- const meeting = getMeeting(meetingId);
533
- if (!meeting) return;
534
- if (isTerminalMeetingStatus(meeting.status)) {
535
- log('info', `Ignoring late findings from ${agentId} for completed meeting ${meetingId}`);
536
- return;
537
- }
538
-
539
- const expectedStatus = expectedMeetingStatusForRound(roundName);
540
- if (!expectedStatus) {
541
- log('warn', `Meeting ${meetingId}: ignoring ${agentId} output for unknown round "${roundName || '(empty)'}"`);
542
- return;
543
- }
544
- if (meeting.status !== expectedStatus) {
545
- log('info', `Ignoring stale ${roundName} output from ${agentId} for meeting ${meetingId} currently ${meeting.status}`);
546
- return;
547
- }
548
- if (expectedRound !== null && expectedRound !== undefined && Number(meeting.round || 1) !== Number(expectedRound)) {
549
- log('info', `Ignoring stale round ${expectedRound} output from ${agentId} for meeting ${meetingId} currently on round ${meeting.round || 1}`);
550
- return;
551
- }
552
- if (hasRoundTerminalOutcome(meeting, roundName, agentId, meeting.round)) {
553
- log('info', `Ignoring duplicate ${roundName} output from ${agentId} for meeting ${meetingId}`);
554
- return;
555
- }
556
-
580
+ // Resolve content OUTSIDE the lock — file reads (note artifacts) and stream
581
+ // parsing are slow and lock callbacks must stay fast.
557
582
  const content = resolveMeetingContributionContent(output, structuredCompletion);
558
583
  const completionSucceeded = completionInfo?.success !== false;
559
584
 
560
- if (!completionSucceeded || isEmptyMeetingContent(content)) {
561
- const failures = getRoundFailures(meeting, roundName, meeting.round, true);
562
- const reason = !completionSucceeded
563
- ? (completionInfo?.reason || completionInfo?.completionStatus || 'Agent failed before completing the meeting round')
564
- : 'Agent produced empty meeting output';
565
- failures[agentId] = {
566
- reason,
567
- content: content || completionInfo?.summary || '',
568
- submittedAt: ts(),
569
- };
570
- meeting.transcript.push({
571
- round: meeting.round,
572
- agent: agentId,
573
- type: 'failure',
574
- content: reason,
575
- at: ts(),
576
- });
577
- log('warn', `Meeting ${meetingId}: agent ${agentId} failed ${roundName} — ${reason}`);
578
- advanceMeetingIfRoundComplete(meeting, roundName, meetingId);
579
- saveMeeting(meeting);
580
- return;
581
- }
585
+ let concludedMeeting = null;
586
+ let configForInbox = null;
582
587
 
583
- if (roundName === 'investigate') {
584
- meeting.findings[agentId] = { content, submittedAt: ts() };
585
- meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'finding', content, at: ts() });
586
- } else if (roundName === 'debate') {
587
- meeting.debate[agentId] = { content, submittedAt: ts() };
588
- meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'debate', content, at: ts() });
589
- } else if (roundName === 'conclude') {
590
- meeting.conclusion = { content, agent: agentId, submittedAt: ts() };
591
- meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'conclusion', content, at: ts() });
592
- meeting.status = 'completed';
593
- meeting.completedAt = ts();
588
+ mutateMeeting(meetingId, (meeting) => {
589
+ if (!meeting) return null; // file missing — nothing to do
590
+ if (isTerminalMeetingStatus(meeting.status)) {
591
+ log('info', `Ignoring late findings from ${agentId} for completed meeting ${meetingId}`);
592
+ return null;
593
+ }
594
+
595
+ const expectedStatus = expectedMeetingStatusForRound(roundName);
596
+ if (!expectedStatus) {
597
+ log('warn', `Meeting ${meetingId}: ignoring ${agentId} output for unknown round "${roundName || '(empty)'}"`);
598
+ return null;
599
+ }
600
+ if (meeting.status !== expectedStatus) {
601
+ log('info', `Ignoring stale ${roundName} output from ${agentId} for meeting ${meetingId} currently ${meeting.status}`);
602
+ return null;
603
+ }
604
+ if (expectedRound !== null && expectedRound !== undefined && Number(meeting.round || 1) !== Number(expectedRound)) {
605
+ log('info', `Ignoring stale round ${expectedRound} output from ${agentId} for meeting ${meetingId} currently on round ${meeting.round || 1}`);
606
+ return null;
607
+ }
608
+ if (hasRoundTerminalOutcome(meeting, roundName, agentId, meeting.round)) {
609
+ log('info', `Ignoring duplicate ${roundName} output from ${agentId} for meeting ${meetingId}`);
610
+ return null;
611
+ }
612
+
613
+ if (!completionSucceeded || isEmptyMeetingContent(content)) {
614
+ const failures = getRoundFailures(meeting, roundName, meeting.round, true);
615
+ const reason = !completionSucceeded
616
+ ? (completionInfo?.reason || completionInfo?.completionStatus || 'Agent failed before completing the meeting round')
617
+ : 'Agent produced empty meeting output';
618
+ failures[agentId] = {
619
+ reason,
620
+ content: content || completionInfo?.summary || '',
621
+ submittedAt: ts(),
622
+ };
623
+ meeting.transcript.push({
624
+ round: meeting.round,
625
+ agent: agentId,
626
+ type: 'failure',
627
+ content: reason,
628
+ at: ts(),
629
+ });
630
+ log('warn', `Meeting ${meetingId}: agent ${agentId} failed ${roundName} — ${reason}`);
631
+ advanceMeetingIfRoundComplete(meeting, roundName, meetingId);
632
+ return meeting;
633
+ }
634
+
635
+ if (roundName === 'investigate') {
636
+ meeting.findings[agentId] = { content, submittedAt: ts() };
637
+ meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'finding', content, at: ts() });
638
+ } else if (roundName === 'debate') {
639
+ meeting.debate[agentId] = { content, submittedAt: ts() };
640
+ meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'debate', content, at: ts() });
641
+ } else if (roundName === 'conclude') {
642
+ meeting.conclusion = { content, agent: agentId, submittedAt: ts() };
643
+ meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'conclusion', content, at: ts() });
644
+ meeting.status = 'completed';
645
+ meeting.completedAt = ts();
646
+ // Defer inbox write until AFTER the lock releases — writeToInbox hits
647
+ // the filesystem (slug dedup, write) and must not block other writers.
648
+ concludedMeeting = meeting;
649
+ try { configForInbox = queries.getConfig(); } catch { configForInbox = { agents: {} }; }
650
+ return meeting;
651
+ }
594
652
 
595
- // Write transcript to inbox so agents learn from it (slug-based dedup)
653
+ advanceMeetingIfRoundComplete(meeting, roundName, meetingId);
654
+ return meeting;
655
+ });
656
+
657
+ if (concludedMeeting) {
596
658
  try {
597
- const config = queries.getConfig();
598
- writeMeetingTranscriptToInbox(meeting, meetingId, config.agents || {});
659
+ writeMeetingTranscriptToInbox(concludedMeeting, meetingId, (configForInbox && configForInbox.agents) || {});
599
660
  } catch (e) { log('warn', `Meeting ${meetingId} inbox write: ${e.message}`); }
600
-
601
661
  log('info', `Meeting ${meetingId} completed — transcript written to inbox`);
602
- saveMeeting(meeting);
603
- return;
604
662
  }
605
-
606
- advanceMeetingIfRoundComplete(meeting, roundName, meetingId);
607
-
608
- saveMeeting(meeting);
609
663
  }
610
664
 
611
665
  function addMeetingNote(meetingId, note) {
612
- const meeting = getMeeting(meetingId);
613
- if (!meeting) return null;
614
- meeting.humanNotes.push(note);
615
- meeting.transcript.push({ round: meeting.round, agent: 'human', type: 'note', content: note, at: ts() });
616
- saveMeeting(meeting);
617
- return meeting;
666
+ return mutateMeeting(meetingId, (meeting) => {
667
+ if (!meeting) return null;
668
+ meeting.humanNotes.push(note);
669
+ meeting.transcript.push({ round: meeting.round, agent: 'human', type: 'note', content: note, at: ts() });
670
+ return meeting;
671
+ });
618
672
  }
619
673
 
620
674
  function _killMeetingDispatches(meetingId) {
@@ -672,44 +726,60 @@ function _killMeetingDispatches(meetingId) {
672
726
  }
673
727
 
674
728
  function advanceMeetingRound(meetingId) {
675
- const meeting = getMeeting(meetingId);
676
- if (!meeting || meeting.status === 'completed' || meeting.status === 'archived') return null;
729
+ // Pre-check (read-only) so we don't kill dispatches for a meeting that's
730
+ // already terminal. The authoritative status check still runs INSIDE the
731
+ // lock below.
732
+ const existing = getMeeting(meetingId);
733
+ if (!existing || existing.status === 'completed' || existing.status === 'archived') return null;
734
+
735
+ // CRITICAL: kill BEFORE acquiring the meeting lock. _killMeetingDispatches
736
+ // takes the dispatch.json lock and shells out to kill processes — never
737
+ // run that under the meeting lock (per CLAUDE.md, lock callbacks must
738
+ // stay fast and never spawn / kill / await).
677
739
  _killMeetingDispatches(meetingId);
678
- if (meeting.status === 'investigating') { meeting.status = 'debating'; meeting.round = 2; }
679
- else if (meeting.status === 'debating') { meeting.status = 'concluding'; meeting.round = 3; }
680
- else if (meeting.status === 'concluding') { meeting.status = 'completed'; meeting.completedAt = ts(); }
681
- else return meeting; // no change
682
- meeting.roundStartedAt = ts();
683
- saveMeeting(meeting);
684
- return meeting;
740
+
741
+ return mutateMeeting(meetingId, (meeting) => {
742
+ if (!meeting || meeting.status === 'completed' || meeting.status === 'archived') return null;
743
+ if (meeting.status === 'investigating') { meeting.status = 'debating'; meeting.round = 2; }
744
+ else if (meeting.status === 'debating') { meeting.status = 'concluding'; meeting.round = 3; }
745
+ else if (meeting.status === 'concluding') { meeting.status = 'completed'; meeting.completedAt = ts(); }
746
+ else return meeting; // unknown active status — no state change, but report current
747
+ meeting.roundStartedAt = ts();
748
+ return meeting;
749
+ });
685
750
  }
686
751
 
687
752
  function endMeeting(meetingId) {
688
- const meeting = getMeeting(meetingId);
689
- if (!meeting) return null;
753
+ // See advanceMeetingRound — kill happens BEFORE the meeting lock so dispatch
754
+ // teardown / process kills never run inside our lock callback.
755
+ const existing = getMeeting(meetingId);
756
+ if (!existing) return null;
690
757
  _killMeetingDispatches(meetingId);
691
- meeting.status = 'completed';
692
- meeting.completedAt = ts();
693
- saveMeeting(meeting);
694
- return meeting;
758
+
759
+ return mutateMeeting(meetingId, (meeting) => {
760
+ if (!meeting) return null;
761
+ meeting.status = 'completed';
762
+ meeting.completedAt = ts();
763
+ return meeting;
764
+ });
695
765
  }
696
766
 
697
767
  function archiveMeeting(id) {
698
- const meeting = getMeeting(id);
699
- if (!meeting) return null;
700
- meeting.status = 'archived';
701
- meeting.archivedAt = ts();
702
- saveMeeting(meeting);
703
- return meeting;
768
+ return mutateMeeting(id, (meeting) => {
769
+ if (!meeting) return null;
770
+ meeting.status = 'archived';
771
+ meeting.archivedAt = ts();
772
+ return meeting;
773
+ });
704
774
  }
705
775
 
706
776
  function unarchiveMeeting(id) {
707
- const meeting = getMeeting(id);
708
- if (!meeting || meeting.status !== 'archived') return null;
709
- meeting.status = 'completed';
710
- delete meeting.archivedAt;
711
- saveMeeting(meeting);
712
- return meeting;
777
+ return mutateMeeting(id, (meeting) => {
778
+ if (!meeting || meeting.status !== 'archived') return null;
779
+ meeting.status = 'completed';
780
+ delete meeting.archivedAt;
781
+ return meeting;
782
+ });
713
783
  }
714
784
 
715
785
  function deleteMeeting(id) {
@@ -717,6 +787,9 @@ function deleteMeeting(id) {
717
787
  const filePath = path.join(MEETINGS_DIR, id + '.json');
718
788
  if (!fs.existsSync(filePath)) return false;
719
789
  fs.unlinkSync(filePath);
790
+ // mutateMeeting writes a .backup sidecar; safeJson auto-restores from it
791
+ // when the primary is missing, so deletion must also drop the backup.
792
+ try { fs.unlinkSync(filePath + '.backup'); } catch { /* sidecar may not exist */ }
720
793
  return true;
721
794
  }
722
795
 
@@ -733,68 +806,86 @@ function checkMeetingTimeouts(config) {
733
806
  const hardTimeout = (config.engine || {}).meetingRoundHardTimeout
734
807
  || ENGINE_DEFAULTS.meetingRoundHardTimeout;
735
808
 
736
- for (const meeting of meetings) {
737
- if (isTerminalMeetingStatus(meeting.status)) continue;
738
- if (!ACTIVE_MEETING_STATUSES.has(meeting.status)) continue;
739
- if (!meeting.roundStartedAt) continue;
809
+ for (const snapshot of meetings) {
810
+ if (isTerminalMeetingStatus(snapshot.status)) continue;
811
+ if (!ACTIVE_MEETING_STATUSES.has(snapshot.status)) continue;
812
+ if (!snapshot.roundStartedAt) continue;
740
813
 
741
- const roundStartedMs = new Date(meeting.roundStartedAt).getTime();
814
+ const roundStartedMs = new Date(snapshot.roundStartedAt).getTime();
742
815
  if (!Number.isFinite(roundStartedMs)) continue;
743
816
  const elapsed = Date.now() - roundStartedMs;
744
817
  if (elapsed < timeout) continue;
745
818
 
746
- const respondedCount = meeting.status === 'investigating'
747
- ? Object.keys(meeting.findings || {}).length
748
- : meeting.status === 'debating'
749
- ? Object.keys(meeting.debate || {}).length
750
- : 0;
751
- const totalCount = meeting.participants.length;
752
-
753
- const roundName = meeting.status === 'investigating'
754
- ? 'investigate'
755
- : meeting.status === 'debating'
756
- ? 'debate'
757
- : 'conclude';
758
-
759
- if (roundName !== 'conclude') {
760
- if (allParticipantsFinishedRound(meeting, roundName, meeting.round)) {
761
- log('warn', `Meeting ${meeting.id}: round ${meeting.round} timed out after ${Math.round(elapsed / 60000)}min but all participants are terminal — advancing`);
762
- meeting.transcript.push({ round: meeting.round, agent: 'system', type: 'timeout', content: `Round ${meeting.round} timed out after all participants finished`, at: ts() });
763
- advanceMeetingIfRoundComplete(meeting, roundName, meeting.id, config);
764
- saveMeeting(meeting);
765
- } else if (elapsed >= hardTimeout) {
766
- const failures = getRoundFailures(meeting, roundName, meeting.round, true);
767
- const stalled = (meeting.participants || []).filter(p => !hasRoundTerminalOutcome(meeting, roundName, p, meeting.round));
768
- const reason = `Hard meeting timeout after ${Math.round(elapsed / 60000)}min — agent did not produce ${roundName} output`;
769
- for (const agentId of stalled) {
770
- failures[agentId] = { reason, content: '', submittedAt: ts() };
771
- meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'failure', content: reason, at: ts() });
819
+ // Re-evaluate the timeout transition under the file lock to avoid lost
820
+ // updates if an agent finalised mid-tick. Helpers (advanceMeetingIfRoundComplete
821
+ // etc.) operate on the locked-and-rehydrated meeting object.
822
+ mutateMeeting(snapshot.id, (meeting) => {
823
+ if (!meeting) return null;
824
+ if (isTerminalMeetingStatus(meeting.status)) return null;
825
+ if (!ACTIVE_MEETING_STATUSES.has(meeting.status)) return null;
826
+ // Use the latest roundStartedAt — the round may have advanced inside
827
+ // a concurrent collectMeetingFindings call between snapshot and lock.
828
+ const liveStartedMs = new Date(meeting.roundStartedAt || 0).getTime();
829
+ if (!Number.isFinite(liveStartedMs)) return null;
830
+ const liveElapsed = Date.now() - liveStartedMs;
831
+ if (liveElapsed < timeout) return null;
832
+
833
+ const respondedCount = meeting.status === 'investigating'
834
+ ? Object.keys(meeting.findings || {}).length
835
+ : meeting.status === 'debating'
836
+ ? Object.keys(meeting.debate || {}).length
837
+ : 0;
838
+ const totalCount = meeting.participants.length;
839
+
840
+ const roundName = meeting.status === 'investigating'
841
+ ? 'investigate'
842
+ : meeting.status === 'debating'
843
+ ? 'debate'
844
+ : 'conclude';
845
+
846
+ if (roundName !== 'conclude') {
847
+ if (allParticipantsFinishedRound(meeting, roundName, meeting.round)) {
848
+ log('warn', `Meeting ${meeting.id}: round ${meeting.round} timed out after ${Math.round(liveElapsed / 60000)}min but all participants are terminal — advancing`);
849
+ meeting.transcript.push({ round: meeting.round, agent: 'system', type: 'timeout', content: `Round ${meeting.round} timed out after all participants finished`, at: ts() });
850
+ advanceMeetingIfRoundComplete(meeting, roundName, meeting.id, config);
851
+ return meeting;
852
+ } else if (liveElapsed >= hardTimeout) {
853
+ const failures = getRoundFailures(meeting, roundName, meeting.round, true);
854
+ const stalled = (meeting.participants || []).filter(p => !hasRoundTerminalOutcome(meeting, roundName, p, meeting.round));
855
+ const reason = `Hard meeting timeout after ${Math.round(liveElapsed / 60000)}min — agent did not produce ${roundName} output`;
856
+ for (const agentId of stalled) {
857
+ failures[agentId] = { reason, content: '', submittedAt: ts() };
858
+ meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'failure', content: reason, at: ts() });
859
+ }
860
+ log('warn', `Meeting ${meeting.id}: round ${meeting.round} hit hard timeout after ${Math.round(liveElapsed / 60000)}min — marking ${stalled.length}/${totalCount} non-responders as failed and advancing`);
861
+ meeting.transcript.push({ round: meeting.round, agent: 'system', type: 'timeout', content: `Round ${meeting.round} hard timeout — ${stalled.length} non-responder(s) marked failed`, at: ts() });
862
+ advanceMeetingIfRoundComplete(meeting, roundName, meeting.id, config);
863
+ return meeting;
864
+ } else {
865
+ log('warn', `Meeting ${meeting.id}: round ${meeting.round} timed out after ${Math.round(liveElapsed / 60000)}min — waiting for all participants to finish (${respondedCount}/${totalCount} succeeded)`);
866
+ return null; // observational only — no state change
867
+ }
868
+ } else if (meeting.status === 'concluding') {
869
+ if (liveElapsed >= hardTimeout) {
870
+ const reason = `Hard meeting timeout after ${Math.round(liveElapsed / 60000)}min — conclusion agent did not produce output`;
871
+ const failures = getRoundFailures(meeting, 'conclude', meeting.round, true);
872
+ const conclusionAgent = (meeting.participants || []).find(p => !hasRoundTerminalOutcome(meeting, 'conclude', p, meeting.round)) || meeting.participants?.[0] || 'system';
873
+ failures[conclusionAgent] = { reason, content: '', submittedAt: ts() };
874
+ meeting.transcript.push({ round: meeting.round, agent: conclusionAgent, type: 'failure', content: reason, at: ts() });
875
+ log('warn', `Meeting ${meeting.id}: conclusion round hit hard timeout after ${Math.round(liveElapsed / 60000)}min — synthesising fallback conclusion`);
876
+ advanceMeetingIfRoundComplete(meeting, 'conclude', meeting.id, config);
877
+ return meeting;
878
+ } else {
879
+ log('warn', `Meeting ${meeting.id}: conclusion round timed out after ${Math.round(liveElapsed / 60000)}min — waiting for the conclusion agent to finish`);
880
+ return null;
772
881
  }
773
- log('warn', `Meeting ${meeting.id}: round ${meeting.round} hit hard timeout after ${Math.round(elapsed / 60000)}min — marking ${stalled.length}/${totalCount} non-responders as failed and advancing`);
774
- meeting.transcript.push({ round: meeting.round, agent: 'system', type: 'timeout', content: `Round ${meeting.round} hard timeout — ${stalled.length} non-responder(s) marked failed`, at: ts() });
775
- advanceMeetingIfRoundComplete(meeting, roundName, meeting.id, config);
776
- saveMeeting(meeting);
777
- } else {
778
- log('warn', `Meeting ${meeting.id}: round ${meeting.round} timed out after ${Math.round(elapsed / 60000)}min — waiting for all participants to finish (${respondedCount}/${totalCount} succeeded)`);
779
- }
780
- } else if (meeting.status === 'concluding') {
781
- if (elapsed >= hardTimeout) {
782
- const reason = `Hard meeting timeout after ${Math.round(elapsed / 60000)}min — conclusion agent did not produce output`;
783
- const failures = getRoundFailures(meeting, 'conclude', meeting.round, true);
784
- const conclusionAgent = (meeting.participants || []).find(p => !hasRoundTerminalOutcome(meeting, 'conclude', p, meeting.round)) || meeting.participants?.[0] || 'system';
785
- failures[conclusionAgent] = { reason, content: '', submittedAt: ts() };
786
- meeting.transcript.push({ round: meeting.round, agent: conclusionAgent, type: 'failure', content: reason, at: ts() });
787
- log('warn', `Meeting ${meeting.id}: conclusion round hit hard timeout after ${Math.round(elapsed / 60000)}min — synthesising fallback conclusion`);
788
- advanceMeetingIfRoundComplete(meeting, 'conclude', meeting.id, config);
789
- saveMeeting(meeting);
790
- } else {
791
- log('warn', `Meeting ${meeting.id}: conclusion round timed out after ${Math.round(elapsed / 60000)}min — waiting for the conclusion agent to finish`);
792
882
  }
793
- }
883
+ return null;
884
+ });
794
885
  }
795
886
  }
796
887
  module.exports = {
797
- MEETINGS_DIR, getMeetings, getMeeting, saveMeeting, createMeeting,
888
+ MEETINGS_DIR, getMeetings, getMeeting, saveMeeting, mutateMeeting, createMeeting,
798
889
  discoverMeetingWork, collectMeetingFindings, checkMeetingTimeouts,
799
890
  addMeetingNote, advanceMeetingRound, endMeeting, archiveMeeting, unarchiveMeeting, deleteMeeting,
800
891
  EMPTY_OUTPUT_PATTERNS,
@@ -51,26 +51,51 @@ function resolveScheduleTemplateVars(str) {
51
51
  // Parse a single cron field into a matcher function.
52
52
  // field: e.g., "*", "5", "1,3,5", "*/15"
53
53
  // min/max: valid range (0-59 for minute, 0-23 for hour, 0-6 for dow)
54
+ //
55
+ // Bounds policy (P-h4cron-2ab8): out-of-range fields produce a matcher that
56
+ // never fires (`() => false`), rather than null. This keeps the function's
57
+ // contract (always returns a function) and matches existing behavior for
58
+ // other invalid forms (`*/0`, `*/abc`, unparseable syntax). parseCronExpr
59
+ // still returns its wrapper object — but its `.matches()` returns false for
60
+ // every Date when any field is out of range, so the schedule never fires.
61
+ // This catches typos like minute=99, hour=24, dow=9 that today are accepted
62
+ // as exact-value matchers and silently never trigger.
54
63
  function parseCronField(field, min, max) {
55
64
  field = field.trim();
56
65
  if (field === '*') return () => true;
57
66
 
58
- // Step: */N
67
+ const hasMin = typeof min === 'number';
68
+ const hasMax = typeof max === 'number';
69
+
70
+ // Step: */N — step must be > 0 AND not exceed the field's max.
71
+ // A step larger than max either matches only val=0 (e.g., */60 for minute)
72
+ // or nothing meaningful — treat as never-fires for predictability.
59
73
  if (field.startsWith('*/')) {
60
74
  const step = parseInt(field.slice(2), 10);
61
75
  if (isNaN(step) || step <= 0) return () => false;
76
+ if (hasMax && step > max) return () => false;
62
77
  return (val) => val % step === 0;
63
78
  }
64
79
 
65
- // List: N,M,O
80
+ // List: N,M,O — drop NaN entries AND entries outside [min, max] before
81
+ // building the Set. A list with no surviving entries falls through to the
82
+ // empty-Set matcher, which never matches anything.
66
83
  if (field.includes(',')) {
67
- const values = new Set(field.split(',').map(v => parseInt(v.trim(), 10)).filter(v => !isNaN(v)));
84
+ const values = new Set(
85
+ field
86
+ .split(',')
87
+ .map(v => parseInt(v.trim(), 10))
88
+ .filter(v => !isNaN(v) && (!hasMin || v >= min) && (!hasMax || v <= max))
89
+ );
68
90
  return (val) => values.has(val);
69
91
  }
70
92
 
71
- // Single value: N
93
+ // Single value: N — out-of-range exact values never fire.
72
94
  const exact = parseInt(field, 10);
73
- if (!isNaN(exact)) return (val) => val === exact;
95
+ if (!isNaN(exact)) {
96
+ if ((hasMin && exact < min) || (hasMax && exact > max)) return () => false;
97
+ return (val) => val === exact;
98
+ }
74
99
 
75
100
  return () => false;
76
101
  }
package/engine/shared.js CHANGED
@@ -180,33 +180,52 @@ function safeReadDir(dir) {
180
180
  }
181
181
 
182
182
  function safeJson(p) {
183
+ // Split the read from the parse so we can distinguish "file missing" (normal
184
+ // pre-create state — silent) from "file present but corrupt JSON" (real
185
+ // integrity failure — must log). Without this split a `JSON.parse(read)` in
186
+ // a single try/catch silently hides corruption (P-h3arch-8c19).
187
+ let primaryRaw = null;
188
+ let primaryRead = false;
183
189
  try {
184
- return JSON.parse(fs.readFileSync(p, 'utf8'));
190
+ primaryRaw = fs.readFileSync(p, 'utf8');
191
+ primaryRead = true;
185
192
  } catch {
186
- // Primary file missing or corruptedtry restoring from .backup sidecar
187
- const backupPath = p + '.backup';
193
+ // ENOENT / EACCES / etcfall through to backup attempt without logging.
194
+ }
195
+ if (primaryRead) {
188
196
  try {
189
- const backupData = JSON.parse(fs.readFileSync(backupPath, 'utf8'));
190
- // Backup is valid — restore it to the primary file (atomic via safeWrite)
191
- console.log(`[safeJson] restored ${path.basename(p)} from .backup sidecar`);
192
- try {
193
- safeWrite(p, backupData);
194
- // Verify the restored file matches expected content
195
- const verifyData = JSON.parse(fs.readFileSync(p, 'utf8'));
196
- if (JSON.stringify(verifyData) !== JSON.stringify(backupData)) {
197
- console.error(`[safeJson] CRITICAL: backup restore verification failed for ${p} — written data does not match backup`);
198
- }
199
- } catch (restoreErr) {
200
- // Restore-to-primary is best-effort — backupData is already parsed and valid.
201
- // Don't throw: disk-full / permission errors should not discard valid data.
202
- console.error(`[safeJson] restore write failed for ${p}: ${restoreErr.message}`);
197
+ return JSON.parse(primaryRaw);
198
+ } catch (parseErr) {
199
+ // File existed but JSON was unparseable — surface so silent corruption
200
+ // doesn't accumulate. Callers (incl. safeJsonArr / safeJsonObj wrappers)
201
+ // rely on this log to satisfy the "typed default + logged parse failure"
202
+ // contract documented in CLAUDE.md.
203
+ console.error(`[safeJson] parse failure for ${path.basename(p)}: ${parseErr.message}`);
204
+ }
205
+ }
206
+ // Primary missing or corrupted — try restoring from .backup sidecar.
207
+ const backupPath = p + '.backup';
208
+ try {
209
+ const backupData = JSON.parse(fs.readFileSync(backupPath, 'utf8'));
210
+ // Backup is valid — restore it to the primary file (atomic via safeWrite)
211
+ console.log(`[safeJson] restored ${path.basename(p)} from .backup sidecar`);
212
+ try {
213
+ safeWrite(p, backupData);
214
+ // Verify the restored file matches expected content
215
+ const verifyData = JSON.parse(fs.readFileSync(p, 'utf8'));
216
+ if (JSON.stringify(verifyData) !== JSON.stringify(backupData)) {
217
+ console.error(`[safeJson] CRITICAL: backup restore verification failed for ${p} — written data does not match backup`);
203
218
  }
204
- return backupData;
205
- } catch (outerErr) {
206
- // Let CRITICAL errors propagate callers must know about data integrity failures
207
- if (outerErr.message && outerErr.message.includes('CRITICAL')) throw outerErr;
208
- return null;
219
+ } catch (restoreErr) {
220
+ // Restore-to-primary is best-effort — backupData is already parsed and valid.
221
+ // Don't throw: disk-full / permission errors should not discard valid data.
222
+ console.error(`[safeJson] restore write failed for ${p}: ${restoreErr.message}`);
209
223
  }
224
+ return backupData;
225
+ } catch (outerErr) {
226
+ // Let CRITICAL errors propagate — callers must know about data integrity failures
227
+ if (outerErr.message && outerErr.message.includes('CRITICAL')) throw outerErr;
228
+ return null;
210
229
  }
211
230
  }
212
231
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@yemi33/minions",
3
- "version": "0.1.1747",
3
+ "version": "0.1.1749",
4
4
  "description": "Multi-agent AI dev team that runs from ~/.minions/ — five autonomous agents share a single engine, dashboard, and knowledge base",
5
5
  "bin": {
6
6
  "minions": "bin/minions.js"