@yemi33/minions 0.1.1748 → 0.1.1750
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +5 -0
- package/dashboard.js +108 -42
- package/engine/cleanup.js +11 -11
- package/engine/copilot-models.json +1 -1
- package/engine/meeting.js +247 -156
- package/engine/queries.js +13 -11
- package/engine/scheduler.js +30 -5
- package/engine/shared.js +56 -23
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
package/dashboard.js
CHANGED
@@ -268,6 +268,30 @@ function resolveWorkItemsCreateTarget(projectName, projects = PROJECTS) {
     wiPath: targetProject ? shared.projectWorkItemsPath(targetProject) : path.join(MINIONS_DIR, 'work-items.json'),
   };
 }
+
+/**
+ * Aggregate archived work items from the central archive plus every project
+ * archive. Each item is tagged with `_source` (`'central'` or the project name)
+ * so the UI can group/filter. Reads via `safeJsonArr` — a corrupt archive
+ * surfaces a logged parse failure and contributes zero items, instead of
+ * throwing 500 or silently dropping the file.
+ *
+ * Exported for testing (P-h3arch-8c19).
+ */
+function collectArchivedWorkItems(minionsDir = MINIONS_DIR, projects = PROJECTS) {
+  const archived = [];
+  const centralPath = path.join(minionsDir, 'work-items-archive.json');
+  for (const item of safeJsonArr(centralPath)) {
+    archived.push({ ...item, _source: 'central' });
+  }
+  for (const project of projects) {
+    const archPath = shared.projectWorkItemsPath(project).replace('.json', '-archive.json');
+    for (const item of safeJsonArr(archPath)) {
+      archived.push({ ...item, _source: project.name });
+    }
+  }
+  return archived;
+}
 function linkPullRequestForTracking({ url, title, project: projectName, autoObserve, context, workItemId }, config = CONFIG, options = {}) {
   if (!url) {
     const err = new Error('url required');
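A minimal sketch of the aggregation contract (the stub `safeJsonArr`, the temp path, and the project name are all hypothetical; the real helper uses the package's shared readers):

```js
// Sketch only: same shape as collectArchivedWorkItems, with safeJsonArr
// stubbed to its documented contract — missing/corrupt files yield [].
const fs = require('fs');
const path = require('path');

const safeJsonArr = (p) => {
  try {
    const parsed = JSON.parse(fs.readFileSync(p, 'utf8'));
    return Array.isArray(parsed) ? parsed : [];
  } catch { return []; } // a bad archive contributes zero items
};

function collectArchivedSketch(minionsDir, projects) {
  const archived = [];
  for (const item of safeJsonArr(path.join(minionsDir, 'work-items-archive.json'))) {
    archived.push({ ...item, _source: 'central' });
  }
  for (const project of projects) {
    const archPath = path.join(minionsDir, 'projects', project.name, 'work-items-archive.json');
    for (const item of safeJsonArr(archPath)) {
      archived.push({ ...item, _source: project.name });
    }
  }
  return archived;
}

console.log(collectArchivedSketch('/tmp/.minions', [{ name: 'demo' }])); // []
```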
@@ -1137,8 +1161,14 @@ function _filterCcTabSessions(sessions) {
 }
 
 function _readCcTabSessions({ prune = true } = {}) {
-  … (2 lines)
+  if (!prune) return _filterCcTabSessions(shared.safeJsonArr(CC_SESSIONS_PATH));
+  // P-c2sess-1d8e: read+filter+write atomically under the file lock so a
+  // concurrent tab upsert/delete cannot lose entries to last-write-wins.
+  let sessions;
+  mutateJsonFileLocked(CC_SESSIONS_PATH, (raw) => {
+    sessions = _filterCcTabSessions(raw);
+    return sessions;
+  }, { defaultValue: [] });
   return sessions;
 }
 
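A toy of the locked read-modify-write shape the P-c2sess-1d8e comments keep referring to (the package's real `mutateJsonFileLocked` also takes a cross-process file lock and writes atomically; this sketch only shows why the RMW must be one operation, and the tab ids are hypothetical):

```js
const fs = require('fs');

// In-process sketch: read, modify, and write happen in one critical section,
// so no writer can overwrite another with a stale in-memory snapshot.
function mutateJsonFileSketch(file, fn, { defaultValue }) {
  let data;
  try { data = JSON.parse(fs.readFileSync(file, 'utf8')); }
  catch { data = defaultValue; }
  const next = fn(data);
  fs.writeFileSync(file, JSON.stringify(next));
  return next;
}

// An upsert and a delete both funnel through the same mutator.
mutateJsonFileSketch('/tmp/cc-sessions.json', (sessions) => {
  return sessions.filter(s => s.id !== 'tab-2').concat({ id: 'tab-1', turnCount: 1 });
}, { defaultValue: [] });
```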
@@ -2148,6 +2178,7 @@ async function executeDocChatActions(actions) {
 // Session store for doc modals — keyed by filePath or title, persisted to disk
 const CC_SESSIONS_PATH = path.join(ENGINE_DIR, 'cc-sessions.json');
 const DOC_SESSIONS_PATH = path.join(ENGINE_DIR, 'doc-sessions.json');
+const CC_SESSION_PATH = path.join(ENGINE_DIR, 'cc-session.json');
 const DOC_SESSION_TTL_MS = shared.ENGINE_DEFAULTS.docSessionTtlMs;
 const DOC_SESSION_MAX_ENTRIES = shared.ENGINE_DEFAULTS.docSessionMaxEntries;
 const docSessions = new Map(); // key → { sessionId, lastActiveAt, turnCount }
@@ -2196,7 +2227,8 @@ function persistDocSessions() {
   pruneDocSessions();
   const obj = {};
   for (const [key, s] of docSessions) obj[key] = s;
-  …
+  // P-c2sess-1d8e: lock against engine/cleanup.js's cap-trim RMW.
+  mutateJsonFileLocked(DOC_SESSIONS_PATH, () => obj, { defaultValue: {} });
 }
 
 const _docSessionPruneTimer = setInterval(() => {
@@ -2261,7 +2293,7 @@ function updateSession(store, key, sessionId, existing) {
       turnCount: (existing ? ccSession.turnCount : 0) + 1,
       _promptHash: _ccPromptHash,
     };
-    …
+    mutateJsonFileLocked(CC_SESSION_PATH, () => ccSession, { defaultValue: {} });
   } else if (key) {
     const prev = docSessions.get(key);
     docSessions.set(key, {
@@ -2335,7 +2367,7 @@ async function ccCall(message, { store = 'cc', sessionKey, extraContext, label =
     // Invalidate the dead session so future calls don't try to resume it
     if (store === 'cc') {
       ccSession = { sessionId: null, createdAt: null, lastActiveAt: null, turnCount: 0 };
-      …
+      mutateJsonFileLocked(CC_SESSION_PATH, () => ccSession, { defaultValue: {} });
     } else if (sessionKey) {
       docSessions.delete(sessionKey);
       schedulePersistDocSessions();
@@ -2423,7 +2455,7 @@ async function ccCallStreaming(message, { store = 'cc', sessionKey, extraContext
     sessionId = null;
     if (store === 'cc') {
       ccSession = { sessionId: null, createdAt: null, lastActiveAt: null, turnCount: 0 };
-      …
+      mutateJsonFileLocked(CC_SESSION_PATH, () => ccSession, { defaultValue: {} });
     } else if (sessionKey) {
       docSessions.delete(sessionKey);
       schedulePersistDocSessions();
@@ -2614,8 +2646,11 @@ async function ccDocCall({ message, document, title, filePath, selection, canEdi
     store: 'doc', sessionKey,
     extraContext, label: 'doc-chat',
     timeout: DOC_CHAT_TIMEOUT_MS,
-    … (2 lines)
+    // Match Command Center's full tool surface and turn budget so doc-chat
+    // can take action (read/write/edit/dispatch) instead of being limited
+    // to Q&A. The doc-chat sysprompt still scopes orchestration to explicit
+    // requests, and ---DOCUMENT--- remains the only document edit channel.
+    allowedTools: 'Bash,Read,Write,Edit,Glob,Grep,WebFetch,WebSearch',
     skipStatePreamble: true,
     systemPrompt: DOC_CHAT_SYSTEM_PROMPT,
     ...(model ? { model } : {}),
@@ -2670,8 +2705,10 @@ async function ccDocCallStreaming({ message, document, title, filePath, selectio
     store: 'doc', sessionKey,
     extraContext, label: 'doc-chat',
     timeout: DOC_CHAT_TIMEOUT_MS,
-    … (2 lines)
+    // Match Command Center's full tool surface — see ccDocCall for
+    // rationale. Both wrappers must share the same policy so the streaming
+    // variant doesn't diverge from the non-streaming one.
+    allowedTools: 'Bash,Read,Write,Edit,Glob,Grep,WebFetch,WebSearch',
     skipStatePreamble: true,
     systemPrompt: DOC_CHAT_SYSTEM_PROMPT,
     ...(model ? { model } : {}),
@@ -2713,12 +2750,31 @@ async function ccDocCallStreaming({ message, document, title, filePath, selectio
 function readBody(req) {
   return new Promise((resolve, reject) => {
     let body = '';
+    // P-c1read-7b3c: aborted closure flag prevents OOM from a misbehaving local
+    // client streaming forever after rejection. The data handler MUST early-return
+    // when aborted is true so no further chunks are appended.
+    let aborted = false;
     const timeout = setTimeout(() => {
+      // Set aborted FIRST so any late-arriving chunk (already in flight) is
+      // dropped by the data handler instead of growing body unbounded.
+      aborted = true;
       req.destroy();
       reject(new Error('Request body timeout after 30s'));
     }, 30000);
-    req.on('data', chunk => {
+    req.on('data', chunk => {
+      if (aborted) return;
+      body += chunk;
+      if (body.length > 1e6) {
+        // Order matters: set aborted first so any in-flight chunk early-returns,
+        // then clear the timer, tear down the socket, and surface the failure.
+        aborted = true;
+        clearTimeout(timeout);
+        req.destroy();
+        reject(new Error('Too large'));
+      }
+    });
     req.on('end', () => {
+      if (aborted) return;
       clearTimeout(timeout);
       let parsed;
       try { parsed = JSON.parse(body); } catch(e) { reject(e); return; }
@@ -2732,7 +2788,11 @@ function readBody(req) {
       }
       resolve(parsed);
     });
-    req.on('error', (e) => {
+    req.on('error', (e) => {
+      if (aborted) return;
+      clearTimeout(timeout);
+      reject(e);
+    });
   });
 }
 
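The guard pattern in isolation, as a standalone sketch with the same shape as readBody's new handlers (no JSON parsing or 30s timer; not the package's code):

```js
const http = require('http');

// Reads a request body with a size cap; chunks arriving after abort are dropped.
function readCapped(req, cap = 1e6) {
  return new Promise((resolve, reject) => {
    let body = '';
    let aborted = false;
    req.on('data', (chunk) => {
      if (aborted) return;   // in-flight chunks can't grow body after abort
      body += chunk;
      if (body.length > cap) {
        aborted = true;      // set first, then tear down
        req.destroy();
        reject(new Error('Too large'));
      }
    });
    req.on('end', () => { if (!aborted) resolve(body); });
    req.on('error', (e) => { if (!aborted) reject(e); });
  });
}

// Usage: http.createServer((req, res) => readCapped(req).then(...).catch(...))
```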
@@ -3296,18 +3356,10 @@ const server = http.createServer(async (req, res) => {
 
 async function handleWorkItemsArchiveList(req, res) {
   try {
-    …
-    //
-    … (2 lines)
-    if (central) { try { allArchived.push(...JSON.parse(central).map(i => ({ ...i, _source: 'central' }))); } catch {} }
-    // Project archives
-    for (const project of PROJECTS) {
-      const archPath = shared.projectWorkItemsPath(project).replace('.json', '-archive.json');
-      const content = safeRead(archPath);
-      if (content) { try { allArchived.push(...JSON.parse(content).map(i => ({ ...i, _source: project.name }))); } catch {} }
-    }
-    return jsonReply(res, 200, allArchived);
+    // collectArchivedWorkItems uses safeJsonArr (typed default + logged parse
+    // failure), so a corrupt archive file is surfaced via console.error and
+    // contributes zero items instead of taking down the whole listing.
+    return jsonReply(res, 200, collectArchivedWorkItems(MINIONS_DIR, PROJECTS));
   } catch (e) { console.error('Archive fetch error:', e.message); return jsonReply(res, e.statusCode || 500, { error: e.message }); }
 }
 
@@ -5320,7 +5372,11 @@ What would you like to discuss or change? When you're happy, say "approve" and I
     const result = removeProject(target, { keepData: body.keepData === true, purge: body.purge === true });
     if (!result.ok) return jsonReply(res, result.error?.includes('No project') ? 404 : 400, result);
     reloadConfig();
-    …
+    // includeSlow: getStatus() caches the projects[] field in slow state (60s
+    // TTL) — without flushing it, the removed project keeps appearing under
+    // status.projects for up to a minute even though PROJECTS in memory is
+    // already up to date.
+    invalidateStatusCache({ includeSlow: true });
     return jsonReply(res, 200, result);
   } catch (e) { return jsonReply(res, 400, { error: e.message }); }
 }
@@ -5390,7 +5446,8 @@ What would you like to discuss or change? When you're happy, say "approve" and I
     try { if (live.abortFn) live.abortFn(); } catch {}
     _clearCcLiveStream(tabId);
   }
-  …
+  // P-c2sess-1d8e: lock single-session reset against concurrent updateSession writes.
+  mutateJsonFileLocked(CC_SESSION_PATH, () => ccSession, { defaultValue: {} });
   return jsonReply(res, 200, { ok: true });
 }
 
@@ -5419,9 +5476,12 @@ What would you like to discuss or change? When you're happy, say "approve" and I
 async function handleCCSessionDelete(req, res, match) {
   const id = match?.[1];
   if (!id) return jsonReply(res, 400, { error: 'id required' });
-  … (3 lines)
+  // P-c2sess-1d8e: one locked RMW so a concurrent upsert from the streaming
+  // handler cannot resurrect the deleted tab between read and write.
+  mutateJsonFileLocked(CC_SESSIONS_PATH, (raw) => {
+    const sessions = _filterCcTabSessions(raw);
+    return sessions.filter(s => s.id !== id);
+  }, { defaultValue: [] });
   return jsonReply(res, 200, { ok: true });
 }
 
@@ -5773,20 +5833,24 @@ What would you like to discuss or change? When you're happy, say "approve" and I
     const _persistTabId = body.tabId;
     if (_persistTabId && responseSessionId) {
       try {
-        … (2 lines)
+        // P-c2sess-1d8e: one locked RMW so concurrent multi-tab streams can't
+        // race on read+modify+write — both upsert paths share the lock.
         const preview = (body.message || '').slice(0, 80);
-        … (2 lines)
-        existing …
-        … (8 lines)
+        mutateJsonFileLocked(CC_SESSIONS_PATH, (raw) => {
+          const sessions = _filterCcTabSessions(raw);
+          const existing = sessions.find(s => s.id === _persistTabId);
+          if (existing) {
+            existing.sessionId = responseSessionId;
+            existing.lastActiveAt = new Date(now).toISOString();
+            existing.turnCount = sessionReset ? 1 : (existing.turnCount || 0) + 1;
+            existing.preview = preview;
+            existing._promptHash = _ccPromptHash;
+            existing.runtime = currentRuntime;
+          } else {
+            sessions.push({ id: _persistTabId, title: (body.message || 'New chat').slice(0, 40), sessionId: responseSessionId, createdAt: new Date(now).toISOString(), lastActiveAt: new Date(now).toISOString(), turnCount: 1, preview, _promptHash: _ccPromptHash, runtime: currentRuntime });
+          }
+          return sessions;
+        }, { defaultValue: [] });
       } catch { /* non-critical */ }
     }
 
@@ -7329,6 +7393,7 @@ What would you like to discuss or change? When you're happy, say "approve" and I
 // Production entry points use the closures directly; tests import via require('./dashboard').
 module.exports = {
   getMcpServers,
+  readBody,
   _filterCcTabSessions,
   _getVersionCheckInterval,
   _parseWatchInterval,
@@ -7345,6 +7410,7 @@ module.exports = {
   _findDuplicateWorkItemCreate: findDuplicateWorkItemCreate,
   _createWorkItemWithDedup: createWorkItemWithDedup,
   _resolveWorkItemsCreateTarget: resolveWorkItemsCreateTarget,
+  _collectArchivedWorkItems: collectArchivedWorkItems,
   _createPipelineFromAction: createPipelineFromAction,
   executeCCActions,
   buildCCStatePreamble,
package/engine/cleanup.js
CHANGED
@@ -687,21 +687,21 @@ async function runCleanup(config, verbose = false) {
   // silently invalidate live chat tabs the user expects to keep.
   cleaned.ccSessions = 0;
 
-  // 10b. Prune doc-chat sessions — cap at 100 entries, remove oldest beyond cap
+  // 10b. Prune doc-chat sessions — cap at 100 entries, remove oldest beyond cap.
+  // P-c2sess-1d8e: read+sort+write must run atomically under the file lock so a
+  // concurrent dashboard persistDocSessions can't race the cap-trim.
   cleaned.docSessions = 0;
   try {
     const docSessionsPath = path.join(ENGINE_DIR, 'doc-sessions.json');
-    const …
-    …
+    const DOC_SESSIONS_CAP = 100;
+    mutateJsonFileLocked(docSessionsPath, (docSessions) => {
+      if (!docSessions || typeof docSessions !== 'object' || Array.isArray(docSessions)) return docSessions;
       const entries = Object.entries(docSessions);
-      … (5 lines)
-      safeWrite(docSessionsPath, keep);
-    }
-  }
+      if (entries.length <= DOC_SESSIONS_CAP) return docSessions;
+      entries.sort((a, b) => new Date(b.lastActiveAt || 0) - new Date(a.lastActiveAt || 0));
+      cleaned.docSessions = entries.length - DOC_SESSIONS_CAP;
+      return Object.fromEntries(entries.slice(0, DOC_SESSIONS_CAP));
+    }, { defaultValue: {}, skipWriteIfUnchanged: true });
   } catch (e) { log('warn', 'prune doc-sessions: ' + e.message); }
 
   // 11. Cap cooldowns.json — keep at most 500 entries (on top of 24h TTL in cooldown.js)
package/engine/meeting.js
CHANGED
@@ -6,7 +6,7 @@
 const fs = require('fs');
 const path = require('path');
 const shared = require('./shared');
-const { safeJson, …
+const { safeJson, uid, log, ts, ENGINE_DEFAULTS, WORK_TYPE, DISPATCH_RESULT } = shared;
 const queries = require('./queries');
 const { getDispatch, getConfig } = queries;
 const { renderPlaybook } = require('./playbook');
@@ -349,9 +349,57 @@ function getMeeting(id) {
   return m;
 }
 
-…
+/**
+ * Read-modify-write helper for meetings/<id>.json under a file lock.
+ *
+ * Mirrors the mutateDispatch / mutateWorkItems / mutatePullRequests pattern.
+ * Use this for ANY change to a meeting's persisted state — bare safeWrite
+ * loses concurrent agent findings (every meeting round writes from a
+ * separate agent process).
+ *
+ * `fn` receives the parsed meeting object (with default fields populated like
+ * getMeeting), or `null` when the file is absent. Return the mutated meeting
+ * to persist it; return `null`/`undefined` to skip the write (the underlying
+ * mutateJsonFileLocked handles the no-op via skipWriteIfUnchanged).
+ *
+ * CRITICAL: keep `fn` fast. Never spawn agents, kill processes, run git
+ * commands, or `await` inside the callback — the lock is held for the
+ * duration of the synchronous call. Do that work BEFORE or AFTER mutateMeeting.
+ */
+function mutateMeeting(id, fn) {
   if (!fs.existsSync(MEETINGS_DIR)) fs.mkdirSync(MEETINGS_DIR, { recursive: true });
-  …
+  const filePath = path.join(MEETINGS_DIR, id + '.json');
+  let userResult;
+  shared.mutateJsonFileLocked(filePath, (data) => {
+    const isMeeting = data && typeof data === 'object' && !Array.isArray(data) && data.id;
+    const meeting = isMeeting ? data : null;
+    if (meeting) {
+      // Match getMeeting()'s default-field normalization.
+      if (!meeting.findings) meeting.findings = {};
+      if (!meeting.debate) meeting.debate = {};
+      if (!meeting.humanNotes) meeting.humanNotes = [];
+      if (!meeting.participants) meeting.participants = [];
+      if (!meeting.transcript) meeting.transcript = [];
+      if (!meeting.roundFailures || typeof meeting.roundFailures !== 'object') meeting.roundFailures = {};
+    }
+    userResult = fn(meeting);
+    if (userResult === undefined || userResult === null) {
+      // Skip-write: return original data so JSON.stringify equality holds
+      // and mutateJsonFileLocked's skipWriteIfUnchanged guard takes effect.
+      return data;
+    }
+    return userResult;
+  }, { defaultValue: {}, skipWriteIfUnchanged: true });
+  return userResult === undefined ? null : userResult;
+}
+
+/**
+ * Persist a meeting object as-is. Thin wrapper over mutateMeeting so every
+ * write goes through the file lock — covers the create-new-file path
+ * (createMeeting) and any tests that pre-seed meeting state.
+ */
+function saveMeeting(meeting) {
+  return mutateMeeting(meeting.id, () => meeting);
 }
 
 function createMeeting({ title, agenda, participants }) {
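A hypothetical caller, following the contract the JSDoc above spells out (the meeting id and note payload are illustrative, and this assumes the module can be loaded standalone):

```js
const { mutateMeeting } = require('./meeting');

const updated = mutateMeeting('meeting-123', (meeting) => {
  if (!meeting) return null;                      // file absent: skip the write
  if (meeting.status === 'archived') return null; // no-op path
  // Fast, synchronous mutation only; no spawns or awaits under the lock.
  meeting.humanNotes.push('checked in');
  return meeting;                                 // returned object is persisted
});
// updated is null when the callback skipped, else the persisted meeting.
```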
@@ -529,92 +577,98 @@ function discoverMeetingWork(config) {
  * Called from runPostCompletionHooks when type === 'meeting'.
  */
 function collectMeetingFindings(meetingId, agentId, roundName, output, structuredCompletion = null, expectedRound = null, completionInfo = {}) {
-  … (2 lines)
-  if (isTerminalMeetingStatus(meeting.status)) {
-    log('info', `Ignoring late findings from ${agentId} for completed meeting ${meetingId}`);
-    return;
-  }
-  …
-  const expectedStatus = expectedMeetingStatusForRound(roundName);
-  if (!expectedStatus) {
-    log('warn', `Meeting ${meetingId}: ignoring ${agentId} output for unknown round "${roundName || '(empty)'}"`);
-    return;
-  }
-  if (meeting.status !== expectedStatus) {
-    log('info', `Ignoring stale ${roundName} output from ${agentId} for meeting ${meetingId} currently ${meeting.status}`);
-    return;
-  }
-  if (expectedRound !== null && expectedRound !== undefined && Number(meeting.round || 1) !== Number(expectedRound)) {
-    log('info', `Ignoring stale round ${expectedRound} output from ${agentId} for meeting ${meetingId} currently on round ${meeting.round || 1}`);
-    return;
-  }
-  if (hasRoundTerminalOutcome(meeting, roundName, agentId, meeting.round)) {
-    log('info', `Ignoring duplicate ${roundName} output from ${agentId} for meeting ${meetingId}`);
-    return;
-  }
-  …
+  // Resolve content OUTSIDE the lock — file reads (note artifacts) and stream
+  // parsing are slow and lock callbacks must stay fast.
   const content = resolveMeetingContributionContent(output, structuredCompletion);
   const completionSucceeded = completionInfo?.success !== false;
 
-  … (2 lines)
-  const reason = !completionSucceeded
-    ? (completionInfo?.reason || completionInfo?.completionStatus || 'Agent failed before completing the meeting round')
-    : 'Agent produced empty meeting output';
-  failures[agentId] = {
-    reason,
-    content: content || completionInfo?.summary || '',
-    submittedAt: ts(),
-  };
-  meeting.transcript.push({
-    round: meeting.round,
-    agent: agentId,
-    type: 'failure',
-    content: reason,
-    at: ts(),
-  });
-  log('warn', `Meeting ${meetingId}: agent ${agentId} failed ${roundName} — ${reason}`);
-  advanceMeetingIfRoundComplete(meeting, roundName, meetingId);
-  saveMeeting(meeting);
-  return;
-  }
+  let concludedMeeting = null;
+  let configForInbox = null;
 
-  …
-  meeting …
-  … (9 lines)
+  mutateMeeting(meetingId, (meeting) => {
+    if (!meeting) return null; // file missing — nothing to do
+    if (isTerminalMeetingStatus(meeting.status)) {
+      log('info', `Ignoring late findings from ${agentId} for completed meeting ${meetingId}`);
+      return null;
+    }
+
+    const expectedStatus = expectedMeetingStatusForRound(roundName);
+    if (!expectedStatus) {
+      log('warn', `Meeting ${meetingId}: ignoring ${agentId} output for unknown round "${roundName || '(empty)'}"`);
+      return null;
+    }
+    if (meeting.status !== expectedStatus) {
+      log('info', `Ignoring stale ${roundName} output from ${agentId} for meeting ${meetingId} currently ${meeting.status}`);
+      return null;
+    }
+    if (expectedRound !== null && expectedRound !== undefined && Number(meeting.round || 1) !== Number(expectedRound)) {
+      log('info', `Ignoring stale round ${expectedRound} output from ${agentId} for meeting ${meetingId} currently on round ${meeting.round || 1}`);
+      return null;
+    }
+    if (hasRoundTerminalOutcome(meeting, roundName, agentId, meeting.round)) {
+      log('info', `Ignoring duplicate ${roundName} output from ${agentId} for meeting ${meetingId}`);
+      return null;
+    }
+
+    if (!completionSucceeded || isEmptyMeetingContent(content)) {
+      const failures = getRoundFailures(meeting, roundName, meeting.round, true);
+      const reason = !completionSucceeded
+        ? (completionInfo?.reason || completionInfo?.completionStatus || 'Agent failed before completing the meeting round')
+        : 'Agent produced empty meeting output';
+      failures[agentId] = {
+        reason,
+        content: content || completionInfo?.summary || '',
+        submittedAt: ts(),
+      };
+      meeting.transcript.push({
+        round: meeting.round,
+        agent: agentId,
+        type: 'failure',
+        content: reason,
+        at: ts(),
+      });
+      log('warn', `Meeting ${meetingId}: agent ${agentId} failed ${roundName} — ${reason}`);
+      advanceMeetingIfRoundComplete(meeting, roundName, meetingId);
+      return meeting;
+    }
+
+    if (roundName === 'investigate') {
+      meeting.findings[agentId] = { content, submittedAt: ts() };
+      meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'finding', content, at: ts() });
+    } else if (roundName === 'debate') {
+      meeting.debate[agentId] = { content, submittedAt: ts() };
+      meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'debate', content, at: ts() });
+    } else if (roundName === 'conclude') {
+      meeting.conclusion = { content, agent: agentId, submittedAt: ts() };
+      meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'conclusion', content, at: ts() });
+      meeting.status = 'completed';
+      meeting.completedAt = ts();
+      // Defer inbox write until AFTER the lock releases — writeToInbox hits
+      // the filesystem (slug dedup, write) and must not block other writers.
+      concludedMeeting = meeting;
+      try { configForInbox = queries.getConfig(); } catch { configForInbox = { agents: {} }; }
+      return meeting;
+    }
 
-    …
+    advanceMeetingIfRoundComplete(meeting, roundName, meetingId);
+    return meeting;
+  });
+
+  if (concludedMeeting) {
     try {
-      …
-      writeMeetingTranscriptToInbox(meeting, meetingId, config.agents || {});
+      writeMeetingTranscriptToInbox(concludedMeeting, meetingId, (configForInbox && configForInbox.agents) || {});
     } catch (e) { log('warn', `Meeting ${meetingId} inbox write: ${e.message}`); }
-    …
     log('info', `Meeting ${meetingId} completed — transcript written to inbox`);
-    saveMeeting(meeting);
-    return;
   }
-  …
-  advanceMeetingIfRoundComplete(meeting, roundName, meetingId);
-  …
-  saveMeeting(meeting);
 }
 
 function addMeetingNote(meetingId, note) {
-  … (6 lines)
+  return mutateMeeting(meetingId, (meeting) => {
+    if (!meeting) return null;
+    meeting.humanNotes.push(note);
+    meeting.transcript.push({ round: meeting.round, agent: 'human', type: 'note', content: note, at: ts() });
+    return meeting;
+  });
 }
 
 function _killMeetingDispatches(meetingId) {
@@ -672,44 +726,60 @@ function _killMeetingDispatches(meetingId) {
 }
 
 function advanceMeetingRound(meetingId) {
-  … (2 lines)
+  // Pre-check (read-only) so we don't kill dispatches for a meeting that's
+  // already terminal. The authoritative status check still runs INSIDE the
+  // lock below.
+  const existing = getMeeting(meetingId);
+  if (!existing || existing.status === 'completed' || existing.status === 'archived') return null;
+
+  // CRITICAL: kill BEFORE acquiring the meeting lock. _killMeetingDispatches
+  // takes the dispatch.json lock and shells out to kill processes — never
+  // run that under the meeting lock (per CLAUDE.md, lock callbacks must
+  // stay fast and never spawn / kill / await).
   _killMeetingDispatches(meetingId);
-  … (7 lines)
+
+  return mutateMeeting(meetingId, (meeting) => {
+    if (!meeting || meeting.status === 'completed' || meeting.status === 'archived') return null;
+    if (meeting.status === 'investigating') { meeting.status = 'debating'; meeting.round = 2; }
+    else if (meeting.status === 'debating') { meeting.status = 'concluding'; meeting.round = 3; }
+    else if (meeting.status === 'concluding') { meeting.status = 'completed'; meeting.completedAt = ts(); }
+    else return meeting; // unknown active status — no state change, but report current
+    meeting.roundStartedAt = ts();
+    return meeting;
+  });
 }
 
 function endMeeting(meetingId) {
-  … (2 lines)
+  // See advanceMeetingRound — kill happens BEFORE the meeting lock so dispatch
+  // teardown / process kills never run inside our lock callback.
+  const existing = getMeeting(meetingId);
+  if (!existing) return null;
   _killMeetingDispatches(meetingId);
-  … (4 lines)
+
+  return mutateMeeting(meetingId, (meeting) => {
+    if (!meeting) return null;
+    meeting.status = 'completed';
+    meeting.completedAt = ts();
+    return meeting;
+  });
 }
 
 function archiveMeeting(id) {
-  … (6 lines)
+  return mutateMeeting(id, (meeting) => {
+    if (!meeting) return null;
+    meeting.status = 'archived';
+    meeting.archivedAt = ts();
+    return meeting;
+  });
 }
 
 function unarchiveMeeting(id) {
-  … (6 lines)
+  return mutateMeeting(id, (meeting) => {
+    if (!meeting || meeting.status !== 'archived') return null;
+    meeting.status = 'completed';
+    delete meeting.archivedAt;
+    return meeting;
+  });
 }
 
 function deleteMeeting(id) {
@@ -717,6 +787,9 @@ function deleteMeeting(id) {
   const filePath = path.join(MEETINGS_DIR, id + '.json');
   if (!fs.existsSync(filePath)) return false;
   fs.unlinkSync(filePath);
+  // mutateMeeting writes a .backup sidecar; safeJson auto-restores from it
+  // when the primary is missing, so deletion must also drop the backup.
+  try { fs.unlinkSync(filePath + '.backup'); } catch { /* sidecar may not exist */ }
   return true;
 }
 
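Why the sidecar has to go too, as a runnable toy of the resurrection path deleteMeeting now closes (the temp file names are illustrative):

```js
const fs = require('fs');

fs.writeFileSync('/tmp/m1.json', '{"id":"m1"}');
fs.writeFileSync('/tmp/m1.json.backup', '{"id":"m1"}');

fs.unlinkSync('/tmp/m1.json'); // deleting only the primary…
// …leaves the sidecar behind, and the next safeJson('/tmp/m1.json') would
// restore from it, so the "deleted" meeting comes back. Hence the paired:
try { fs.unlinkSync('/tmp/m1.json.backup'); } catch { /* may not exist */ }
```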
@@ -733,68 +806,86 @@ function checkMeetingTimeouts(config) {
   const hardTimeout = (config.engine || {}).meetingRoundHardTimeout
     || ENGINE_DEFAULTS.meetingRoundHardTimeout;
 
-  for (const …
-  if (isTerminalMeetingStatus( …
-  if (!ACTIVE_MEETING_STATUSES.has( …
-  if (! …
+  for (const snapshot of meetings) {
+    if (isTerminalMeetingStatus(snapshot.status)) continue;
+    if (!ACTIVE_MEETING_STATUSES.has(snapshot.status)) continue;
+    if (!snapshot.roundStartedAt) continue;
 
-    const roundStartedMs = new Date( …
+    const roundStartedMs = new Date(snapshot.roundStartedAt).getTime();
     if (!Number.isFinite(roundStartedMs)) continue;
     const elapsed = Date.now() - roundStartedMs;
     if (elapsed < timeout) continue;
 
-    … (26 lines)
+    // Re-evaluate the timeout transition under the file lock to avoid lost
+    // updates if an agent finalised mid-tick. Helpers (advanceMeetingIfRoundComplete
+    // etc.) operate on the locked-and-rehydrated meeting object.
+    mutateMeeting(snapshot.id, (meeting) => {
+      if (!meeting) return null;
+      if (isTerminalMeetingStatus(meeting.status)) return null;
+      if (!ACTIVE_MEETING_STATUSES.has(meeting.status)) return null;
+      // Use the latest roundStartedAt — the round may have advanced inside
+      // a concurrent collectMeetingFindings call between snapshot and lock.
+      const liveStartedMs = new Date(meeting.roundStartedAt || 0).getTime();
+      if (!Number.isFinite(liveStartedMs)) return null;
+      const liveElapsed = Date.now() - liveStartedMs;
+      if (liveElapsed < timeout) return null;
+
+      const respondedCount = meeting.status === 'investigating'
+        ? Object.keys(meeting.findings || {}).length
+        : meeting.status === 'debating'
+          ? Object.keys(meeting.debate || {}).length
+          : 0;
+      const totalCount = meeting.participants.length;
+
+      const roundName = meeting.status === 'investigating'
+        ? 'investigate'
+        : meeting.status === 'debating'
+          ? 'debate'
+          : 'conclude';
+
+      if (roundName !== 'conclude') {
+        if (allParticipantsFinishedRound(meeting, roundName, meeting.round)) {
+          log('warn', `Meeting ${meeting.id}: round ${meeting.round} timed out after ${Math.round(liveElapsed / 60000)}min but all participants are terminal — advancing`);
+          meeting.transcript.push({ round: meeting.round, agent: 'system', type: 'timeout', content: `Round ${meeting.round} timed out after all participants finished`, at: ts() });
+          advanceMeetingIfRoundComplete(meeting, roundName, meeting.id, config);
+          return meeting;
+        } else if (liveElapsed >= hardTimeout) {
+          const failures = getRoundFailures(meeting, roundName, meeting.round, true);
+          const stalled = (meeting.participants || []).filter(p => !hasRoundTerminalOutcome(meeting, roundName, p, meeting.round));
+          const reason = `Hard meeting timeout after ${Math.round(liveElapsed / 60000)}min — agent did not produce ${roundName} output`;
+          for (const agentId of stalled) {
+            failures[agentId] = { reason, content: '', submittedAt: ts() };
+            meeting.transcript.push({ round: meeting.round, agent: agentId, type: 'failure', content: reason, at: ts() });
+          }
+          log('warn', `Meeting ${meeting.id}: round ${meeting.round} hit hard timeout after ${Math.round(liveElapsed / 60000)}min — marking ${stalled.length}/${totalCount} non-responders as failed and advancing`);
+          meeting.transcript.push({ round: meeting.round, agent: 'system', type: 'timeout', content: `Round ${meeting.round} hard timeout — ${stalled.length} non-responder(s) marked failed`, at: ts() });
+          advanceMeetingIfRoundComplete(meeting, roundName, meeting.id, config);
+          return meeting;
+        } else {
+          log('warn', `Meeting ${meeting.id}: round ${meeting.round} timed out after ${Math.round(liveElapsed / 60000)}min — waiting for all participants to finish (${respondedCount}/${totalCount} succeeded)`);
+          return null; // observational only — no state change
+        }
+      } else if (meeting.status === 'concluding') {
+        if (liveElapsed >= hardTimeout) {
+          const reason = `Hard meeting timeout after ${Math.round(liveElapsed / 60000)}min — conclusion agent did not produce output`;
+          const failures = getRoundFailures(meeting, 'conclude', meeting.round, true);
+          const conclusionAgent = (meeting.participants || []).find(p => !hasRoundTerminalOutcome(meeting, 'conclude', p, meeting.round)) || meeting.participants?.[0] || 'system';
+          failures[conclusionAgent] = { reason, content: '', submittedAt: ts() };
+          meeting.transcript.push({ round: meeting.round, agent: conclusionAgent, type: 'failure', content: reason, at: ts() });
+          log('warn', `Meeting ${meeting.id}: conclusion round hit hard timeout after ${Math.round(liveElapsed / 60000)}min — synthesising fallback conclusion`);
+          advanceMeetingIfRoundComplete(meeting, 'conclude', meeting.id, config);
+          return meeting;
+        } else {
+          log('warn', `Meeting ${meeting.id}: conclusion round timed out after ${Math.round(liveElapsed / 60000)}min — waiting for the conclusion agent to finish`);
+          return null;
        }
-      log('warn', `Meeting ${meeting.id}: round ${meeting.round} hit hard timeout after ${Math.round(elapsed / 60000)}min — marking ${stalled.length}/${totalCount} non-responders as failed and advancing`);
-      meeting.transcript.push({ round: meeting.round, agent: 'system', type: 'timeout', content: `Round ${meeting.round} hard timeout — ${stalled.length} non-responder(s) marked failed`, at: ts() });
-      advanceMeetingIfRoundComplete(meeting, roundName, meeting.id, config);
-      saveMeeting(meeting);
-    } else {
-      log('warn', `Meeting ${meeting.id}: round ${meeting.round} timed out after ${Math.round(elapsed / 60000)}min — waiting for all participants to finish (${respondedCount}/${totalCount} succeeded)`);
-    }
-  } else if (meeting.status === 'concluding') {
-    if (elapsed >= hardTimeout) {
-      const reason = `Hard meeting timeout after ${Math.round(elapsed / 60000)}min — conclusion agent did not produce output`;
-      const failures = getRoundFailures(meeting, 'conclude', meeting.round, true);
-      const conclusionAgent = (meeting.participants || []).find(p => !hasRoundTerminalOutcome(meeting, 'conclude', p, meeting.round)) || meeting.participants?.[0] || 'system';
-      failures[conclusionAgent] = { reason, content: '', submittedAt: ts() };
-      meeting.transcript.push({ round: meeting.round, agent: conclusionAgent, type: 'failure', content: reason, at: ts() });
-      log('warn', `Meeting ${meeting.id}: conclusion round hit hard timeout after ${Math.round(elapsed / 60000)}min — synthesising fallback conclusion`);
-      advanceMeetingIfRoundComplete(meeting, 'conclude', meeting.id, config);
-      saveMeeting(meeting);
-    } else {
-      log('warn', `Meeting ${meeting.id}: conclusion round timed out after ${Math.round(elapsed / 60000)}min — waiting for the conclusion agent to finish`);
      }
-    …
+      return null;
+    });
   }
 }
 module.exports = {
-  MEETINGS_DIR, getMeetings, getMeeting, saveMeeting, createMeeting,
+  MEETINGS_DIR, getMeetings, getMeeting, saveMeeting, mutateMeeting, createMeeting,
   discoverMeetingWork, collectMeetingFindings, checkMeetingTimeouts,
   addMeetingNote, advanceMeetingRound, endMeeting, archiveMeeting, unarchiveMeeting, deleteMeeting,
   EMPTY_OUTPUT_PATTERNS,
package/engine/queries.js
CHANGED
@@ -626,30 +626,32 @@ function getPullRequests(config) {
   const projectByName = new Map(projects.map(p => [p.name, p]));
   const allPrs = [];
   const seenIds = new Set();
-  // Single pass over projects/*
-  //
-  //
-  //
+  // Single pass over projects/* intersected with the configured project list.
+  // Filesystem dirs not in CONFIG.projects (e.g. a leftover .minions/projects/
+  // <removedName>/ recreated by a stale code path between archive and reload)
+  // are skipped so removed projects can't resurrect themselves through the
+  // status payload. Hidden dirs (.archived sidecar) are also skipped defensively
+  // — they live as siblings under projects/ and would otherwise be enumerated.
   let projectDirs = [];
   try {
     projectDirs = fs.readdirSync(path.join(MINIONS_DIR, 'projects'), { withFileTypes: true })
-      .filter(d => d.isDirectory()).map(d => d.name);
+      .filter(d => d.isDirectory() && !d.name.startsWith('.')).map(d => d.name);
   } catch { /* projects dir missing */ }
   for (const dirName of projectDirs) {
-    const project = projectByName.get(dirName) …
-    …
+    const project = projectByName.get(dirName);
+    if (!project) continue; // unconfigured/removed — don't surface
+    const prPath = projectPrPath(project);
     const prs = readJsonNoRestore(prPath);
     if (!Array.isArray(prs)) continue;
     shared.normalizePrRecords(prs, project);
-    const base = project …
+    const base = project.prUrlBase || '';
     for (const pr of prs) {
       if (!pr?.id || seenIds.has(pr.id)) continue;
-      if ( …
+      if (!pr.url && base) {
        const prNumber = shared.getPrNumber(pr);
        if (prNumber != null) pr.url = base + prNumber;
      }
-      pr._project = project …
-      if (!project) pr._ghost = true;
+      pr._project = project.name || 'Project';
      allPrs.push(pr);
      seenIds.add(pr.id);
    }
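The intersection rule in isolation (directory and project names are made up):

```js
const onDisk = ['alpha', '.archived', 'ghost-removed'];     // readdir of projects/
const configured = new Map([['alpha', { name: 'alpha' }]]); // CONFIG.projects

const surfaced = onDisk
  .filter((name) => !name.startsWith('.')) // hidden sidecars skipped
  .map((name) => configured.get(name))     // must be configured…
  .filter(Boolean);                        // …or it doesn't surface

console.log(surfaced.map((p) => p.name)); // ['alpha'] — ghost-removed stays gone
```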
package/engine/scheduler.js
CHANGED
@@ -51,26 +51,51 @@ function resolveScheduleTemplateVars(str) {
 // Parse a single cron field into a matcher function.
 // field: e.g., "*", "5", "1,3,5", "*/15"
 // min/max: valid range (0-59 for minute, 0-23 for hour, 0-6 for dow)
+//
+// Bounds policy (P-h4cron-2ab8): out-of-range fields produce a matcher that
+// never fires (`() => false`), rather than null. This keeps the function's
+// contract (always returns a function) and matches existing behavior for
+// other invalid forms (`*/0`, `*/abc`, unparseable syntax). parseCronExpr
+// still returns its wrapper object — but its `.matches()` returns false for
+// every Date when any field is out of range, so the schedule never fires.
+// This catches typos like minute=99, hour=24, dow=9 that today are accepted
+// as exact-value matchers and silently never trigger.
 function parseCronField(field, min, max) {
   field = field.trim();
   if (field === '*') return () => true;
 
-  …
+  const hasMin = typeof min === 'number';
+  const hasMax = typeof max === 'number';
+
+  // Step: */N — step must be > 0 AND not exceed the field's max.
+  // A step larger than max either matches only val=0 (e.g., */60 for minute)
+  // or nothing meaningful — treat as never-fires for predictability.
   if (field.startsWith('*/')) {
     const step = parseInt(field.slice(2), 10);
     if (isNaN(step) || step <= 0) return () => false;
+    if (hasMax && step > max) return () => false;
     return (val) => val % step === 0;
   }
 
-  // List: N,M,O
+  // List: N,M,O — drop NaN entries AND entries outside [min, max] before
+  // building the Set. A list with no surviving entries falls through to the
+  // empty-Set matcher, which never matches anything.
   if (field.includes(',')) {
-    const values = new Set( …
+    const values = new Set(
+      field
+        .split(',')
+        .map(v => parseInt(v.trim(), 10))
+        .filter(v => !isNaN(v) && (!hasMin || v >= min) && (!hasMax || v <= max))
+    );
     return (val) => values.has(val);
   }
 
-  // Single value: N
+  // Single value: N — out-of-range exact values never fire.
   const exact = parseInt(field, 10);
-  if (!isNaN(exact)) …
+  if (!isNaN(exact)) {
+    if ((hasMin && exact < min) || (hasMax && exact > max)) return () => false;
+    return (val) => val === exact;
+  }
 
   return () => false;
 }
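What the tightened bounds mean for a minute field (min=0, max=59), written as a hypothetical test that assumes parseCronField were exported (today it is module-internal):

```js
const assert = require('assert');
// const { parseCronField } = require('./scheduler'); // hypothetical export

function demoBounds(parseCronField) {
  assert.strictEqual(parseCronField('*/15', 0, 59)(30), true);    // 30 % 15 === 0
  assert.strictEqual(parseCronField('*/60', 0, 59)(15), false);   // step > max: never fires
  assert.strictEqual(parseCronField('1,3,99', 0, 59)(99), false); // 99 filtered from the list
  assert.strictEqual(parseCronField('1,3,99', 0, 59)(3), true);   // in-range entries survive
  assert.strictEqual(parseCronField('99', 0, 59)(39), false);     // out-of-range exact: never fires
}
```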
package/engine/shared.js
CHANGED
@@ -180,33 +180,52 @@ function safeReadDir(dir) {
 }
 
 function safeJson(p) {
+  // Split the read from the parse so we can distinguish "file missing" (normal
+  // pre-create state — silent) from "file present but corrupt JSON" (real
+  // integrity failure — must log). Without this split a `JSON.parse(read)` in
+  // a single try/catch silently hides corruption (P-h3arch-8c19).
+  let primaryRaw = null;
+  let primaryRead = false;
   try {
-    …
+    primaryRaw = fs.readFileSync(p, 'utf8');
+    primaryRead = true;
   } catch {
-    //
-    …
+    // ENOENT / EACCES / etc — fall through to backup attempt without logging.
+  }
+  if (primaryRead) {
     try {
-      … (14 lines)
+      return JSON.parse(primaryRaw);
+    } catch (parseErr) {
+      // File existed but JSON was unparseable — surface so silent corruption
+      // doesn't accumulate. Callers (incl. safeJsonArr / safeJsonObj wrappers)
+      // rely on this log to satisfy the "typed default + logged parse failure"
+      // contract documented in CLAUDE.md.
+      console.error(`[safeJson] parse failure for ${path.basename(p)}: ${parseErr.message}`);
+    }
+  }
+  // Primary missing or corrupted — try restoring from .backup sidecar.
+  const backupPath = p + '.backup';
+  try {
+    const backupData = JSON.parse(fs.readFileSync(backupPath, 'utf8'));
+    // Backup is valid — restore it to the primary file (atomic via safeWrite)
+    console.log(`[safeJson] restored ${path.basename(p)} from .backup sidecar`);
+    try {
+      safeWrite(p, backupData);
+      // Verify the restored file matches expected content
+      const verifyData = JSON.parse(fs.readFileSync(p, 'utf8'));
+      if (JSON.stringify(verifyData) !== JSON.stringify(backupData)) {
+        console.error(`[safeJson] CRITICAL: backup restore verification failed for ${p} — written data does not match backup`);
      }
-    … (2 lines)
-    //
-    …
-    return null;
+    } catch (restoreErr) {
+      // Restore-to-primary is best-effort — backupData is already parsed and valid.
+      // Don't throw: disk-full / permission errors should not discard valid data.
+      console.error(`[safeJson] restore write failed for ${p}: ${restoreErr.message}`);
     }
+    return backupData;
+  } catch (outerErr) {
+    // Let CRITICAL errors propagate — callers must know about data integrity failures
+    if (outerErr.message && outerErr.message.includes('CRITICAL')) throw outerErr;
+    return null;
   }
 }
 
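The two failure modes side by side (temp paths are illustrative; the commented call shows what the new safeJson would do with them):

```js
const fs = require('fs');
const os = require('os');
const path = require('path');

const p = path.join(os.tmpdir(), 'state.json');
fs.writeFileSync(p, '{ not json');              // present but corrupt: gets logged
fs.writeFileSync(p + '.backup', '{"ok":true}'); // healthy sidecar

// safeJson(p) now logs "[safeJson] parse failure for state.json: …",
// restores the primary from the sidecar, and returns { ok: true };
// a plain-missing file (ENOENT) stays silent and falls through the same way.
```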
@@ -1480,9 +1499,22 @@ function projectRoot(project) {
 
 // All project state files live centrally in .minions/projects/{name}/
 // No state files in project repos — avoids worktree/git interference.
+//
+// projectStateDir is path-only (no fs side effects) — safe to call with stale
+// project references after `removeProject` archived the data dir. Write paths
+// (safeWrite, withFileLock, mutateJsonFileLocked) already mkdir the parent dir
+// at write time, so the dir is created lazily only when something is actually
+// written. Use projectStateDirEnsure() when a caller specifically needs the
+// directory to exist before doing its own fs ops.
 function projectStateDir(project) {
   const name = project.name || path.basename(project.localPath);
-  …
+  return path.join(MINIONS_DIR, 'projects', name);
+}
+
+// Same as projectStateDir() but mkdirs the directory. Use when the caller does
+// raw fs ops (writeFileSync etc.) that don't already create the parent dir.
+function projectStateDirEnsure(project) {
+  const dir = projectStateDir(project);
   if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
   return dir;
 }
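Choosing between the two helpers (the project record here is hypothetical):

```js
const fs = require('fs');
const path = require('path');
const shared = require('./shared');

const project = { name: 'demo', localPath: '/work/demo' };

const dir = shared.projectStateDir(project); // pure path math, no mkdir
shared.projectStateDirEnsure(project);       // mkdir -p first…
fs.writeFileSync(path.join(dir, 'notes.json'), '[]'); // …then raw fs ops are safe
```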
@@ -2841,6 +2873,7 @@ module.exports = {
   getProjects,
   projectRoot,
   projectStateDir,
+  projectStateDirEnsure,
   projectWorkItemsPath,
   projectPrPath,
   resolveProjectForPrPath, // exported for testing
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@yemi33/minions",
-  "version": "0.1.1748",
+  "version": "0.1.1750",
   "description": "Multi-agent AI dev team that runs from ~/.minions/ — five autonomous agents share a single engine, dashboard, and knowledge base",
   "bin": {
     "minions": "bin/minions.js"