@triflux/remote 10.0.0-alpha.2 → 10.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,204 +0,0 @@
1
- // hub/team/swarm-locks.mjs — File-level lease lock manager for swarm execution
2
- // Prevents multiple workers from writing to the same file simultaneously.
3
- // Lock state is kept in-memory (single-process hypervisor) with optional
4
- // JSON persistence to .triflux/swarm-locks.json for crash recovery.
5
-
6
- import { readFileSync, writeFileSync, mkdirSync, existsSync } from 'node:fs';
7
- import { dirname, resolve, relative } from 'node:path';
8
-
9
const LOCK_TTL_MS = 10 * 60_000; // 10 minutes default TTL

/**
 * Swarm lock manager factory.
 *
 * Keeps one lease per normalized file path in memory; an optional JSON
 * file mirrors the map so leases survive a process restart. Leases expire
 * after `ttlMs` so a crashed worker cannot hold a file forever.
 *
 * @param {object} [opts]
 * @param {string} [opts.repoRoot] — repository root for relative path normalization
 * @param {string} [opts.persistPath] — JSON file path for crash recovery persistence
 * @param {number} [opts.ttlMs=600000] — lock TTL in ms (auto-expire stale locks)
 * @returns {SwarmLockManager}
 */
export function createSwarmLocks(opts = {}) {
  const { repoRoot = process.cwd(), persistPath, ttlMs = LOCK_TTL_MS } = opts;

  /** @type {Map<string, LockEntry>} normalized relative path → lock */
  const locks = new Map();

  // ── helpers ──────────────────────────────────────────────────

  // Same file → same key, regardless of how the caller spelled the path
  // or which platform's separators it used.
  const normalizePath = (filePath) =>
    relative(repoRoot, resolve(repoRoot, filePath)).replace(/\\/g, '/');

  const now = () => Date.now();

  const isExpired = (entry) => now() - entry.acquiredAt > ttlMs;

  // Drop every lease older than the TTL.
  function pruneExpired() {
    for (const [path, entry] of locks) {
      if (isExpired(entry)) locks.delete(path);
    }
  }

  // ── persistence ─────────────────────────────────────────────

  // Mirror the in-memory map to disk; failures are ignored on purpose
  // (persistence is a crash-recovery nicety, not a correctness requirement).
  function persist() {
    if (!persistPath) return;
    try {
      mkdirSync(dirname(persistPath), { recursive: true });
      const plain = {};
      for (const [path, entry] of locks) plain[path] = { ...entry };
      writeFileSync(persistPath, JSON.stringify(plain, null, 2), 'utf8');
    } catch { /* best-effort */ }
  }

  // Reload leases written by a previous process, skipping any that are
  // already past the TTL (a malformed acquiredAt yields NaN, fails the
  // comparison, and is skipped as well).
  function restore() {
    if (!persistPath || !existsSync(persistPath)) return;
    try {
      const stored = JSON.parse(readFileSync(persistPath, 'utf8'));
      const ts = now();
      for (const [path, entry] of Object.entries(stored)) {
        if (ts - entry.acquiredAt <= ttlMs) locks.set(path, entry);
      }
    } catch { /* corrupted file — start fresh */ }
  }

  restore(); // recover state on creation

  // ── public API ──────────────────────────────────────────────

  /**
   * Acquire file leases for a worker. All-or-nothing: if any requested
   * file is leased to a different worker, no lease is taken at all.
   * @param {string} workerId — worker/shard identifier
   * @param {string[]} files — file paths to lock
   * @returns {{ ok: boolean, acquired: string[], conflicts: Array<{ file: string, holder: string }> }}
   */
  function acquire(workerId, files) {
    pruneExpired();

    const conflicts = [];
    const toAcquire = [];

    files.forEach((file) => {
      const path = normalizePath(file);
      const held = locks.get(path);
      if (held && held.workerId !== workerId && !isExpired(held)) {
        conflicts.push({ file, holder: held.workerId });
      } else {
        toAcquire.push(path);
      }
    });

    if (conflicts.length > 0) {
      return { ok: false, acquired: [], conflicts };
    }

    const acquiredAt = now();
    for (const path of toAcquire) {
      locks.set(path, { workerId, acquiredAt });
    }

    persist();
    return { ok: true, acquired: toAcquire, conflicts: [] };
  }

  /**
   * Release all locks held by a worker.
   * @param {string} workerId
   * @returns {number} number of locks released
   */
  function release(workerId) {
    let released = 0;
    for (const [path, entry] of locks) {
      if (entry.workerId !== workerId) continue;
      locks.delete(path); // deleting during Map iteration is safe in JS
      released += 1;
    }
    if (released > 0) persist();
    return released;
  }

  /**
   * Check if a file write would violate any lease.
   * @param {string} workerId — the worker attempting the write
   * @param {string} filePath — the file being written
   * @returns {{ allowed: boolean, holder?: string }}
   */
  function check(workerId, filePath) {
    pruneExpired();
    const entry = locks.get(normalizePath(filePath));
    // Allowed when unlocked, expired, or held by the requester itself.
    if (!entry || isExpired(entry) || entry.workerId === workerId) {
      return { allowed: true };
    }
    return { allowed: false, holder: entry.workerId };
  }

  /**
   * Validate a set of changed files against the lease map.
   * Returns all violations found.
   * @param {string} workerId
   * @param {string[]} changedFiles
   * @returns {Array<{ file: string, holder: string }>}
   */
  function validateChanges(workerId, changedFiles) {
    pruneExpired();
    const violations = [];
    for (const file of changedFiles) {
      const entry = locks.get(normalizePath(file));
      if (entry && entry.workerId !== workerId && !isExpired(entry)) {
        violations.push({ file, holder: entry.workerId });
      }
    }
    return violations;
  }

  /**
   * Get snapshot of all active locks.
   * @returns {Array<{ file: string, workerId: string, acquiredAt: number }>}
   */
  function snapshot() {
    pruneExpired();
    const entries = [];
    for (const [file, { workerId, acquiredAt }] of locks) {
      entries.push({ file, workerId, acquiredAt });
    }
    return entries;
  }

  /**
   * Release all locks.
   */
  function releaseAll() {
    locks.clear();
    persist();
  }

  return Object.freeze({
    acquire,
    release,
    check,
    validateChanges,
    snapshot,
    releaseAll,
    get size() { return locks.size; },
  });
}
@@ -1,256 +0,0 @@
1
- // hub/team/swarm-planner.mjs — PRD → Swarm execution plan generator
2
- // Parses a PRD markdown document into shards (units of work), each with:
3
- // - file-lease-map: files the shard is allowed to modify
4
- // - MCP manifest: MCP servers the shard needs
5
- // - mergeOrder: topological order for integrating results
6
- //
7
- // PRD format expected:
8
- // ## Shard: <name>
9
- // - agent: codex|gemini|claude
10
- // - files: path/a.mjs, path/b.mjs
11
- // - mcp: server1, server2
12
- // - depends: shard-name-1, shard-name-2
13
- // - critical: true|false
14
- // - prompt: |
15
- // multi-line prompt text
16
-
17
- import { readFileSync } from 'node:fs';
18
-
19
/** Shard schema defaults */
const SHARD_DEFAULTS = Object.freeze({
  agent: 'codex',
  files: [],
  mcp: [],
  depends: [],
  critical: false,
  prompt: '',
});

/**
 * Parse a PRD markdown into shard definitions.
 *
 * Recognizes "## Shard: <name>" headings; each shard body is a list of
 * "- key: value" fields. A "- prompt: |" field starts a multi-line prompt
 * block that runs until the next field line or "##" heading.
 *
 * @param {string} content — PRD markdown content
 * @returns {Shard[]}
 */
export function parseShards(content) {
  const lines = content.split(/\r?\n/);
  const shards = [];
  let current = null;
  let inPrompt = false;
  let promptLines = [];

  // Matches a "- key: value" field line. FIX: the same pattern (flexible
  // whitespace after the dash) is used to END a prompt block; the old
  // terminator /^- \w+:/ required exactly one space, so a field written
  // "-  files: x" was silently swallowed as prompt text.
  const FIELD_RE = /^-\s+(\w+):\s*(.*)$/;

  // Close an open prompt block, committing its text to the current shard.
  function flushPrompt() {
    if (current && promptLines.length > 0) {
      current.prompt = promptLines.join('\n').trim();
      promptLines = [];
    }
    inPrompt = false;
  }

  // Finalize the current shard (if any), filling unset fields with defaults.
  function flushShard() {
    flushPrompt();
    if (current) {
      shards.push({ ...SHARD_DEFAULTS, ...current });
      current = null;
    }
  }

  for (const line of lines) {
    // New shard header: ## Shard: <name>
    const shardMatch = line.match(/^##\s+Shard:\s*(.+)$/i);
    if (shardMatch) {
      flushShard();
      current = { name: shardMatch[1].trim() };
      continue;
    }

    // Non-shard heading ends current shard (e.g. ## Notes)
    if (/^##\s+/.test(line) && !/^##\s+Shard:/i.test(line)) {
      flushShard();
      continue;
    }

    if (!current) continue;

    // Prompt block continuation
    if (inPrompt) {
      if (FIELD_RE.test(line)) {
        flushPrompt();
        // fall through to field parsing
      } else {
        promptLines.push(line);
        continue;
      }
    }

    // Field parsing: - key: value
    const fieldMatch = line.match(FIELD_RE);
    if (!fieldMatch) continue;

    const [, key, rawValue] = fieldMatch;
    const value = rawValue.trim();

    switch (key.toLowerCase()) {
      case 'agent':
        current.agent = value.toLowerCase();
        break;
      case 'files':
        current.files = value.split(/,\s*/).map((f) => f.trim()).filter(Boolean);
        break;
      case 'mcp':
        current.mcp = value.split(/,\s*/).map((s) => s.trim()).filter(Boolean);
        break;
      case 'depends':
        current.depends = value.split(/,\s*/).map((d) => d.trim()).filter(Boolean);
        break;
      case 'critical':
        current.critical = /^(true|yes|1)$/i.test(value);
        break;
      case 'prompt':
        if (value && !value.startsWith('|')) {
          current.prompt = value; // inline single-line prompt
        } else {
          inPrompt = true; // "|" (or empty value) opens a block prompt
          promptLines = [];
        }
        break;
      default:
        // store unknown fields as-is
        current[key] = value;
    }
  }

  flushShard();
  return shards;
}
125
-
126
/**
 * Build file-lease-map from shards.
 * Maps each shard name to its allowed files.
 * Detects conflicting file assignments across shards.
 * @param {Shard[]} shards
 * @returns {{ leaseMap: Map<string, string[]>, conflicts: Array<{ file: string, shards: string[] }> }}
 */
export function buildFileLeaseMap(shards) {
  const leaseMap = new Map();
  const fileOwners = new Map(); // file → [shard names]

  for (const { name, files } of shards) {
    leaseMap.set(name, [...files]); // copy so callers can't mutate the shard
    for (const file of files) {
      if (!fileOwners.has(file)) fileOwners.set(file, []);
      fileOwners.get(file).push(name);
    }
  }

  // A file claimed by more than one shard is a planning conflict.
  const conflicts = [...fileOwners]
    .filter(([, owners]) => owners.length > 1)
    .map(([file, owners]) => ({ file, shards: owners }));

  return { leaseMap, conflicts };
}
155
-
156
/**
 * Build MCP manifest from shards.
 * Maps each shard name to its required MCP servers.
 * @param {Shard[]} shards
 * @returns {Map<string, string[]>}
 */
export function buildMcpManifest(shards) {
  // Copy each server list so callers cannot mutate the shard definitions.
  return new Map(shards.map(({ name, mcp }) => [name, [...mcp]]));
}
169
-
170
/**
 * Compute topological merge order based on shard dependencies.
 * Dependencies pointing at unknown shard names are ignored.
 * Ties at the same depth are broken alphabetically for determinism.
 * @param {Shard[]} shards
 * @returns {{ order: string[], cycles: string[][] }}
 */
export function computeMergeOrder(shards) {
  const nameSet = new Set(shards.map((s) => s.name));
  const adj = new Map(); // name → [dependents]
  const inDeg = new Map(); // name → number of unresolved dependencies

  for (const shard of shards) {
    adj.set(shard.name, []);
    inDeg.set(shard.name, 0);
  }

  for (const shard of shards) {
    for (const dep of shard.depends) {
      if (!nameSet.has(dep)) continue; // ignore unknown deps
      adj.get(dep).push(shard.name);
      inDeg.set(shard.name, inDeg.get(shard.name) + 1);
    }
  }

  // Kahn's algorithm
  const queue = [];
  for (const [name, deg] of inDeg) {
    if (deg === 0) queue.push(name);
  }

  const order = [];
  const ordered = new Set(); // O(1) membership for cycle detection below
  while (queue.length > 0) {
    // stable sort: alphabetical among same-level nodes
    queue.sort();
    const node = queue.shift();
    order.push(node);
    ordered.add(node);

    for (const next of adj.get(node)) {
      const newDeg = inDeg.get(next) - 1;
      inDeg.set(next, newDeg);
      if (newDeg === 0) queue.push(next);
    }
  }

  // Detect cycles: any node that never reached in-degree 0 sits on (or
  // behind) a cycle. FIX: the Set lookup replaces the previous
  // order.includes() scan, which was accidentally O(n²) across shards.
  const cycles = [];
  if (ordered.size < shards.length) {
    cycles.push(shards.filter((s) => !ordered.has(s.name)).map((s) => s.name));
  }

  return { order, cycles };
}
224
-
225
/**
 * Full planning pipeline: parse PRD → build plan.
 * @param {string} prdPath — path to PRD markdown file
 * @param {object} [opts]
 * @param {string} [opts.content] — PRD content (if provided, prdPath is ignored)
 * @returns {SwarmPlan}
 * @throws {Error} when the PRD contains no shards or the dependency graph has a cycle
 */
export function planSwarm(prdPath, opts = {}) {
  // FIX: `??` instead of `||` — an explicitly-passed empty string is still
  // "content provided" and must fail with the clear "No shards found" error
  // below, not fall through to readFileSync(undefined).
  const content = opts.content ?? readFileSync(prdPath, 'utf8');
  const shards = parseShards(content);

  if (shards.length === 0) {
    throw new Error('No shards found in PRD. Expected "## Shard: <name>" sections.');
  }

  const { leaseMap, conflicts } = buildFileLeaseMap(shards);
  const mcpManifest = buildMcpManifest(shards);
  const { order: mergeOrder, cycles } = computeMergeOrder(shards);

  if (cycles.length > 0) {
    throw new Error(`Dependency cycle detected: ${cycles[0].join(' → ')}`);
  }

  // Freeze the plan (and each shard) so downstream consumers cannot mutate it.
  return Object.freeze({
    shards: Object.freeze(shards.map((s) => Object.freeze({ ...s }))),
    leaseMap,
    mcpManifest,
    mergeOrder,
    conflicts,
    criticalShards: shards.filter((s) => s.critical).map((s) => s.name),
  });
}
@@ -1,137 +0,0 @@
1
- // hub/team/swarm-reconciler.mjs — Redundant execution + result reconciliation
2
- // For critical shards: launches primary + verifier sessions, compares results,
3
- // applies conservative adoption (fewer changes wins) or HITL fallback.
4
-
5
- import { execFile } from 'node:child_process';
6
-
7
/**
 * Compare two shard results and decide which to accept.
 * Strategy: conservative adoption — the result with fewer changed lines wins.
 * If results are identical (same tree hash), primary is accepted.
 * If they diverge significantly, mark for HITL review.
 *
 * @param {object} primaryResult — { branchName, worktreePath, status }
 * @param {object} verifierResult — { branchName, worktreePath, status }
 * @param {object} [opts]
 * @param {string} [opts.rootDir=process.cwd()]
 * @param {number} [opts.maxDivergenceFiles=5] — beyond this, trigger HITL
 * @returns {Promise<ReconcileDecision>}
 */
export async function reconcile(primaryResult, verifierResult, opts = {}) {
  const { rootDir = process.cwd(), maxDivergenceFiles = 5 } = opts;

  // If either failed, pick the one that succeeded
  if (primaryResult.status !== 'completed' && verifierResult.status === 'completed') {
    return decision('verifier', 'primary_failed', verifierResult);
  }
  if (verifierResult.status !== 'completed' && primaryResult.status === 'completed') {
    return decision('primary', 'verifier_failed', primaryResult);
  }
  if (primaryResult.status !== 'completed' && verifierResult.status !== 'completed') {
    return decision('none', 'both_failed', null);
  }

  // Both completed — gather diff stats (independent git calls, run in parallel)
  const [primaryDiff, verifierDiff] = await Promise.all([
    getDiffStat(primaryResult.branchName, rootDir),
    getDiffStat(verifierResult.branchName, rootDir),
  ]);

  // Identical tree hashes → accept primary (no divergence).
  // FIX: require a non-empty hash — getDiffStat returns hash '' when git
  // fails, and two failures ('' === '') must not be mistaken for identical
  // results and silently adopted.
  if (primaryDiff.hash && primaryDiff.hash === verifierDiff.hash) {
    return decision('primary', 'identical', primaryResult);
  }

  // Compute divergence in changed-file counts
  const divergence = Math.abs(primaryDiff.filesChanged - verifierDiff.filesChanged);

  // High divergence → HITL
  if (divergence > maxDivergenceFiles) {
    return {
      selected: 'hitl',
      reason: `divergence_too_high (${divergence} files differ)`,
      result: null,
      requiresManualReview: true,
      primary: { filesChanged: primaryDiff.filesChanged, linesChanged: primaryDiff.linesChanged },
      verifier: { filesChanged: verifierDiff.filesChanged, linesChanged: verifierDiff.linesChanged },
    };
  }

  // Conservative adoption: fewer changed lines wins (ties go to primary)
  if (primaryDiff.linesChanged <= verifierDiff.linesChanged) {
    return decision('primary', 'conservative_adoption', primaryResult);
  }
  return decision('verifier', 'conservative_adoption', verifierResult);
}
64
-
65
/**
 * Shape a ReconcileDecision object.
 * @param {'primary'|'verifier'|'hitl'|'none'} selected
 * @param {string} reason
 * @param {object|null} result — the winning shard result, if any
 * @returns {ReconcileDecision}
 */
function decision(selected, reason, result) {
  // Only the terminal non-winner states need a human to look at them.
  const requiresManualReview = selected === 'hitl' || selected === 'none';
  return { selected, reason, result, requiresManualReview, primary: null, verifier: null };
}
75
-
76
/**
 * Get diff statistics for a branch relative to its first parent commit
 * (`branch~1..branch` — NOT the merge-base, as a previous doc claimed).
 *
 * On any git failure, returns zeroed stats with an EMPTY hash — callers
 * must treat hash '' as "unknown", never as a match.
 *
 * @param {string} branch
 * @param {string} cwd
 * @returns {Promise<{ filesChanged: number, linesChanged: number, hash: string }>}
 */
async function getDiffStat(branch, cwd) {
  try {
    // --numstat alone: machine-readable "<added>\t<deleted>\t<path>" lines.
    // FIX: dropped the redundant --stat flag, whose human-readable lines
    // were never matched by the parser below anyway.
    const stat = await gitExec(['diff', '--numstat', `${branch}~1..${branch}`], cwd);
    let filesChanged = 0;
    let linesChanged = 0;

    for (const line of stat.split('\n')) {
      const match = line.match(/^(\d+)\s+(\d+)\s+/);
      if (match) {
        filesChanged++;
        linesChanged += Number(match[1]) + Number(match[2]);
      } else if (/^-\s+-\s+/.test(line)) {
        // FIX: binary files report "-\t-\t<path>" in numstat output and
        // were previously not counted at all; count the file (no line total).
        filesChanged++;
      }
    }

    // Tree hash identifies the exact content state for identity comparison.
    const hash = await gitExec(['rev-parse', `${branch}^{tree}`], cwd);

    return { filesChanged, linesChanged, hash: hash.trim() };
  } catch {
    // Best-effort: caller distinguishes failure by the empty hash.
    return { filesChanged: 0, linesChanged: 0, hash: '' };
  }
}
106
-
107
/**
 * Run a git subcommand and resolve with its stdout.
 * Rejects on spawn failure, non-zero exit, or after a 15s timeout.
 * @param {string[]} args — git CLI arguments
 * @param {string} cwd — working directory for the command
 * @returns {Promise<string>}
 */
function gitExec(args, cwd) {
  const options = { cwd, windowsHide: true, timeout: 15_000 };
  return new Promise((resolvePromise, rejectPromise) => {
    execFile('git', args, options, (err, stdout) => {
      if (err) {
        rejectPromise(err);
        return;
      }
      resolvePromise(stdout);
    });
  });
}
115
-
116
/**
 * Build session configs for redundant execution (primary + verifier).
 *
 * FIX: shards produced by the planner are keyed by `name` (parseShards
 * never sets an `id`), so fall back to `name` when `id` is absent —
 * previously every planner-built shard produced ids containing the
 * literal string "undefined".
 *
 * @param {object} shard — from SwarmPlan
 * @param {string} runId
 * @returns {{ primaryId: string, verifierId: string }}
 */
export function buildRedundantIds(shard, runId) {
  const shardKey = shard.id ?? shard.name;
  return {
    primaryId: `${runId}-${shardKey}-primary`,
    verifierId: `${runId}-${shardKey}-verifier`,
  };
}
129
-
130
/**
 * Check if a shard should use redundant execution.
 * Only an explicit boolean `true` opts in; truthy strings do not.
 * @param {object} shard
 * @returns {boolean}
 */
export function shouldRunRedundant(shard) {
  const { critical } = shard;
  return critical === true;
}