@chuckssmith/agentloom 0.4.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +80 -37
- package/dist/cli.js +21 -3
- package/dist/commands/crew.js +129 -43
- package/dist/commands/status.js +43 -11
- package/dist/commands/stop.d.ts +1 -0
- package/dist/commands/stop.js +50 -0
- package/dist/commands/watch.d.ts +1 -0
- package/dist/commands/watch.js +69 -0
- package/dist/team/queue.d.ts +1 -0
- package/dist/team/queue.js +101 -23
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
# agentloom
|
|
2
2
|
|
|
3
|
-
A workflow layer for Claude Code
|
|
3
|
+
A workflow layer for Claude Code — persistence loops, parallel crews, and typed agent roles, built natively on what Claude Code already provides.
|
|
4
4
|
|
|
5
5
|
```bash
|
|
6
|
-
npm install -g agentloom
|
|
6
|
+
npm install -g @chuckssmith/agentloom
|
|
7
7
|
loom setup
|
|
8
8
|
```
|
|
9
9
|
|
|
@@ -12,10 +12,11 @@ loom setup
|
|
|
12
12
|
## What this is
|
|
13
13
|
|
|
14
14
|
Claude Code is the execution engine. agentloom adds:
|
|
15
|
+
|
|
15
16
|
- **`$grind`** — persistence loop that keeps working until a task is verified complete
|
|
16
|
-
- **`$crew`** — parallel workers that decompose and execute simultaneously
|
|
17
|
+
- **`$crew`** — parallel workers that decompose and execute simultaneously
|
|
17
18
|
- **`$architect`** — deep analysis mode before major decisions
|
|
18
|
-
- **`loom crew`** — CLI to spawn a crew
|
|
19
|
+
- **`loom crew`** — CLI to spawn and monitor a crew from your terminal
|
|
19
20
|
|
|
20
21
|
It does not replace Claude Code. It wraps it.
|
|
21
22
|
|
|
@@ -24,14 +25,24 @@ It does not replace Claude Code. It wraps it.
|
|
|
24
25
|
## Quick start
|
|
25
26
|
|
|
26
27
|
```bash
|
|
27
|
-
npm install -g agentloom
|
|
28
|
-
loom setup # installs
|
|
28
|
+
npm install -g @chuckssmith/agentloom
|
|
29
|
+
loom setup # installs $grind, $crew, $architect skills + validates deps
|
|
29
30
|
|
|
30
|
-
#
|
|
31
|
+
# Spawn workers from your terminal:
|
|
31
32
|
loom crew "audit every API endpoint for security issues"
|
|
32
33
|
loom crew 2:explore+1:code-reviewer "review the payment flow"
|
|
34
|
+
loom crew --dry-run 3 "migrate the database schema" # preview before launching
|
|
35
|
+
|
|
36
|
+
# Monitor:
|
|
37
|
+
loom watch # live tail all worker logs
|
|
38
|
+
loom status # session overview + stale worker detection
|
|
39
|
+
loom logs w00 # full output for one worker
|
|
40
|
+
|
|
41
|
+
# After workers finish:
|
|
42
|
+
loom collect # synthesize results with Claude
|
|
43
|
+
loom reset --force # clear state for next run
|
|
33
44
|
|
|
34
|
-
# Or
|
|
45
|
+
# Or use inside any Claude Code session:
|
|
35
46
|
# $grind "port the auth module to the new interface"
|
|
36
47
|
# $crew "analyze all three data pipeline stages in parallel"
|
|
37
48
|
```
|
|
@@ -40,49 +51,81 @@ loom crew 2:explore+1:code-reviewer "review the payment flow"
|
|
|
40
51
|
|
|
41
52
|
## Skills
|
|
42
53
|
|
|
43
|
-
Install with `loom setup`.
|
|
54
|
+
Install with `loom setup`. Use inside any Claude Code session:
|
|
44
55
|
|
|
45
|
-
| Skill | What it does |
|
|
46
|
-
|
|
47
|
-
| `$grind` | Persistence loop
|
|
48
|
-
| `$crew` |
|
|
49
|
-
| `$architect` | Deep analysis — maps system, finds real problems, recommends approach |
|
|
56
|
+
| Skill | Trigger | What it does |
|
|
57
|
+
|---|---|---|
|
|
58
|
+
| `$grind` | `$grind "<task>"` | Persistence loop — plans, executes in parallel, verifies. Won't stop until a code-reviewer subagent returns PASS |
|
|
59
|
+
| `$crew` | `$crew "<task>"` | Decomposes task into independent streams, runs workers simultaneously, verifies result |
|
|
60
|
+
| `$architect` | `$architect "<task>"` | Deep analysis — maps the system, finds real problems, recommends approach before you write code |
|
|
50
61
|
|
|
51
62
|
---
|
|
52
63
|
|
|
53
|
-
## CLI
|
|
64
|
+
## CLI reference
|
|
65
|
+
|
|
66
|
+
### Spawning workers
|
|
67
|
+
|
|
68
|
+
```
|
|
69
|
+
loom crew "<task>" 2 general-purpose workers (default)
|
|
70
|
+
loom crew 3 "<task>" 3 workers
|
|
71
|
+
loom crew 2:explore "<task>" 2 explore-type workers
|
|
72
|
+
loom crew 2:explore+1:code-reviewer "<task>" typed crew
|
|
73
|
+
loom crew --dry-run 3 "<task>" preview decomposed subtasks, no launch
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
### Monitoring
|
|
54
77
|
|
|
55
78
|
```
|
|
56
|
-
loom
|
|
57
|
-
loom
|
|
58
|
-
loom
|
|
59
|
-
loom
|
|
60
|
-
loom status Show active session
|
|
61
|
-
loom setup Install skills + validate
|
|
79
|
+
loom watch Live tail all worker logs with color-coded output
|
|
80
|
+
loom status Session overview, task counts, stale worker detection
|
|
81
|
+
loom logs Summary of all workers (status + last line)
|
|
82
|
+
loom logs <workerId> Full log + result for one worker (e.g. loom logs w00)
|
|
62
83
|
```
|
|
63
84
|
|
|
64
|
-
###
|
|
85
|
+
### After workers finish
|
|
65
86
|
|
|
66
|
-
|
|
87
|
+
```
|
|
88
|
+
loom collect Read worker results + synthesize summary with Claude
|
|
89
|
+
loom collect --no-ai Concatenate results without Claude synthesis
|
|
90
|
+
```
|
|
67
91
|
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
92
|
+
### Housekeeping
|
|
93
|
+
|
|
94
|
+
```
|
|
95
|
+
loom setup Install skills to ~/.claude/skills/, validate deps
|
|
96
|
+
loom reset --force Wipe .claude-team/ state
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## Worker types
|
|
102
|
+
|
|
103
|
+
Each type gets a role-specific system prompt that shapes its behavior:
|
|
104
|
+
|
|
105
|
+
| Type | Role | Modifies files? |
|
|
106
|
+
|---|---|---|
|
|
107
|
+
| `explore` | Maps code, documents structure and connections | No |
|
|
108
|
+
| `plan` | Reasons about approach, produces ordered action plan | No |
|
|
109
|
+
| `code-reviewer` | Audits for correctness, security, quality; assigns severity | No |
|
|
110
|
+
| `frontend-developer` | UI, components, styling, client-side logic | Yes |
|
|
111
|
+
| `general-purpose` | Does whatever the subtask requires (default) | Yes |
|
|
75
112
|
|
|
76
113
|
---
|
|
77
114
|
|
|
78
115
|
## State directory
|
|
79
116
|
|
|
117
|
+
Session state lives in `.claude-team/` (gitignored):
|
|
118
|
+
|
|
80
119
|
```
|
|
81
|
-
.
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
120
|
+
.claude-team/
|
|
121
|
+
session.json Active session metadata
|
|
122
|
+
context/ Shared context snapshots (workers read + append)
|
|
123
|
+
tasks/ Task queue — workers claim atomically via file rename
|
|
124
|
+
workers/
|
|
125
|
+
w00.log Live stdout from worker 00
|
|
126
|
+
w00-prompt.md Prompt sent to worker 00
|
|
127
|
+
w00-result.md Result summary written by worker 00 on completion
|
|
128
|
+
summary.md Final synthesis from loom collect
|
|
86
129
|
```
|
|
87
130
|
|
|
88
131
|
---
|
|
@@ -90,8 +133,8 @@ Matches Claude Code's built-in subagent types:
|
|
|
90
133
|
## Requirements
|
|
91
134
|
|
|
92
135
|
- Node.js 20+
|
|
93
|
-
- Claude Code CLI (`claude`)
|
|
94
|
-
- tmux (optional — used
|
|
136
|
+
- Claude Code CLI (`claude`) — authenticated
|
|
137
|
+
- tmux (optional — used on Mac/Linux; falls back to background processes on Windows/WSL)
|
|
95
138
|
|
|
96
139
|
---
|
|
97
140
|
|
package/dist/cli.js
CHANGED
|
@@ -5,6 +5,8 @@ import { status } from './commands/status.js';
|
|
|
5
5
|
import { logs } from './commands/logs.js';
|
|
6
6
|
import { collect } from './commands/collect.js';
|
|
7
7
|
import { reset } from './commands/reset.js';
|
|
8
|
+
import { watch } from './commands/watch.js';
|
|
9
|
+
import { stop } from './commands/stop.js';
|
|
8
10
|
const [, , command, ...args] = process.argv;
|
|
9
11
|
const usage = `
|
|
10
12
|
agentloom (loom) — workflow layer for Claude Code
|
|
@@ -14,13 +16,23 @@ Usage:
|
|
|
14
16
|
loom crew [N] "<task>" Spawn N parallel workers on a task
|
|
15
17
|
loom crew 2:explore "<task>" Spawn typed workers (explore/plan/code-reviewer)
|
|
16
18
|
loom crew --dry-run [N] "<task>" Preview decomposed subtasks without launching
|
|
17
|
-
loom
|
|
19
|
+
loom watch Live tail all worker logs (Ctrl+C to stop)
|
|
20
|
+
loom stop Kill all background workers (SIGTERM)
|
|
21
|
+
loom stop <workerId> Kill one worker
|
|
22
|
+
loom status Show active crew session + stale worker detection
|
|
18
23
|
loom logs Show worker output summary
|
|
19
24
|
loom logs <workerId> Show full log for a specific worker
|
|
20
25
|
loom collect Synthesize worker results into a summary
|
|
21
26
|
loom collect --no-ai Collect results without Claude synthesis
|
|
22
27
|
loom reset --force Clear all session state
|
|
23
28
|
|
|
29
|
+
Agent types (use with crew):
|
|
30
|
+
explore Read-only research and mapping
|
|
31
|
+
plan Architecture and approach planning
|
|
32
|
+
code-reviewer Audit for correctness, security, quality
|
|
33
|
+
frontend-developer UI and component work
|
|
34
|
+
general-purpose Default — does whatever the subtask requires
|
|
35
|
+
|
|
24
36
|
Modes (use $grind or $crew inside a Claude Code session):
|
|
25
37
|
$grind Persistence loop — keeps working until verified complete
|
|
26
38
|
$crew Parallel workers — decompose and execute simultaneously
|
|
@@ -31,8 +43,8 @@ Examples:
|
|
|
31
43
|
loom crew 3 "audit every API endpoint for security issues"
|
|
32
44
|
loom crew 2:explore+1:code-reviewer "review the payment flow"
|
|
33
45
|
loom crew --dry-run 3 "migrate the database schema"
|
|
34
|
-
loom
|
|
35
|
-
loom
|
|
46
|
+
loom watch
|
|
47
|
+
loom collect
|
|
36
48
|
`;
|
|
37
49
|
switch (command) {
|
|
38
50
|
case 'setup':
|
|
@@ -41,6 +53,9 @@ switch (command) {
|
|
|
41
53
|
case 'crew':
|
|
42
54
|
await crew(args);
|
|
43
55
|
break;
|
|
56
|
+
case 'watch':
|
|
57
|
+
await watch(args);
|
|
58
|
+
break;
|
|
44
59
|
case 'status':
|
|
45
60
|
await status();
|
|
46
61
|
break;
|
|
@@ -50,6 +65,9 @@ switch (command) {
|
|
|
50
65
|
case 'collect':
|
|
51
66
|
await collect(args);
|
|
52
67
|
break;
|
|
68
|
+
case 'stop':
|
|
69
|
+
await stop(args);
|
|
70
|
+
break;
|
|
53
71
|
case 'reset':
|
|
54
72
|
await reset(args);
|
|
55
73
|
break;
|
package/dist/commands/crew.js
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { execSync, spawn } from 'child_process';
|
|
1
|
+
import { execSync, spawn, spawnSync } from 'child_process';
|
|
2
2
|
import { writeFile, mkdir, open } from 'fs/promises';
|
|
3
3
|
import { join } from 'path';
|
|
4
4
|
import { parseWorkerSpec, initSession, writeContextSnapshot, decomposeTasks, } from '../team/orchestrator.js';
|
|
@@ -13,6 +13,54 @@ const hasTmux = () => {
|
|
|
13
13
|
}
|
|
14
14
|
};
|
|
15
15
|
const isWSL = () => process.platform === 'linux' && !!process.env.WSL_DISTRO_NAME;
|
|
16
|
+
// Roles that must NOT receive --dangerously-skip-permissions
|
|
17
|
+
const READ_ONLY_ROLES = new Set(['explore', 'plan', 'code-reviewer']);
|
|
18
|
+
const AGENT_ROLE = {
|
|
19
|
+
'explore': `Your role is EXPLORER. You are read-only — do not modify, create, or delete any files.
|
|
20
|
+
- Map out the relevant code, files, and structure
|
|
21
|
+
- Document what exists, how it connects, and what's notable
|
|
22
|
+
- Your output feeds the other workers — be thorough and specific`,
|
|
23
|
+
'plan': `Your role is PLANNER. You are read-only — do not modify, create, or delete any files.
|
|
24
|
+
- Reason about the best approach to the subtask
|
|
25
|
+
- Identify risks, dependencies, and open questions
|
|
26
|
+
- Produce a concrete, ordered action plan other workers can execute`,
|
|
27
|
+
'code-reviewer': `Your role is CODE REVIEWER. You are read-only — do not modify, create, or delete any files.
|
|
28
|
+
- Audit the relevant code for correctness, security, and quality
|
|
29
|
+
- Flag specific lines, patterns, or logic that are problematic
|
|
30
|
+
- Assign severity (critical / high / medium / low) to each finding`,
|
|
31
|
+
'frontend-developer': `Your role is FRONTEND DEVELOPER.
|
|
32
|
+
- Focus on UI, components, styling, and client-side logic
|
|
33
|
+
- Follow existing conventions in the codebase
|
|
34
|
+
- Write clean, accessible code`,
|
|
35
|
+
'general-purpose': `Your role is GENERAL PURPOSE WORKER.
|
|
36
|
+
- Do whatever the subtask requires — research, implementation, or both
|
|
37
|
+
- Use all tools available to you`,
|
|
38
|
+
};
|
|
39
|
+
function buildWorkerPrompt(subtask, contextPath, sessionId, workerId, agentType) {
|
|
40
|
+
const resultFile = join(STATE_DIR, 'workers', `${workerId}-result.md`);
|
|
41
|
+
const roleInstructions = AGENT_ROLE[agentType] ?? AGENT_ROLE['general-purpose'];
|
|
42
|
+
return `You are worker ${workerId} in an agentloom crew session (${sessionId}).
|
|
43
|
+
|
|
44
|
+
${roleInstructions}
|
|
45
|
+
|
|
46
|
+
## Your assigned subtask
|
|
47
|
+
|
|
48
|
+
"${subtask}"
|
|
49
|
+
|
|
50
|
+
## Protocol
|
|
51
|
+
|
|
52
|
+
1. Read the shared context: ${contextPath}
|
|
53
|
+
2. Do the work thoroughly using all tools available to you
|
|
54
|
+
3. Append key findings to the context file so other workers can see them
|
|
55
|
+
4. When done, write a result summary to: ${resultFile}
|
|
56
|
+
Format: brief markdown — what you did, what you found, any blockers
|
|
57
|
+
|
|
58
|
+
## Rules
|
|
59
|
+
- Stay focused on your assigned subtask and role
|
|
60
|
+
- Do not stop until your subtask is complete or you have hit a genuine blocker
|
|
61
|
+
|
|
62
|
+
Begin now.`;
|
|
63
|
+
}
|
|
16
64
|
export async function crew(args) {
|
|
17
65
|
if (args.length === 0) {
|
|
18
66
|
console.error('Usage: loom crew [--dry-run] [N] "<task>"');
|
|
@@ -41,86 +89,124 @@ export async function crew(args) {
|
|
|
41
89
|
console.log('Run without --dry-run to launch workers.');
|
|
42
90
|
return;
|
|
43
91
|
}
|
|
44
|
-
|
|
92
|
+
const useTmux = hasTmux() && !isWSL() && process.stdout.isTTY;
|
|
93
|
+
console.log(`Mode: ${useTmux ? 'tmux' : 'background processes'}\n`);
|
|
45
94
|
const session = await initSession(task, totalWorkers);
|
|
46
95
|
const contextPath = await writeContextSnapshot(slug, task);
|
|
47
96
|
const tasks = await decomposeTasks(task, specs);
|
|
48
97
|
console.log(`Session: ${session.id}`);
|
|
49
98
|
console.log(`Tasks: ${tasks.length} created`);
|
|
50
99
|
console.log(`Context: ${contextPath}\n`);
|
|
51
|
-
if (
|
|
52
|
-
await launchTmux(session.id,
|
|
100
|
+
if (useTmux) {
|
|
101
|
+
await launchTmux(session.id, specs, tasks, contextPath);
|
|
53
102
|
}
|
|
54
103
|
else {
|
|
55
|
-
await launchBackground(session.id, specs, tasks
|
|
104
|
+
await launchBackground(session.id, specs, tasks, contextPath);
|
|
56
105
|
}
|
|
57
106
|
console.log(`\nWorkers launched. Monitor with:`);
|
|
58
107
|
console.log(` loom status`);
|
|
59
|
-
console.log(` loom
|
|
108
|
+
console.log(` loom watch`);
|
|
109
|
+
console.log(` loom stop (kill all workers)`);
|
|
60
110
|
console.log(`State dir: ${STATE_DIR}/`);
|
|
61
111
|
}
|
|
62
|
-
function
|
|
63
|
-
const resultFile = join(STATE_DIR, 'workers', `${workerId}-result.md`);
|
|
64
|
-
return `You are worker ${workerId} in an agentloom crew session (${sessionId}).
|
|
65
|
-
|
|
66
|
-
Your assigned subtask: "${subtask}"
|
|
67
|
-
|
|
68
|
-
## Protocol
|
|
69
|
-
|
|
70
|
-
1. Read the shared context: ${contextPath}
|
|
71
|
-
2. Do the work thoroughly using all tools available to you
|
|
72
|
-
3. When done, write a result summary to: ${resultFile}
|
|
73
|
-
Format: brief markdown — what you did, what you found, any blockers
|
|
74
|
-
|
|
75
|
-
## Rules
|
|
76
|
-
- Focus only on your assigned subtask
|
|
77
|
-
- Write findings to the context file (${contextPath}) so other workers can see them
|
|
78
|
-
- Do not stop until your subtask is complete or you have hit a genuine blocker
|
|
79
|
-
|
|
80
|
-
Begin now.`;
|
|
81
|
-
}
|
|
82
|
-
async function launchBackground(sessionId, specs, subtasks, contextPath) {
|
|
112
|
+
async function launchBackground(sessionId, specs, tasks, contextPath) {
|
|
83
113
|
await mkdir(join(STATE_DIR, 'workers'), { recursive: true });
|
|
84
114
|
let workerIdx = 0;
|
|
85
115
|
for (const spec of specs) {
|
|
86
116
|
for (let i = 0; i < spec.count; i++) {
|
|
87
117
|
const workerId = `w${String(workerIdx).padStart(2, '0')}`;
|
|
88
|
-
const subtask =
|
|
118
|
+
const subtask = tasks[workerIdx]?.description ?? tasks[0]?.description ?? '';
|
|
119
|
+
const agentType = tasks[workerIdx]?.agentType ?? spec.agentType;
|
|
89
120
|
workerIdx++;
|
|
90
|
-
const prompt = buildWorkerPrompt(subtask, contextPath, sessionId, workerId);
|
|
121
|
+
const prompt = buildWorkerPrompt(subtask, contextPath, sessionId, workerId, agentType);
|
|
91
122
|
const logFile = join(STATE_DIR, 'workers', `${workerId}.log`);
|
|
92
|
-
|
|
123
|
+
const pidFile = join(STATE_DIR, 'workers', `${workerId}.pid`);
|
|
93
124
|
await writeFile(join(STATE_DIR, 'workers', `${workerId}-prompt.md`), prompt);
|
|
125
|
+
const claudeArgs = ['--print', '-p', prompt];
|
|
126
|
+
// Only pass --dangerously-skip-permissions to roles that write files
|
|
127
|
+
if (!READ_ONLY_ROLES.has(agentType)) {
|
|
128
|
+
claudeArgs.splice(2, 0, '--dangerously-skip-permissions');
|
|
129
|
+
}
|
|
94
130
|
const log = await open(logFile, 'w');
|
|
95
|
-
const child = spawn('claude',
|
|
131
|
+
const child = spawn('claude', claudeArgs, {
|
|
96
132
|
detached: true,
|
|
97
133
|
stdio: ['ignore', log.fd, log.fd],
|
|
98
134
|
env: { ...process.env, AGENTLOOM_WORKER_ID: workerId, AGENTLOOM_SESSION: sessionId },
|
|
99
135
|
});
|
|
100
|
-
child.on('
|
|
136
|
+
child.on('error', async (err) => {
|
|
137
|
+
await writeFile(join(STATE_DIR, 'workers', `${workerId}-result.md`), `# Launch Error\n\nFailed to start worker: ${err.message}\n`).catch(() => { });
|
|
138
|
+
log.close().catch(() => { });
|
|
139
|
+
});
|
|
140
|
+
child.on('close', () => { log.close().catch(() => { }); });
|
|
141
|
+
if (child.pid != null) {
|
|
142
|
+
await writeFile(pidFile, String(child.pid));
|
|
143
|
+
}
|
|
101
144
|
child.unref();
|
|
102
|
-
console.log(` ✓ Worker ${workerId} (${
|
|
145
|
+
console.log(` ✓ Worker ${workerId} (${agentType})${READ_ONLY_ROLES.has(agentType) ? ' [read-only]' : ''} launched [pid ${child.pid ?? '?'}] → ${logFile}`);
|
|
103
146
|
}
|
|
104
147
|
}
|
|
105
148
|
}
|
|
106
|
-
async function launchTmux(sessionId,
|
|
149
|
+
async function launchTmux(sessionId, specs, tasks, contextPath) {
|
|
107
150
|
const tmuxSession = `loom-${sessionId}`;
|
|
108
|
-
|
|
151
|
+
// Check for session name collision
|
|
152
|
+
const existing = spawnSync('tmux', ['has-session', '-t', tmuxSession], { stdio: 'ignore' });
|
|
153
|
+
if (existing.status === 0) {
|
|
154
|
+
console.error(`tmux session "${tmuxSession}" already exists. Run: tmux kill-session -t ${tmuxSession}`);
|
|
155
|
+
process.exit(1);
|
|
156
|
+
}
|
|
157
|
+
try {
|
|
158
|
+
execSync(`tmux new-session -d -s ${tmuxSession} -x 220 -y 50`);
|
|
159
|
+
}
|
|
160
|
+
catch (err) {
|
|
161
|
+
console.error(`Failed to create tmux session: ${err instanceof Error ? err.message : err}`);
|
|
162
|
+
process.exit(1);
|
|
163
|
+
}
|
|
164
|
+
await mkdir(join(STATE_DIR, 'workers'), { recursive: true });
|
|
109
165
|
let workerIdx = 0;
|
|
110
166
|
for (const spec of specs) {
|
|
111
167
|
for (let i = 0; i < spec.count; i++) {
|
|
112
168
|
const workerId = `w${String(workerIdx).padStart(2, '0')}`;
|
|
113
|
-
const subtask =
|
|
169
|
+
const subtask = tasks[workerIdx]?.description ?? tasks[0]?.description ?? '';
|
|
170
|
+
const agentType = tasks[workerIdx]?.agentType ?? spec.agentType;
|
|
114
171
|
workerIdx++;
|
|
115
|
-
const prompt = buildWorkerPrompt(subtask, contextPath, sessionId, workerId);
|
|
172
|
+
const prompt = buildWorkerPrompt(subtask, contextPath, sessionId, workerId, agentType);
|
|
173
|
+
// Write prompt and a runner script to disk — avoids ALL shell escaping issues
|
|
174
|
+
const scriptFile = join(STATE_DIR, 'workers', `${workerId}-run.sh`);
|
|
175
|
+
const permFlag = READ_ONLY_ROLES.has(agentType) ? '' : '--dangerously-skip-permissions ';
|
|
176
|
+
await writeFile(join(STATE_DIR, 'workers', `${workerId}-prompt.md`), prompt);
|
|
177
|
+
await writeFile(scriptFile, [
|
|
178
|
+
'#!/bin/sh',
|
|
179
|
+
`export AGENTLOOM_WORKER_ID=${workerId}`,
|
|
180
|
+
`export AGENTLOOM_SESSION=${sessionId}`,
|
|
181
|
+
`claude --print ${permFlag}-p "$(cat '${join(STATE_DIR, 'workers', `${workerId}-prompt.md`)}')"`,
|
|
182
|
+
`echo '[worker done]'`,
|
|
183
|
+
`read`,
|
|
184
|
+
].join('\n'));
|
|
116
185
|
if (workerIdx > 1) {
|
|
117
|
-
|
|
118
|
-
|
|
186
|
+
try {
|
|
187
|
+
execSync(`tmux split-window -h -t ${tmuxSession}`);
|
|
188
|
+
execSync(`tmux select-layout -t ${tmuxSession} tiled`);
|
|
189
|
+
}
|
|
190
|
+
catch {
|
|
191
|
+
// Non-fatal — continue with remaining workers even if layout fails
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
try {
|
|
195
|
+
execSync(`tmux send-keys -t ${tmuxSession} "sh '${scriptFile}'" Enter`);
|
|
196
|
+
}
|
|
197
|
+
catch (err) {
|
|
198
|
+
console.error(` ✗ Worker ${workerId}: failed to send tmux keys: ${err instanceof Error ? err.message : err}`);
|
|
199
|
+
continue;
|
|
119
200
|
}
|
|
120
|
-
|
|
121
|
-
execSync(`tmux send-keys -t ${tmuxSession} "${cmd}" Enter`);
|
|
122
|
-
console.log(` ✓ Worker ${workerId} (${spec.agentType}) launched in tmux pane`);
|
|
201
|
+
console.log(` ✓ Worker ${workerId} (${agentType})${READ_ONLY_ROLES.has(agentType) ? ' [read-only]' : ''} launched in tmux pane`);
|
|
123
202
|
}
|
|
124
203
|
}
|
|
125
|
-
|
|
204
|
+
// Attach only in interactive terminals
|
|
205
|
+
if (process.stdout.isTTY) {
|
|
206
|
+
spawnSync('tmux', ['attach-session', '-t', tmuxSession], { stdio: 'inherit' });
|
|
207
|
+
}
|
|
208
|
+
else {
|
|
209
|
+
console.log(`\nTmux session: ${tmuxSession}`);
|
|
210
|
+
console.log(`Attach with: tmux attach-session -t ${tmuxSession}`);
|
|
211
|
+
}
|
|
126
212
|
}
|
package/dist/commands/status.js
CHANGED
|
@@ -1,6 +1,8 @@
|
|
|
1
|
-
import { readSession, readTasks,
|
|
2
|
-
import { existsSync } from 'fs';
|
|
3
|
-
import {
|
|
1
|
+
import { readSession, readTasks, STATE_DIR } from '../state/session.js';
|
|
2
|
+
import { existsSync, statSync } from 'fs';
|
|
3
|
+
import { join } from 'path';
|
|
4
|
+
import { readdir } from 'fs/promises';
|
|
5
|
+
const STALE_THRESHOLD_MS = 10 * 60 * 1000; // 10 minutes with no log growth = stale
|
|
4
6
|
export async function status() {
|
|
5
7
|
if (!existsSync(STATE_DIR)) {
|
|
6
8
|
console.log('No active session. Run: loom crew "<task>"');
|
|
@@ -12,7 +14,6 @@ export async function status() {
|
|
|
12
14
|
return;
|
|
13
15
|
}
|
|
14
16
|
const tasks = await readTasks();
|
|
15
|
-
const workers = await readWorkers();
|
|
16
17
|
const pending = tasks.filter(t => t.status === 'pending').length;
|
|
17
18
|
const claimed = tasks.filter(t => t.status === 'claimed').length;
|
|
18
19
|
const done = tasks.filter(t => t.status === 'done').length;
|
|
@@ -22,13 +23,44 @@ export async function status() {
|
|
|
22
23
|
console.log(`Task: ${session.description}`);
|
|
23
24
|
console.log(`Started: ${session.createdAt}`);
|
|
24
25
|
console.log(`\nTasks: ${pending} pending ${claimed} active ${done} done ${failed} failed`);
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
26
|
+
// Worker status from log files
|
|
27
|
+
const workersDir = join(STATE_DIR, 'workers');
|
|
28
|
+
if (!existsSync(workersDir))
|
|
29
|
+
return;
|
|
30
|
+
const files = await readdir(workersDir);
|
|
31
|
+
const logFiles = files.filter(f => f.endsWith('.log')).sort();
|
|
32
|
+
if (logFiles.length === 0)
|
|
33
|
+
return;
|
|
34
|
+
console.log(`\nWorkers: ${logFiles.length}`);
|
|
35
|
+
const now = Date.now();
|
|
36
|
+
for (const logFile of logFiles) {
|
|
37
|
+
const workerId = logFile.replace('.log', '');
|
|
38
|
+
const logPath = join(workersDir, logFile);
|
|
39
|
+
const resultPath = join(workersDir, `${workerId}-result.md`);
|
|
40
|
+
const hasResult = existsSync(resultPath);
|
|
41
|
+
if (hasResult) {
|
|
42
|
+
console.log(` [${workerId}] done ✓`);
|
|
43
|
+
continue;
|
|
44
|
+
}
|
|
45
|
+
// Check if log is growing (worker is alive) or stale
|
|
46
|
+
const logStat = statSync(logPath);
|
|
47
|
+
const msSinceWrite = now - logStat.mtimeMs;
|
|
48
|
+
const isStale = msSinceWrite > STALE_THRESHOLD_MS;
|
|
49
|
+
const logSize = logStat.size;
|
|
50
|
+
if (logSize === 0) {
|
|
51
|
+
console.log(` [${workerId}] starting...`);
|
|
32
52
|
}
|
|
53
|
+
else if (isStale) {
|
|
54
|
+
const mins = Math.round(msSinceWrite / 60000);
|
|
55
|
+
console.log(` [${workerId}] STALE — no activity for ${mins}m (log: ${logPath})`);
|
|
56
|
+
}
|
|
57
|
+
else {
|
|
58
|
+
const secs = Math.round(msSinceWrite / 1000);
|
|
59
|
+
console.log(` [${workerId}] running (last activity ${secs}s ago)`);
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
const allDone = logFiles.every(f => existsSync(join(workersDir, f.replace('.log', '-result.md'))));
|
|
63
|
+
if (allDone && logFiles.length > 0) {
|
|
64
|
+
console.log(`\nAll workers done. Run: loom collect`);
|
|
33
65
|
}
|
|
34
66
|
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export declare function stop(args: string[]): Promise<void>;
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import { readFile, readdir } from 'fs/promises';
|
|
2
|
+
import { join } from 'path';
|
|
3
|
+
import { existsSync } from 'fs';
|
|
4
|
+
import { STATE_DIR } from '../state/session.js';
|
|
5
|
+
const WORKERS_DIR = join(STATE_DIR, 'workers');
|
|
6
|
+
export async function stop(args) {
|
|
7
|
+
if (!existsSync(WORKERS_DIR)) {
|
|
8
|
+
console.log('No active session.');
|
|
9
|
+
return;
|
|
10
|
+
}
|
|
11
|
+
const targetId = args[0]; // optional: stop a single worker
|
|
12
|
+
const files = await readdir(WORKERS_DIR);
|
|
13
|
+
const pidFiles = files
|
|
14
|
+
.filter(f => f.endsWith('.pid'))
|
|
15
|
+
.filter(f => !targetId || f === `${targetId}.pid`)
|
|
16
|
+
.sort();
|
|
17
|
+
if (pidFiles.length === 0) {
|
|
18
|
+
console.log(targetId ? `No PID file found for ${targetId}.` : 'No worker PID files found.');
|
|
19
|
+
return;
|
|
20
|
+
}
|
|
21
|
+
let killed = 0;
|
|
22
|
+
let notFound = 0;
|
|
23
|
+
for (const pidFile of pidFiles) {
|
|
24
|
+
const workerId = pidFile.replace('.pid', '');
|
|
25
|
+
const pidPath = join(WORKERS_DIR, pidFile);
|
|
26
|
+
const pid = parseInt(await readFile(pidPath, 'utf8').catch(() => ''), 10);
|
|
27
|
+
if (!pid || isNaN(pid)) {
|
|
28
|
+
console.log(` [${workerId}] no valid PID`);
|
|
29
|
+
continue;
|
|
30
|
+
}
|
|
31
|
+
try {
|
|
32
|
+
process.kill(pid, 'SIGTERM');
|
|
33
|
+
killed++;
|
|
34
|
+
console.log(` [${workerId}] killed (pid ${pid})`);
|
|
35
|
+
}
|
|
36
|
+
catch (err) {
|
|
37
|
+
if (err instanceof Error && 'code' in err && err.code === 'ESRCH') {
|
|
38
|
+
notFound++;
|
|
39
|
+
console.log(` [${workerId}] not running (pid ${pid} not found)`);
|
|
40
|
+
}
|
|
41
|
+
else {
|
|
42
|
+
console.log(` [${workerId}] error: ${err instanceof Error ? err.message : err}`);
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
}
|
|
46
|
+
console.log(`\n${killed} killed, ${notFound} already stopped.`);
|
|
47
|
+
if (killed > 0) {
|
|
48
|
+
console.log('State preserved. Run: loom reset --force to clear it.');
|
|
49
|
+
}
|
|
50
|
+
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export declare function watch(_args: string[]): Promise<void>;
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import { readdir, stat, readFile } from 'fs/promises';
|
|
2
|
+
import { join } from 'path';
|
|
3
|
+
import { existsSync } from 'fs';
|
|
4
|
+
import { STATE_DIR } from '../state/session.js';
|
|
5
|
+
const WORKERS_DIR = join(STATE_DIR, 'workers');
|
|
6
|
+
const POLL_MS = 800;
|
|
7
|
+
// A rotating set of ANSI colors for worker prefixes
|
|
8
|
+
const COLORS = ['\x1b[36m', '\x1b[33m', '\x1b[35m', '\x1b[32m', '\x1b[34m', '\x1b[31m'];
|
|
9
|
+
const RESET = '\x1b[0m';
|
|
10
|
+
const DIM = '\x1b[2m';
|
|
11
|
+
export async function watch(_args) {
|
|
12
|
+
if (!existsSync(WORKERS_DIR)) {
|
|
13
|
+
console.log('No active session. Run: loom crew "<task>"');
|
|
14
|
+
process.exit(1);
|
|
15
|
+
}
|
|
16
|
+
console.log(`${DIM}Watching worker logs. Ctrl+C to stop.${RESET}\n`);
|
|
17
|
+
// Track how many bytes we've read from each log file
|
|
18
|
+
const offsets = {};
|
|
19
|
+
const seen = new Set();
|
|
20
|
+
// eslint-disable-next-line no-constant-condition
|
|
21
|
+
while (true) {
|
|
22
|
+
if (!existsSync(WORKERS_DIR))
|
|
23
|
+
break;
|
|
24
|
+
const files = await readdir(WORKERS_DIR);
|
|
25
|
+
const logFiles = files.filter(f => f.endsWith('.log')).sort();
|
|
26
|
+
for (const logFile of logFiles) {
|
|
27
|
+
const workerId = logFile.replace('.log', '');
|
|
28
|
+
const color = COLORS[parseInt(workerId.replace('w', ''), 10) % COLORS.length] ?? COLORS[0];
|
|
29
|
+
const filePath = join(WORKERS_DIR, logFile);
|
|
30
|
+
if (!seen.has(workerId)) {
|
|
31
|
+
seen.add(workerId);
|
|
32
|
+
const resultExists = existsSync(join(WORKERS_DIR, `${workerId}-result.md`));
|
|
33
|
+
console.log(`${color}[${workerId}]${RESET} ${DIM}started${resultExists ? ' (already done)' : ''}${RESET}`);
|
|
34
|
+
}
|
|
35
|
+
const currentSize = (await stat(filePath)).size;
|
|
36
|
+
const offset = offsets[workerId] ?? 0;
|
|
37
|
+
if (currentSize > offset) {
|
|
38
|
+
const buf = await readFile(filePath);
|
|
39
|
+
const newContent = buf.slice(offset).toString('utf8');
|
|
40
|
+
offsets[workerId] = currentSize;
|
|
41
|
+
const lines = newContent.split('\n');
|
|
42
|
+
for (const line of lines) {
|
|
43
|
+
if (line.trim()) {
|
|
44
|
+
process.stdout.write(`${color}[${workerId}]${RESET} ${line}\n`);
|
|
45
|
+
}
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
// Check if worker just finished (result file appeared)
|
|
49
|
+
const resultPath = join(WORKERS_DIR, `${workerId}-result.md`);
|
|
50
|
+
const doneKey = `${workerId}-done`;
|
|
51
|
+
if (existsSync(resultPath) && !seen.has(doneKey)) {
|
|
52
|
+
seen.add(doneKey);
|
|
53
|
+
console.log(`${color}[${workerId}]${RESET} ${DIM}✓ result written${RESET}`);
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
// Exit when all known workers have results
|
|
57
|
+
if (logFiles.length > 0) {
|
|
58
|
+
const allDone = logFiles.every(f => {
|
|
59
|
+
const id = f.replace('.log', '');
|
|
60
|
+
return existsSync(join(WORKERS_DIR, `${id}-result.md`));
|
|
61
|
+
});
|
|
62
|
+
if (allDone) {
|
|
63
|
+
console.log(`\n${DIM}All workers done. Run: loom collect${RESET}`);
|
|
64
|
+
break;
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
await new Promise(resolve => setTimeout(resolve, POLL_MS));
|
|
68
|
+
}
|
|
69
|
+
}
|
package/dist/team/queue.d.ts
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { type Task } from '../state/session.js';
|
|
2
|
+
export declare function recoverStaleClaims(): Promise<number>;
|
|
2
3
|
export declare function claimTask(workerId: string): Promise<Task | null>;
|
|
3
4
|
export declare function completeTask(task: Task, result: string): Promise<void>;
|
|
4
5
|
export declare function failTask(task: Task, error: string): Promise<void>;
|
package/dist/team/queue.js
CHANGED
|
@@ -1,62 +1,140 @@
|
|
|
1
1
|
import { readdir, readFile, rename, writeFile } from 'fs/promises';
|
|
2
2
|
import { join } from 'path';
|
|
3
|
-
import { existsSync } from 'fs';
|
|
4
3
|
import { STATE_DIR } from '../state/session.js';
|
|
5
4
|
const TASKS_DIR = join(STATE_DIR, 'tasks');
|
|
5
|
+
const CLAIM_TTL_MS = 30 * 60 * 1000; // 30 minutes — claimed tasks older than this are re-queued
|
|
6
|
+
// Recover tasks whose worker crashed before completing.
// Finds -claimed- files older than CLAIM_TTL_MS and renames them back to -pending.json.
// Returns the number of tasks re-queued.
export async function recoverStaleClaims() {
    let recovered = 0;
    let files;
    try {
        files = await readdir(TASKS_DIR);
    }
    catch {
        // Queue directory doesn't exist yet — nothing to recover.
        return 0;
    }
    const now = Date.now();
    const claimed = files.filter(f => f.includes('-claimed-'));
    for (const file of claimed) {
        const filePath = join(TASKS_DIR, file);
        try {
            const { mtimeMs } = await stat(filePath);
            if (now - mtimeMs < CLAIM_TTL_MS)
                continue; // claim is still fresh — leave it alone
            // Parse task id from filename: {id}-claimed-{workerId}.json
            const taskId = file.split('-claimed-')[0];
            if (!taskId)
                continue;
            const pendingPath = join(TASKS_DIR, `${taskId}-pending.json`);
            // Reset claim metadata so the task re-enters the pool cleanly.
            const task = JSON.parse(await readFile(filePath, 'utf8'));
            task.status = 'pending';
            delete task.workerId;
            delete task.claimedAt;
            // BUG FIX: rename FIRST, then write the reset payload. The previous
            // order (write pending, then rename the claimed file over it) let the
            // rename clobber the freshly reset file with the stale claimed
            // contents, so the -pending.json file still said status 'claimed'
            // and kept the dead worker's id.
            await rename(filePath, pendingPath);
            await writeFile(pendingPath, JSON.stringify(task, null, 2));
            recovered++;
        }
        catch {
            // Skip files we can't read/stat — don't crash the recovery pass
        }
    }
    return recovered;
}
|
|
6
48
|
export async function claimTask(workerId) {
    // Self-heal: recover any stale claimed tasks before scanning for pending ones
    await recoverStaleClaims();
    let entries;
    try {
        entries = await readdir(TASKS_DIR);
    }
    catch {
        return null;
    }
    const candidates = entries.filter(name => name.endsWith('-pending.json'));
    for (const candidate of candidates) {
        const pendingPath = join(TASKS_DIR, candidate);
        const claimedName = candidate.replace('-pending.json', `-claimed-${workerId}.json`);
        const claimedPath = join(TASKS_DIR, claimedName);
        try {
            // Build the claimed payload BEFORE the rename. If the metadata write
            // fails after a successful rename, we can roll back so the task
            // re-enters the pending pool instead of sitting claimed with stale data.
            const task = JSON.parse(await readFile(pendingPath, 'utf8'));
            task.status = 'claimed';
            task.workerId = workerId;
            task.claimedAt = new Date().toISOString();
            const payload = JSON.stringify(task, null, 2);
            // The rename IS the claim — first worker to rename wins.
            await rename(pendingPath, claimedPath);
            try {
                await writeFile(claimedPath, payload);
            }
            catch (writeErr) {
                // Claim succeeded but the write failed — undo the rename so the
                // task isn't orphaned under a claimed name with old contents.
                await rename(claimedPath, pendingPath).catch(() => { });
                throw writeErr;
            }
            return task;
        }
        catch {
            // Lost the race to another worker (ENOENT/EPERM), or rolled back —
            // move on to the next candidate.
            continue;
        }
    }
    return null;
}
|
|
32
92
|
export async function completeTask(task, result) {
    // Record the outcome on the task object, then persist it: write the full
    // payload into the claimed file and atomically rename it to its -done name,
    // so observers never see a half-written done file.
    task.status = 'done';
    task.result = result;
    task.completedAt = new Date().toISOString();
    const owner = task.workerId ?? 'unknown';
    const claimedPath = join(TASKS_DIR, `${task.id}-claimed-${owner}.json`);
    const donePath = join(TASKS_DIR, `${task.id}-done-${owner}.json`);
    const payload = JSON.stringify(task, null, 2);
    try {
        await writeFile(claimedPath, payload);
        await rename(claimedPath, donePath);
    }
    catch {
        // If the claimed file is already gone (double-complete), write directly to done path
        await writeFile(donePath, payload).catch(() => { });
    }
}
|
|
41
108
|
export async function failTask(task, error) {
    // Mark the task failed and persist it the same way completeTask does:
    // write into the claimed file, then atomically rename to the -failed name.
    task.status = 'failed';
    task.error = error;
    task.completedAt = new Date().toISOString();
    const owner = task.workerId ?? 'unknown';
    const claimedPath = join(TASKS_DIR, `${task.id}-claimed-${owner}.json`);
    const failedPath = join(TASKS_DIR, `${task.id}-failed-${owner}.json`);
    const payload = JSON.stringify(task, null, 2);
    try {
        await writeFile(claimedPath, payload);
        await rename(claimedPath, failedPath);
    }
    catch {
        // Claimed file unavailable (e.g. double-fail) — best-effort direct write.
        await writeFile(failedPath, payload).catch(() => { });
    }
}
|
|
50
123
|
export async function pendingCount() {
    // Number of tasks still waiting to be claimed. A missing queue directory
    // simply means there is no pending work.
    try {
        const entries = await readdir(TASKS_DIR);
        let waiting = 0;
        for (const name of entries) {
            if (name.endsWith('-pending.json'))
                waiting++;
        }
        return waiting;
    }
    catch {
        return 0;
    }
}
|
|
56
132
|
export async function allDone() {
    // True once no task file is pending or claimed. A missing queue directory
    // counts as done (there was never any work, or it was cleaned up).
    try {
        const entries = await readdir(TASKS_DIR);
        const active = entries.filter(name => name.endsWith('-pending.json') || name.includes('-claimed-'));
        return active.length === 0;
    }
    catch {
        return true;
    }
}
|