@chuckssmith/agentloom 0.5.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.js +6 -0
- package/dist/commands/crew.js +76 -19
- package/dist/commands/stop.d.ts +1 -0
- package/dist/commands/stop.js +50 -0
- package/dist/team/queue.d.ts +1 -0
- package/dist/team/queue.js +101 -23
- package/package.json +1 -1
package/dist/cli.js
CHANGED
|
@@ -6,6 +6,7 @@ import { logs } from './commands/logs.js';
|
|
|
6
6
|
import { collect } from './commands/collect.js';
|
|
7
7
|
import { reset } from './commands/reset.js';
|
|
8
8
|
import { watch } from './commands/watch.js';
|
|
9
|
+
import { stop } from './commands/stop.js';
|
|
9
10
|
const [, , command, ...args] = process.argv;
|
|
10
11
|
const usage = `
|
|
11
12
|
agentloom (loom) — workflow layer for Claude Code
|
|
@@ -16,6 +17,8 @@ Usage:
|
|
|
16
17
|
loom crew 2:explore "<task>" Spawn typed workers (explore/plan/code-reviewer)
|
|
17
18
|
loom crew --dry-run [N] "<task>" Preview decomposed subtasks without launching
|
|
18
19
|
loom watch Live tail all worker logs (Ctrl+C to stop)
|
|
20
|
+
loom stop Kill all background workers (SIGTERM)
|
|
21
|
+
loom stop <workerId> Kill one worker
|
|
19
22
|
loom status Show active crew session + stale worker detection
|
|
20
23
|
loom logs Show worker output summary
|
|
21
24
|
loom logs <workerId> Show full log for a specific worker
|
|
@@ -62,6 +65,9 @@ switch (command) {
|
|
|
62
65
|
case 'collect':
|
|
63
66
|
await collect(args);
|
|
64
67
|
break;
|
|
68
|
+
case 'stop':
|
|
69
|
+
await stop(args);
|
|
70
|
+
break;
|
|
65
71
|
case 'reset':
|
|
66
72
|
await reset(args);
|
|
67
73
|
break;
|
package/dist/commands/crew.js
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { execSync, spawn } from 'child_process';
|
|
1
|
+
import { execSync, spawn, spawnSync } from 'child_process';
|
|
2
2
|
import { writeFile, mkdir, open } from 'fs/promises';
|
|
3
3
|
import { join } from 'path';
|
|
4
4
|
import { parseWorkerSpec, initSession, writeContextSnapshot, decomposeTasks, } from '../team/orchestrator.js';
|
|
@@ -13,17 +13,18 @@ const hasTmux = () => {
|
|
|
13
13
|
}
|
|
14
14
|
};
|
|
15
15
|
const isWSL = () => process.platform === 'linux' && !!process.env.WSL_DISTRO_NAME;
|
|
16
|
-
//
|
|
16
|
+
// Roles that must NOT receive --dangerously-skip-permissions
|
|
17
|
+
const READ_ONLY_ROLES = new Set(['explore', 'plan', 'code-reviewer']);
|
|
17
18
|
const AGENT_ROLE = {
|
|
18
|
-
'explore': `Your role is EXPLORER. You are read-only
|
|
19
|
+
'explore': `Your role is EXPLORER. You are read-only — do not modify, create, or delete any files.
|
|
19
20
|
- Map out the relevant code, files, and structure
|
|
20
21
|
- Document what exists, how it connects, and what's notable
|
|
21
22
|
- Your output feeds the other workers — be thorough and specific`,
|
|
22
|
-
'plan': `Your role is PLANNER. You are read-only
|
|
23
|
+
'plan': `Your role is PLANNER. You are read-only — do not modify, create, or delete any files.
|
|
23
24
|
- Reason about the best approach to the subtask
|
|
24
25
|
- Identify risks, dependencies, and open questions
|
|
25
26
|
- Produce a concrete, ordered action plan other workers can execute`,
|
|
26
|
-
'code-reviewer': `Your role is CODE REVIEWER. You are read-only
|
|
27
|
+
'code-reviewer': `Your role is CODE REVIEWER. You are read-only — do not modify, create, or delete any files.
|
|
27
28
|
- Audit the relevant code for correctness, security, and quality
|
|
28
29
|
- Flag specific lines, patterns, or logic that are problematic
|
|
29
30
|
- Assign severity (critical / high / medium / low) to each finding`,
|
|
@@ -88,14 +89,15 @@ export async function crew(args) {
|
|
|
88
89
|
console.log('Run without --dry-run to launch workers.');
|
|
89
90
|
return;
|
|
90
91
|
}
|
|
91
|
-
|
|
92
|
+
const useTmux = hasTmux() && !isWSL() && process.stdout.isTTY;
|
|
93
|
+
console.log(`Mode: ${useTmux ? 'tmux' : 'background processes'}\n`);
|
|
92
94
|
const session = await initSession(task, totalWorkers);
|
|
93
95
|
const contextPath = await writeContextSnapshot(slug, task);
|
|
94
96
|
const tasks = await decomposeTasks(task, specs);
|
|
95
97
|
console.log(`Session: ${session.id}`);
|
|
96
98
|
console.log(`Tasks: ${tasks.length} created`);
|
|
97
99
|
console.log(`Context: ${contextPath}\n`);
|
|
98
|
-
if (
|
|
100
|
+
if (useTmux) {
|
|
99
101
|
await launchTmux(session.id, specs, tasks, contextPath);
|
|
100
102
|
}
|
|
101
103
|
else {
|
|
@@ -103,8 +105,8 @@ export async function crew(args) {
|
|
|
103
105
|
}
|
|
104
106
|
console.log(`\nWorkers launched. Monitor with:`);
|
|
105
107
|
console.log(` loom status`);
|
|
106
|
-
console.log(` loom
|
|
107
|
-
console.log(` loom
|
|
108
|
+
console.log(` loom watch`);
|
|
109
|
+
console.log(` loom stop (kill all workers)`);
|
|
108
110
|
console.log(`State dir: ${STATE_DIR}/`);
|
|
109
111
|
}
|
|
110
112
|
async function launchBackground(sessionId, specs, tasks, contextPath) {
|
|
@@ -118,22 +120,48 @@ async function launchBackground(sessionId, specs, tasks, contextPath) {
|
|
|
118
120
|
workerIdx++;
|
|
119
121
|
const prompt = buildWorkerPrompt(subtask, contextPath, sessionId, workerId, agentType);
|
|
120
122
|
const logFile = join(STATE_DIR, 'workers', `${workerId}.log`);
|
|
123
|
+
const pidFile = join(STATE_DIR, 'workers', `${workerId}.pid`);
|
|
121
124
|
await writeFile(join(STATE_DIR, 'workers', `${workerId}-prompt.md`), prompt);
|
|
125
|
+
const claudeArgs = ['--print', '-p', prompt];
|
|
126
|
+
// Only pass --dangerously-skip-permissions to roles that write files
|
|
127
|
+
if (!READ_ONLY_ROLES.has(agentType)) {
|
|
128
|
+
claudeArgs.splice(2, 0, '--dangerously-skip-permissions');
|
|
129
|
+
}
|
|
122
130
|
const log = await open(logFile, 'w');
|
|
123
|
-
const child = spawn('claude',
|
|
131
|
+
const child = spawn('claude', claudeArgs, {
|
|
124
132
|
detached: true,
|
|
125
133
|
stdio: ['ignore', log.fd, log.fd],
|
|
126
134
|
env: { ...process.env, AGENTLOOM_WORKER_ID: workerId, AGENTLOOM_SESSION: sessionId },
|
|
127
135
|
});
|
|
128
|
-
child.on('
|
|
136
|
+
child.on('error', async (err) => {
|
|
137
|
+
await writeFile(join(STATE_DIR, 'workers', `${workerId}-result.md`), `# Launch Error\n\nFailed to start worker: ${err.message}\n`).catch(() => { });
|
|
138
|
+
log.close().catch(() => { });
|
|
139
|
+
});
|
|
140
|
+
child.on('close', () => { log.close().catch(() => { }); });
|
|
141
|
+
if (child.pid != null) {
|
|
142
|
+
await writeFile(pidFile, String(child.pid));
|
|
143
|
+
}
|
|
129
144
|
child.unref();
|
|
130
|
-
console.log(` ✓ Worker ${workerId} (${agentType}) launched [pid ${child.pid}] → ${logFile}`);
|
|
145
|
+
console.log(` ✓ Worker ${workerId} (${agentType})${READ_ONLY_ROLES.has(agentType) ? ' [read-only]' : ''} launched [pid ${child.pid ?? '?'}] → ${logFile}`);
|
|
131
146
|
}
|
|
132
147
|
}
|
|
133
148
|
}
|
|
134
149
|
async function launchTmux(sessionId, specs, tasks, contextPath) {
|
|
135
150
|
const tmuxSession = `loom-${sessionId}`;
|
|
136
|
-
|
|
151
|
+
// Check for session name collision
|
|
152
|
+
const existing = spawnSync('tmux', ['has-session', '-t', tmuxSession], { stdio: 'ignore' });
|
|
153
|
+
if (existing.status === 0) {
|
|
154
|
+
console.error(`tmux session "${tmuxSession}" already exists. Run: tmux kill-session -t ${tmuxSession}`);
|
|
155
|
+
process.exit(1);
|
|
156
|
+
}
|
|
157
|
+
try {
|
|
158
|
+
execSync(`tmux new-session -d -s ${tmuxSession} -x 220 -y 50`);
|
|
159
|
+
}
|
|
160
|
+
catch (err) {
|
|
161
|
+
console.error(`Failed to create tmux session: ${err instanceof Error ? err.message : err}`);
|
|
162
|
+
process.exit(1);
|
|
163
|
+
}
|
|
164
|
+
await mkdir(join(STATE_DIR, 'workers'), { recursive: true });
|
|
137
165
|
let workerIdx = 0;
|
|
138
166
|
for (const spec of specs) {
|
|
139
167
|
for (let i = 0; i < spec.count; i++) {
|
|
@@ -142,14 +170,43 @@ async function launchTmux(sessionId, specs, tasks, contextPath) {
|
|
|
142
170
|
const agentType = tasks[workerIdx]?.agentType ?? spec.agentType;
|
|
143
171
|
workerIdx++;
|
|
144
172
|
const prompt = buildWorkerPrompt(subtask, contextPath, sessionId, workerId, agentType);
|
|
173
|
+
// Write prompt and a runner script to disk — avoids ALL shell escaping issues
|
|
174
|
+
const scriptFile = join(STATE_DIR, 'workers', `${workerId}-run.sh`);
|
|
175
|
+
const permFlag = READ_ONLY_ROLES.has(agentType) ? '' : '--dangerously-skip-permissions ';
|
|
176
|
+
await writeFile(join(STATE_DIR, 'workers', `${workerId}-prompt.md`), prompt);
|
|
177
|
+
await writeFile(scriptFile, [
|
|
178
|
+
'#!/bin/sh',
|
|
179
|
+
`export AGENTLOOM_WORKER_ID=${workerId}`,
|
|
180
|
+
`export AGENTLOOM_SESSION=${sessionId}`,
|
|
181
|
+
`claude --print ${permFlag}-p "$(cat '${join(STATE_DIR, 'workers', `${workerId}-prompt.md`)}')"`,
|
|
182
|
+
`echo '[worker done]'`,
|
|
183
|
+
`read`,
|
|
184
|
+
].join('\n'));
|
|
145
185
|
if (workerIdx > 1) {
|
|
146
|
-
|
|
147
|
-
|
|
186
|
+
try {
|
|
187
|
+
execSync(`tmux split-window -h -t ${tmuxSession}`);
|
|
188
|
+
execSync(`tmux select-layout -t ${tmuxSession} tiled`);
|
|
189
|
+
}
|
|
190
|
+
catch {
|
|
191
|
+
// Non-fatal — continue with remaining workers even if layout fails
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
try {
|
|
195
|
+
execSync(`tmux send-keys -t ${tmuxSession} "sh '${scriptFile}'" Enter`);
|
|
148
196
|
}
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
197
|
+
catch (err) {
|
|
198
|
+
console.error(` ✗ Worker ${workerId}: failed to send tmux keys: ${err instanceof Error ? err.message : err}`);
|
|
199
|
+
continue;
|
|
200
|
+
}
|
|
201
|
+
console.log(` ✓ Worker ${workerId} (${agentType})${READ_ONLY_ROLES.has(agentType) ? ' [read-only]' : ''} launched in tmux pane`);
|
|
152
202
|
}
|
|
153
203
|
}
|
|
154
|
-
|
|
204
|
+
// Attach only in interactive terminals
|
|
205
|
+
if (process.stdout.isTTY) {
|
|
206
|
+
spawnSync('tmux', ['attach-session', '-t', tmuxSession], { stdio: 'inherit' });
|
|
207
|
+
}
|
|
208
|
+
else {
|
|
209
|
+
console.log(`\nTmux session: ${tmuxSession}`);
|
|
210
|
+
console.log(`Attach with: tmux attach-session -t ${tmuxSession}`);
|
|
211
|
+
}
|
|
155
212
|
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export declare function stop(args: string[]): Promise<void>;
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import { readFile, readdir } from 'fs/promises';
|
|
2
|
+
import { join } from 'path';
|
|
3
|
+
import { existsSync } from 'fs';
|
|
4
|
+
import { STATE_DIR } from '../state/session.js';
|
|
5
|
+
const WORKERS_DIR = join(STATE_DIR, 'workers');
|
|
6
|
+
/**
 * Kill background workers by reading their PID files from the workers state dir.
 *
 * @param {string[]} args - Optional first element is a single workerId to stop;
 *   when absent, every worker with a PID file is signalled.
 * @returns {Promise<void>}
 *
 * Sends SIGTERM only, so workers get a chance to shut down cleanly. PID files
 * and the rest of the session state are deliberately left on disk (see the
 * final log line) — `loom reset --force` clears them.
 */
export async function stop(args) {
    if (!existsSync(WORKERS_DIR)) {
        console.log('No active session.');
        return;
    }
    const targetId = args[0]; // optional: stop a single worker
    const files = await readdir(WORKERS_DIR);
    const pidFiles = files
        .filter(f => f.endsWith('.pid'))
        .filter(f => !targetId || f === `${targetId}.pid`)
        .sort();
    if (pidFiles.length === 0) {
        console.log(targetId ? `No PID file found for ${targetId}.` : 'No worker PID files found.');
        return;
    }
    let killed = 0;
    let notFound = 0;
    for (const pidFile of pidFiles) {
        const workerId = pidFile.slice(0, -'.pid'.length);
        const pidPath = join(WORKERS_DIR, pidFile);
        const raw = await readFile(pidPath, 'utf8').catch(() => '');
        const pid = Number.parseInt(raw, 10);
        // Reject NaN, zero, AND negative values: process.kill(0, ...) signals the
        // caller's own process group and kill(-n, ...) signals group n — a stale
        // or corrupt PID file must never be able to trigger either.
        if (!Number.isInteger(pid) || pid <= 0) {
            console.log(` [${workerId}] no valid PID`);
            continue;
        }
        try {
            process.kill(pid, 'SIGTERM');
            killed++;
            console.log(` [${workerId}] killed (pid ${pid})`);
        }
        catch (err) {
            if (err instanceof Error && 'code' in err && err.code === 'ESRCH') {
                // ESRCH: no such process — the worker already exited on its own.
                notFound++;
                console.log(` [${workerId}] not running (pid ${pid} not found)`);
            }
            else {
                console.log(` [${workerId}] error: ${err instanceof Error ? err.message : err}`);
            }
        }
    }
    console.log(`\n${killed} killed, ${notFound} already stopped.`);
    if (killed > 0) {
        console.log('State preserved. Run: loom reset --force to clear it.');
    }
}
|
package/dist/team/queue.d.ts
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { type Task } from '../state/session.js';
|
|
2
|
+
export declare function recoverStaleClaims(): Promise<number>;
|
|
2
3
|
export declare function claimTask(workerId: string): Promise<Task | null>;
|
|
3
4
|
export declare function completeTask(task: Task, result: string): Promise<void>;
|
|
4
5
|
export declare function failTask(task: Task, error: string): Promise<void>;
|
package/dist/team/queue.js
CHANGED
|
@@ -1,62 +1,140 @@
|
|
|
1
1
|
import { readdir, readFile, rename, stat, writeFile } from 'fs/promises';
|
|
2
2
|
import { join } from 'path';
|
|
3
|
-
import { existsSync } from 'fs';
|
|
4
3
|
import { STATE_DIR } from '../state/session.js';
|
|
5
4
|
const TASKS_DIR = join(STATE_DIR, 'tasks');
|
|
5
|
+
const CLAIM_TTL_MS = 30 * 60 * 1000; // 30 minutes — claimed tasks older than this are re-queued
|
|
6
|
+
// Recover tasks whose worker crashed before completing.
|
|
7
|
+
// Finds -claimed- files older than CLAIM_TTL_MS and renames them back to -pending.json.
|
|
8
|
+
// Recover tasks whose worker crashed before completing.
// Finds -claimed- files older than CLAIM_TTL_MS and renames them back to -pending.json.
//
// The rename is the atomic step: whichever recoverer renames first wins, then the
// reset JSON (status back to 'pending', claim metadata dropped) is written to the
// pending path. Writing the pending file BEFORE the rename is wrong: rename()
// replaces an existing destination, so the stale claimed payload would overwrite
// the freshly reset one, and a failed rename would leave the task duplicated in
// both the pending and claimed states.
//
// @returns {Promise<number>} count of tasks re-queued.
export async function recoverStaleClaims() {
    let recovered = 0;
    let files;
    try {
        files = await readdir(TASKS_DIR);
    }
    catch {
        // No tasks directory yet — nothing to recover.
        return 0;
    }
    const now = Date.now();
    const claimed = files.filter(f => f.includes('-claimed-'));
    for (const file of claimed) {
        const filePath = join(TASKS_DIR, file);
        try {
            const { mtimeMs } = await stat(filePath);
            if (now - mtimeMs < CLAIM_TTL_MS)
                continue;
            // Parse task id from filename: {id}-claimed-{workerId}.json
            const taskId = file.split('-claimed-')[0];
            if (!taskId)
                continue;
            const pendingPath = join(TASKS_DIR, `${taskId}-pending.json`);
            // Read and reset the task object before touching filesystem state.
            const task = JSON.parse(await readFile(filePath, 'utf8'));
            task.status = 'pending';
            delete task.workerId;
            delete task.claimedAt;
            // Atomic re-queue: the first recoverer to rename wins; a concurrent
            // recovery run loses here with ENOENT and skips via the outer catch.
            await rename(filePath, pendingPath);
            // Persist the reset fields. If this write fails the pending file still
            // carries stale claim metadata, but claimTask() rewrites those fields
            // on claim, so the task is not lost — best-effort is acceptable.
            await writeFile(pendingPath, JSON.stringify(task, null, 2)).catch(() => { });
            recovered++;
        }
        catch {
            // Skip files we can't read/stat/rename — don't crash the recovery pass
        }
    }
    return recovered;
}
|
|
6
48
|
// Claim the next available task for `workerId`.
//
// Runs a stale-claim recovery pass first, then scans the tasks directory for
// `*-pending.json` files in directory order. The claim itself is an atomic
// rename to `{id}-claimed-{workerId}.json` — the first process to rename wins,
// and losers simply move on to the next candidate.
//
// @param {string} workerId - id of the worker requesting work.
// @returns {Promise<object|null>} the claimed task (status/workerId/claimedAt
//   updated) or null when nothing is claimable.
export async function claimTask(workerId) {
    // Self-heal: recover any stale claimed tasks before scanning for pending ones
    await recoverStaleClaims();
    let entries;
    try {
        entries = await readdir(TASKS_DIR);
    }
    catch {
        return null;
    }
    for (const name of entries.filter(f => f.endsWith('-pending.json'))) {
        const pendingPath = join(TASKS_DIR, name);
        const claimedPath = join(TASKS_DIR, name.replace('-pending.json', `-claimed-${workerId}.json`));
        try {
            // Build the updated task object BEFORE the rename. If writeFile fails
            // after a successful rename, we rename back so the task re-enters the
            // pending pool rather than being stuck as claimed with stale data.
            const task = JSON.parse(await readFile(pendingPath, 'utf8'));
            task.status = 'claimed';
            task.workerId = workerId;
            task.claimedAt = new Date().toISOString();
            const payload = JSON.stringify(task, null, 2);
            // Atomic claim — first writer wins
            await rename(pendingPath, claimedPath);
            try {
                await writeFile(claimedPath, payload);
            }
            catch (writeErr) {
                // Rename succeeded but write failed — roll back so the task isn't orphaned
                await rename(claimedPath, pendingPath).catch(() => { });
                throw writeErr;
            }
            return task;
        }
        catch {
            // Another worker claimed it first (ENOENT/EPERM), or rollback — try next
        }
    }
    return null;
}
|
|
32
92
|
// Mark a claimed task as done and persist the result.
// Mutates the task in place (status/result/completedAt), rewrites its claimed
// file, then renames it to `{id}-done-{workerId}.json`.
export async function completeTask(task, result) {
    const worker = task.workerId ?? 'unknown';
    const claimedPath = join(TASKS_DIR, `${task.id}-claimed-${worker}.json`);
    const donePath = join(TASKS_DIR, `${task.id}-done-${worker}.json`);
    task.status = 'done';
    task.result = result;
    task.completedAt = new Date().toISOString();
    const payload = JSON.stringify(task, null, 2);
    try {
        await writeFile(claimedPath, payload);
        await rename(claimedPath, donePath);
    }
    catch {
        // If the claimed file is already gone (double-complete), write directly to done path
        await writeFile(donePath, payload).catch(() => { });
    }
}
|
|
41
108
|
// Mark a claimed task as failed and persist the error message.
// Mirrors completeTask: mutate in place, rewrite the claimed file, rename to
// `{id}-failed-{workerId}.json`; fall back to writing the failed path directly
// if the claimed file is no longer usable.
export async function failTask(task, error) {
    const worker = task.workerId ?? 'unknown';
    const claimedPath = join(TASKS_DIR, `${task.id}-claimed-${worker}.json`);
    const failedPath = join(TASKS_DIR, `${task.id}-failed-${worker}.json`);
    task.status = 'failed';
    task.error = error;
    task.completedAt = new Date().toISOString();
    const payload = JSON.stringify(task, null, 2);
    try {
        await writeFile(claimedPath, payload);
        await rename(claimedPath, failedPath);
    }
    catch {
        await writeFile(failedPath, payload).catch(() => { });
    }
}
|
|
50
123
|
// Count tasks still waiting to be claimed (files named `*-pending.json`).
// A missing/unreadable tasks directory simply means zero pending tasks.
export async function pendingCount() {
    let names;
    try {
        names = await readdir(TASKS_DIR);
    }
    catch {
        return 0;
    }
    let count = 0;
    for (const name of names) {
        if (name.endsWith('-pending.json')) {
            count++;
        }
    }
    return count;
}
|
|
56
132
|
// True when no task is pending or claimed — i.e. every task has reached a
// terminal state (done/failed). A missing/unreadable tasks directory counts
// as "all done".
export async function allDone() {
    let names;
    try {
        names = await readdir(TASKS_DIR);
    }
    catch {
        return true;
    }
    const isActive = (f) => f.endsWith('-pending.json') || f.includes('-claimed-');
    return names.every(f => !isActive(f));
}
|