@tekyzinc/gsd-t 3.18.13 → 3.19.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +114 -0
- package/bin/gsd-t-parallel-probe.cjs +132 -0
- package/bin/gsd-t-parallel.cjs +422 -9
- package/bin/gsd-t-task-graph.cjs +80 -19
- package/bin/gsd-t-unattended.cjs +634 -229
- package/bin/gsd-t-worker-dispatch.cjs +211 -0
- package/bin/headless-auto-spawn.cjs +44 -1
- package/bin/headless-exit-codes.cjs +36 -18
- package/bin/m44-proof-measure.cjs +285 -0
- package/bin/m46-iter-proof.cjs +149 -0
- package/bin/m46-worker-proof.cjs +201 -0
- package/bin/parallelism-report.cjs +535 -0
- package/bin/spawn-plan-writer.cjs +1 -1
- package/commands/gsd-t-debug.md +10 -14
- package/commands/gsd-t-execute.md +10 -16
- package/commands/gsd-t-help.md +1 -0
- package/commands/gsd-t-integrate.md +8 -14
- package/commands/gsd-t-quick.md +10 -14
- package/commands/gsd-t-resume.md +32 -0
- package/commands/gsd-t-status.md +10 -0
- package/commands/gsd-t-unattended-watch.md +58 -1
- package/commands/gsd-t-visualize.md +15 -12
- package/commands/gsd-t-wave.md +2 -11
- package/docs/architecture.md +82 -0
- package/docs/requirements.md +20 -0
- package/package.json +1 -1
- package/scripts/gsd-t-compact-detector.js +51 -8
- package/scripts/gsd-t-dashboard-server.js +138 -85
- package/scripts/gsd-t-transcript.html +152 -1
- package/scripts/gsd-t-update-check.js +13 -4
- package/scripts/hooks/gsd-t-conversation-capture.js +258 -0
- package/templates/CLAUDE-global.md +54 -0
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
'use strict';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* M46-D1 Iter-Parallel Proof Harness (T10)
|
|
6
|
+
*
|
|
7
|
+
* Measures the parallelism speedup of `_runIterParallel` vs. a serial
|
|
8
|
+
* baseline using a synthetic 10-iter workload where each iter sleeps
|
|
9
|
+
* 200ms and returns a successful IterResult.
|
|
10
|
+
*
|
|
11
|
+
* Method:
|
|
12
|
+
* 1. Serial baseline (batchSize=1): run 10 iters one at a time via
|
|
13
|
+
* `_runIterParallel(..., batchSize=1)` in a loop. Expect ~2000ms.
|
|
14
|
+
* 2. Parallel (batchSize=4): run 10 iters in batches of 4+4+2 via the
|
|
15
|
+
* same helper. Expect ~600ms (3 batches × 200ms).
|
|
16
|
+
* 3. Compute speedup = T_serial / T_par,
|
|
17
|
+
* parallelism_factor = (10 × 200) / T_par.
|
|
18
|
+
* 4. Pass iff T_par/T_serial ≤ 0.35 AND speedup ≥ 3.0.
|
|
19
|
+
*
|
|
20
|
+
* Output: .gsd-t/metrics/m46-iter-proof.json + human summary on stdout.
|
|
21
|
+
* Exit: 0 on pass, 1 on fail.
|
|
22
|
+
*/
|
|
23
|
+
|
|
24
|
+
const fs = require('fs');
|
|
25
|
+
const path = require('path');
|
|
26
|
+
|
|
27
|
+
// Workload shape: 10 synthetic iters, each sleeping 200ms, so the ideal
// serial wall-clock is 10 × 200 = 2000ms.
const ITER_COUNT = 10;
const ITER_SLEEP_MS = 200;
// batchSize=1 forces strictly sequential execution (the baseline);
// batchSize=4 yields batches of 4+4+2, ideal parallel wall-clock ≈ 600ms.
const BATCH_SERIAL = 1;
const BATCH_PARALLEL = 4;
// Pass criteria (see file header): T_par/T_serial ≤ 0.35 AND speedup ≥ 3.0.
const THRESHOLD_RATIO = 0.35;
const THRESHOLD_SPEEDUP = 3.0;

// Helper under test, pulled from the unattended runner's test-only export.
// NOTE(review): assumes gsd-t-unattended.cjs exposes `__test__._runIterParallel`
// — this is only checked at require time.
const { _runIterParallel } = require(path.join(__dirname, 'gsd-t-unattended.cjs')).__test__;
|
|
35
|
+
|
|
36
|
+
// ── synthetic iterFn ───────────────────────────────────────────────────────
|
|
37
|
+
|
|
38
|
+
// Monotonically increasing id used to label the synthetic tasks each fake
// iter reports as done; main() resets it to 0 before each measurement phase.
let iterSeq = 0;

/**
 * Build a synthetic iterFn compatible with `_runIterParallel`.
 *
 * Each invocation of the returned function claims the next sequence id,
 * sleeps for `sleepMs`, and resolves with a successful IterResult whose
 * `tasksDone` records the claimed id (e.g. `['t0']`).
 *
 * @param {number} sleepMs - simulated per-iter work duration in ms.
 * @returns {(state: object, opts: object) => Promise<object>} fake iterFn.
 */
function makeIterFn(sleepMs) {
  return async (_state, _opts) => {
    // Claim the id synchronously (before the await) so ids reflect start
    // order even when several iters run concurrently.
    const myId = iterSeq;
    iterSeq += 1;
    await new Promise((resolve) => setTimeout(resolve, sleepMs));
    return {
      status: 'ok',
      tasksDone: [`t${myId}`],
      verifyNeeded: false,
      artifacts: [],
    };
  };
}
|
|
51
|
+
|
|
52
|
+
// ── runners ────────────────────────────────────────────────────────────────
|
|
53
|
+
|
|
54
|
+
/**
 * Serial baseline: drive `_runIterParallel` with batchSize=1, `total` times
 * in a row, so the iters execute strictly one after another.
 *
 * @param {number} total - number of iters to run.
 * @param {number} sleepMs - simulated per-iter duration.
 * @returns {Promise<{wallClockMs: number, results: object[]}>}
 */
async function runSerial(total, sleepMs) {
  const iterFn = makeIterFn(sleepMs);
  const state = {};
  const opts = {};
  const results = [];
  const started = process.hrtime.bigint();
  let done = 0;
  while (done < total) {
    const batch = await _runIterParallel(state, opts, iterFn, BATCH_SERIAL);
    results.push(...batch);
    done += 1;
  }
  const elapsedNs = process.hrtime.bigint() - started;
  return {
    wallClockMs: Number(elapsedNs) / 1e6,
    results,
  };
}
|
|
70
|
+
|
|
71
|
+
/**
 * Parallel run: hand `_runIterParallel` batches of up to `batchSize` iters
 * until `total` have been launched (e.g. 4+4+2 for total=10, batchSize=4).
 *
 * @param {number} total - number of iters to run.
 * @param {number} batchSize - max iters per batch.
 * @param {number} sleepMs - simulated per-iter duration.
 * @returns {Promise<{wallClockMs: number, results: object[]}>}
 */
async function runParallel(total, batchSize, sleepMs) {
  const iterFn = makeIterFn(sleepMs);
  const state = {};
  const opts = {};
  const results = [];
  const started = process.hrtime.bigint();
  let launched = 0;
  while (launched < total) {
    // Last batch may be smaller than batchSize.
    const n = Math.min(batchSize, total - launched);
    const batch = await _runIterParallel(state, opts, iterFn, n);
    results.push(...batch);
    launched += n;
  }
  const elapsedNs = process.hrtime.bigint() - started;
  return {
    wallClockMs: Number(elapsedNs) / 1e6,
    results,
  };
}
|
|
90
|
+
|
|
91
|
+
// ── main ───────────────────────────────────────────────────────────────────
|
|
92
|
+
|
|
93
|
+
/**
 * Entry point: run the serial and parallel phases, derive ratio/speedup/
 * parallelism-factor, print a human summary, persist the JSON report to
 * .gsd-t/metrics/m46-iter-proof.json, and exit 0 on pass / 1 on fail.
 */
async function main() {
  process.stdout.write(
    `M46-D1 Iter-Parallel Proof (N=${ITER_COUNT} iters, sleep=${ITER_SLEEP_MS}ms each)\n`,
  );
  process.stdout.write('─'.repeat(60) + '\n');

  // Phase 1: serial baseline. Reset iterSeq so task ids start at t0.
  iterSeq = 0;
  const serial = await runSerial(ITER_COUNT, ITER_SLEEP_MS);
  process.stdout.write(`Serial baseline (batchSize=${BATCH_SERIAL}, ${ITER_COUNT} iters sequentially)\n`);
  process.stdout.write(` T_serial (wall-clock): ${serial.wallClockMs.toFixed(1)} ms\n`);
  process.stdout.write(` results: ${serial.results.length} (ok=${serial.results.filter((r) => r.status === 'ok').length})\n\n`);

  // Phase 2: parallel run with the same total workload.
  iterSeq = 0;
  const parallel = await runParallel(ITER_COUNT, BATCH_PARALLEL, ITER_SLEEP_MS);
  process.stdout.write(`Parallel (batchSize=${BATCH_PARALLEL}, batches of 4+4+2)\n`);
  process.stdout.write(` T_par (wall-clock): ${parallel.wallClockMs.toFixed(1)} ms\n`);
  process.stdout.write(` results: ${parallel.results.length} (ok=${parallel.results.filter((r) => r.status === 'ok').length})\n\n`);

  // Derived metrics: ratio/speedup are measured-vs-measured; the
  // parallelism factor compares against the IDEAL serial time (N × sleep).
  const ratio = parallel.wallClockMs / serial.wallClockMs;
  const speedup = serial.wallClockMs / parallel.wallClockMs;
  const parallelismFactor = (ITER_COUNT * ITER_SLEEP_MS) / parallel.wallClockMs;
  // Both criteria must hold to pass.
  const passed = ratio <= THRESHOLD_RATIO && speedup >= THRESHOLD_SPEEDUP;

  process.stdout.write('─'.repeat(60) + '\n');
  process.stdout.write('Result\n');
  process.stdout.write(` T_par / T_serial = ${ratio.toFixed(3)} (threshold ≤ ${THRESHOLD_RATIO})\n`);
  process.stdout.write(` speedup = ${speedup.toFixed(2)}× (threshold ≥ ${THRESHOLD_SPEEDUP})\n`);
  process.stdout.write(` parallelism_factor = ${parallelismFactor.toFixed(2)} (ideal ≈ ${BATCH_PARALLEL})\n`);
  process.stdout.write(` verdict = ${passed ? 'PASS ✓' : 'FAIL ✗'}\n`);

  // Machine-readable report mirrors everything printed above.
  const report = {
    timestamp: new Date().toISOString(),
    iter_count: ITER_COUNT,
    iter_sleep_ms: ITER_SLEEP_MS,
    batch_size_serial: BATCH_SERIAL,
    batch_size_parallel: BATCH_PARALLEL,
    T_serial_ms: serial.wallClockMs,
    T_par_ms: parallel.wallClockMs,
    speedup,
    parallelism_factor: parallelismFactor,
    threshold_T_par_over_T_serial: THRESHOLD_RATIO,
    threshold_speedup: THRESHOLD_SPEEDUP,
    passed,
  };
  // Report is written relative to the CURRENT working directory, not
  // __dirname — run the harness from the project root.
  const reportDir = path.join(process.cwd(), '.gsd-t', 'metrics');
  fs.mkdirSync(reportDir, { recursive: true });
  const reportPath = path.join(reportDir, 'm46-iter-proof.json');
  fs.writeFileSync(reportPath, JSON.stringify(report, null, 2));
  process.stdout.write(`\nReport written: ${reportPath}\n`);

  process.exit(passed ? 0 : 1);
}
|
|
145
|
+
|
|
146
|
+
// Top-level error trap: anything escaping main() is reported on stderr and
// maps to exit code 2 (distinct from the pass/fail exit codes 0/1).
main().catch((err) => {
  const detail = (err && err.stack) || err;
  process.stderr.write(`ERROR: ${detail}\n`);
  process.exit(2);
});
|
|
@@ -0,0 +1,201 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
'use strict';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* M46-D2 Worker Sub-Dispatch Proof Harness (T7)
|
|
6
|
+
*
|
|
7
|
+
* Measures the parallelism speedup of `dispatchWorkerTasks` vs. a serial
|
|
8
|
+
* baseline using a synthetic 6-task file-disjoint workload.
|
|
9
|
+
*
|
|
10
|
+
* Method:
|
|
11
|
+
* 1. Serial baseline: run 6 `sleep 2 && echo done > …` tasks one by one
|
|
12
|
+
* via `child_process.execSync`. Record `T_serial` (wall-clock ms).
|
|
13
|
+
* 2. Parallel via `dispatchWorkerTasks`: stub the dispatcher's
|
|
14
|
+
* `runDispatch` boundary (via a mocked `gsd-t-parallel.cjs` in
|
|
15
|
+
* require.cache) to execute tasks concurrently with
|
|
16
|
+
* `child_process.exec` + `Promise.all`. This measures the dispatch
|
|
17
|
+
* SCHEDULER's fan-out behaviour, not real headless claude-p spawns.
|
|
18
|
+
* 3. Compute speedup and parallelism_factor. Pass iff speedup ≥ 2.5.
|
|
19
|
+
*
|
|
20
|
+
* Output: .gsd-t/metrics/m46-worker-proof.json + human summary on stdout.
|
|
21
|
+
* Exit: 0 on pass, 1 on fail.
|
|
22
|
+
*/
|
|
23
|
+
|
|
24
|
+
const fs = require('fs');
|
|
25
|
+
const os = require('os');
|
|
26
|
+
const path = require('path');
|
|
27
|
+
const { exec, execSync } = require('child_process');
|
|
28
|
+
|
|
29
|
+
// Pass criterion: parallel dispatch must be at least 2.5× faster than the
// serial execSync baseline.
const THRESHOLD = 2.5;
// Six file-disjoint tasks, each `sleep 2`, so the serial baseline is ~12s.
const TASK_COUNT = 6;
// PID-scoped tmp dir keeps concurrent harness runs from clobbering each
// other's task output files.
const PID = process.pid;
const TMP_DIR = path.join(os.tmpdir(), `m46-proof-${PID}`);
|
|
33
|
+
|
|
34
|
+
/** Create the PID-scoped scratch directory (no-op if it already exists). */
function ensureTmpDir() {
  const mkdirOpts = { recursive: true };
  fs.mkdirSync(TMP_DIR, mkdirOpts);
}
|
|
37
|
+
|
|
38
|
+
/** Best-effort removal of the scratch directory; failures are swallowed. */
function cleanupTmpDir() {
  try {
    fs.rmSync(TMP_DIR, { recursive: true, force: true });
  } catch (_) {
    // Deliberately ignored: cleanup must never mask the harness verdict.
  }
}
|
|
41
|
+
|
|
42
|
+
/**
 * Build TASK_COUNT synthetic, file-disjoint worker tasks. Each task sleeps
 * 2s and then writes its own marker file under TMP_DIR, so no two tasks
 * touch the same file and the set is trivially parallelizable.
 *
 * Fix: the redirect target is double-quoted in the shell command so the
 * harness still works when os.tmpdir() contains spaces.
 *
 * @returns {{taskId: string, files: string[], command: string}[]}
 */
function buildTasks() {
  const tasks = [];
  for (let i = 0; i < TASK_COUNT; i++) {
    const outPath = path.join(TMP_DIR, `task-${i}.out`);
    tasks.push({
      taskId: `T-${i}`,
      files: [outPath],
      // Quote the path: an unquoted ${outPath} breaks on tmp dirs with spaces.
      command: `sleep 2 && echo done > "${outPath}"`,
    });
  }
  return tasks;
}
|
|
54
|
+
|
|
55
|
+
// ── serial baseline ────────────────────────────────────────────────────────
|
|
56
|
+
|
|
57
|
+
/**
 * Serial baseline: execute every task's shell command back-to-back with
 * execSync, recording per-task and total wall-clock durations.
 *
 * @param {{command: string}[]} tasks - tasks to run sequentially.
 * @returns {{wallClockMs: number, perTaskMs: number[], meanTaskMs: number}}
 */
function runSerial(tasks) {
  const perTaskMs = [];
  const overallStart = process.hrtime.bigint();
  for (const task of tasks) {
    const taskStart = process.hrtime.bigint();
    execSync(task.command, { stdio: 'ignore' });
    perTaskMs.push(Number(process.hrtime.bigint() - taskStart) / 1e6);
  }
  const wallClockMs = Number(process.hrtime.bigint() - overallStart) / 1e6;
  const meanTaskMs = perTaskMs.reduce((sum, ms) => sum + ms, 0) / perTaskMs.length;
  return { wallClockMs, perTaskMs, meanTaskMs };
}
|
|
73
|
+
|
|
74
|
+
// ── parallel via stubbed runDispatch ──────────────────────────────────────
|
|
75
|
+
|
|
76
|
+
/**
 * Pre-populate require.cache for bin/gsd-t-parallel.cjs with a stub whose
 * `runDispatch` executes each task's `command` concurrently via exec +
 * Promise.all. This isolates the measurement to the dispatcher SCHEDULER's
 * fan-out behaviour and avoids spawning real headless claude-p children.
 */
function installParallelStub() {
  const parallelPath = require.resolve(path.join(__dirname, 'gsd-t-parallel.cjs'));

  // Concurrent runner: one exec per task, all in flight at once.
  const runDispatch = async ({ tasks }) => {
    const runOne = (task) => new Promise((resolve) => {
      const started = Date.now();
      exec(task.command, (err) => {
        resolve({
          taskId: task.taskId,
          // exec's error object carries the child's exit code; default to 1.
          exitCode: err ? (err.code || 1) : 0,
          durationMs: Date.now() - started,
        });
      });
    });
    const workerResults = await Promise.all(tasks.map(runOne));
    return {
      decision: 'fan-out',
      fanOutCount: tasks.length,
      workerResults,
    };
  };

  // Minimal Module-shaped cache entry so a later require() resolves to the
  // stub instead of loading the real file from disk.
  require.cache[parallelPath] = {
    exports: { runDispatch },
    loaded: true,
    id: parallelPath,
    filename: parallelPath,
    paths: [],
    children: [],
  };
}
|
|
110
|
+
|
|
111
|
+
/**
 * Parallel run: dispatch the tasks through the real `dispatchWorkerTasks`
 * (with the parallel backend stubbed) and measure total wall-clock time.
 *
 * @param {{taskId: string, files: string[], command: string}[]} tasks
 * @returns {Promise<{wallClockMs: number, parallel: *, reason: *, taskResults: *}>}
 */
async function runParallel(tasks) {
  installParallelStub();
  // Re-require the dispatcher with a cold cache so it binds to the stubbed
  // parallel module even if it was loaded earlier in this process.
  const dispatchPath = require.resolve(path.join(__dirname, 'gsd-t-worker-dispatch.cjs'));
  delete require.cache[dispatchPath];
  const { dispatchWorkerTasks } = require(dispatchPath);

  const started = process.hrtime.bigint();
  const result = await dispatchWorkerTasks({
    projectDir: TMP_DIR,
    parentSessionId: `m46-proof-${PID}`,
    tasks,
    maxParallel: TASK_COUNT,
  });
  const wallClockMs = Number(process.hrtime.bigint() - started) / 1e6;

  return {
    wallClockMs,
    parallel: result.parallel,
    reason: result.reason,
    taskResults: result.taskResults,
  };
}
|
|
134
|
+
|
|
135
|
+
// ── main ────────────────────────────────────────────────────────────────────
|
|
136
|
+
|
|
137
|
+
/**
 * Entry point: run the serial execSync baseline and the stubbed parallel
 * dispatch over the same 6-task workload, derive speedup/parallelism-factor,
 * print a human summary, persist .gsd-t/metrics/m46-worker-proof.json, and
 * exit 0 on pass / 1 on fail.
 */
async function main() {
  ensureTmpDir();
  process.stdout.write(`M46-D2 Worker Sub-Dispatch Proof (N=${TASK_COUNT} tasks, tmp=${TMP_DIR})\n`);
  process.stdout.write('─'.repeat(60) + '\n');

  const tasks = buildTasks();

  // Phase 1: serial baseline (~TASK_COUNT × 2s of wall-clock).
  const serial = runSerial(tasks);
  process.stdout.write('Serial baseline (execSync, tasks run back-to-back)\n');
  process.stdout.write(` T_serial (wall-clock): ${serial.wallClockMs.toFixed(1)} ms\n`);
  process.stdout.write(` per-task durations: [${serial.perTaskMs.map((x) => x.toFixed(0)).join(', ')}] ms\n`);
  process.stdout.write(` mean(task duration): ${serial.meanTaskMs.toFixed(1)} ms\n\n`);

  // Reset task output files before parallel run so exec doesn't race on
  // leftovers (each task writes its own file so this is just hygiene).
  for (let i = 0; i < TASK_COUNT; i++) {
    const p = path.join(TMP_DIR, `task-${i}.out`);
    try { fs.unlinkSync(p); } catch (_) { /* ignore */ }
  }

  // Phase 2: same tasks through dispatchWorkerTasks with stubbed backend.
  const parallel = await runParallel(tasks);
  process.stdout.write(`Parallel via dispatchWorkerTasks (runDispatch stub, concurrent exec)\n`);
  process.stdout.write(` T_par (wall-clock): ${parallel.wallClockMs.toFixed(1)} ms\n`);
  process.stdout.write(` parallel dispatched: ${parallel.parallel} (reason=${parallel.reason})\n`);
  process.stdout.write(` per-task results: ${parallel.taskResults.length} tasks\n\n`);

  // parallelism_factor uses the MEASURED mean task duration as the ideal
  // serial cost per task, unlike the iter proof which uses a fixed sleep.
  const speedup = serial.wallClockMs / parallel.wallClockMs;
  const parallelismFactor = (TASK_COUNT * serial.meanTaskMs) / parallel.wallClockMs;
  const passed = speedup >= THRESHOLD;

  process.stdout.write('─'.repeat(60) + '\n');
  process.stdout.write('Result\n');
  process.stdout.write(` speedup = ${speedup.toFixed(2)}× (threshold ≥ ${THRESHOLD})\n`);
  process.stdout.write(` parallelism_factor = ${parallelismFactor.toFixed(2)} (ideal = ${TASK_COUNT})\n`);
  process.stdout.write(` verdict = ${passed ? 'PASS ✓' : 'FAIL ✗'}\n`);

  // Machine-readable report mirrors everything printed above.
  const report = {
    timestamp: new Date().toISOString(),
    task_count: TASK_COUNT,
    T_serial_ms: serial.wallClockMs,
    T_par_ms: parallel.wallClockMs,
    speedup,
    parallelism_factor: parallelismFactor,
    threshold: THRESHOLD,
    passed,
  };
  // Report is written relative to the CURRENT working directory, not
  // __dirname — run the harness from the project root.
  const reportDir = path.join(process.cwd(), '.gsd-t', 'metrics');
  fs.mkdirSync(reportDir, { recursive: true });
  const reportPath = path.join(reportDir, 'm46-worker-proof.json');
  fs.writeFileSync(reportPath, JSON.stringify(report, null, 2));
  process.stdout.write(`\nReport written: ${reportPath}\n`);

  cleanupTmpDir();
  process.exit(passed ? 0 : 1);
}
|
|
192
|
+
|
|
193
|
+
// Guarantee tmp-dir removal on normal exit and on the common kill signals,
// propagating the conventional 128+signal exit codes (130, 143).
process.on('exit', cleanupTmpDir);
process.on('SIGINT', () => {
  cleanupTmpDir();
  process.exit(130);
});
process.on('SIGTERM', () => {
  cleanupTmpDir();
  process.exit(143);
});

// Top-level error trap: any failure escaping main() is reported on stderr
// and maps to exit code 2 (distinct from the pass/fail exit codes 0/1).
main().catch((err) => {
  const detail = (err && err.stack) || err;
  process.stderr.write(`ERROR: ${detail}\n`);
  cleanupTmpDir();
  process.exit(2);
});
|