@tekyzinc/gsd-t 3.18.13 → 3.19.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +114 -0
- package/bin/gsd-t-parallel-probe.cjs +132 -0
- package/bin/gsd-t-parallel.cjs +422 -9
- package/bin/gsd-t-task-graph.cjs +80 -19
- package/bin/gsd-t-unattended.cjs +634 -229
- package/bin/gsd-t-worker-dispatch.cjs +211 -0
- package/bin/headless-auto-spawn.cjs +44 -1
- package/bin/headless-exit-codes.cjs +36 -18
- package/bin/m44-proof-measure.cjs +285 -0
- package/bin/m46-iter-proof.cjs +149 -0
- package/bin/m46-worker-proof.cjs +201 -0
- package/bin/parallelism-report.cjs +535 -0
- package/bin/spawn-plan-writer.cjs +1 -1
- package/commands/gsd-t-debug.md +10 -14
- package/commands/gsd-t-execute.md +10 -16
- package/commands/gsd-t-help.md +1 -0
- package/commands/gsd-t-integrate.md +8 -14
- package/commands/gsd-t-quick.md +10 -14
- package/commands/gsd-t-resume.md +32 -0
- package/commands/gsd-t-status.md +10 -0
- package/commands/gsd-t-unattended-watch.md +58 -1
- package/commands/gsd-t-visualize.md +15 -12
- package/commands/gsd-t-wave.md +2 -11
- package/docs/architecture.md +82 -0
- package/docs/requirements.md +20 -0
- package/package.json +1 -1
- package/scripts/gsd-t-compact-detector.js +51 -8
- package/scripts/gsd-t-dashboard-server.js +138 -85
- package/scripts/gsd-t-transcript.html +152 -1
- package/scripts/gsd-t-update-check.js +13 -4
- package/scripts/hooks/gsd-t-conversation-capture.js +258 -0
- package/templates/CLAUDE-global.md +54 -0
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
'use strict';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* GSD-T Worker Sub-Dispatch (M46 D2 T2)
|
|
6
|
+
*
|
|
7
|
+
* Thin adapter that lets an unattended supervisor worker fan out its own
|
|
8
|
+
* file-disjoint tasks by reusing the M44-verified `runDispatch` instrument.
|
|
9
|
+
* This module is a new CONSUMER of `bin/gsd-t-parallel.cjs::runDispatch` —
|
|
10
|
+
* not a modifier. The in-session dispatch path is byte-identical post-D2.
|
|
11
|
+
*
|
|
12
|
+
* Contract: .gsd-t/contracts/headless-default-contract.md v2.1.0 §Worker Sub-Dispatch
|
|
13
|
+
*
|
|
14
|
+
* Public API:
|
|
15
|
+
* dispatchWorkerTasks({projectDir, parentSessionId, tasks, maxParallel})
|
|
16
|
+
* → { parallel, taskResults, wallClockMs, reason }
|
|
17
|
+
*
|
|
18
|
+
* Triggers sub-dispatch when all hold:
|
|
19
|
+
* - tasks.length > 1
|
|
20
|
+
* - tasks are file-disjoint (pairwise no overlap on `task.files`)
|
|
21
|
+
* Otherwise returns `{parallel: false, …}` and the caller falls through to
|
|
22
|
+
* its current serial behavior.
|
|
23
|
+
*/
|
|
24
|
+
|
|
25
|
+
const path = require('path');
|
|
26
|
+
|
|
27
|
+
const SPAWN_PLAN_KIND = 'unattended-worker-sub';
|
|
28
|
+
const DEFAULT_MAX_PARALLEL = 4;
|
|
29
|
+
|
|
30
|
+
/**
 * Pairwise file-disjointness across a task set. Returns true iff no two
 * tasks share any file in their `files` arrays.
 *
 * A task with a missing OR empty `files` array has no declared file scope,
 * and an undeclared scope is an unknown scope: any such task in a
 * multi-task set makes the whole set NOT disjoint, so the caller falls
 * back to its serial path. (Previously `files: []` was treated as a
 * declared-empty scope that overlaps with nothing and could slip through
 * to parallel dispatch — contradicting this documented conservative
 * contract. Missing arrays already forced the serial fallback; empty
 * arrays now do too.)
 *
 * @param {Array} tasks task objects, each optionally carrying `files: string[]`
 * @returns {boolean} true when the set is provably safe for parallel dispatch
 */
function _areFileDisjoint(tasks) {
  // Zero or one task: trivially disjoint (nothing to overlap with).
  if (!Array.isArray(tasks) || tasks.length < 2) return true;
  // Normalize: a missing or empty `files` array means "scope undeclared".
  const scopes = tasks.map((t) =>
    (t && Array.isArray(t.files) && t.files.length > 0) ? t.files : null
  );
  // Any undeclared scope poisons the whole set — conservative serial fallback.
  if (scopes.some((s) => s === null)) return false;
  // Pairwise overlap check; Set gives O(1) membership per comparison.
  for (let i = 0; i < scopes.length; i++) {
    const seen = new Set(scopes[i]);
    for (let j = i + 1; j < scopes.length; j++) {
      if (scopes[j].some((f) => seen.has(f))) return false;
    }
  }
  return true;
}
|
|
58
|
+
|
|
59
|
+
/**
 * Emit a spawn-plan frame describing this sub-dispatch. Best-effort: any
 * failure (including a failed require of the writer module) is swallowed,
 * per spawn-plan-writer.cjs §Hard rules — plan emission must never block
 * or fail the dispatch itself.
 */
function _writeSubDispatchSpawnPlan({ projectDir, parentSessionId, tasks }) {
  try {
    const planWriter = require(path.join(__dirname, 'spawn-plan-writer.cjs'));
    // Project each task down to the minimal shape the plan writer expects.
    const toPlanTask = (task) => {
      const rawId = task && task.taskId;
      return {
        id: (typeof rawId === 'string') ? rawId : String(rawId || ''),
        title: (task && typeof task.title === 'string') ? task.title : '',
        status: 'pending',
      };
    };
    planWriter.writeSpawnPlan({
      spawnId: `worker-sub-${parentSessionId}-${Date.now()}`,
      kind: SPAWN_PLAN_KIND,
      projectDir,
      tasks: tasks.map(toPlanTask),
    });
  } catch (_err) {
    /* best-effort; never block dispatch */
  }
}
|
|
82
|
+
|
|
83
|
+
/**
 * dispatchWorkerTasks — the M46 D2 sub-dispatch entry point.
 *
 * Decides whether a worker's task set can fan out in parallel and, if so,
 * forwards it to the M44-verified `runDispatch` in gsd-t-parallel.cjs.
 * Every refusal or failure yields `{parallel: false, …}` so the caller can
 * fall through to its existing serial behavior.
 *
 * @param {object} opts
 * @param {string} opts.projectDir absolute project root (defaults to cwd)
 * @param {string} opts.parentSessionId $GSD_T_PARENT_AGENT_ID from worker env
 * @param {Array} opts.tasks [{taskId, files, command, ...}]
 * @param {number} [opts.maxParallel=4] concurrency cap (default matches M44)
 * @returns {Promise<{parallel: boolean, taskResults: Array, wallClockMs: number, reason: string}>}
 */
async function dispatchWorkerTasks(opts) {
  const options = opts || {};
  const projectDir = options.projectDir || process.cwd();
  const parentSessionId = options.parentSessionId || '';
  const tasks = Array.isArray(options.tasks) ? options.tasks : [];
  const maxParallel = (Number.isFinite(options.maxParallel) && options.maxParallel > 0)
    ? Math.floor(options.maxParallel)
    : DEFAULT_MAX_PARALLEL;

  // Uniform refusal shape: caller treats any of these as "go serial".
  const refuse = (reason) => ({ parallel: false, taskResults: [], wallClockMs: 0, reason });
  if (tasks.length === 0) return refuse('no-tasks');
  if (tasks.length === 1) return refuse('single-task');
  if (!_areFileDisjoint(tasks)) return refuse('file-overlap');

  // Best-effort observability frame; never blocks the dispatch.
  _writeSubDispatchSpawnPlan({ projectDir, parentSessionId, tasks });

  const startedAt = Date.now();
  try {
    const parallelMod = require(path.join(__dirname, 'gsd-t-parallel.cjs'));
    const dispatchResult = await parallelMod.runDispatch({
      projectDir,
      tasks,
      maxWorkers: maxParallel,
      mode: 'worker-subdispatch',
    });
    // runDispatch has reported results under either key historically;
    // accept both, defaulting to an empty list.
    let taskResults = [];
    if (dispatchResult && Array.isArray(dispatchResult.workerResults)) {
      taskResults = dispatchResult.workerResults;
    } else if (dispatchResult && Array.isArray(dispatchResult.taskResults)) {
      taskResults = dispatchResult.taskResults;
    }
    return {
      parallel: true,
      taskResults,
      wallClockMs: Date.now() - startedAt,
      reason: 'dispatched',
    };
  } catch (err) {
    // Dispatch errors are reported, not thrown — caller falls back serial.
    const detail = (err && err.message) ? err.message : String(err);
    return {
      parallel: false,
      taskResults: [],
      wallClockMs: Date.now() - startedAt,
      reason: `dispatch-error: ${detail}`,
    };
  }
}
|
|
143
|
+
|
|
144
|
+
// Public surface: the dispatcher entry point, the disjointness predicate
// (exported for unit tests), and the spawn-plan kind tag this module emits.
module.exports = {
  dispatchWorkerTasks,
  _areFileDisjoint,
  SPAWN_PLAN_KIND,
};
|
|
149
|
+
|
|
150
|
+
// CLI entry point. Usage:
//   gsd-t-worker-dispatch --parent-session <id> --tasks <tasks.json> [--max-parallel N]
// Reads a JSON array of tasks, runs dispatchWorkerTasks from the current
// working directory, prints the result object as one JSON line on stdout.
// Exit codes: 0 = all task exit codes ok (or dispatch refused / fell back),
// 1 = at least one task reported a non-zero exitCode or dispatch threw,
// 2 = bad invocation (missing flags, unreadable or malformed tasks file).
if (require.main === module) {
  (async () => {
    const fs = require('fs');
    const argv = process.argv.slice(2);
    let parentSessionId = null;
    let tasksPath = null;
    let maxParallel = DEFAULT_MAX_PARALLEL;
    // Minimal flag parser: each recognized flag consumes the next argv slot.
    for (let i = 0; i < argv.length; i++) {
      const a = argv[i];
      if (a === '--parent-session') {
        parentSessionId = argv[++i];
      } else if (a === '--tasks') {
        tasksPath = argv[++i];
      } else if (a === '--max-parallel') {
        // Silently keep the default when the value is not a positive integer.
        const n = parseInt(argv[++i], 10);
        if (Number.isFinite(n) && n > 0) maxParallel = n;
      }
    }
    if (!parentSessionId) {
      process.stderr.write('error: --parent-session required\n');
      process.exit(2);
    }
    if (!tasksPath) {
      process.stderr.write('error: --tasks required\n');
      process.exit(2);
    }
    // Read and validate the tasks file: must parse as a JSON array.
    let raw;
    try {
      raw = fs.readFileSync(tasksPath, 'utf8');
    } catch (e) {
      process.stderr.write(`error: cannot read tasks file ${tasksPath}: ${(e && e.message) || e}\n`);
      process.exit(2);
    }
    let tasks;
    try {
      tasks = JSON.parse(raw);
    } catch (e) {
      process.stderr.write(`error: malformed tasks JSON: ${(e && e.message) || e}\n`);
      process.exit(2);
    }
    if (!Array.isArray(tasks)) {
      process.stderr.write('error: tasks JSON must be an array\n');
      process.exit(2);
    }
    const projectDir = process.cwd();
    try {
      const result = await dispatchWorkerTasks({
        projectDir,
        parentSessionId,
        tasks,
        maxParallel,
      });
      // One JSON line on stdout is the machine-readable contract.
      process.stdout.write(JSON.stringify(result) + '\n');
      // A task with exitCode null/undefined is not counted as failed —
      // only an explicit non-zero exitCode flips the process exit to 1.
      const anyFailed = Array.isArray(result && result.taskResults)
        && result.taskResults.some((r) => r && (r.exitCode !== 0 && r.exitCode != null));
      process.exit(anyFailed ? 1 : 0);
    } catch (e) {
      process.stderr.write(`error: dispatch threw: ${(e && e.message) || e}\n`);
      process.exit(1);
    }
  })();
}
|
|
@@ -73,7 +73,9 @@ let _deprecatedWatchWarned = false;
|
|
|
73
73
|
* sessionContext?: object,
|
|
74
74
|
* sessionId?: string,
|
|
75
75
|
* watch?: boolean,
|
|
76
|
-
* spawnType?: 'primary' | 'validation'
|
|
76
|
+
* spawnType?: 'primary' | 'validation',
|
|
77
|
+
* env?: object,
|
|
78
|
+
* workerModel?: string
|
|
77
79
|
* }} opts
|
|
78
80
|
* @returns {{ id: string | null, pid: number | null, logPath: string | null, timestamp: string, mode: 'headless' | 'in-context' }}
|
|
79
81
|
*/
|
|
@@ -83,6 +85,12 @@ function autoSpawnHeadless(opts) {
|
|
|
83
85
|
const continue_from = opts.continue_from || ".";
|
|
84
86
|
const projectDir = opts.projectDir || process.cwd();
|
|
85
87
|
const context = opts.context || opts.sessionContext || null;
|
|
88
|
+
// M44 D9 Step 3 — optional per-call env overrides layered over the inherited
|
|
89
|
+
// process.env. Used by `runDispatch` in gsd-t-parallel.cjs to forward
|
|
90
|
+
// GSD_T_WORKER_TASK_IDS / _WORKER_INDEX / _WORKER_TOTAL to each fan-out
|
|
91
|
+
// child so the child knows which task subset to handle. Purely additive —
|
|
92
|
+
// callers that don't pass `env` get the pre-M44-D9 behavior unchanged.
|
|
93
|
+
const envOverride = (opts.env && typeof opts.env === "object") ? opts.env : null;
|
|
86
94
|
// M43 D4 — `watch` is accepted for caller backward-compat but IGNORED.
|
|
87
95
|
// `inSession` was never shipped; accept+ignore for the same reason.
|
|
88
96
|
// Under headless-default-contract v2.0.0 every spawn goes headless; the
|
|
@@ -129,6 +137,24 @@ function autoSpawnHeadless(opts) {
|
|
|
129
137
|
/* best-effort; fall through without banner port info */
|
|
130
138
|
}
|
|
131
139
|
|
|
140
|
+
// M46 follow-up — Date + version banner. Printed before the transcript URL
|
|
141
|
+
// so multi-day-old read-backs are immediately dated. Best-effort.
|
|
142
|
+
try {
|
|
143
|
+
const { dateStamp } = require("../scripts/gsd-t-update-check.js");
|
|
144
|
+
const fsLocal = require("fs");
|
|
145
|
+
const osLocal = require("os");
|
|
146
|
+
const pathLocal = require("path");
|
|
147
|
+
let v = "unknown";
|
|
148
|
+
try {
|
|
149
|
+
v = fsLocal.readFileSync(
|
|
150
|
+
pathLocal.join(osLocal.homedir(), ".claude/.gsd-t-version"), "utf8"
|
|
151
|
+
).trim();
|
|
152
|
+
} catch (_) { /* fall through with "unknown" */ }
|
|
153
|
+
process.stdout.write(`${dateStamp()}GSD-T v${v} — CURRENT\n`);
|
|
154
|
+
} catch (_) {
|
|
155
|
+
/* best-effort — never crash the spawn on banner failure */
|
|
156
|
+
}
|
|
157
|
+
|
|
132
158
|
// M43 D6-T3 — Live transcript URL banner. Printed for every spawn so the
|
|
133
159
|
// viewer at :PORT is "the" primary watching surface. Never throws.
|
|
134
160
|
// Text is coordinated with D4 — exact line shape is part of
|
|
@@ -195,6 +221,23 @@ function autoSpawnHeadless(opts) {
|
|
|
195
221
|
if (process.env.GSD_T_AGENT_ID) {
|
|
196
222
|
workerEnv.GSD_T_PARENT_AGENT_ID = process.env.GSD_T_AGENT_ID;
|
|
197
223
|
}
|
|
224
|
+
// M44 D9 Step 3 — caller-supplied env overrides (e.g., fan-out task ids).
|
|
225
|
+
// Applied AFTER the canonical GSD_T_* keys so a caller can override any
|
|
226
|
+
// of them if needed; opaque for everything else.
|
|
227
|
+
if (envOverride) {
|
|
228
|
+
for (const [k, v] of Object.entries(envOverride)) {
|
|
229
|
+
if (v == null) continue;
|
|
230
|
+
workerEnv[k] = String(v);
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
// Worker-model override (v3.18.18) — let `runDispatch` fan-outs default to
|
|
234
|
+
// Sonnet while the orchestrator stays on whatever the parent runs (often
|
|
235
|
+
// Opus). Moves mechanical fan-out work onto a separate rate-limit bucket,
|
|
236
|
+
// raising the provider concurrency ceiling from ~3 to ~6+ per the
|
|
237
|
+
// Max-subscription concurrency analysis (2026-04-23).
|
|
238
|
+
if (typeof opts.workerModel === "string" && opts.workerModel) {
|
|
239
|
+
workerEnv.ANTHROPIC_MODEL = opts.workerModel;
|
|
240
|
+
}
|
|
198
241
|
|
|
199
242
|
const child = spawn("node", childArgs, {
|
|
200
243
|
cwd: projectDir,
|
|
@@ -19,29 +19,47 @@
|
|
|
19
19
|
|
|
20
20
|
"use strict";
|
|
21
21
|
|
|
22
|
+
// Match terminal markers, not narration. A bare "tests failed" substring will
// appear in healthy output ("0 tests failed", "no tests failed", quoted as an
// example in prose). Require either a non-zero count prefix or a structured
// terminal marker (start of line, uppercase-FAIL prefix, Jest-style summary).
// Bug history: M45 worker output contained "tests failed" 6× in narration,
// causing the supervisor to map exit 0 → exit 1 and halt a successful run.

// Non-zero failure count, e.g. "3 tests failed" — maps to exit 1 in
// mapHeadlessExitCode below. "0 tests failed" cannot match ([1-9] prefix).
const NONZERO_FAILURE_COUNT_RE =
  /(?:^|\b)([1-9]\d*)\s+(?:tests?|specs?|assertions?|examples?|suites?)\s+failed\b/i;
// Line starting with "FAIL:" or "FAIL " — structured runner output. → exit 1.
const STRUCTURED_FAIL_RE = /^FAIL[:\s]/m;
// Jest summary line, e.g. "Tests: 2 failed, 3 passed". → exit 1.
const JEST_SUMMARY_FAIL_RE = /^Tests:\s+\d+\s+failed/im;

// Verification-phrase matchers: require the phrase at a line boundary or
// preceded by a sentence-start punctuation — not mid-prose. Each phrase is
// distinctive enough that start-of-line / post-punctuation is a reliable
// terminal-marker signal. → exit 1.
const VERIFICATION_FAILED_RE =
  /(?:^|[.!?]\s+)(?:verification|verify|quality gate)\s+failed\b/im;

// Context-budget phrases — same polarity discipline. Tolerant of surrounding
// punctuation (— / :) but requires the phrase at a line boundary. → exit 2.
const CONTEXT_BUDGET_RE =
  /(?:^|[.!?]\s+)(?:context budget exceeded|context window exceeded|budget exceeded|token limit)\b/im;

// Blocker compound: "blocked" within 80 chars of a human-gate phrase, both
// anchored to recognizable boundaries. The 80-char proximity keeps unrelated
// mentions from compounding. → exit 4.
const BLOCKED_HUMAN_RE =
  /\bblocked\b[\s\S]{0,80}?\b(?:needs? human|human input|human approval)\b/i;
|
|
51
|
+
|
|
22
52
|
/**
 * Map a finished headless run to a GSD-T exit code by inspecting the
 * process exit code first, then scanning captured output for terminal
 * markers (see the regex constants above for the matching discipline).
 *
 * Precedence: 3 (process failed) > 5 (unknown command) > 2 (context
 * budget) > 4 (blocked on human) > 1 (verification/test failure) > 0.
 *
 * @param {number|null} processExitCode raw child exit code (null = no code)
 * @param {string} output combined captured output
 * @returns {number} GSD-T exit code 0–5
 */
function mapHeadlessExitCode(processExitCode, output) {
  // A genuinely failing process trumps every output heuristic.
  if (processExitCode !== 0 && processExitCode !== null) return 3;
  const raw = output || "";
  if (/^unknown command:/im.test(raw)) return 5;
  if (CONTEXT_BUDGET_RE.test(raw)) return 2;
  if (BLOCKED_HUMAN_RE.test(raw)) return 4;
  // Any one failure marker is enough for exit 1. None of these regexes
  // carries the /g flag, so .test() is stateless and safe to reuse.
  const failureMarkers = [
    VERIFICATION_FAILED_RE,
    NONZERO_FAILURE_COUNT_RE,
    STRUCTURED_FAIL_RE,
    JEST_SUMMARY_FAIL_RE,
  ];
  return failureMarkers.some((re) => re.test(raw)) ? 1 : 0;
}
|
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
'use strict';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* M44 Proof Measurement Driver (backlog #15 — T/2 criterion)
|
|
6
|
+
*
|
|
7
|
+
* Goal: prove v3.19.00's parallel dispatcher actually fans out concurrently.
|
|
8
|
+
*
|
|
9
|
+
* Method:
|
|
10
|
+
* 1. Build a temp project with a 4-task file-disjoint tasks.md fixture.
|
|
11
|
+
* 2. Inject a synthetic spawner into bin/gsd-t-parallel.cjs::runDispatch
|
|
12
|
+
* that launches test/fixtures/m44-proof/worker-sim.js as a detached
|
|
13
|
+
* child (same contract as the real autoSpawnHeadless).
|
|
14
|
+
* 3. Measure:
|
|
15
|
+
* T_seq = sum of per-worker durations when called sequentially.
|
|
16
|
+
* T_par = wall-clock span from first spawn_started to last .done
|
|
17
|
+
* marker when dispatched via runDispatch.
|
|
18
|
+
* 4. Criterion: T_par ≤ T_seq / 2 ( = parallelism_factor ≥ 2 for N=4 )
|
|
19
|
+
*
|
|
20
|
+
* This proves the DISPATCHER. It does NOT prove that 4 Claude workers
|
|
21
|
+
* produce correct code in T/2. That is a separate, API-budget-intensive
|
|
22
|
+
* experiment (backlog #15 follow-up). The dispatcher is the shipped code
|
|
23
|
+
* in v3.19.00, and its mechanics are what the tag certifies.
|
|
24
|
+
*/
|
|
25
|
+
|
|
26
|
+
const fs = require('fs');
|
|
27
|
+
const path = require('path');
|
|
28
|
+
const os = require('os');
|
|
29
|
+
const { spawn } = require('child_process');
|
|
30
|
+
|
|
31
|
+
const { runDispatch } = require(path.join(__dirname, 'gsd-t-parallel.cjs'));
|
|
32
|
+
const { writeSpawnPlan } = require(path.join(__dirname, 'spawn-plan-writer.cjs'));
|
|
33
|
+
const { markTaskDone, markSpawnEnded } = require(path.join(__dirname, 'spawn-plan-status-updater.cjs'));
|
|
34
|
+
|
|
35
|
+
// When invoked with --visualize, write spawn-plan files into the REAL project
|
|
36
|
+
// directory (not the temp fixture dir) so the live dashboard at :7455 can
|
|
37
|
+
// render the fan-out in real time. Off by default to keep the measurement
|
|
38
|
+
// directory clean.
|
|
39
|
+
const VISUALIZE = process.argv.includes('--visualize');
|
|
40
|
+
const REAL_PROJECT_DIR = path.resolve(__dirname, '..');
|
|
41
|
+
|
|
42
|
+
// ── fixture setup ───────────────────────────────────────────────────────────
|
|
43
|
+
|
|
44
|
+
// Create a throwaway project rooted in the OS temp dir, pre-populated with
// the 4-task file-disjoint tasks.md fixture plus the minimal .gsd-t layout
// (domains/spawns/events) that runDispatch expects. The caller is
// responsible for removing the returned root via cleanup().
// NOTE: workDurationMs is accepted for signature symmetry with the passes
// but is not consumed here (the worker reads WORKER_DURATION_MS from env).
function buildFixtureProject(workDurationMs) {
  const root = fs.mkdtempSync(path.join(os.tmpdir(), 'm44-proof-'));
  const subdirs = [
    path.join('.gsd-t', 'domains', 'm99-d1-proof'),
    path.join('.gsd-t', 'spawns'),
    path.join('.gsd-t', 'events'),
  ];
  for (const dir of subdirs) {
    fs.mkdirSync(path.join(root, dir), { recursive: true });
  }

  // Copy the canonical 4-task fixture into the temp project's domain dir.
  const fixturePath = path.join(
    __dirname, '..', 'test', 'fixtures', 'm44-proof', 'fixture.tasks.md'
  );
  fs.writeFileSync(
    path.join(root, '.gsd-t', 'domains', 'm99-d1-proof', 'tasks.md'),
    fs.readFileSync(fixturePath, 'utf8'),
  );

  // Single-wave partition so the dispatcher sees one fully-parallel wave.
  const partitionLines = [
    '# Partition — M99',
    '',
    '## Wave 1',
    '- m99-d1-proof (all tasks disjoint, no deps)',
    '',
  ];
  fs.writeFileSync(path.join(root, '.gsd-t', 'partition.md'), partitionLines.join('\n'));

  return root;
}
|
|
67
|
+
|
|
68
|
+
// Best-effort recursive removal of a temp tree; never throws.
function cleanup(root) {
  try {
    fs.rmSync(root, { recursive: true, force: true });
  } catch (_) {
    /* ignore */
  }
}
|
|
71
|
+
|
|
72
|
+
// ── spawner (injected into runDispatch) ────────────────────────────────────
|
|
73
|
+
|
|
74
|
+
// Build a synthetic spawner compatible with runDispatch's spawnHeadlessImpl
// injection point. Each call launches worker-sim.js as a detached child
// (mirroring the real autoSpawnHeadless contract) and records the launch in
// the shared `launched` array so the caller can correlate pids, task ids,
// and launch timestamps afterwards.
// Returns { spawner, launched }: `spawner` is the injectable function,
// `launched` is the live accumulator (mutated on every spawn).
function makeSpawner({ outDir, workerDurationMs }) {
  const launched = [];
  const spawner = ({ env }) => {
    // Layer dispatch-provided env over the real process env, plus the two
    // knobs worker-sim.js reads: where to drop its .done marker and how
    // long to simulate work for.
    const childEnv = Object.assign({}, process.env, env, {
      OUT_DIR: outDir,
      WORKER_DURATION_MS: String(workerDurationMs),
    });
    const workerPath = path.join(__dirname, '..', 'test', 'fixtures', 'm44-proof', 'worker-sim.js');
    // Detached + ignored stdio + unref: the child outlives this process's
    // event-loop interest, same lifecycle as a real headless spawn.
    const child = spawn(process.execPath, [workerPath], {
      env: childEnv, detached: true, stdio: 'ignore',
    });
    child.unref();
    const spawnId = 'm44-proof-' + env.GSD_T_WORKER_INDEX + '-' + Date.now();
    // assumes runDispatch always sets GSD_T_WORKER_TASK_IDS (comma-joined)
    // on the env it passes in — TODO confirm against runDispatch.
    const taskIds = env.GSD_T_WORKER_TASK_IDS.split(',');

    // Optional live-dashboard frames; failures are reported but non-fatal.
    if (VISUALIZE) {
      try {
        writeSpawnPlan({
          spawnId,
          kind: 'headless-detached',
          milestone: 'M99',
          wave: 'wave-1',
          domains: ['m99-d1-proof'],
          tasks: taskIds.map((id) => ({ id, title: 'Proof ' + id, status: 'in_flight' })),
          projectDir: REAL_PROJECT_DIR,
        });
      } catch (e) { process.stderr.write('[visualize] writeSpawnPlan failed: ' + e.message + '\n'); }
    }

    // hrtime.bigint() here gives a monotonic launch timestamp per worker.
    launched.push({
      spawnId, pid: child.pid,
      taskIds,
      launchedAt: process.hrtime.bigint(),
    });
    // Same return shape as autoSpawnHeadless (id/pid/logPath).
    return { id: spawnId, pid: child.pid, logPath: null };
  };
  return { spawner, launched };
}
|
|
112
|
+
|
|
113
|
+
// When --visualize is active, flip every launched spawn's tasks to done and
// close out the spawn on the live dashboard. No-op otherwise. Each spawn's
// update is independently best-effort: a failure is logged to stderr and
// the loop continues with the next spawn.
function markLaunchedDone(launched) {
  if (!VISUALIZE) return;
  for (const entry of launched) {
    try {
      entry.taskIds.forEach((taskId) => {
        markTaskDone({
          spawnId: entry.spawnId,
          taskId,
          projectDir: REAL_PROJECT_DIR,
          commit: 'proof-sim',
          tokens: null,
        });
      });
      markSpawnEnded({ spawnId: entry.spawnId, projectDir: REAL_PROJECT_DIR });
    } catch (e) {
      process.stderr.write('[visualize] mark-done failed: ' + e.message + '\n');
    }
  }
}
|
|
124
|
+
|
|
125
|
+
// ── wait for all .done markers ─────────────────────────────────────────────
|
|
126
|
+
|
|
127
|
+
// Poll `outDir` every 100 ms until at least `expectedCount` files ending in
// `.done` exist; resolves with their names. Throws after `timeoutMs`.
// A missing/unreadable directory counts as zero markers (keep polling).
async function waitForMarkers(outDir, expectedCount, timeoutMs = 60000) {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    let doneFiles = [];
    try {
      doneFiles = fs.readdirSync(outDir).filter((name) => name.endsWith('.done'));
    } catch {
      // Directory may not exist yet — treat as no markers.
    }
    if (doneFiles.length >= expectedCount) return doneFiles;
    await new Promise((wake) => setTimeout(wake, 100));
  }
  throw new Error('timeout waiting for ' + expectedCount + ' .done markers in ' + outDir);
}
|
|
138
|
+
|
|
139
|
+
// Parse every .done marker file in `outDir` as JSON and return the
// records ordered by ascending workerIndex.
function readMarkers(outDir) {
  const records = [];
  for (const name of fs.readdirSync(outDir)) {
    if (!name.endsWith('.done')) continue;
    records.push(JSON.parse(fs.readFileSync(path.join(outDir, name), 'utf8')));
  }
  return records.sort((a, b) => a.workerIndex - b.workerIndex);
}
|
|
145
|
+
|
|
146
|
+
// ── runs ────────────────────────────────────────────────────────────────────
|
|
147
|
+
|
|
148
|
+
// Parallel measurement pass: build a fixture project, inject the synthetic
// spawner into runDispatch, then measure the wall-clock span from just
// before dispatch (t0) to the moment all .done markers exist (t1). The
// t0 / dispatchReturnedAt / t1 capture points are the measurement itself.
// Throws if the dispatcher does not actually fan out (fanOutCount < 2).
async function runParallelPass(workerDurationMs) {
  const projectDir = buildFixtureProject(workerDurationMs);
  const outDir = path.join(projectDir, 'worker-out');
  fs.mkdirSync(outDir, { recursive: true });

  const { spawner, launched } = makeSpawner({ outDir, workerDurationMs });

  const t0 = process.hrtime.bigint();
  // NOTE(review): runDispatch is used synchronously here (no await) and its
  // fields are read immediately — presumably it returns a plain object, not
  // a Promise; confirm against gsd-t-parallel.cjs.
  const result = runDispatch({
    projectDir,
    command: 'gsd-t-execute',
    mode: 'unattended',
    env: { GSD_T_UNATTENDED: '1' },
    spawnHeadlessImpl: spawner,
  });
  const dispatchReturnedAt = process.hrtime.bigint();

  // No fan-out means there is nothing to measure — fail loudly.
  if (result.fanOutCount < 2) {
    cleanup(projectDir);
    throw new Error('parallel dispatch did not fan out (decision=' + result.decision + ', fanOutCount=' + result.fanOutCount + ')');
  }

  // Wait for every detached worker to drop its .done marker, then stop
  // the clock: T_par spans first spawn through last completion.
  await waitForMarkers(outDir, result.fanOutCount);
  const t1 = process.hrtime.bigint();

  markLaunchedDone(launched);

  const markers = readMarkers(outDir);
  // bigint nanoseconds → float milliseconds.
  const wallClockMs = Number(t1 - t0) / 1e6;
  const dispatchOverheadMs = Number(dispatchReturnedAt - t0) / 1e6;
  const perWorkerDurationMs = markers.map((m) => m.durationMs);
  const sumWorkerDurationMs = perWorkerDurationMs.reduce((a, b) => a + b, 0);

  cleanup(projectDir);
  return {
    mode: 'parallel',
    fanOutCount: result.fanOutCount,
    launched: launched.length,
    wallClockMs,
    dispatchOverheadMs,
    perWorkerDurationMs,
    sumWorkerDurationMs,
    workerPids: markers.map((m) => m.pid),
    markers,
  };
}
|
|
194
|
+
|
|
195
|
+
// Sequential baseline: run N workers back-to-back, same worker-sim.js.
// This is what a single-worker supervisor would do before v3.19.00.
// Each iteration awaits the child's exit before launching the next, so
// T_seq is the true serial wall-clock for the same simulated workload.
async function runSequentialPass(workerDurationMs, taskCount = 4) {
  const tmp = fs.mkdtempSync(path.join(os.tmpdir(), 'm44-proof-seq-'));
  const outDir = path.join(tmp, 'worker-out');
  fs.mkdirSync(outDir, { recursive: true });

  const t0 = process.hrtime.bigint();
  for (let i = 0; i < taskCount; i++) {
    // Promise-wrap the child lifecycle: resolve on clean exit, reject on
    // non-zero exit or spawn error (either aborts the whole baseline).
    await new Promise((resolve, reject) => {
      const child = spawn(process.execPath, [
        path.join(__dirname, '..', 'test', 'fixtures', 'm44-proof', 'worker-sim.js'),
      ], {
        // Same env contract as the parallel pass so the two runs are
        // comparable — one synthetic task id per worker.
        env: Object.assign({}, process.env, {
          OUT_DIR: outDir,
          WORKER_DURATION_MS: String(workerDurationMs),
          GSD_T_WORKER_INDEX: String(i),
          GSD_T_WORKER_TOTAL: String(taskCount),
          GSD_T_WORKER_TASK_IDS: 'M99-D1-T' + (i + 1),
        }),
        stdio: 'ignore',
      });
      child.on('exit', (code) => code === 0 ? resolve() : reject(new Error('worker exit ' + code)));
      child.on('error', reject);
    });
  }
  const t1 = process.hrtime.bigint();
  const markers = readMarkers(outDir);
  // bigint nanoseconds → float milliseconds.
  const wallClockMs = Number(t1 - t0) / 1e6;
  cleanup(tmp);

  return {
    mode: 'sequential',
    taskCount,
    wallClockMs,
    perWorkerDurationMs: markers.map((m) => m.durationMs),
    sumWorkerDurationMs: markers.map((m) => m.durationMs).reduce((a, b) => a + b, 0),
  };
}
|
|
234
|
+
|
|
235
|
+
// ── main ────────────────────────────────────────────────────────────────────
|
|
236
|
+
|
|
237
|
+
// Driver: run the sequential baseline, then the parallel pass, print a
// human-readable comparison, persist a JSON report to
// .gsd-t/m44-proof-report.json under the CURRENT working directory, and
// exit 0 iff the T/2 criterion (T_par ≤ T_seq / 2) is met, 1 otherwise.
async function main() {
  // Per-worker simulated duration; override via WORKER_DURATION_MS env.
  const workerDurationMs = parseInt(process.env.WORKER_DURATION_MS || '8000', 10);
  process.stdout.write('M44 Proof Measurement (worker duration=' + workerDurationMs + 'ms)\n');
  process.stdout.write('─'.repeat(60) + '\n');

  const seq = await runSequentialPass(workerDurationMs);
  process.stdout.write('Sequential (N=4 workers run back-to-back)\n');
  process.stdout.write(' T_seq (wall-clock): ' + seq.wallClockMs.toFixed(1) + ' ms\n');
  process.stdout.write(' sum(worker durations): ' + seq.sumWorkerDurationMs.toFixed(1) + ' ms\n');
  process.stdout.write('\n');

  const par = await runParallelPass(workerDurationMs);
  process.stdout.write('Parallel (runDispatch, N=' + par.fanOutCount + ' concurrent workers)\n');
  process.stdout.write(' T_par (wall-clock): ' + par.wallClockMs.toFixed(1) + ' ms\n');
  process.stdout.write(' dispatch overhead: ' + par.dispatchOverheadMs.toFixed(1) + ' ms\n');
  process.stdout.write(' sum(worker durations): ' + par.sumWorkerDurationMs.toFixed(1) + ' ms\n');
  process.stdout.write(' per-worker durations: [' + par.perWorkerDurationMs.map((x) => x.toFixed(0)).join(', ') + '] ms\n');
  process.stdout.write(' worker pids: [' + par.workerPids.join(', ') + ']\n');
  process.stdout.write('\n');

  // Derived metrics: ratio/speedup compare the two passes; the
  // parallelism factor compares busy time against wall-clock within the
  // parallel pass alone (ideal == fanOutCount).
  const ratio = par.wallClockMs / seq.wallClockMs;
  const speedup = seq.wallClockMs / par.wallClockMs;
  const parallelismFactor = par.sumWorkerDurationMs / par.wallClockMs;
  const criterion = par.wallClockMs <= seq.wallClockMs / 2;

  process.stdout.write('─'.repeat(60) + '\n');
  process.stdout.write('Result\n');
  process.stdout.write(' T_par / T_seq = ' + ratio.toFixed(3) + '\n');
  process.stdout.write(' speedup = ' + speedup.toFixed(2) + '×\n');
  process.stdout.write(' parallelism_factor = ' + parallelismFactor.toFixed(2) + ' (ideal = ' + par.fanOutCount + ')\n');
  process.stdout.write(' T/2 criterion = ' + (criterion ? 'MET ✓' : 'NOT MET ✗') + ' (T_par ≤ T_seq/2)\n');

  // Machine-readable record of everything printed above, plus raw markers.
  const report = {
    generatedAt: new Date().toISOString(),
    workerDurationMs,
    sequential: seq,
    parallel: par,
    ratio, speedup, parallelismFactor,
    criterionMet: criterion,
  };
  const reportPath = path.join(process.cwd(), '.gsd-t', 'm44-proof-report.json');
  fs.mkdirSync(path.dirname(reportPath), { recursive: true });
  fs.writeFileSync(reportPath, JSON.stringify(report, null, 2));
  process.stdout.write('\nReport written: ' + reportPath + '\n');

  process.exit(criterion ? 0 : 1);
}

// Any uncaught failure (timeout, worker exit, fixture I/O) → exit 2, which
// is distinct from the criterion-not-met exit 1 above.
main().catch((e) => { process.stderr.write('ERROR: ' + (e && e.stack || e) + '\n'); process.exit(2); });
|