wogiflow 2.29.2 → 2.29.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/docs/intent-grounded-reasoning.md +1 -1
- package/.workflow/templates/partials/methodology-rules.hbs +30 -1
- package/lib/commands/team-connection.js +5 -28
- package/lib/utils.js +12 -26
- package/lib/wogi-claude +40 -1
- package/lib/workspace.js +6 -13
- package/package.json +2 -2
- package/scripts/flow +4 -0
- package/scripts/flow-autonomous-detector.js +29 -4
- package/scripts/flow-autonomous-mode.js +27 -7
- package/scripts/flow-completion-summary.js +2 -16
- package/scripts/flow-id.js +31 -0
- package/scripts/flow-io.js +78 -0
- package/scripts/flow-long-input-pending.js +110 -0
- package/scripts/flow-long-input-stories.js +8 -0
- package/scripts/flow-orchestrate.js +16 -10
- package/scripts/flow-question-queue.js +73 -7
- package/scripts/flow-scanner-base.js +77 -1
- package/scripts/flow-session-state.js +47 -0
- package/scripts/flow-source-fidelity.js +279 -0
- package/scripts/flow-time-format.js +42 -0
- package/scripts/flow-utils.js +3 -16
- package/scripts/flow-worker-mcp-strip.js +12 -11
- package/scripts/flow-workspace-summary.js +38 -19
- package/scripts/hooks/adapters/claude-code.js +7 -4
- package/scripts/hooks/core/long-input-enforcement.js +311 -0
- package/scripts/hooks/core/pre-tool-deps.js +185 -0
- package/scripts/hooks/core/pre-tool-orchestrator.js +22 -0
- package/scripts/hooks/core/session-context.js +26 -0
- package/scripts/hooks/core/task-boundary-reset.js +13 -0
- package/scripts/hooks/core/worker-boundary-gate.js +67 -16
- package/scripts/hooks/entry/claude-code/pre-tool-use.js +21 -95
- package/scripts/hooks/entry/claude-code/user-prompt-submit.js +33 -0
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Wogi Flow — Source Fidelity Verifier (P11.5 mechanical check)
|
|
5
|
+
*
|
|
6
|
+
* Verifies that a spec file complies with the Source Fidelity Rule
|
|
7
|
+
* (methodology-rules.hbs / Logic Constitution v2 sub-principle 11.5):
|
|
8
|
+
*
|
|
9
|
+
* T1 — Verbatim source preserved (`## Original Request (verbatim)`)
|
|
10
|
+
* T2 — Item manifest reconciles every source item
|
|
11
|
+
* T3 — Per-item: each source item has either a matching AC or a
|
|
12
|
+
* defer-with-reason line (heuristic — full coverage check is
|
|
13
|
+
* adversary's job; this CLI is the Tier-2 evidence the
|
|
14
|
+
* adversary may invoke)
|
|
15
|
+
*
|
|
16
|
+
* Lossy spec-authoring is the documented root cause of the wogi-hub
|
|
17
|
+
* 2026-04-27 incident (5 of 12 user-named features survived from
|
|
18
|
+
* prompt → spec → build because the manager compressed the prompt
|
|
19
|
+
* into a 5-bullet contract). This verifier runs at spec_review and at
|
|
20
|
+
* the spec-write gate.
|
|
21
|
+
*
|
|
22
|
+
* Usage:
|
|
23
|
+
* node scripts/flow-source-fidelity.js check <spec-file>
|
|
24
|
+
* node scripts/flow-source-fidelity.js check <spec-file> --json
|
|
25
|
+
* node scripts/flow-source-fidelity.js check <spec-file> --strict
|
|
26
|
+
*
|
|
27
|
+
* Exit codes:
|
|
28
|
+
* 0 — spec passes (verbatim block present + item manifest present
|
|
29
|
+
* OR source is short enough to skip the rule)
|
|
30
|
+
* 1 — spec fails (T1 or T2 violated; details printed to stderr)
|
|
31
|
+
* 2 — couldn't read spec / not a spec file
|
|
32
|
+
*
|
|
33
|
+
* Programmatic:
|
|
34
|
+
* const { checkSourceFidelity } = require('./flow-source-fidelity');
|
|
35
|
+
* const result = checkSourceFidelity(specPath, { strict: false });
|
|
36
|
+
* // result: { ok, source, items, missing, warnings, exempt }
|
|
37
|
+
*/
|
|
38
|
+
|
|
39
|
+
'use strict';
|
|
40
|
+
|
|
41
|
+
const fs = require('node:fs');
// NOTE(review): `path` appears unused in the visible code of this file —
// confirm against the full file before removing.
const path = require('node:path');

// Section headings that mark the two compliance blocks inside a spec file
// (matched case-sensitively, one per line, via the /m flag).
const VERBATIM_HEADER_REGEX = /^##\s+Original Request \(verbatim\)\s*$/m;
const MANIFEST_HEADER_REGEX = /^##\s+Item Manifest\s*$/m;
// Triggers the rule when the verbatim source is "long" — same threshold
// as the long-input gate (40 lines OR ≥5 discrete items).
const LONG_LINE_THRESHOLD = 40;
const LONG_ITEM_THRESHOLD = 5;
/**
 * Count the discrete request items in a chunk of text.
 * A line counts as one item when it is a bullet ("- " / "* "), a numbered
 * entry ("1. " / "1) "), or contains at least two semicolons (an inline
 * semicolon-separated list). Each line counts at most once.
 * Non-string input yields 0.
 */
function detectDiscreteItems(text) {
  if (typeof text !== 'string') return 0;
  const isItemLine = (line) =>
    /^\s*[-*]\s+/.test(line) ||    // bullet
    /^\s*\d+[.)]\s+/.test(line) || // numbered
    /;.*;/.test(line);             // semicolon-list
  return text.split('\n').filter(isItemLine).length;
}
|
|
62
|
+
|
|
63
|
+
/**
 * Return the trimmed body of the markdown section whose "## " heading
 * matches headerRegex, or null when the heading is absent.
 * The body spans from the end of the heading match to the next "## "
 * heading at the same level (or end of content).
 */
function extractBlock(content, headerRegex) {
  const headerMatch = headerRegex.exec(content);
  if (headerMatch === null) return null;
  const bodyStart = headerMatch.index + headerMatch[0].length;
  const followingHeader = /^##\s+\S/m.exec(content.slice(bodyStart));
  const bodyEnd = followingHeader === null
    ? content.length
    : bodyStart + followingHeader.index;
  return content.slice(bodyStart, bodyEnd).trim();
}
|
|
73
|
+
|
|
74
|
+
/**
 * Locate the `## Original Request (verbatim)` block and summarize it.
 *
 * @param {string} content - full spec file text
 * @returns {{text: string, lineCount: number, itemCount: number}|null}
 *   null when the block is absent; lineCount counts non-empty lines only.
 */
function parseVerbatim(content) {
  const body = extractBlock(content, VERBATIM_HEADER_REGEX);
  if (body === null) return null;
  const nonEmptyLines = body.split('\n').filter((line) => line.trim());
  return {
    text: body,
    lineCount: nonEmptyLines.length,
    itemCount: detectDiscreteItems(body)
  };
}
|
|
87
|
+
|
|
88
|
+
/**
 * Parse the `## Item Manifest` block into mapping entries.
 * Each bullet line is expected to look like:
 *   - <item> → AC<n>
 *   - <item> → defer-with-reason: <reason>
 * Bullets without a "→" produce `mapping: null` so the caller can warn.
 * Non-bullet and blank lines are skipped.
 *
 * @returns {{entries: Array<{item: string, mapping: (string|null), raw: string}>, raw: string}|null}
 */
function parseManifest(content) {
  const block = extractBlock(content, MANIFEST_HEADER_REGEX);
  if (block === null) return null;
  const entries = [];
  for (const rawLine of block.split('\n')) {
    const trimmed = rawLine.trim();
    if (!trimmed) continue;
    if (!/^[-*]\s+/.test(trimmed)) continue;
    const body = trimmed.replace(/^[-*]\s+/, '');
    const arrowAt = body.indexOf('→');
    if (arrowAt < 0) {
      // No mapping arrow — record it so the caller can flag the entry.
      entries.push({ item: body, mapping: null, raw: rawLine });
    } else {
      entries.push({
        item: body.slice(0, arrowAt).trim(),
        mapping: body.slice(arrowAt + 1).trim(),
        raw: rawLine
      });
    }
  }
  return { entries, raw: block };
}
|
|
114
|
+
|
|
115
|
+
/**
 * Decide whether the Source Fidelity Rule applies to this spec.
 * The verbatim block, when present and over-threshold, is the canonical
 * long-form signal. Otherwise the spec body itself is measured against the
 * same thresholds — so an absent OR empty verbatim block cannot exempt a
 * spec whose surrounding content qualifies as long-form.
 */
function ruleApplies(content, verbatim) {
  const isLong = (lineCount, itemCount) =>
    lineCount > LONG_LINE_THRESHOLD || itemCount >= LONG_ITEM_THRESHOLD;

  if (verbatim && isLong(verbatim.lineCount, verbatim.itemCount)) {
    return true;
  }
  // Fall back to inspecting the whole spec body.
  const bodyLineCount = content.split('\n').filter((l) => l.trim()).length;
  return isLong(bodyLineCount, detectDiscreteItems(content));
}
|
|
134
|
+
|
|
135
|
+
/**
 * Main verification entry point.
 *
 * @param {string} specPath - path to the spec markdown file
 * @param {object} [options]
 * @param {boolean} [options.strict] - if true, T2 (manifest) is mandatory:
 *   a missing `## Item Manifest` becomes a blocking failure, not a warning
 * @returns {{ok, exempt, missing, warnings, verbatim, manifest, specPath}}
 */
function checkSourceFidelity(specPath, options = {}) {
  // Robustness fix: existsSync alone can race with deletion (TOCTOU), and
  // readFileSync also throws for directory paths (EISDIR) and permission
  // errors. Any read failure is reported as the documented
  // 'spec-file-not-found' result instead of crashing the gate.
  let content = null;
  if (specPath && fs.existsSync(specPath)) {
    try {
      content = fs.readFileSync(specPath, 'utf-8');
    } catch (_err) {
      content = null;
    }
  }
  if (content === null) {
    return { ok: false, missing: ['spec-file-not-found'], warnings: [], exempt: false, specPath };
  }

  const verbatim = parseVerbatim(content);
  const manifest = parseManifest(content);

  // Short specs are exempt — the rule only fires for long-form sources.
  if (!ruleApplies(content, verbatim)) {
    return {
      ok: true,
      exempt: true,
      reason: 'spec-below-long-input-threshold',
      verbatim, manifest, missing: [], warnings: [], specPath
    };
  }

  const missing = [];   // blocking failures (T1; T2 only under --strict)
  const warnings = [];  // advisory findings

  // T1 — verbatim source block must exist and be non-empty.
  if (!verbatim) {
    missing.push('T1: missing `## Original Request (verbatim)` block (rule applies — source is long-form)');
  } else if (verbatim.lineCount === 0) {
    missing.push('T1: `## Original Request (verbatim)` block is empty');
  }

  // T2 — item manifest: blocking only in strict mode; entries without a
  // "→" mapping are always surfaced as warnings.
  if (!manifest) {
    if (options.strict) {
      missing.push('T2: missing `## Item Manifest` block (--strict required)');
    } else {
      warnings.push('T2: no `## Item Manifest` block — recommended for full P11.5 compliance');
    }
  } else {
    for (const entry of manifest.entries) {
      if (!entry.mapping) {
        warnings.push(`T2: manifest entry "${entry.item}" has no mapping (expected "→ AC<n>" or "→ defer-with-reason: <reason>")`);
      }
    }
  }

  return {
    ok: missing.length === 0,
    exempt: false,
    missing,
    warnings,
    verbatim,
    manifest,
    specPath
  };
}
|
|
193
|
+
|
|
194
|
+
/**
 * Render a checkSourceFidelity() result for the CLI.
 * With opts.json, returns a slim JSON document; otherwise a
 * human-readable report.
 */
function formatResult(result, opts = {}) {
  if (opts.json) {
    const payload = {
      ok: result.ok,
      exempt: result.exempt || false,
      reason: result.reason || null,
      missing: result.missing,
      warnings: result.warnings,
      verbatim: result.verbatim
        ? { lineCount: result.verbatim.lineCount, itemCount: result.verbatim.itemCount }
        : null,
      manifestEntries: result.manifest ? result.manifest.entries.length : 0,
      specPath: result.specPath
    };
    return JSON.stringify(payload, null, 2);
  }

  const out = [
    `Source Fidelity Check — ${result.specPath}`,
    '━'.repeat(58)
  ];
  if (result.exempt) {
    out.push(`⏭ EXEMPT — ${result.reason || 'rule does not apply'}`);
    return out.join('\n');
  }
  out.push(result.ok ? '✓ PASS' : '✗ FAIL');
  out.push(result.verbatim
    ? ` Verbatim block: ${result.verbatim.lineCount} non-empty lines, ${result.verbatim.itemCount} discrete items`
    : ' Verbatim block: ABSENT');
  out.push(result.manifest
    ? ` Item manifest: ${result.manifest.entries.length} entries`
    : ' Item manifest: ABSENT');
  if (result.missing.length) {
    out.push('', 'Missing (BLOCKING):');
    for (const m of result.missing) out.push(` - ${m}`);
  }
  if (result.warnings.length) {
    out.push('', 'Warnings:');
    for (const w of result.warnings) out.push(` - ${w}`);
  }
  return out.join('\n');
}
|
|
245
|
+
|
|
246
|
+
// Public API — the helpers are exported alongside the main check so the
// spec gates and unit tests can exercise them individually.
module.exports = {
  checkSourceFidelity,
  parseVerbatim,
  parseManifest,
  ruleApplies,
  detectDiscreteItems,
  LONG_LINE_THRESHOLD,
  LONG_ITEM_THRESHOLD
};

// CLI entry point (usage documented in the header comment). Exit codes:
//   0 — pass or exempt, 1 — blocking T1/T2 failure, 2 — usage error.
if (require.main === module) {
  const [,, cmd, ...rest] = process.argv;
  if (cmd === 'check') {
    // First non-flag argument is the spec path; flags may appear anywhere.
    const target = rest.find(a => !a.startsWith('--'));
    const json = rest.includes('--json');
    const strict = rest.includes('--strict');
    if (!target) {
      process.stderr.write('Usage: flow-source-fidelity check <spec-file> [--json] [--strict]\n');
      process.exit(2);
    }
    const result = checkSourceFidelity(target, { strict });
    const formatted = formatResult(result, { json });
    if (result.ok || result.exempt) {
      process.stdout.write(formatted + '\n');
      process.exit(0);
    } else {
      // Failure reports go to stderr so stdout consumers (e.g. of --json
      // output) never receive a failed payload on the success stream.
      process.stderr.write(formatted + '\n');
      process.exit(1);
    }
  } else {
    process.stderr.write('Usage: flow-source-fidelity check <spec-file> [--json] [--strict]\n');
    process.exit(2);
  }
}
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Wogi Flow — Shared time-formatting helpers.
|
|
5
|
+
*
|
|
6
|
+
* Extracted from flow-completion-summary.js + flow-workspace-summary.js
|
|
7
|
+
* (CL-006 / 2026-04-26 review-fix). Both files implemented identical
|
|
8
|
+
* `formatDuration(startedAt, endedAt)` with already-creeping stylistic
|
|
9
|
+
* divergence. Single source of truth here.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
/**
 * Zero-pad a number's string form to at least two characters ("5" → "05").
 * Strings of length >= 2 are returned unchanged.
 */
function pad2(n) {
  const text = `${n}`;
  return text.length >= 2 ? text : `0${text}`;
}
|
|
15
|
+
|
|
16
|
+
/**
 * Format the wall-clock duration between two ISO timestamps as "m:ss"
 * ("h:mm:ss" once the duration reaches one hour). Missing inputs,
 * unparseable timestamps, and negative spans (clock skew) all render
 * as "0:00".
 *
 * @param {string} startedAt
 * @param {string} endedAt
 * @returns {string}
 */
function formatDuration(startedAt, endedAt) {
  if (!startedAt || !endedAt) return '0:00';
  // Date.parse(x) is equivalent to new Date(x).getTime(); NaN propagates
  // through the subtraction and is rejected by the finiteness guard.
  const elapsedMs = Date.parse(endedAt) - Date.parse(startedAt);
  if (!Number.isFinite(elapsedMs) || elapsedMs < 0) return '0:00';
  const totalSeconds = Math.floor(elapsedMs / 1000);
  const seconds = String(totalSeconds % 60).padStart(2, '0');
  const totalMinutes = Math.floor(totalSeconds / 60);
  if (totalMinutes < 60) {
    return `${totalMinutes}:${seconds}`;
  }
  const hours = Math.floor(totalMinutes / 60);
  const minutes = String(totalMinutes % 60).padStart(2, '0');
  return `${hours}:${minutes}:${seconds}`;
}
|
|
38
|
+
|
|
39
|
+
// Shared exports — flow-completion-summary.js and flow-workspace-summary.js
// require this module instead of each carrying its own copy (CL-006).
module.exports = {
  pad2,
  formatDuration
};
|
package/scripts/flow-utils.js
CHANGED
|
@@ -129,6 +129,7 @@ const {
|
|
|
129
129
|
generatePlanId,
|
|
130
130
|
validateTaskId,
|
|
131
131
|
isLegacyTaskId,
|
|
132
|
+
isValidWogiId,
|
|
132
133
|
} = require('./flow-id');
|
|
133
134
|
|
|
134
135
|
// ============================================================
|
|
@@ -254,22 +255,8 @@ function invalidateReadyDataCache() {
|
|
|
254
255
|
* @param {string} id - ID to check
|
|
255
256
|
* @returns {boolean}
|
|
256
257
|
*/
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
// Standard task, sub-task, review fix (wf-cr-), review finding (wf-rv-)
|
|
260
|
-
if (/^wf-[a-f0-9]{8}(-\d{2})?$/i.test(id)) return true;
|
|
261
|
-
if (/^wf-cr-[a-f0-9]{6}$/i.test(id)) return true;
|
|
262
|
-
if (/^wf-rv-[a-f0-9]{8}$/i.test(id)) return true;
|
|
263
|
-
// Epic, feature, plan IDs
|
|
264
|
-
if (/^(ep|ft|pl)-[a-f0-9]{8}$/i.test(id)) return true;
|
|
265
|
-
// Slug format: wf-<alphanum>[<alphanum or hyphen>]*<alphanum>, 5-64 chars.
|
|
266
|
-
// For manager-dispatched descriptive IDs. Path-safe (no dots/separators).
|
|
267
|
-
// Keep this in sync with validateTaskId() 'slug' branch in flow-id.js.
|
|
268
|
-
if (/^wf-[a-z0-9][a-z0-9-]{0,60}[a-z0-9]$/i.test(id)) return true;
|
|
269
|
-
// Legacy format
|
|
270
|
-
if (/^(TASK|BUG)-\d{3,}$/i.test(id)) return true;
|
|
271
|
-
return false;
|
|
272
|
-
}
|
|
258
|
+
// isValidWogiId — extracted to flow-id.js (audit Story 12 partial — pattern
|
|
259
|
+
// validator). Re-exported below for backwards compat with 302 importers.
|
|
273
260
|
|
|
274
261
|
/**
|
|
275
262
|
* Validate all task IDs in a ready.json data object before writing.
|
|
@@ -38,6 +38,10 @@
|
|
|
38
38
|
|
|
39
39
|
const fs = require('node:fs');
|
|
40
40
|
const path = require('node:path');
|
|
41
|
+
// arch-004 (2026-04-26): use canonical safeJsonParse — no raw JSON.parse in
|
|
42
|
+
// scripts/. The .mcp.json file is user-controllable on disk; recursive
|
|
43
|
+
// proto-pollution scrub is the right default.
|
|
44
|
+
const { safeJsonParse } = require('./flow-io');
|
|
41
45
|
|
|
42
46
|
const CHANNEL_SERVER_NAME = 'wogi-workspace-channel';
|
|
43
47
|
|
|
@@ -48,17 +52,14 @@ const CHANNEL_SERVER_NAME = 'wogi-workspace-channel';
|
|
|
48
52
|
function extractChannelOnlyConfig(sourcePath) {
|
|
49
53
|
const empty = { mcpServers: {} };
|
|
50
54
|
if (!sourcePath || typeof sourcePath !== 'string') return empty;
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
} catch (_err) {
|
|
60
|
-
return empty;
|
|
61
|
-
}
|
|
55
|
+
// safeJsonParse: returns the empty fallback on missing file, malformed
|
|
56
|
+
// JSON, or prototype-pollution attempt. Replaces the previous fs.readFileSync
|
|
57
|
+
// + JSON.parse + try/catch trio.
|
|
58
|
+
const parsed = safeJsonParse(sourcePath, null);
|
|
59
|
+
if (!parsed || typeof parsed !== 'object' || !parsed.mcpServers) return empty;
|
|
60
|
+
const entry = parsed.mcpServers[CHANNEL_SERVER_NAME];
|
|
61
|
+
if (!entry || typeof entry !== 'object') return empty;
|
|
62
|
+
return { mcpServers: { [CHANNEL_SERVER_NAME]: entry } };
|
|
62
63
|
}
|
|
63
64
|
|
|
64
65
|
/**
|
|
@@ -40,14 +40,28 @@ function encodeBase64(payload) {
|
|
|
40
40
|
return Buffer.from(JSON.stringify(payload), 'utf-8').toString('base64');
|
|
41
41
|
}
|
|
42
42
|
|
|
43
|
+
// SEC-005 + arch-006 fix (2026-04-26): decode through the canonical
|
|
44
|
+
// safeJsonParseStringStrip helper instead of raw JSON.parse. Channel-dispatch
|
|
45
|
+
// bytes are attacker-influenceable (any process that can POST to the manager
|
|
46
|
+
// port can inject a forged ## COMPLETION-SUMMARY: line). Stripping
|
|
47
|
+
// __proto__/constructor/prototype recursively defangs prototype-pollution
|
|
48
|
+
// before validatePayload runs.
|
|
49
|
+
const { safeJsonParseStringStrip } = require('./flow-io');
|
|
50
|
+
|
|
43
51
|
function decodeBase64(s) {
|
|
52
|
+
let text;
|
|
44
53
|
try {
|
|
45
|
-
|
|
46
|
-
const text = buf.toString('utf-8');
|
|
47
|
-
return JSON.parse(text);
|
|
54
|
+
text = Buffer.from(s, 'base64').toString('utf-8');
|
|
48
55
|
} catch (err) {
|
|
49
|
-
throw new Error(`base64
|
|
56
|
+
throw new Error(`base64 decode failed: ${err.message}`);
|
|
57
|
+
}
|
|
58
|
+
// Sentinel — distinct object identity so callers can detect parse failure.
|
|
59
|
+
const FAIL = decodeBase64.__failSentinel || (decodeBase64.__failSentinel = Symbol('decode-fail'));
|
|
60
|
+
const parsed = safeJsonParseStringStrip(text, FAIL);
|
|
61
|
+
if (parsed === FAIL) {
|
|
62
|
+
throw new Error('base64-JSON decode failed: invalid JSON or unsafe payload');
|
|
50
63
|
}
|
|
64
|
+
return parsed;
|
|
51
65
|
}
|
|
52
66
|
|
|
53
67
|
/**
|
|
@@ -100,6 +114,11 @@ function parseChunked(lines) {
|
|
|
100
114
|
return { ok: false, error: 'lines must be a non-empty array' };
|
|
101
115
|
}
|
|
102
116
|
const fragments = [];
|
|
117
|
+
// CL-004 fix (2026-04-26): track seen indices to reject duplicate chunks.
|
|
118
|
+
// Without this, a replay or attacker-injected duplicate fragment silently
|
|
119
|
+
// overwrites fragments[n-1]; the missing-chunks check still passes (slot
|
|
120
|
+
// is non-undefined); the reassembled payload is corrupted/tampered.
|
|
121
|
+
const seen = new Set();
|
|
103
122
|
let total = null;
|
|
104
123
|
for (const line of lines) {
|
|
105
124
|
const m = CHUNK_PREFIX_REGEX.exec(line);
|
|
@@ -111,10 +130,17 @@ function parseChunked(lines) {
|
|
|
111
130
|
if (!Number.isInteger(n) || n < 1 || n > total) {
|
|
112
131
|
return { ok: false, error: `invalid chunk index: ${m[1]}` };
|
|
113
132
|
}
|
|
133
|
+
if (seen.has(n)) {
|
|
134
|
+
return { ok: false, error: `duplicate chunk index: ${n}` };
|
|
135
|
+
}
|
|
136
|
+
seen.add(n);
|
|
114
137
|
fragments[n - 1] = line.replace(CHUNK_PREFIX_REGEX, '');
|
|
115
138
|
}
|
|
139
|
+
// CL-004: use !==undefined instead of filter(Boolean) so empty-string
|
|
140
|
+
// fragments (legitimate edge case for short tail chunks) aren't miscounted.
|
|
116
141
|
if (fragments.length !== total || fragments.some(f => f === undefined)) {
|
|
117
|
-
|
|
142
|
+
const have = fragments.filter(f => f !== undefined).length;
|
|
143
|
+
return { ok: false, error: `missing chunks (have ${have} of ${total})` };
|
|
118
144
|
}
|
|
119
145
|
try {
|
|
120
146
|
const payload = decodeBase64(fragments.join(''));
|
|
@@ -216,19 +242,8 @@ function renderMultiWorker(summaries) {
|
|
|
216
242
|
return lines.join('\n');
|
|
217
243
|
}
|
|
218
244
|
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
const ms = new Date(endedAt).getTime() - new Date(startedAt).getTime();
|
|
222
|
-
if (!Number.isFinite(ms) || ms < 0) return '0:00';
|
|
223
|
-
const sec = Math.floor(ms / 1000);
|
|
224
|
-
const m = Math.floor(sec / 60);
|
|
225
|
-
const s = sec % 60;
|
|
226
|
-
if (m >= 60) {
|
|
227
|
-
const h = Math.floor(m / 60);
|
|
228
|
-
return `${h}:${String(m % 60).padStart(2, '0')}:${String(s).padStart(2, '0')}`;
|
|
229
|
-
}
|
|
230
|
-
return `${m}:${String(s).padStart(2, '0')}`;
|
|
231
|
-
}
|
|
245
|
+
// CL-006 (2026-04-26): consolidated formatDuration to flow-time-format.
|
|
246
|
+
const { formatDuration } = require('./flow-time-format');
|
|
232
247
|
|
|
233
248
|
module.exports = {
|
|
234
249
|
SINGLE_LINE_PREFIX,
|
|
@@ -245,7 +260,11 @@ module.exports = {
|
|
|
245
260
|
if (require.main === module) {
|
|
246
261
|
const [,, cmd, ...rest] = process.argv;
|
|
247
262
|
if (cmd === 'encode') {
|
|
248
|
-
const payload =
|
|
263
|
+
const payload = safeJsonParseStringStrip(rest.join(' '), null);
|
|
264
|
+
if (!payload) {
|
|
265
|
+
process.stderr.write('encode: invalid JSON or unsafe payload\n');
|
|
266
|
+
process.exit(1);
|
|
267
|
+
}
|
|
249
268
|
console.log(encodeMessage(payload).join('\n'));
|
|
250
269
|
} else if (cmd === 'parse') {
|
|
251
270
|
const r = parseMessage(rest.join(' '));
|
|
@@ -421,11 +421,14 @@ Run: /wogi-start ${coreResult.nextTaskId}`;
|
|
|
421
421
|
};
|
|
422
422
|
}
|
|
423
423
|
|
|
424
|
-
// Compose additionalContext from up to
|
|
425
|
-
// 1.
|
|
426
|
-
//
|
|
427
|
-
//
|
|
424
|
+
// Compose additionalContext from up to five pieces:
|
|
425
|
+
// 1. longInputEnforcement (P11.5 — placed FIRST so AI sees the
|
|
426
|
+
// forcing instruction before anything else)
|
|
427
|
+
// 2. systemReminder (research protocol) OR message (warning)
|
|
428
|
+
// 3. phasePrompt (phase-specific context)
|
|
429
|
+
// 4. overduePrompt (wf-d3e67abe — silent-halt surfacing, manager-only)
|
|
428
430
|
const pieces = [];
|
|
431
|
+
if (coreResult.longInputEnforcement) pieces.push(coreResult.longInputEnforcement);
|
|
429
432
|
if (coreResult.systemReminder) pieces.push(coreResult.systemReminder);
|
|
430
433
|
else if (coreResult.message) pieces.push(coreResult.message);
|
|
431
434
|
if (coreResult.phasePrompt) pieces.push(coreResult.phasePrompt);
|